From 2cbdf562674491f58defa1690cce10714cfa4424 Mon Sep 17 00:00:00 2001
From: dehnert
Date: Wed, 20 Nov 2013 12:45:31 +0100
Subject: [PATCH] Fixed some bugs in bit vector and vector set that prevented
 the MEC decomposition from functioning correctly.

Former-commit-id: 51b6d7eb1838380e204631bf3094c05e115ff923
---
 src/models/AbstractNondeterministicModel.h        |   2 +-
 src/models/MarkovAutomaton.h                      | 225 ++++++++++++------
 .../MarkovAutomatonSparseTransitionParser.cpp     |  23 +-
 src/storage/BitVector.h                           |   4 +-
 .../MaximalEndComponentDecomposition.cpp          |   6 +-
 src/storage/VectorSet.cpp                         |   4 +-
 src/storm.cpp                                     |   4 +-
 7 files changed, 176 insertions(+), 92 deletions(-)

diff --git a/src/models/AbstractNondeterministicModel.h b/src/models/AbstractNondeterministicModel.h
index 21de66e12..258fe74d0 100644
--- a/src/models/AbstractNondeterministicModel.h
+++ b/src/models/AbstractNondeterministicModel.h
@@ -237,7 +237,7 @@ namespace storm {
            this->choiceLabeling.reset(newChoiceLabeling);
        }
 
-    private:
+    protected:
        /*! A vector of indices mapping states to the choices (rows) in the transition matrix. */
        std::vector nondeterministicChoiceIndices;
    };

diff --git a/src/models/MarkovAutomaton.h b/src/models/MarkovAutomaton.h
index 1c19dbcc8..a26c39892 100644
--- a/src/models/MarkovAutomaton.h
+++ b/src/models/MarkovAutomaton.h
@@ -28,7 +28,7 @@ namespace storm {
                        boost::optional> const& optionalStateRewardVector, boost::optional> const& optionalTransitionRewardMatrix,
                        boost::optional>> const& optionalChoiceLabeling)
                : AbstractNondeterministicModel(transitionMatrix, stateLabeling, nondeterministicChoiceIndices, optionalStateRewardVector, optionalTransitionRewardMatrix, optionalChoiceLabeling),
-                markovianStates(markovianStates), exitRates(exitRates), isClosed(false) {
+                markovianStates(markovianStates), exitRates(exitRates), closed(false) {
            if (this->hasTransitionRewards()) {
                if (!this->getTransitionRewardMatrix().isSubmatrixOf(this->getTransitionMatrix())) {
                    LOG4CPLUS_ERROR(logger, "Transition reward matrix is not a submatrix of the transition matrix, i.e. there are rewards for transitions that do not exist.");
@@ -45,7 +45,7 @@ namespace storm {
                        boost::optional>&& optionalTransitionRewardMatrix, boost::optional>>&& optionalChoiceLabeling)
                : AbstractNondeterministicModel(std::move(transitionMatrix), std::move(stateLabeling), std::move(nondeterministicChoiceIndices), std::move(optionalStateRewardVector), std::move(optionalTransitionRewardMatrix),
-                                               std::move(optionalChoiceLabeling)), markovianStates(markovianStates), exitRates(std::move(exitRates)), isClosed(false) {
+                                               std::move(optionalChoiceLabeling)), markovianStates(markovianStates), exitRates(std::move(exitRates)), closed(false) {
            if (this->hasTransitionRewards()) {
                if (!this->getTransitionRewardMatrix().isSubmatrixOf(this->getTransitionMatrix())) {
                    LOG4CPLUS_ERROR(logger, "Transition reward matrix is not a submatrix of the transition matrix, i.e. there are rewards for transitions that do not exist.");
@@ -54,11 +54,11 @@ namespace storm {
            }
        }
 
-        MarkovAutomaton(MarkovAutomaton const& markovAutomaton) : AbstractNondeterministicModel(markovAutomaton), markovianStates(markovAutomaton.markovianStates), exitRates(markovAutomaton.exitRates), isClosed(markovAutomaton.isClosed) {
+        MarkovAutomaton(MarkovAutomaton const& markovAutomaton) : AbstractNondeterministicModel(markovAutomaton), markovianStates(markovAutomaton.markovianStates), exitRates(markovAutomaton.exitRates), closed(markovAutomaton.closed) {
            // Intentionally left empty.
        }
 
-        MarkovAutomaton(MarkovAutomaton&& markovAutomaton) : AbstractNondeterministicModel(std::move(markovAutomaton)), markovianStates(std::move(markovAutomaton.markovianStates)), exitRates(std::move(markovAutomaton.exitRates)), isClosed(markovAutomaton.isClosed) {
+        MarkovAutomaton(MarkovAutomaton&& markovAutomaton) : AbstractNondeterministicModel(std::move(markovAutomaton)), markovianStates(std::move(markovAutomaton.markovianStates)), exitRates(std::move(markovAutomaton.exitRates)), closed(markovAutomaton.closed) {
            // Intentionally left empty.
        }
 
@@ -69,88 +69,81 @@ namespace storm {
        storm::models::ModelType getType() const {
            return MA;
        }
-
-        virtual void writeDotToStream(std::ostream& outStream, bool includeLabeling = true, storm::storage::BitVector const* subsystem = nullptr, std::vector const* firstValue = nullptr, std::vector const* secondValue = nullptr, std::vector const* stateColoring = nullptr, std::vector const* colors = nullptr, std::vector* scheduler = nullptr, bool finalizeOutput = true) const override {
-            AbstractModel::writeDotToStream(outStream, includeLabeling, subsystem, firstValue, secondValue, stateColoring, colors, scheduler, false);
-
-            // Write the probability distributions for all the states.
-            auto rowIt = this->transitionMatrix.begin();
-            for (uint_fast64_t state = 0, highestStateIndex = this->getNumberOfStates() - 1; state <= highestStateIndex; ++state) {
-                uint_fast64_t rowCount = this->getNondeterministicChoiceIndices()[state + 1] - this->getNondeterministicChoiceIndices()[state];
-                bool highlightChoice = true;
-
-                // For this, we need to iterate over all available nondeterministic choices in the current state.
-                for (uint_fast64_t row = 0; row < rowCount; ++row, ++rowIt) {
-                    if (scheduler != nullptr) {
-                        // If the scheduler picked the current choice, we will not make it dotted, but highlight it.
-                        if ((*scheduler)[state] == row) {
-                            highlightChoice = true;
-                        } else {
-                            highlightChoice = false;
-                        }
-                    }
-
-                    // If it's not a Markovian state or the current row is the first choice for this state, then we
-                    // are dealing with a probabilitic choice.
-                    if (!markovianStates.get(state) || row != 0) {
-                        // For each nondeterministic choice, we draw an arrow to an intermediate node to better display
-                        // the grouping of transitions.
-                        outStream << "\t\"" << state << "c" << row << "\" [shape = \"point\"";
-
-                        // If we were given a scheduler to highlight, we do so now.
-                        if (scheduler != nullptr) {
-                            if (highlightChoice) {
-                                outStream << ", fillcolor=\"red\"";
-                            }
-                        }
-                        outStream << "];" << std::endl;
+
+        bool isClosed() const {
+            return closed;
+        }
+
+        bool isHybridState(uint_fast64_t state) const {
+            return isMarkovianState(state) && (this->getNondeterministicChoiceIndices()[state + 1] - this->getNondeterministicChoiceIndices()[state] > 1);
+        }
 
-                        outStream << "\t" << state << " -> \"" << state << "c" << row << "\"";
+        bool isMarkovianState(uint_fast64_t state) const {
+            return this->markovianStates.get(state);
+        }
+
+        bool isProbabilisticState(uint_fast64_t state) const {
+            return !this->markovianStates.get(state);
+        }
 
-                        // If we were given a scheduler to highlight, we do so now.
-                        if (scheduler != nullptr) {
-                            if (highlightChoice) {
-                                outStream << " [color=\"red\", penwidth = 2]";
-                            } else {
-                                outStream << " [style = \"dotted\"]";
-                            }
-                        }
-                        outStream << ";" << std::endl;
+        void close() {
+            if (!closed) {
+                // First, count the number of hybrid states to know how many Markovian choices
+                // will be removed.
+                uint_fast64_t numberOfHybridStates = 0;
+                for (uint_fast64_t state = 0; state < this->getNumberOfStates(); ++state) {
+                    if (this->isHybridState(state)) {
+                        ++numberOfHybridStates;
+                    }
+                }
+
+                // Then compute how many rows the new matrix is going to have.
+                uint_fast64_t newNumberOfRows = this->getNumberOfChoices() - numberOfHybridStates;
+
+                // Create the matrix for the new transition relation and the corresponding nondeterministic choice vector.
+                storm::storage::SparseMatrix newTransitionMatrix(newNumberOfRows, this->getNumberOfStates());
+                newTransitionMatrix.initialize();
+                std::vector newNondeterministicChoiceIndices(this->getNumberOfStates() + 1);
+
+                // Now copy over all choices that need to be kept.
+                uint_fast64_t currentChoice = 0;
+                for (uint_fast64_t state = 0; state < this->getNumberOfStates(); ++state) {
+                    // Record the new beginning of choices of this state.
+                    newNondeterministicChoiceIndices[state] = currentChoice;
+
+                    typename storm::storage::SparseMatrix::ConstRowIterator rowIt = this->transitionMatrix.begin(this->nondeterministicChoiceIndices[state]), rowIte = this->transitionMatrix.end(this->nondeterministicChoiceIndices[state + 1] - 1);
+
+                    // If we are currently treating a hybrid state, we need to skip its first choice.
+                    if (this->isHybridState(state)) {
+                        ++rowIt;
-                        // Now draw all probabilitic arcs that belong to this nondeterminstic choice.
-                        for (auto transitionIt = rowIt.begin(), transitionIte = rowIt.end(); transitionIt != transitionIte; ++transitionIt) {
-                            if (subsystem == nullptr || subsystem->get(transitionIt.column())) {
-                                outStream << "\t\"" << state << "c" << row << "\" -> " << transitionIt.column() << " [ label= \"" << transitionIt.value() << "\" ]";
-
-                                // If we were given a scheduler to highlight, we do so now.
-                                if (scheduler != nullptr) {
-                                    if (highlightChoice) {
-                                        outStream << " [color=\"red\", penwidth = 2]";
-                                    } else {
-                                        outStream << " [style = \"dotted\"]";
-                                    }
-                                }
-                                outStream << ";" << std::endl;
-                            }
-                        }
-                    } else {
-                        // In this case we are emitting a Markovian choice, so draw the arrows directly to the target states.
-                        for (auto transitionIt = rowIt.begin(), transitionIte = rowIt.end(); transitionIt != transitionIte; ++transitionIt) {
-                            if (subsystem == nullptr || subsystem->get(transitionIt.column())) {
-                                outStream << "\t\"" << state << "\" -> " << transitionIt.column() << " [ label= \"" << transitionIt.value() << "\" ]";
-                            }
+                        // Remove the Markovian state marking.
+                        this->markovianStates.set(state, false);
+                    }
+
+                    for (; rowIt != rowIte; ++rowIt) {
+                        for (typename storm::storage::SparseMatrix::ConstIterator succIt = rowIt.begin(), succIte = rowIt.end(); succIt != succIte; ++succIt) {
+                            newTransitionMatrix.insertNextValue(currentChoice, succIt.column(), succIt.value());
                        }
+                        ++currentChoice;
                    }
                }
-
-            if (finalizeOutput) {
-                outStream << "}" << std::endl;
+
+                // Put a sentinel element at the end.
+                newNondeterministicChoiceIndices.back() = currentChoice;
+
+                // Finalize the matrix and put the new transition data in place.
+                newTransitionMatrix.finalize();
+                this->transitionMatrix = std::move(newTransitionMatrix);
+                this->nondeterministicChoiceIndices = std::move(newNondeterministicChoiceIndices);
+
+                // Mark the automaton as closed.
+                closed = true;
            }
        }
 
        virtual std::shared_ptr> applyScheduler(storm::storage::Scheduler const& scheduler) const override {
-            if (!isClosed) {
+            if (!closed) {
                throw storm::exceptions::InvalidStateException() << "Applying a scheduler to a non-closed Markov automaton is illegal; it needs to be closed first.";
            }
@@ -165,12 +158,90 @@ namespace storm {
            return std::shared_ptr>(new MarkovAutomaton(newTransitionMatrix, this->getStateLabeling(), nondeterministicChoiceIndices, markovianStates, exitRates, this->hasStateRewards() ? this->getStateRewardVector() : boost::optional>(), this->hasTransitionRewards() ? this->getTransitionRewardMatrix() : boost::optional>(), this->hasChoiceLabeling() ? this->getChoiceLabeling() : boost::optional>>()));
        }
+
+        virtual void writeDotToStream(std::ostream& outStream, bool includeLabeling = true, storm::storage::BitVector const* subsystem = nullptr, std::vector const* firstValue = nullptr, std::vector const* secondValue = nullptr, std::vector const* stateColoring = nullptr, std::vector const* colors = nullptr, std::vector* scheduler = nullptr, bool finalizeOutput = true) const override {
+            AbstractModel::writeDotToStream(outStream, includeLabeling, subsystem, firstValue, secondValue, stateColoring, colors, scheduler, false);
+
+            // Write the probability distributions for all the states.
+            auto rowIt = this->transitionMatrix.begin();
+            for (uint_fast64_t state = 0, highestStateIndex = this->getNumberOfStates() - 1; state <= highestStateIndex; ++state) {
+                uint_fast64_t rowCount = this->getNondeterministicChoiceIndices()[state + 1] - this->getNondeterministicChoiceIndices()[state];
+                bool highlightChoice = true;
+
+                // For this, we need to iterate over all available nondeterministic choices in the current state.
+                for (uint_fast64_t row = 0; row < rowCount; ++row, ++rowIt) {
+                    if (scheduler != nullptr) {
+                        // If the scheduler picked the current choice, we will not make it dotted, but highlight it.
+                        if ((*scheduler)[state] == row) {
+                            highlightChoice = true;
+                        } else {
+                            highlightChoice = false;
+                        }
+                    }
+
+                    // If it's not a Markovian state or the current row is the first choice for this state, then we
+                    // are dealing with a probabilitic choice.
+                    if (!markovianStates.get(state) || row != 0) {
+                        // For each nondeterministic choice, we draw an arrow to an intermediate node to better display
+                        // the grouping of transitions.
+                        outStream << "\t\"" << state << "c" << row << "\" [shape = \"point\"";
+
+                        // If we were given a scheduler to highlight, we do so now.
+                        if (scheduler != nullptr) {
+                            if (highlightChoice) {
+                                outStream << ", fillcolor=\"red\"";
+                            }
+                        }
+                        outStream << "];" << std::endl;
+
+                        outStream << "\t" << state << " -> \"" << state << "c" << row << "\"";
+
+                        // If we were given a scheduler to highlight, we do so now.
+                        if (scheduler != nullptr) {
+                            if (highlightChoice) {
+                                outStream << " [color=\"red\", penwidth = 2]";
+                            } else {
+                                outStream << " [style = \"dotted\"]";
+                            }
+                        }
+                        outStream << ";" << std::endl;
+
+                        // Now draw all probabilitic arcs that belong to this nondeterminstic choice.
+                        for (auto transitionIt = rowIt.begin(), transitionIte = rowIt.end(); transitionIt != transitionIte; ++transitionIt) {
+                            if (subsystem == nullptr || subsystem->get(transitionIt.column())) {
+                                outStream << "\t\"" << state << "c" << row << "\" -> " << transitionIt.column() << " [ label= \"" << transitionIt.value() << "\" ]";
+                                // If we were given a scheduler to highlight, we do so now.
+                                if (scheduler != nullptr) {
+                                    if (highlightChoice) {
+                                        outStream << " [color=\"red\", penwidth = 2]";
+                                    } else {
+                                        outStream << " [style = \"dotted\"]";
+                                    }
+                                }
+                                outStream << ";" << std::endl;
+                            }
+                        }
+                    } else {
+                        // In this case we are emitting a Markovian choice, so draw the arrows directly to the target states.
+                        for (auto transitionIt = rowIt.begin(), transitionIte = rowIt.end(); transitionIt != transitionIte; ++transitionIt) {
+                            if (subsystem == nullptr || subsystem->get(transitionIt.column())) {
+                                outStream << "\t\"" << state << "\" -> " << transitionIt.column() << " [ label= \"" << transitionIt.value() << "\" ]";
+                            }
+                        }
+                    }
+                }
+            }
+
+            if (finalizeOutput) {
+                outStream << "}" << std::endl;
+            }
+        }
 
    private:
        storm::storage::BitVector markovianStates;
        std::vector exitRates;
-        bool isClosed;
+        bool closed;
    };
 }

diff --git a/src/parser/MarkovAutomatonSparseTransitionParser.cpp b/src/parser/MarkovAutomatonSparseTransitionParser.cpp
index 5838a6fb3..3b3fce470 100644
--- a/src/parser/MarkovAutomatonSparseTransitionParser.cpp
+++ b/src/parser/MarkovAutomatonSparseTransitionParser.cpp
@@ -37,6 +37,9 @@ namespace storm {
                    LOG4CPLUS_ERROR(logger, "Found deadlock states (e.g. " << lastsource + 1 << ") during parsing. Please fix them or set the appropriate flag.");
                    throw storm::exceptions::WrongFormatException() << "Found deadlock states (e.g. " << lastsource + 1 << ") during parsing. Please fix them or set the appropriate flag.";
                }
+            } else if (source < lastsource) {
+                LOG4CPLUS_ERROR(logger, "Illegal state choice order. A choice of state " << source << " appears at an illegal position.");
+                throw storm::exceptions::WrongFormatException() << "Illegal state choice order. A choice of state " << source << " appears at an illegal position.";
            }
 
            ++result.numberOfChoices;
@@ -78,6 +81,7 @@ namespace storm {
            // Now that we have the source state and the information whether or not the current choice is probabilistic or Markovian, we need to read the list of successors and the probabilities/rates.
            bool hasSuccessorState = false;
            bool encounteredNewDistribution = false;
+            uint_fast64_t lastSuccessorState = 0;
 
            // At this point, we need to check whether there is an additional successor or we have reached the next choice for the same or a different state.
            do {
@@ -91,9 +95,6 @@ namespace storm {
                        encounteredEOF = true;
                    }
                } else if (buf[0] == '*') {
-                    // We need to record that we found at least one successor state for the current choice.
-                    hasSuccessorState = true;
-
                    // As we have encountered a "*", we know that there is an additional successor state for the current choice.
                    ++buf;
 
@@ -102,14 +103,22 @@ namespace storm {
                    if (target > result.highestStateIndex) {
                        result.highestStateIndex = target;
                    }
-
+                    if (hasSuccessorState && target <= lastSuccessorState) {
+                        LOG4CPLUS_ERROR(logger, "Illegal transition order for source state " << source << ".");
+                        throw storm::exceptions::WrongFormatException() << "Illegal transition order for source state " << source << ".";
+                    }
+
                    // And the corresponding probability/rate.
                    double val = checked_strtod(buf, &buf);
                    if (val <= 0.0) {
-                        LOG4CPLUS_ERROR(logger, "Illegal probability/rate value " << val << ".");
-                        throw storm::exceptions::WrongFormatException() << "Illegal probability/rate value " << val << ".";
+                        LOG4CPLUS_ERROR(logger, "Illegal probability/rate value for transition from " << source << " to " << target << ": " << val << ".");
+                        throw storm::exceptions::WrongFormatException() << "Illegal probability/rate value for transition from " << source << " to " << target << ": " << val << ".";
                    }
-
+
+                    // We need to record that we found at least one successor state for the current choice.
+                    hasSuccessorState = true;
+                    lastSuccessorState = target;
+
                    // As we found a new successor, we need to increase the number of nonzero entries.
                    ++result.numberOfNonzeroEntries;

diff --git a/src/storage/BitVector.h b/src/storage/BitVector.h
index 66f07dcf2..ea6c0a3b4 100644
--- a/src/storage/BitVector.h
+++ b/src/storage/BitVector.h
@@ -580,10 +580,10 @@ public:
    bool empty() const {
        for (uint_fast64_t i = 0; i < this->bucketCount; ++i) {
            if (this->bucketArray[i] != 0) {
-                return true;
+                return false;
            }
        }
-        return false;
+        return true;
    }
 
    /*!

diff --git a/src/storage/MaximalEndComponentDecomposition.cpp b/src/storage/MaximalEndComponentDecomposition.cpp
index 26035baf7..e033c06af 100644
--- a/src/storage/MaximalEndComponentDecomposition.cpp
+++ b/src/storage/MaximalEndComponentDecomposition.cpp
@@ -112,8 +112,10 @@ namespace storm {
                if (mecChanged) {
                    std::list::const_iterator eraseIterator(mecIterator);
                    for (StateBlock& scc : sccs) {
-                        endComponentStateSets.push_back(std::move(scc));
-                        ++mecIterator;
+                        if (!scc.empty()) {
+                            endComponentStateSets.push_back(std::move(scc));
+                            ++mecIterator;
+                        }
                    }
                    endComponentStateSets.erase(eraseIterator);
                } else {

diff --git a/src/storage/VectorSet.cpp b/src/storage/VectorSet.cpp
index 5fcffae4a..9c963a214 100644
--- a/src/storage/VectorSet.cpp
+++ b/src/storage/VectorSet.cpp
@@ -241,7 +241,7 @@ namespace storm {
 
        template
        void VectorSet::erase(VectorSet const& eraseSet) {
-            if (eraseSet.size() > 0) {
+            if (eraseSet.size() > 0 && this->size() > 0) {
                ensureSet();
                eraseSet.ensureSet();
 
@@ -249,7 +249,7 @@ namespace storm {
                    while (setIt != eraseSet.data.rend() && *setIt > *delIt) {
                        ++setIt;
                    }
-                    if (setIt != data.rend()) break;
+                    if (setIt == data.rend()) break;
 
                    if (*setIt == *delIt) {
                        data.erase((setIt + 1).base());

diff --git a/src/storm.cpp b/src/storm.cpp
index 6531bd008..880bccb46 100644
--- a/src/storm.cpp
+++ b/src/storm.cpp
@@ -469,7 +469,9 @@ int main(const int argc, const char* argv[]) {
            break;
        case storm::models::MA: {
            LOG4CPLUS_INFO(logger, "Model is a Markov automaton.");
-            storm::storage::MaximalEndComponentDecomposition mecDecomposition(*parser.getModel>());
+            std::shared_ptr> markovAutomaton = parser.getModel>();
+            markovAutomaton->close();
+            storm::storage::MaximalEndComponentDecomposition mecDecomposition(*markovAutomaton);
            break;
        }
        case storm::models::Unknown:
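
The storm.cpp hunk above establishes the call order that the other changes rely on: the Markov automaton is closed before its maximal end components are computed. A minimal usage sketch of that order follows; it is not part of the patch, and the include paths, the double value type and the helper function name are assumptions made for illustration only.

    // Hypothetical sketch mirroring the storm.cpp change above; include paths and
    // the <double> template arguments are assumed, not taken from this commit.
    #include <memory>
    #include "src/models/MarkovAutomaton.h"
    #include "src/storage/MaximalEndComponentDecomposition.h"

    void decomposeClosedMarkovAutomaton(std::shared_ptr<storm::models::MarkovAutomaton<double>> markovAutomaton) {
        // close() drops the Markovian choice of every hybrid state and clears its
        // Markovian marking; applyScheduler() throws on a non-closed automaton,
        // and storm.cpp now closes the automaton before building the decomposition.
        if (!markovAutomaton->isClosed()) {
            markovAutomaton->close();
        }

        // With the corrected BitVector::empty() and VectorSet::erase() above, the
        // decomposition no longer keeps empty end component state sets around.
        storm::storage::MaximalEndComponentDecomposition<double> mecDecomposition(*markovAutomaton);
    }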