From dabfb5e1dd77cbc1f11b94013adb32fad8616b04 Mon Sep 17 00:00:00 2001
From: dehnert
Date: Wed, 27 Nov 2013 16:07:39 +0100
Subject: [PATCH] First working version of LRA computation for Markov automata.

Former-commit-id: d6c6870fd8a1ce7f5c27e19a14c8c7f4ffee05e1
---
 .../SparseMarkovAutomatonCslModelChecker.h    | 164 ++++++++++++++++--
 src/models/MarkovAutomaton.h                  |   2 +-
 src/storm.cpp                                 |   4 +-
 3 files changed, 157 insertions(+), 13 deletions(-)

diff --git a/src/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h b/src/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h
index 8a3b594ff..b648d3865 100644
--- a/src/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h
+++ b/src/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h
@@ -79,9 +79,16 @@
             // Now compute the long-run average for all end components in isolation.
             std::vector<ValueType> lraValuesForEndComponents;
-            for (storm::storage::MaximalEndComponent const& mec : mecDecomposition) {
-                std::unique_ptr<storm::solver::LpSolver> solver = storm::utility::solver::getLpSolver("Long-Run Average");
-                solver->setModelSense(storm::solver::LpSolver::MAXIMIZE);
+
+            // While doing so, we already gather some information for the following steps.
+            std::vector<uint_fast64_t> stateToMecIndexMap(this->getModel().getNumberOfStates());
+            storm::storage::BitVector statesInMecs(this->getModel().getNumberOfStates());
+
+            for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
+                storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];
+
+                std::unique_ptr<storm::solver::LpSolver> solver = storm::utility::solver::getLpSolver("LRA for MEC " + std::to_string(currentMecIndex));
+                solver->setModelSense(min ? storm::solver::LpSolver::MAXIMIZE : storm::solver::LpSolver::MINIMIZE);
 
                 // First, we need to create the variables for the problem.
                 std::map<uint_fast64_t, uint_fast64_t> stateToVariableIndexMap;
@@ -96,6 +103,10 @@
                 for (auto const& stateChoicesPair : mec) {
                     uint_fast64_t state = stateChoicesPair.first;
 
+                    // Gather information for later use.
+                    statesInMecs.set(state);
+                    stateToMecIndexMap[state] = currentMecIndex;
+
                     // Now, based on the type of the state, create a suitable constraint.
                     if (this->getModel().isMarkovianState(state)) {
                         variables.clear();
@@ -111,9 +122,9 @@
                         }
                         variables.push_back(lraValueVariableIndex);
-                        coefficients.push_back(this->getModel().getExitRate(state));
+                        coefficients.push_back(storm::utility::constGetOne<ValueType>() / this->getModel().getExitRate(state));
 
-                        solver->addConstraint("state" + std::to_string(state), variables, coefficients, storm::solver::LpSolver::LESS_EQUAL, storm::utility::constGetOne<ValueType>() / this->getModel().getExitRate(state));
+                        solver->addConstraint("state" + std::to_string(state), variables, coefficients, min ? storm::solver::LpSolver::LESS_EQUAL : storm::solver::LpSolver::GREATER_EQUAL, goalStates.get(state) ? storm::utility::constGetOne<ValueType>() / this->getModel().getExitRate(state) : storm::utility::constGetZero<ValueType>());
                     } else {
                         // For probabilistic states, we want to add the constraint x_s <= sum P(s, a, s') * x_s', where a is the current action
                         // and the sum ranges over all states s'.
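Aside (not part of the patch): the Markovian-state constraint above puts coefficient 1/E(s) on the LRA variable k and right-hand side 1/E(s) for goal states (0 otherwise), so at the optimum the constraints become tight and summing them over the MEC cancels the potentials x_s. A minimal standalone sketch of the resulting arithmetic for a two-state MEC, with assumed exit rates:

#include <iostream>

// Illustrative only: a MEC with two Markovian states, s0 (goal) and s1,
// cycling s0 -> s1 -> s0 with exit rates e0 and e1. The tight constraints
//     k/e0 + x0 = x1 + 1/e0      (s0 is a goal state)
//     k/e1 + x1 = x0             (s1 is not)
// sum to k * (1/e0 + 1/e1) = 1/e0, so k is the long-run fraction of time
// spent in the goal state.
int main() {
    double e0 = 2.0;  // exit rate of s0 (mean sojourn time 0.5)
    double e1 = 0.5;  // exit rate of s1 (mean sojourn time 2.0)
    double k = (1.0 / e0) / (1.0 / e0 + 1.0 / e1);
    std::cout << "LRA = " << k << std::endl;  // 0.5 / 2.5 = 0.2
}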
@@ -130,21 +141,154 @@
                                 coefficients.push_back(-element.value());
                             }
 
-                            solver->addConstraint("state" + std::to_string(state), variables, coefficients, storm::solver::LpSolver::LESS_EQUAL, storm::utility::constGetZero<ValueType>());
+                            solver->addConstraint("state" + std::to_string(state), variables, coefficients, min ? storm::solver::LpSolver::LESS_EQUAL : storm::solver::LpSolver::GREATER_EQUAL, storm::utility::constGetZero<ValueType>());
                         }
                     }
                 }
-                
+
                 solver->optimize();
                 lraValuesForEndComponents.push_back(solver->getContinuousValue(lraValueVariableIndex));
             }
 
-            return lraValuesForEndComponents;
+            // For fast transition rewriting, we build some auxiliary data structures.
+            storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
+            uint_fast64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits();
+            uint_fast64_t lastStateNotInMecs = 0;
+            uint_fast64_t numberOfStatesNotInMecs = 0;
+            std::vector<uint_fast64_t> statesNotInMecsBeforeIndex;
+            statesNotInMecsBeforeIndex.reserve(this->getModel().getNumberOfStates());
+            for (auto state : statesNotContainedInAnyMec) {
+                while (lastStateNotInMecs <= state) {
+                    statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs);
+                    ++lastStateNotInMecs;
+                }
+                ++numberOfStatesNotInMecs;
+            }
+
+            // Finally, we are ready to create the SSP matrix and right-hand side of the SSP.
+            std::vector<ValueType> b;
+            std::vector<uint_fast64_t> sspNondeterministicChoiceIndices;
+            sspNondeterministicChoiceIndices.reserve(numberOfStatesNotInMecs + mecDecomposition.size() + 1);
+            storm::storage::SparseMatrix<ValueType> sspMatrix;
+            sspMatrix.initialize();
+
+            // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications).
+            uint_fast64_t currentChoice = 0;
+            for (auto state : statesNotContainedInAnyMec) {
+                sspNondeterministicChoiceIndices.push_back(currentChoice);
+
+                for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) {
+                    std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
+                    b.push_back(storm::utility::constGetZero<ValueType>());
+
+                    typename storm::storage::SparseMatrix<ValueType>::Rows row = transitionMatrix.getRow(choice);
+                    for (auto element : row) {
+                        if (statesNotContainedInAnyMec.get(element.column())) {
+                            // If the target state is not contained in an MEC, we can copy over the entry.
+                            sspMatrix.insertNextValue(currentChoice, statesNotInMecsBeforeIndex[element.column()], element.value());
+                        } else {
+                            // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
+                            // so that we are able to write the cumulative probability to the MEC into the matrix.
+                            auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.column()]] += element.value();
+                        }
+                    }
+
+                    // Now insert all (cumulative) probability values that target an MEC.
+                    for (uint_fast64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) {
+                        if (auxiliaryStateToProbabilityMap[mecIndex] != 0) {
+                            sspMatrix.insertNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]);
+                        }
+                    }
+                }
+            }
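Aside (not part of the patch): statesNotInMecsBeforeIndex is a prefix count, so entry s holds the number of non-MEC states with index smaller than s, which renumbers the surviving states to consecutive SSP indices with an O(1) lookup. A self-contained sketch of the same idea, with std::vector<bool> standing in for storm's BitVector and purely illustrative data:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // true = state belongs to some MEC, false = state does not (illustrative).
    std::vector<bool> statesInMecs = {false, true, true, false, true, false};

    // statesNotInMecsBeforeIndex[s] = number of non-MEC states with index < s.
    // This maps an original state index to its row group in the compacted SSP.
    std::vector<uint_fast64_t> statesNotInMecsBeforeIndex;
    statesNotInMecsBeforeIndex.reserve(statesInMecs.size());
    uint_fast64_t count = 0;
    for (bool inMec : statesInMecs) {
        statesNotInMecsBeforeIndex.push_back(count);
        if (!inMec) {
            ++count;
        }
    }

    // State 3 is the second non-MEC state, so it becomes SSP state 1.
    std::cout << statesNotInMecsBeforeIndex[3] << std::endl;  // prints 1
}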
+
+            // Now we are ready to construct the choices for the auxiliary states.
+            for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
+                storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
+                sspNondeterministicChoiceIndices.push_back(currentChoice);
+
+                for (auto const& stateChoicesPair : mec) {
+                    uint_fast64_t state = stateChoicesPair.first;
+                    storm::storage::VectorSet<uint_fast64_t> const& choicesInMec = stateChoicesPair.second;
+
+                    for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) {
+                        std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
+
+                        // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
+                        if (!choicesInMec.contains(choice)) {
+                            b.push_back(storm::utility::constGetZero<ValueType>());
+
+                            typename storm::storage::SparseMatrix<ValueType>::Rows row = transitionMatrix.getRow(choice);
+                            for (auto element : row) {
+                                if (statesNotContainedInAnyMec.get(element.column())) {
+                                    // If the target state is not contained in an MEC, we can copy over the entry.
+                                    sspMatrix.insertNextValue(currentChoice, statesNotInMecsBeforeIndex[element.column()], element.value());
+                                } else {
+                                    // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
+                                    // so that we are able to write the cumulative probability to the MEC into the matrix.
+                                    auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.column()]] += element.value();
+                                }
+                            }
+
+                            // Now insert all (cumulative) probability values that target an MEC.
+                            for (uint_fast64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) {
+                                if (auxiliaryStateToProbabilityMap[targetMecIndex] != 0) {
+                                    // If the target MEC is the same as the current one, instead of adding a transition, we need to add the weighted reward
+                                    // to the right-hand side vector of the SSP.
+                                    if (mecIndex == targetMecIndex) {
+                                        b.back() += auxiliaryStateToProbabilityMap[mecIndex] * lraValuesForEndComponents[mecIndex];
+                                    } else {
+                                        // Otherwise, we add a transition to the auxiliary state that is associated with the target MEC.
+                                        sspMatrix.insertNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]);
+                                    }
+                                }
+                            }
+
+                            ++currentChoice;
+                        }
+                    }
+                }
+
+                // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
+                sspMatrix.insertEmptyRow();
+                ++currentChoice;
+                b.push_back(lraValuesForEndComponents[mecIndex]);
+            }
+
+            // Add the sentinel element at the end.
+            sspNondeterministicChoiceIndices.push_back(currentChoice);
+
+            // Finalize the matrix and solve the corresponding system of equations.
+            sspMatrix.finalize();
+            std::vector<ValueType> x(numberOfStatesNotInMecs + mecDecomposition.size());
+            if (linearEquationSolver != nullptr) {
+                this->linearEquationSolver->solveEquationSystem(min, sspMatrix, x, b, sspNondeterministicChoiceIndices);
+            } else {
+                throw storm::exceptions::InvalidStateException() << "No valid linear equation solver available.";
+            }
+
+            // Prepare result vector.
+            std::vector<ValueType> result(this->getModel().getNumberOfStates());
+
+            // Set the values for states not contained in MECs.
+            uint_fast64_t stateIndex = 0;
+            for (auto state : statesNotContainedInAnyMec) {
+                result[state] = x[stateIndex];
+                ++stateIndex;
+            }
+
+            // Set the values for all states in MECs.
+            for (auto state : statesInMecs) {
+                result[state] = lraValuesForEndComponents[stateToMecIndexMap[state]];
+            }
+
+            return result;
         }
 
         std::vector<ValueType> checkExpectedTime(bool min, storm::storage::BitVector const& goalStates) const {
-            // TODO: check whether the Markov automaton is closed once again? Or should that rather be done when constructing the model checker?
-            // For now we just assume that it is closed already.
+            if (!this->getModel().isClosed()) {
+                throw storm::exceptions::InvalidArgumentException() << "Unable to compute expected time on non-closed Markov automaton.";
+            }
 
             // First, we need to check which states have infinite expected time (by definition).
             storm::storage::BitVector infinityStates;
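Aside (not part of the patch): solveEquationSystem receives the row groups in sspNondeterministicChoiceIndices and, conceptually, computes the fixed point x = opt_a(A_a * x + b_a), where opt minimizes or maximizes over each state's choices. A rough value-iteration sketch of that contract, using simple illustrative types rather than storm's actual solver classes:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

struct Entry {
    uint_fast64_t column;
    double value;
};

// Sketch of x = opt_a (A_a * x + b_a): rowGroupIndices[s]..rowGroupIndices[s+1]
// delimit the rows (choices) of state s, mirroring the role played by
// sspNondeterministicChoiceIndices in the patch. An empty row contributes just
// b[row], which is how the auxiliary MEC states keep their LRA value.
std::vector<double> solveEquationSystemSketch(bool min,
                                              std::vector<std::vector<Entry>> const& rows,
                                              std::vector<double> const& b,
                                              std::vector<uint_fast64_t> const& rowGroupIndices) {
    std::vector<double> x(rowGroupIndices.size() - 1, 0.0);
    bool converged = false;
    while (!converged) {
        converged = true;
        for (uint_fast64_t state = 0; state + 1 < rowGroupIndices.size(); ++state) {
            double best = min ? INFINITY : -INFINITY;
            // Evaluate every choice of the current state and keep the best one.
            for (uint_fast64_t row = rowGroupIndices[state]; row < rowGroupIndices[state + 1]; ++row) {
                double value = b[row];
                for (Entry const& entry : rows[row]) {
                    value += entry.value * x[entry.column];
                }
                best = min ? std::min(best, value) : std::max(best, value);
            }
            if (std::fabs(best - x[state]) > 1e-10) {
                converged = false;
            }
            x[state] = best;
        }
    }
    return x;
}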
diff --git a/src/models/MarkovAutomaton.h b/src/models/MarkovAutomaton.h
index 9ea225f59..67e2a8acf 100644
--- a/src/models/MarkovAutomaton.h
+++ b/src/models/MarkovAutomaton.h
@@ -249,7 +249,7 @@
                     // In this case we are emitting a Markovian choice, so draw the arrows directly to the target states.
                     for (auto transitionIt = rowIt.begin(), transitionIte = rowIt.end(); transitionIt != transitionIte; ++transitionIt) {
                         if (subsystem == nullptr || subsystem->get(transitionIt.column())) {
-                            outStream << "\t\"" << state << "\" -> " << transitionIt.column() << " [ label= \"" << transitionIt.value() << "\" ]";
+                            outStream << "\t\"" << state << "\" -> " << transitionIt.column() << " [ label= \"" << transitionIt.value() << " (" << this->exitRates[state] << ")\" ]";
                         }
                     }
                 }
diff --git a/src/storm.cpp b/src/storm.cpp
index e70721b4a..8668f01f5 100644
--- a/src/storm.cpp
+++ b/src/storm.cpp
@@ -438,7 +438,7 @@ int main(const int argc, const char* argv[]) {
 	}
 
 	//Should there be a counterexample generated in case the formula is not satisfied?
-	if(s->isSet("counterExample")) {
+	if(s->isSet("counterexample")) {
 
 		generateCounterExample(parser);
@@ -476,8 +476,8 @@
                 storm::modelchecker::csl::SparseMarkovAutomatonCslModelChecker<double> mc(*markovAutomaton, new storm::solver::AbstractNondeterministicLinearEquationSolver<double>());
                 std::cout << mc.checkExpectedTime(true, markovAutomaton->getLabeledStates("goal")) << std::endl;
                 std::cout << mc.checkExpectedTime(false, markovAutomaton->getLabeledStates("goal")) << std::endl;
+                std::cout << mc.checkLongRunAverage(true, markovAutomaton->getLabeledStates("goal")) << std::endl;
                 std::cout << mc.checkLongRunAverage(false, markovAutomaton->getLabeledStates("goal")) << std::endl;
-                break;
             }
             case storm::models::Unknown:
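Aside (not part of the patch): taken together, the patch computes one LP value per MEC and then an SSP in which each auxiliary MEC state may either keep the LRA value of its MEC (the empty row whose b entry is that value) or leave it through one of the copied choices. A toy walk-through with assumed numbers, one transient state and two MECs:

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
    // LRA values of the two MECs, as produced by the per-MEC LP phase
    // (illustrative numbers).
    std::vector<double> lra = {0.25, 0.8};

    // The single transient state has two actions:
    //   action 0 reaches MEC 0 and MEC 1 with probability 0.5 each,
    //   action 1 reaches MEC 1 with probability one.
    // Because every path eventually ends in a MEC, the SSP value of the state
    // is the best (resp. worst) expected MEC value over its actions.
    double action0 = 0.5 * lra[0] + 0.5 * lra[1];  // 0.525
    double action1 = 1.0 * lra[1];                 // 0.8
    std::cout << "max LRA = " << std::max(action0, action1) << std::endl;  // 0.8
    std::cout << "min LRA = " << std::min(action0, action1) << std::endl;  // 0.525
}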