diff --git a/src/formula/Csl/CslFilter.h b/src/formula/Csl/CslFilter.h index db59a59a7..1507d1c2b 100644 --- a/src/formula/Csl/CslFilter.h +++ b/src/formula/Csl/CslFilter.h @@ -12,13 +12,15 @@ #include "src/formula/Csl/AbstractCslFormula.h" #include "src/formula/Csl/AbstractPathFormula.h" #include "src/formula/Csl/AbstractStateFormula.h" +#include "src/modelchecker/csl/AbstractModelChecker.h" +#include "src/formula/Actions/MinMaxAction.h" namespace storm { namespace property { namespace csl { template -class CslFilter : storm::property::AbstractFilter { +class CslFilter : public storm::property::AbstractFilter { public: @@ -26,24 +28,28 @@ public: // Intentionally left empty. } - CslFilter(AbstractCslFormula* child) : child(child) { + CslFilter(AbstractCslFormula* child) : child(child) { // Intentionally left empty. } - CslFilter(AbstractCslFormula* child, action::Action* action) : child(child) { - actions.push_back(action); + CslFilter(AbstractCslFormula* child, bool minimize) : child(child) { + this->actions.push_back(new storm::property::action::MinMaxAction(minimize)); } - CslFilter(AbstractCslFormula* child, std::vector*> actions) : child(child), actions(actions) { + CslFilter(AbstractCslFormula* child, action::AbstractAction* action) : child(child) { + this->actions.push_back(action); + } + + CslFilter(AbstractCslFormula* child, std::vector*> actions) : AbstractFilter(actions), child(child) { // Intentionally left empty. } virtual ~CslFilter() { - actions.clear(); + this->actions.clear(); delete child; } - void check(AbstractModelChecker const & modelchecker) const { + void check(storm::modelchecker::csl::AbstractModelChecker const & modelchecker) const { // Write out the formula to be checked. std::cout << std::endl; @@ -68,13 +74,13 @@ public: // Now write out the result. - if(actions.empty()) { + if(this->actions.empty()) { // There is no filter action given. So provide legacy support: // Return the results for all states labeled with "init". 
LOG4CPLUS_INFO(logger, "Result for initial states:"); std::cout << "Result for initial states:" << std::endl; - for (auto initialState : modelchecker.getModel>().getInitialStates()) { + for (auto initialState : modelchecker.getModel().getInitialStates()) { LOG4CPLUS_INFO(logger, "\t" << initialState << ": " << (result.get(initialState) ? "satisfied" : "not satisfied")); std::cout << "\t" << initialState << ": " << result.get(initialState) << std::endl; } @@ -100,13 +106,13 @@ public: // Now write out the result. - if(actions.empty()) { + if(this->actions.empty()) { // There is no filter action given. So provide legacy support: // Return the results for all states labeled with "init". LOG4CPLUS_INFO(logger, "Result for initial states:"); std::cout << "Result for initial states:" << std::endl; - for (auto initialState : modelchecker.getModel>().getInitialStates()) { + for (auto initialState : modelchecker.getModel().getInitialStates()) { LOG4CPLUS_INFO(logger, "\t" << initialState << ": " << result[initialState]); std::cout << "\t" << initialState << ": " << result[initialState] << std::endl; } @@ -147,61 +153,61 @@ public: std::string toString() const { std::string desc = "Filter: "; desc += "\nActions:"; - for(auto action : actions) { + for(auto action : this->actions) { desc += "\n\t" + action.toString(); } desc += "\nFormula:\n\t" + child->toString(); return desc; } - void setChild(AbstractCslFormula* child) { + void setChild(AbstractCslFormula* child) { this->child = child; } - AbstractFormula* getChild() const { + AbstractCslFormula* getChild() const { return child; } private: - BitVector evaluate(AbstractModelChecker const & modelchecker, AbstractStateFormula* formula) const { + storm::storage::BitVector evaluate(storm::modelchecker::csl::AbstractModelChecker const & modelchecker, AbstractStateFormula* formula) const { // First, get the model checking result. 
- BitVector result = modelchecker.checkMinMaxOperator(formula); + storm::storage::BitVector result = modelchecker.checkMinMaxOperator(formula); - if(getActionCount() != 0 && dynamic_cast*>(getAction(0)) != nullptr) { + if(this->getActionCount() != 0 && dynamic_cast*>(this->getAction(0)) != nullptr) { // If there is an action specifying that min/max probabilities should be computed, call the appropriate method of the model checker. - result = modelchecker.checkMinMaxOperator(formula, static_cast*>(getAction(0))->getMinimize()); + result = modelchecker.checkMinMaxOperator(formula, static_cast*>(this->getAction(0))->getMinimize()); } else { result = formula->check(modelchecker); } // Now apply all filter actions and return the result. - for(auto action : actions) { + for(auto action : this->actions) { result = action->evaluate(result); } return result; } - std::vector evaluate(AbstractModelChecker const & modelchecker, AbstractPathFormula* formula) const { + std::vector evaluate(storm::modelchecker::csl::AbstractModelChecker const & modelchecker, AbstractPathFormula* formula) const { // First, get the model checking result. std::vector result; - if(getActionCount() != 0 && dynamic_cast*>(getAction(0)) != nullptr) { + if(this->getActionCount() != 0 && dynamic_cast*>(this->getAction(0)) != nullptr) { // If there is an action specifying that min/max probabilities should be computed, call the appropriate method of the model checker. - result = modelchecker.checkMinMaxOperator(formula, static_cast*>(getAction(0))->getMinimize()); + result = modelchecker.checkMinMaxOperator(formula, static_cast*>(this->getAction(0))->getMinimize()); } else { result = formula->check(modelchecker, false); } // Now apply all filter actions and return the result. 
- for(auto action : actions) { + for(auto action : this->actions) { result = action->evaluate(result); } return result; } - AbstractCslFormula* child; + AbstractCslFormula* child; }; } //namespace csl diff --git a/src/formula/Ltl/LtlFilter.h b/src/formula/Ltl/LtlFilter.h index 6fbb6398e..cbc31f71b 100644 --- a/src/formula/Ltl/LtlFilter.h +++ b/src/formula/Ltl/LtlFilter.h @@ -8,12 +8,15 @@ #ifndef STORM_FORMULA_LTL_LTLFILTER_H_ #define STORM_FORMULA_LTL_LTLFILTER_H_ +#include "src/formula/AbstractFilter.h" +#include "src/modelchecker/ltl/AbstractModelChecker.h" + namespace storm { namespace property { namespace ltl { template -class LtlFilter : storm::property::AbstractFilter { +class LtlFilter : public storm::property::AbstractFilter { public: @@ -26,15 +29,15 @@ public: } LtlFilter(AbstractLtlFormula* child, action::Action* action) : child(child) { - actions.push_back(action); + this->actions.push_back(action); } - LtlFilter(AbstractLtlFormula* child, std::vector*> actions) : child(child), actions(actions) { + LtlFilter(AbstractLtlFormula* child, std::vector*> actions) : AbstractFilter(actions), child(child) { // Intentionally left empty. } virtual ~LtlFilter() { - actions.clear(); + this->actions.clear(); delete child; } @@ -45,7 +48,7 @@ public: * * @param stateFormula The formula to be checked. */ - void check(AbstractModelChecker const & modelchecker) const { + void check(storm::modelchecker::ltl::AbstractModelChecker const & modelchecker) const { // Write out the formula to be checked. std::cout << std::endl; @@ -68,7 +71,7 @@ public: // Now write out the result. - if(actions.empty()) { + if(this->actions.empty()) { // There is no filter action given. So provide legacy support: // Return the results for all states labeled with "init". 
@@ -99,35 +102,35 @@ public: std::string toString() const { std::string desc = "Filter: "; desc += "\nActions:"; - for(auto action : actions) { + for(auto action : this->actions) { desc += "\n\t" + action.toString(); } desc += "\nFormula:\n\t" + child->toString(); return desc; } - void setChild(AbstractLtlFormula* child) { + void setChild(AbstractLtlFormula* child) { this->child = child; } - AbstractFormula* getChild() const { + AbstractLtlFormula* getChild() const { return child; } private: - storm::storage::BitVector evaluate(AbstractModelChecker const & modelchecker, AbstractLtlFormula* formula) const { + storm::storage::BitVector evaluate(storm::modelchecker::ltl::AbstractModelChecker const & modelchecker, AbstractLtlFormula* formula) const { // First, get the model checking result. storm::storage::BitVector result = formula->check(modelchecker); // Now apply all filter actions and return the result. - for(auto action : actions) { + for(auto action : this->actions) { result = action->evaluate(result); } return result; } - AbstractLtlFormula* child; + AbstractLtlFormula* child; }; diff --git a/src/modelchecker/csl/AbstractModelChecker.h b/src/modelchecker/csl/AbstractModelChecker.h index a80b39445..3e6fcf546 100644 --- a/src/modelchecker/csl/AbstractModelChecker.h +++ b/src/modelchecker/csl/AbstractModelChecker.h @@ -172,7 +172,7 @@ public: * @param formula The formula to check. * @returns The set of states satisfying the formula represented by a bit vector. */ - storm::storage::BitVector checkProbabilisticBoundOperator(storm::property::csl::ProbabilisticBoundOperator const& formula) const { + virtual storm::storage::BitVector checkProbabilisticBoundOperator(storm::property::csl::ProbabilisticBoundOperator const& formula) const { // First, we need to compute the probability for satisfying the path formula for each state. 
std::vector quantitativeResult = formula.getPathFormula().check(*this, false); diff --git a/src/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h b/src/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h index 629e809af..e5aa4d075 100644 --- a/src/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h +++ b/src/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h @@ -15,583 +15,627 @@ #include "src/exceptions/NotImplementedException.h" namespace storm { - namespace modelchecker { - namespace csl { - - template - class SparseMarkovAutomatonCslModelChecker : public AbstractModelChecker { - public: - explicit SparseMarkovAutomatonCslModelChecker(storm::models::MarkovAutomaton const& model, std::shared_ptr> nondeterministicLinearEquationSolver) : AbstractModelChecker(model), nondeterministicLinearEquationSolver(nondeterministicLinearEquationSolver) { - // Intentionally left empty. - } - - /* - This Second constructor is NEEDED and a workaround for a common Bug in C++ with nested templates - See: http://stackoverflow.com/questions/14401308/visual-c-cannot-deduce-given-template-arguments-for-function-used-as-defaul - */ - explicit SparseMarkovAutomatonCslModelChecker(storm::models::MarkovAutomaton const& model) : AbstractModelChecker(model), nondeterministicLinearEquationSolver(storm::utility::solver::getNondeterministicLinearEquationSolver()) { - // Intentionally left empty. +namespace modelchecker { +namespace csl { + +template +class SparseMarkovAutomatonCslModelChecker : public AbstractModelChecker { +public: + explicit SparseMarkovAutomatonCslModelChecker(storm::models::MarkovAutomaton const& model, std::shared_ptr> nondeterministicLinearEquationSolver) : AbstractModelChecker(model), nondeterministicLinearEquationSolver(nondeterministicLinearEquationSolver) { + // Intentionally left empty. 
+ } + + /* + This Second constructor is NEEDED and a workaround for a common Bug in C++ with nested templates + See: http://stackoverflow.com/questions/14401308/visual-c-cannot-deduce-given-template-arguments-for-function-used-as-defaul + */ + explicit SparseMarkovAutomatonCslModelChecker(storm::models::MarkovAutomaton const& model) : AbstractModelChecker(model), nondeterministicLinearEquationSolver(storm::utility::solver::getNondeterministicLinearEquationSolver()) { + // Intentionally left empty. + } + + /*! + * Virtual destructor. Needs to be virtual, because this class has virtual methods. + */ + virtual ~SparseMarkovAutomatonCslModelChecker() { + // Intentionally left empty. + } + + /*! + * Returns a constant reference to the MDP associated with this model checker. + * @returns A constant reference to the MDP associated with this model checker. + */ + storm::models::MarkovAutomaton const& getModel() const { + return AbstractModelChecker::template getModel>(); + } + + /*! + * Checks the given formula that is a P operator over a path formula featuring a value bound. + * + * @param formula The formula to check. + * @returns The set of states satisfying the formula represented by a bit vector. + */ + virtual storm::storage::BitVector checkProbabilisticBoundOperator(storm::property::csl::ProbabilisticBoundOperator const& formula) const override{ + // For P< and P<= the MA satisfies the formula iff the probability maximizing scheduler is used. + // For P> and P>= " iff the probability minimizing " . + if(formula.getComparisonOperator() == storm::property::LESS || formula.getComparisonOperator() == storm::property::LESS_EQUAL) { + this->minimumOperatorStack.push(false); + } + else { + this->minimumOperatorStack.push(true); + } + + // First, we need to compute the probability for satisfying the path formula for each state. + std::vector quantitativeResult = formula.getPathFormula().check(*this, false); + + //Remove the minimizing operator entry from the stack. 
+ this->minimumOperatorStack.pop(); + + // Create resulting bit vector that will hold the yes/no-answer for every state. + storm::storage::BitVector result(quantitativeResult.size()); + + // Now, we can compute which states meet the bound specified in this operator and set the + // corresponding bits to true in the resulting vector. + for (uint_fast64_t i = 0; i < quantitativeResult.size(); ++i) { + if (formula.meetsBound(quantitativeResult[i])) { + result.set(i, true); + } + } + + return result; + } + + std::vector checkUntil(storm::property::csl::Until const& formula, bool qualitative) const { + // Test whether it is specified if the minimum or maximum probabilities are to be computed. + if(this->minimumOperatorStack.empty()) { + LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); + throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; + } + + storm::storage::BitVector leftStates = formula.getLeft().check(*this); + storm::storage::BitVector rightStates = formula.getRight().check(*this); + return computeUnboundedUntilProbabilities(this->minimumOperatorStack.top(), leftStates, rightStates, qualitative).first; + } + + std::pair, storm::storage::TotalScheduler> computeUnboundedUntilProbabilities(bool min, storm::storage::BitVector const& leftStates, storm::storage::BitVector const& rightStates, bool qualitative) const { + return storm::modelchecker::prctl::SparseMdpPrctlModelChecker::computeUnboundedUntilProbabilities(min, this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getInitialStates(), leftStates, rightStates, nondeterministicLinearEquationSolver, qualitative); + } + + std::vector checkTimeBoundedUntil(storm::property::csl::TimeBoundedUntil const& formula, bool qualitative) const { + throw 
storm::exceptions::NotImplementedException() << "Model checking Until formulas on Markov automata is not yet implemented."; + } + + std::vector checkTimeBoundedEventually(storm::property::csl::TimeBoundedEventually const& formula, bool qualitative) const { + // Test whether it is specified if the minimum or maximum probabilities are to be computed. + if(this->minimumOperatorStack.empty()) { + LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); + throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; + } + + storm::storage::BitVector goalStates = formula.getChild().check(*this); + return this->checkTimeBoundedEventually(this->minimumOperatorStack.top(), goalStates, formula.getLowerBound(), formula.getUpperBound()); + } + + std::vector checkGlobally(storm::property::csl::Globally const& formula, bool qualitative) const { + throw storm::exceptions::NotImplementedException() << "Model checking Globally formulas on Markov automata is not yet implemented."; + } + + std::vector checkEventually(storm::property::csl::Eventually const& formula, bool qualitative) const { + // Test whether it is specified if the minimum or maximum probabilities are to be computed. 
+ if(this->minimumOperatorStack.empty()) { + LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); + throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; + } + + storm::storage::BitVector subFormulaStates = formula.getChild().check(*this); + return computeUnboundedUntilProbabilities(this->minimumOperatorStack.top(), storm::storage::BitVector(this->getModel().getNumberOfStates(), true), subFormulaStates, qualitative).first; + } + + std::vector checkNext(storm::property::csl::Next const& formula, bool qualitative) const { + throw storm::exceptions::NotImplementedException() << "Model checking Next formulas on Markov automata is not yet implemented."; + } + + static void computeBoundedReachabilityProbabilities(bool min, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRates, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& goalStates, storm::storage::BitVector const& markovianNonGoalStates, storm::storage::BitVector const& probabilisticNonGoalStates, std::vector& markovianNonGoalValues, std::vector& probabilisticNonGoalValues, ValueType delta, uint_fast64_t numberOfSteps) { + // Start by computing four sparse matrices: + // * a matrix aMarkovian with all (discretized) transitions from Markovian non-goal states to all Markovian non-goal states. + // * a matrix aMarkovianToProbabilistic with all (discretized) transitions from Markovian non-goal states to all probabilistic non-goal states. + // * a matrix aProbabilistic with all (non-discretized) transitions from probabilistic non-goal states to other probabilistic non-goal states. + // * a matrix aProbabilisticToMarkovian with all (non-discretized) transitions from probabilistic non-goal states to all Markovian non-goal states. 
+ typename storm::storage::SparseMatrix aMarkovian = transitionMatrix.getSubmatrix(true, markovianNonGoalStates, markovianNonGoalStates, true); + typename storm::storage::SparseMatrix aMarkovianToProbabilistic = transitionMatrix.getSubmatrix(true, markovianNonGoalStates, probabilisticNonGoalStates); + typename storm::storage::SparseMatrix aProbabilistic = transitionMatrix.getSubmatrix(true, probabilisticNonGoalStates, probabilisticNonGoalStates); + typename storm::storage::SparseMatrix aProbabilisticToMarkovian = transitionMatrix.getSubmatrix(true, probabilisticNonGoalStates, markovianNonGoalStates); + + // The matrices with transitions from Markovian states need to be digitized. + // Digitize aMarkovian. Based on whether the transition is a self-loop or not, we apply the two digitization rules. + uint_fast64_t rowIndex = 0; + for (auto state : markovianNonGoalStates) { + for (auto& element : aMarkovian.getRow(rowIndex)) { + ValueType eTerm = std::exp(-exitRates[state] * delta); + if (element.first == rowIndex) { + element.second = (storm::utility::constantOne() - eTerm) * element.second + eTerm; + } else { + element.second = (storm::utility::constantOne() - eTerm) * element.second; } + } + ++rowIndex; + } + + // Digitize aMarkovianToProbabilistic. As there are no self-loops in this case, we only need to apply the digitization formula for regular successors. + rowIndex = 0; + for (auto state : markovianNonGoalStates) { + for (auto& element : aMarkovianToProbabilistic.getRow(rowIndex)) { + element.second = (1 - std::exp(-exitRates[state] * delta)) * element.second; + } + ++rowIndex; + } + + // Initialize the two vectors that hold the variable one-step probabilities to all target states for probabilistic and Markovian (non-goal) states. 
+ std::vector bProbabilistic(aProbabilistic.getRowCount()); + std::vector bMarkovian(markovianNonGoalStates.getNumberOfSetBits()); + + // Compute the two fixed right-hand side vectors, one for Markovian states and one for the probabilistic ones. + std::vector bProbabilisticFixed = transitionMatrix.getConstrainedRowSumVector(probabilisticNonGoalStates, goalStates); + std::vector bMarkovianFixed; + bMarkovianFixed.reserve(markovianNonGoalStates.getNumberOfSetBits()); + for (auto state : markovianNonGoalStates) { + bMarkovianFixed.push_back(storm::utility::constantZero()); + + for (auto& element : transitionMatrix.getRowGroup(state)) { + if (goalStates.get(element.first)) { + bMarkovianFixed.back() += (1 - std::exp(-exitRates[state] * delta)) * element.second; + } + } + } + + std::shared_ptr> nondeterministiclinearEquationSolver = storm::utility::solver::getNondeterministicLinearEquationSolver(); + + // Perform the actual value iteration + // * loop until the step bound has been reached + // * in the loop: + // * perform value iteration using A_PSwG, v_PS and the vector b where b = (A * 1_G)|PS + A_PStoMS * v_MS + // and 1_G being the characteristic vector for all goal states. + // * perform one timed-step using v_MS := A_MSwG * v_MS + A_MStoPS * v_PS + (A * 1_G)|MS + std::vector markovianNonGoalValuesSwap(markovianNonGoalValues); + std::vector multiplicationResultScratchMemory(aProbabilistic.getRowCount()); + std::vector aProbabilisticScratchMemory(probabilisticNonGoalValues.size()); + for (uint_fast64_t currentStep = 0; currentStep < numberOfSteps; ++currentStep) { + // Start by (re-)computing bProbabilistic = bProbabilisticFixed + aProbabilisticToMarkovian * vMarkovian. + aProbabilisticToMarkovian.multiplyWithVector(markovianNonGoalValues, bProbabilistic); + storm::utility::vector::addVectorsInPlace(bProbabilistic, bProbabilisticFixed); + + // Now perform the inner value iteration for probabilistic states. 
+ nondeterministiclinearEquationSolver->solveEquationSystem(min, aProbabilistic, probabilisticNonGoalValues, bProbabilistic, &multiplicationResultScratchMemory, &aProbabilisticScratchMemory); + + // (Re-)compute bMarkovian = bMarkovianFixed + aMarkovianToProbabilistic * vProbabilistic. + aMarkovianToProbabilistic.multiplyWithVector(probabilisticNonGoalValues, bMarkovian); + storm::utility::vector::addVectorsInPlace(bMarkovian, bMarkovianFixed); + aMarkovian.multiplyWithVector(markovianNonGoalValues, markovianNonGoalValuesSwap); + std::swap(markovianNonGoalValues, markovianNonGoalValuesSwap); + storm::utility::vector::addVectorsInPlace(markovianNonGoalValues, bMarkovian); + } + + // After the loop, perform one more step of the value iteration for PS states. + aProbabilisticToMarkovian.multiplyWithVector(markovianNonGoalValues, bProbabilistic); + storm::utility::vector::addVectorsInPlace(bProbabilistic, bProbabilisticFixed); + nondeterministiclinearEquationSolver->solveEquationSystem(min, aProbabilistic, probabilisticNonGoalValues, bProbabilistic, &multiplicationResultScratchMemory, &aProbabilisticScratchMemory); + } + + std::vector checkTimeBoundedEventually(bool min, storm::storage::BitVector const& goalStates, ValueType lowerBound, ValueType upperBound) const { + // Check whether the automaton is closed. + if (!this->getModel().isClosed()) { + throw storm::exceptions::InvalidArgumentException() << "Unable to compute time-bounded reachability on non-closed Markov automaton."; + } + + // Check whether the given bounds were valid. + if (lowerBound < storm::utility::constantZero() || upperBound < storm::utility::constantZero() || upperBound < lowerBound) { + throw storm::exceptions::InvalidArgumentException() << "Illegal interval ["; + } + + // Get some data fields for convenient access. 
+ typename storm::storage::SparseMatrix const& transitionMatrix = this->getModel().getTransitionMatrix(); + std::vector const& exitRates = this->getModel().getExitRates(); + storm::storage::BitVector const& markovianStates = this->getModel().getMarkovianStates(); + + // (1) Compute the accuracy we need to achieve the required error bound. + ValueType maxExitRate = this->getModel().getMaximalExitRate(); + ValueType delta = (2 * storm::settings::Settings::getInstance()->getOptionByLongName("digiprecision").getArgument(0).getValueAsDouble()) / (upperBound * maxExitRate * maxExitRate); + + // (2) Compute the number of steps we need to make for the interval. + uint_fast64_t numberOfSteps = static_cast(std::ceil((upperBound - lowerBound) / delta)); + std::cout << "Performing " << numberOfSteps << " iterations (delta=" << delta << ") for interval [" << lowerBound << ", " << upperBound << "]." << std::endl; + + // (3) Compute the non-goal states and initialize two vectors + // * vProbabilistic holds the probability values of probabilistic non-goal states. + // * vMarkovian holds the probability values of Markovian non-goal states. + storm::storage::BitVector const& markovianNonGoalStates = markovianStates & ~goalStates; + storm::storage::BitVector const& probabilisticNonGoalStates = ~markovianStates & ~goalStates; + std::vector vProbabilistic(probabilisticNonGoalStates.getNumberOfSetBits()); + std::vector vMarkovian(markovianNonGoalStates.getNumberOfSetBits()); + + computeBoundedReachabilityProbabilities(min, transitionMatrix, exitRates, markovianStates, goalStates, markovianNonGoalStates, probabilisticNonGoalStates, vMarkovian, vProbabilistic, delta, numberOfSteps); + + // (4) If the lower bound of interval was non-zero, we need to take the current values as the starting values for a subsequent value iteration. 
+ if (lowerBound != storm::utility::constantZero()) { + std::vector vAllProbabilistic((~markovianStates).getNumberOfSetBits()); + std::vector vAllMarkovian(markovianStates.getNumberOfSetBits()); + + // Create the starting value vectors for the next value iteration based on the results of the previous one. + storm::utility::vector::setVectorValues(vAllProbabilistic, goalStates % ~markovianStates, storm::utility::constantOne()); + storm::utility::vector::setVectorValues(vAllProbabilistic, ~goalStates % ~markovianStates, vProbabilistic); + storm::utility::vector::setVectorValues(vAllMarkovian, goalStates % markovianStates, storm::utility::constantOne()); + storm::utility::vector::setVectorValues(vAllMarkovian, ~goalStates % markovianStates, vMarkovian); + + // Compute the number of steps to reach the target interval. + numberOfSteps = static_cast(std::ceil(lowerBound / delta)); + std::cout << "Performing " << numberOfSteps << " iterations (delta=" << delta << ") for interval [0, " << lowerBound << "]." << std::endl; + + // Compute the bounded reachability for interval [0, b-a]. + computeBoundedReachabilityProbabilities(min, transitionMatrix, exitRates, markovianStates, storm::storage::BitVector(this->getModel().getNumberOfStates()), markovianStates, ~markovianStates, vAllMarkovian, vAllProbabilistic, delta, numberOfSteps); + + // Create the result vector out of vAllProbabilistic and vAllMarkovian and return it. + std::vector result(this->getModel().getNumberOfStates()); + storm::utility::vector::setVectorValues(result, ~markovianStates, vAllProbabilistic); + storm::utility::vector::setVectorValues(result, markovianStates, vAllMarkovian); + + return result; + } else { + // Create the result vector out of 1_G, vProbabilistic and vMarkovian and return it. 
+ std::vector result(this->getModel().getNumberOfStates()); + storm::utility::vector::setVectorValues(result, goalStates, storm::utility::constantOne()); + storm::utility::vector::setVectorValues(result, probabilisticNonGoalStates, vProbabilistic); + storm::utility::vector::setVectorValues(result, markovianNonGoalStates, vMarkovian); + return result; + } + } + + std::vector checkLongRunAverage(bool min, storm::storage::BitVector const& goalStates) const { + // Check whether the automaton is closed. + if (!this->getModel().isClosed()) { + throw storm::exceptions::InvalidArgumentException() << "Unable to compute long-run average on non-closed Markov automaton."; + } + + // If there are no goal states, we avoid the computation and directly return zero. + if (goalStates.empty()) { + return std::vector(this->getModel().getNumberOfStates(), storm::utility::constantZero()); + } + + // Likewise, if all bits are set, we can avoid the computation and set. + if ((~goalStates).empty()) { + return std::vector(this->getModel().getNumberOfStates(), storm::utility::constantOne()); + } + + // Start by decomposing the Markov automaton into its MECs. + storm::storage::MaximalEndComponentDecomposition mecDecomposition(this->getModel()); + + // Get some data members for convenience. + typename storm::storage::SparseMatrix const& transitionMatrix = this->getModel().getTransitionMatrix(); + std::vector const& nondeterministicChoiceIndices = this->getModel().getNondeterministicChoiceIndices(); + + // Now start with compute the long-run average for all end components in isolation. + std::vector lraValuesForEndComponents; + + // While doing so, we already gather some information for the following steps. 
+ std::vector stateToMecIndexMap(this->getModel().getNumberOfStates()); + storm::storage::BitVector statesInMecs(this->getModel().getNumberOfStates()); + + for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) { + storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex]; + + // Gather information for later use. + for (auto const& stateChoicesPair : mec) { + uint_fast64_t state = stateChoicesPair.first; + + statesInMecs.set(state); + stateToMecIndexMap[state] = currentMecIndex; + } + + // Compute the LRA value for the current MEC. + lraValuesForEndComponents.push_back(this->computeLraForMaximalEndComponent(min, transitionMatrix, nondeterministicChoiceIndices, this->getModel().getMarkovianStates(), this->getModel().getExitRates(), goalStates, mec, currentMecIndex)); + } - /*! - * Returns a constant reference to the MDP associated with this model checker. - * @returns A constant reference to the MDP associated with this model checker. - */ - storm::models::MarkovAutomaton const& getModel() const { - return AbstractModelChecker::template getModel>(); - } - - std::vector checkUntil(storm::property::csl::Until const& formula, bool qualitative) const { - // Test whether it is specified if the minimum or maximum probabilities are to be computed. - if(this->minimumOperatorStack.empty()) { - LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); - throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; + // For fast transition rewriting, we build some auxiliary data structures. 
+ storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs; + uint_fast64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits(); + uint_fast64_t lastStateNotInMecs = 0; + uint_fast64_t numberOfStatesNotInMecs = 0; + std::vector statesNotInMecsBeforeIndex; + statesNotInMecsBeforeIndex.reserve(this->getModel().getNumberOfStates()); + for (auto state : statesNotContainedInAnyMec) { + while (lastStateNotInMecs <= state) { + statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs); + ++lastStateNotInMecs; + } + ++numberOfStatesNotInMecs; + } + + // Finally, we are ready to create the SSP matrix and right-hand side of the SSP. + std::vector b; + typename storm::storage::SparseMatrixBuilder sspMatrixBuilder(0, 0, 0, true, numberOfStatesNotInMecs + mecDecomposition.size() + 1); + + // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications). + uint_fast64_t currentChoice = 0; + for (auto state : statesNotContainedInAnyMec) { + sspMatrixBuilder.newRowGroup(currentChoice); + + for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) { + std::vector auxiliaryStateToProbabilityMap(mecDecomposition.size()); + b.push_back(storm::utility::constantZero()); + + for (auto element : transitionMatrix.getRow(choice)) { + if (statesNotContainedInAnyMec.get(element.first)) { + // If the target state is not contained in an MEC, we can copy over the entry. + sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.first], element.second); + } else { + // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector + // so that we are able to write the cumulative probability to the MEC into the matrix. 
+ auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.first]] += element.second; } + } - storm::storage::BitVector leftStates = formula.getLeft().check(*this); - storm::storage::BitVector rightStates = formula.getRight().check(*this); - return computeUnboundedUntilProbabilities(this->minimumOperatorStack.top(), leftStates, rightStates, qualitative).first; - } - - std::pair, storm::storage::TotalScheduler> computeUnboundedUntilProbabilities(bool min, storm::storage::BitVector const& leftStates, storm::storage::BitVector const& rightStates, bool qualitative) const { - return storm::modelchecker::prctl::SparseMdpPrctlModelChecker::computeUnboundedUntilProbabilities(min, this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getInitialStates(), leftStates, rightStates, nondeterministicLinearEquationSolver, qualitative); - } - - std::vector checkTimeBoundedUntil(storm::property::csl::TimeBoundedUntil const& formula, bool qualitative) const { - throw storm::exceptions::NotImplementedException() << "Model checking Until formulas on Markov automata is not yet implemented."; - } - - std::vector checkTimeBoundedEventually(storm::property::csl::TimeBoundedEventually const& formula, bool qualitative) const { - // Test whether it is specified if the minimum or maximum probabilities are to be computed. - if(this->minimumOperatorStack.empty()) { - LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); - throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; + // Now insert all (cumulative) probability values that target an MEC. 
+ for (uint_fast64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) { + if (auxiliaryStateToProbabilityMap[mecIndex] != 0) { + sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]); } + } + } + } + + // Now we are ready to construct the choices for the auxiliary states. + for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) { + storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex]; + sspMatrixBuilder.newRowGroup(currentChoice); - storm::storage::BitVector goalStates = formula.getChild().check(*this); - return this->checkTimeBoundedEventually(this->minimumOperatorStack.top(), goalStates, formula.getLowerBound(), formula.getUpperBound()); - } - - std::vector checkGlobally(storm::property::csl::Globally const& formula, bool qualitative) const { - throw storm::exceptions::NotImplementedException() << "Model checking Globally formulas on Markov automata is not yet implemented."; - } - - std::vector checkEventually(storm::property::csl::Eventually const& formula, bool qualitative) const { - // Test whether it is specified if the minimum or maximum probabilities are to be computed. 
- if(this->minimumOperatorStack.empty()) { - LOG4CPLUS_ERROR(logger, "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."); - throw storm::exceptions::InvalidArgumentException() << "Formula does not specify neither min nor max optimality, which is not meaningful over nondeterministic models."; + for (auto const& stateChoicesPair : mec) { + uint_fast64_t state = stateChoicesPair.first; + boost::container::flat_set const& choicesInMec = stateChoicesPair.second; + + for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) { + std::vector auxiliaryStateToProbabilityMap(mecDecomposition.size()); + + // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state. + if (choicesInMec.find(choice) == choicesInMec.end()) { + b.push_back(storm::utility::constantZero()); + + for (auto element : transitionMatrix.getRow(choice)) { + if (statesNotContainedInAnyMec.get(element.first)) { + // If the target state is not contained in an MEC, we can copy over the entry. + sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.first], element.second); + } else { + // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector + // so that we are able to write the cumulative probability to the MEC into the matrix. + auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.first]] += element.second; + } + } + + // Now insert all (cumulative) probability values that target an MEC. + for (uint_fast64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) { + if (auxiliaryStateToProbabilityMap[targetMecIndex] != 0) { + // If the target MEC is the same as the current one, instead of adding a transition, we need to add the weighted reward + // to the right-hand side vector of the SSP. 
+ if (mecIndex == targetMecIndex) { + b.back() += auxiliaryStateToProbabilityMap[mecIndex] * lraValuesForEndComponents[mecIndex]; + } else { + // Otherwise, we add a transition to the auxiliary state that is associated with the target MEC. + sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]); + } + } + } + + ++currentChoice; } + } + } + + // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC. + ++currentChoice; + b.push_back(lraValuesForEndComponents[mecIndex]); + } + + // Finalize the matrix and solve the corresponding system of equations. + storm::storage::SparseMatrix sspMatrix = sspMatrixBuilder.build(currentChoice + 1); + + std::vector x(numberOfStatesNotInMecs + mecDecomposition.size()); + nondeterministicLinearEquationSolver->solveEquationSystem(min, sspMatrix, x, b); + + // Prepare result vector. + std::vector result(this->getModel().getNumberOfStates()); + + // Set the values for states not contained in MECs. + storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, x); + + // Set the values for all states in MECs. + for (auto state : statesInMecs) { + result[state] = lraValuesForEndComponents[stateToMecIndexMap[state]]; + } + + return result; + } + + std::vector checkExpectedTime(bool min, storm::storage::BitVector const& goalStates) const { + // Reduce the problem of computing the expected time to computing expected rewards where the rewards + // for all probabilistic states are zero and the reward values of Markovian states is 1. + std::vector rewardValues(this->getModel().getNumberOfStates(), storm::utility::constantZero()); + storm::utility::vector::setVectorValues(rewardValues, this->getModel().getMarkovianStates(), storm::utility::constantOne()); + return this->computeExpectedRewards(min, goalStates, rewardValues); + } + +protected: + /*! 
+ * Computes the long-run average value for the given maximal end component of a Markov automaton. + * + * @param min Sets whether the long-run average is to be minimized or maximized. + * @param transitionMatrix The transition matrix of the underlying Markov automaton. + * @param nondeterministicChoiceIndices A vector indicating at which row the choice of a given state begins. + * @param markovianStates A bit vector storing all markovian states. + * @param exitRates A vector with exit rates for all states. Exit rates of probabilistic states are assumed to be zero. + * @param goalStates A bit vector indicating which states are to be considered as goal states. + * @param mec The maximal end component to consider for computing the long-run average. + * @param mecIndex The index of the MEC. + * @return The long-run average of being in a goal state for the given MEC. + */ + static ValueType computeLraForMaximalEndComponent(bool min, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& nondeterministicChoiceIndices, storm::storage::BitVector const& markovianStates, std::vector const& exitRates, storm::storage::BitVector const& goalStates, storm::storage::MaximalEndComponent const& mec, uint_fast64_t mecIndex = 0) { + std::shared_ptr solver = storm::utility::solver::getLpSolver("LRA for MEC"); + solver->setModelSense(min ? storm::solver::LpSolver::MAXIMIZE : storm::solver::LpSolver::MINIMIZE); + + // First, we need to create the variables for the problem. + std::map stateToVariableIndexMap; + for (auto const& stateChoicesPair : mec) { + stateToVariableIndexMap[stateChoicesPair.first] = solver->createContinuousVariable("x" + std::to_string(stateChoicesPair.first), storm::solver::LpSolver::UNBOUNDED, 0, 0, 0); + } + uint_fast64_t lraValueVariableIndex = solver->createContinuousVariable("k", storm::solver::LpSolver::UNBOUNDED, 0, 0, 1); + solver->update(); + + // Now we encode the problem as constraints. 
+ std::vector variables; + std::vector coefficients; + for (auto const& stateChoicesPair : mec) { + uint_fast64_t state = stateChoicesPair.first; + + // Now, based on the type of the state, create a suitable constraint. + if (markovianStates.get(state)) { + variables.clear(); + coefficients.clear(); + + variables.push_back(stateToVariableIndexMap.at(state)); + coefficients.push_back(1); + + for (auto element : transitionMatrix.getRow(nondeterministicChoiceIndices[state])) { + variables.push_back(stateToVariableIndexMap.at(element.first)); + coefficients.push_back(-element.second); + } + + variables.push_back(lraValueVariableIndex); + coefficients.push_back(storm::utility::constantOne() / exitRates[state]); + + solver->addConstraint("state" + std::to_string(state), variables, coefficients, min ? storm::solver::LpSolver::LESS_EQUAL : storm::solver::LpSolver::GREATER_EQUAL, goalStates.get(state) ? storm::utility::constantOne() / exitRates[state] : storm::utility::constantZero()); + } else { + // For probabilistic states, we want to add the constraint x_s <= sum P(s, a, s') * x_s' where a is the current action + // and the sum ranges over all states s'. + for (auto choice : stateChoicesPair.second) { + variables.clear(); + coefficients.clear(); + + variables.push_back(stateToVariableIndexMap.at(state)); + coefficients.push_back(1); + + for (auto element : transitionMatrix.getRow(choice)) { + variables.push_back(stateToVariableIndexMap.at(element.first)); + coefficients.push_back(-element.second); + } + + solver->addConstraint("state" + std::to_string(state), variables, coefficients, min ? storm::solver::LpSolver::LESS_EQUAL : storm::solver::LpSolver::GREATER_EQUAL, storm::utility::constantZero()); + } + } + } + + solver->optimize(); + return solver->getContinuousValue(lraValueVariableIndex); + } + + /*! + * Computes the expected reward that is gained from each state before entering any of the goal states. 
+ * + * @param min Indicates whether minimal or maximal rewards are to be computed. + * @param goalStates The goal states that define until which point rewards are gained. + * @param stateRewards A vector that defines the reward gained in each state. For probabilistic states, this is an instantaneous reward + * that is fully gained and for Markovian states the actually gained reward is dependent on the expected time to stay in the + * state, i.e. it is gouverned by the exit rate of the state. + * @return A vector that contains the expected reward for each state of the model. + */ + std::vector computeExpectedRewards(bool min, storm::storage::BitVector const& goalStates, std::vector const& stateRewards) const { + // Check whether the automaton is closed. + if (!this->getModel().isClosed()) { + throw storm::exceptions::InvalidArgumentException() << "Unable to compute expected time on non-closed Markov automaton."; + } + + // First, we need to check which states have infinite expected time (by definition). + storm::storage::BitVector infinityStates; + if (min) { + // If we need to compute the minimum expected times, we have to set the values of those states to infinity that, under all schedulers, + // reach a bottom SCC without a goal state. + + // So we start by computing all bottom SCCs without goal states. + storm::storage::StronglyConnectedComponentDecomposition sccDecomposition(this->getModel(), ~goalStates, true, true); + + // Now form the union of all these SCCs. + storm::storage::BitVector unionOfNonGoalBSccs(this->getModel().getNumberOfStates()); + for (auto const& scc : sccDecomposition) { + for (auto state : scc) { + unionOfNonGoalBSccs.set(state); + } + } + + // Finally, if this union is non-empty, compute the states such that all schedulers reach some state of the union. 
+ if (!unionOfNonGoalBSccs.empty()) { + infinityStates = storm::utility::graph::performProbGreater0A(this->getModel().getTransitionMatrix(), this->getModel().getNondeterministicChoiceIndices(), this->getModel().getBackwardTransitions(), storm::storage::BitVector(this->getModel().getNumberOfStates(), true), unionOfNonGoalBSccs); + } else { + // Otherwise, we have no infinity states. + infinityStates = storm::storage::BitVector(this->getModel().getNumberOfStates()); + } + } else { + // If we maximize the property, the expected time of a state is infinite, if an end-component without any goal state is reachable. + + // So we start by computing all MECs that have no goal state. + storm::storage::MaximalEndComponentDecomposition mecDecomposition(this->getModel(), ~goalStates); + + // Now we form the union of all states in these end components. + storm::storage::BitVector unionOfNonGoalMaximalEndComponents(this->getModel().getNumberOfStates()); + for (auto const& mec : mecDecomposition) { + for (auto const& stateActionPair : mec) { + unionOfNonGoalMaximalEndComponents.set(stateActionPair.first); + } + } + + if (!unionOfNonGoalMaximalEndComponents.empty()) { + // Now we need to check for which states there exists a scheduler that reaches one of the previously computed states. + infinityStates = storm::utility::graph::performProbGreater0E(this->getModel().getTransitionMatrix(), this->getModel().getNondeterministicChoiceIndices(), this->getModel().getBackwardTransitions(), storm::storage::BitVector(this->getModel().getNumberOfStates(), true), unionOfNonGoalMaximalEndComponents); + } else { + // Otherwise, we have no infinity states. + infinityStates = storm::storage::BitVector(this->getModel().getNumberOfStates()); + } + } + + // Now we identify the states for which values need to be computed. + storm::storage::BitVector maybeStates = ~(goalStates | infinityStates); + + // Then, we can eliminate the rows and columns for all states whose values are already known to be 0. 
+ std::vector x(maybeStates.getNumberOfSetBits()); + storm::storage::SparseMatrix submatrix = this->getModel().getTransitionMatrix().getSubmatrix(true, maybeStates, maybeStates); + + // Now prepare the expected reward values for all states so they can be used as the right-hand side of the equation system. + std::vector rewardValues(stateRewards); + for (auto state : this->getModel().getMarkovianStates()) { + rewardValues[state] = rewardValues[state] / this->getModel().getExitRates()[state]; + } + + // Finally, prepare the actual right-hand side. + std::vector b(submatrix.getRowCount()); + storm::utility::vector::selectVectorValuesRepeatedly(b, maybeStates, this->getModel().getNondeterministicChoiceIndices(), rewardValues); + + // Solve the corresponding system of equations. + std::shared_ptr> nondeterministiclinearEquationSolver = storm::utility::solver::getNondeterministicLinearEquationSolver(); + nondeterministiclinearEquationSolver->solveEquationSystem(min, submatrix, x, b); + + // Create resulting vector. + std::vector result(this->getModel().getNumberOfStates()); + + // Set values of resulting vector according to previous result and return the result. + storm::utility::vector::setVectorValues(result, maybeStates, x); + storm::utility::vector::setVectorValues(result, goalStates, storm::utility::constantZero()); + storm::utility::vector::setVectorValues(result, infinityStates, storm::utility::constantInfinity()); + + return result; + } + + /*! + * A solver that is used for solving systems of linear equations that are the result of nondeterministic choices. 
+ */ + std::shared_ptr> nondeterministicLinearEquationSolver; +}; - storm::storage::BitVector subFormulaStates = formula.getChild().check(*this); - return computeUnboundedUntilProbabilities(this->minimumOperatorStack.top(), storm::storage::BitVector(this->getModel().getNumberOfStates(), true), subFormulaStates, qualitative).first; - } - - std::vector checkNext(storm::property::csl::Next const& formula, bool qualitative) const { - throw storm::exceptions::NotImplementedException() << "Model checking Next formulas on Markov automata is not yet implemented."; - } - - static void computeBoundedReachabilityProbabilities(bool min, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRates, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& goalStates, storm::storage::BitVector const& markovianNonGoalStates, storm::storage::BitVector const& probabilisticNonGoalStates, std::vector& markovianNonGoalValues, std::vector& probabilisticNonGoalValues, ValueType delta, uint_fast64_t numberOfSteps) { - // Start by computing four sparse matrices: - // * a matrix aMarkovian with all (discretized) transitions from Markovian non-goal states to all Markovian non-goal states. - // * a matrix aMarkovianToProbabilistic with all (discretized) transitions from Markovian non-goal states to all probabilistic non-goal states. - // * a matrix aProbabilistic with all (non-discretized) transitions from probabilistic non-goal states to other probabilistic non-goal states. - // * a matrix aProbabilisticToMarkovian with all (non-discretized) transitions from probabilistic non-goal states to all Markovian non-goal states. 
- typename storm::storage::SparseMatrix aMarkovian = transitionMatrix.getSubmatrix(true, markovianNonGoalStates, markovianNonGoalStates, true); - typename storm::storage::SparseMatrix aMarkovianToProbabilistic = transitionMatrix.getSubmatrix(true, markovianNonGoalStates, probabilisticNonGoalStates); - typename storm::storage::SparseMatrix aProbabilistic = transitionMatrix.getSubmatrix(true, probabilisticNonGoalStates, probabilisticNonGoalStates); - typename storm::storage::SparseMatrix aProbabilisticToMarkovian = transitionMatrix.getSubmatrix(true, probabilisticNonGoalStates, markovianNonGoalStates); - - // The matrices with transitions from Markovian states need to be digitized. - // Digitize aMarkovian. Based on whether the transition is a self-loop or not, we apply the two digitization rules. - uint_fast64_t rowIndex = 0; - for (auto state : markovianNonGoalStates) { - for (auto& element : aMarkovian.getRow(rowIndex)) { - ValueType eTerm = std::exp(-exitRates[state] * delta); - if (element.first == rowIndex) { - element.second = (storm::utility::constantOne() - eTerm) * element.second + eTerm; - } else { - element.second = (storm::utility::constantOne() - eTerm) * element.second; - } - } - ++rowIndex; - } - - // Digitize aMarkovianToProbabilistic. As there are no self-loops in this case, we only need to apply the digitization formula for regular successors. - rowIndex = 0; - for (auto state : markovianNonGoalStates) { - for (auto& element : aMarkovianToProbabilistic.getRow(rowIndex)) { - element.second = (1 - std::exp(-exitRates[state] * delta)) * element.second; - } - ++rowIndex; - } - - // Initialize the two vectors that hold the variable one-step probabilities to all target states for probabilistic and Markovian (non-goal) states. 
- std::vector bProbabilistic(aProbabilistic.getRowCount()); - std::vector bMarkovian(markovianNonGoalStates.getNumberOfSetBits()); - - // Compute the two fixed right-hand side vectors, one for Markovian states and one for the probabilistic ones. - std::vector bProbabilisticFixed = transitionMatrix.getConstrainedRowSumVector(probabilisticNonGoalStates, goalStates); - std::vector bMarkovianFixed; - bMarkovianFixed.reserve(markovianNonGoalStates.getNumberOfSetBits()); - for (auto state : markovianNonGoalStates) { - bMarkovianFixed.push_back(storm::utility::constantZero()); - - for (auto& element : transitionMatrix.getRowGroup(state)) { - if (goalStates.get(element.first)) { - bMarkovianFixed.back() += (1 - std::exp(-exitRates[state] * delta)) * element.second; - } - } - } - - std::shared_ptr> nondeterministiclinearEquationSolver = storm::utility::solver::getNondeterministicLinearEquationSolver(); - - // Perform the actual value iteration - // * loop until the step bound has been reached - // * in the loop: - // * perform value iteration using A_PSwG, v_PS and the vector b where b = (A * 1_G)|PS + A_PStoMS * v_MS - // and 1_G being the characteristic vector for all goal states. - // * perform one timed-step using v_MS := A_MSwG * v_MS + A_MStoPS * v_PS + (A * 1_G)|MS - std::vector markovianNonGoalValuesSwap(markovianNonGoalValues); - std::vector multiplicationResultScratchMemory(aProbabilistic.getRowCount()); - std::vector aProbabilisticScratchMemory(probabilisticNonGoalValues.size()); - for (uint_fast64_t currentStep = 0; currentStep < numberOfSteps; ++currentStep) { - // Start by (re-)computing bProbabilistic = bProbabilisticFixed + aProbabilisticToMarkovian * vMarkovian. - aProbabilisticToMarkovian.multiplyWithVector(markovianNonGoalValues, bProbabilistic); - storm::utility::vector::addVectorsInPlace(bProbabilistic, bProbabilisticFixed); - - // Now perform the inner value iteration for probabilistic states. 
- nondeterministiclinearEquationSolver->solveEquationSystem(min, aProbabilistic, probabilisticNonGoalValues, bProbabilistic, &multiplicationResultScratchMemory, &aProbabilisticScratchMemory); - - // (Re-)compute bMarkovian = bMarkovianFixed + aMarkovianToProbabilistic * vProbabilistic. - aMarkovianToProbabilistic.multiplyWithVector(probabilisticNonGoalValues, bMarkovian); - storm::utility::vector::addVectorsInPlace(bMarkovian, bMarkovianFixed); - aMarkovian.multiplyWithVector(markovianNonGoalValues, markovianNonGoalValuesSwap); - std::swap(markovianNonGoalValues, markovianNonGoalValuesSwap); - storm::utility::vector::addVectorsInPlace(markovianNonGoalValues, bMarkovian); - } - - // After the loop, perform one more step of the value iteration for PS states. - aProbabilisticToMarkovian.multiplyWithVector(markovianNonGoalValues, bProbabilistic); - storm::utility::vector::addVectorsInPlace(bProbabilistic, bProbabilisticFixed); - nondeterministiclinearEquationSolver->solveEquationSystem(min, aProbabilistic, probabilisticNonGoalValues, bProbabilistic, &multiplicationResultScratchMemory, &aProbabilisticScratchMemory); - } - - std::vector checkTimeBoundedEventually(bool min, storm::storage::BitVector const& goalStates, ValueType lowerBound, ValueType upperBound) const { - // Check whether the automaton is closed. - if (!this->getModel().isClosed()) { - throw storm::exceptions::InvalidArgumentException() << "Unable to compute time-bounded reachability on non-closed Markov automaton."; - } - - // Check whether the given bounds were valid. - if (lowerBound < storm::utility::constantZero() || upperBound < storm::utility::constantZero() || upperBound < lowerBound) { - throw storm::exceptions::InvalidArgumentException() << "Illegal interval ["; - } - - // Get some data fields for convenient access. 
- typename storm::storage::SparseMatrix const& transitionMatrix = this->getModel().getTransitionMatrix(); - std::vector const& exitRates = this->getModel().getExitRates(); - storm::storage::BitVector const& markovianStates = this->getModel().getMarkovianStates(); - - // (1) Compute the accuracy we need to achieve the required error bound. - ValueType maxExitRate = this->getModel().getMaximalExitRate(); - ValueType delta = (2 * storm::settings::Settings::getInstance()->getOptionByLongName("digiprecision").getArgument(0).getValueAsDouble()) / (upperBound * maxExitRate * maxExitRate); - - // (2) Compute the number of steps we need to make for the interval. - uint_fast64_t numberOfSteps = static_cast(std::ceil((upperBound - lowerBound) / delta)); - std::cout << "Performing " << numberOfSteps << " iterations (delta=" << delta << ") for interval [" << lowerBound << ", " << upperBound << "]." << std::endl; - - // (3) Compute the non-goal states and initialize two vectors - // * vProbabilistic holds the probability values of probabilistic non-goal states. - // * vMarkovian holds the probability values of Markovian non-goal states. - storm::storage::BitVector const& markovianNonGoalStates = markovianStates & ~goalStates; - storm::storage::BitVector const& probabilisticNonGoalStates = ~markovianStates & ~goalStates; - std::vector vProbabilistic(probabilisticNonGoalStates.getNumberOfSetBits()); - std::vector vMarkovian(markovianNonGoalStates.getNumberOfSetBits()); - - computeBoundedReachabilityProbabilities(min, transitionMatrix, exitRates, markovianStates, goalStates, markovianNonGoalStates, probabilisticNonGoalStates, vMarkovian, vProbabilistic, delta, numberOfSteps); - - // (4) If the lower bound of interval was non-zero, we need to take the current values as the starting values for a subsequent value iteration. 
- if (lowerBound != storm::utility::constantZero()) { - std::vector vAllProbabilistic((~markovianStates).getNumberOfSetBits()); - std::vector vAllMarkovian(markovianStates.getNumberOfSetBits()); - - // Create the starting value vectors for the next value iteration based on the results of the previous one. - storm::utility::vector::setVectorValues(vAllProbabilistic, goalStates % ~markovianStates, storm::utility::constantOne()); - storm::utility::vector::setVectorValues(vAllProbabilistic, ~goalStates % ~markovianStates, vProbabilistic); - storm::utility::vector::setVectorValues(vAllMarkovian, goalStates % markovianStates, storm::utility::constantOne()); - storm::utility::vector::setVectorValues(vAllMarkovian, ~goalStates % markovianStates, vMarkovian); - - // Compute the number of steps to reach the target interval. - numberOfSteps = static_cast(std::ceil(lowerBound / delta)); - std::cout << "Performing " << numberOfSteps << " iterations (delta=" << delta << ") for interval [0, " << lowerBound << "]." << std::endl; - - // Compute the bounded reachability for interval [0, b-a]. - computeBoundedReachabilityProbabilities(min, transitionMatrix, exitRates, markovianStates, storm::storage::BitVector(this->getModel().getNumberOfStates()), markovianStates, ~markovianStates, vAllMarkovian, vAllProbabilistic, delta, numberOfSteps); - - // Create the result vector out of vAllProbabilistic and vAllMarkovian and return it. - std::vector result(this->getModel().getNumberOfStates()); - storm::utility::vector::setVectorValues(result, ~markovianStates, vAllProbabilistic); - storm::utility::vector::setVectorValues(result, markovianStates, vAllMarkovian); - - return result; - } else { - // Create the result vector out of 1_G, vProbabilistic and vMarkovian and return it. 
- std::vector result(this->getModel().getNumberOfStates()); - storm::utility::vector::setVectorValues(result, goalStates, storm::utility::constantOne()); - storm::utility::vector::setVectorValues(result, probabilisticNonGoalStates, vProbabilistic); - storm::utility::vector::setVectorValues(result, markovianNonGoalStates, vMarkovian); - return result; - } - } - - std::vector checkLongRunAverage(bool min, storm::storage::BitVector const& goalStates) const { - // Check whether the automaton is closed. - if (!this->getModel().isClosed()) { - throw storm::exceptions::InvalidArgumentException() << "Unable to compute long-run average on non-closed Markov automaton."; - } - - // If there are no goal states, we avoid the computation and directly return zero. - if (goalStates.empty()) { - return std::vector(this->getModel().getNumberOfStates(), storm::utility::constantZero()); - } - - // Likewise, if all bits are set, we can avoid the computation and set. - if ((~goalStates).empty()) { - return std::vector(this->getModel().getNumberOfStates(), storm::utility::constantOne()); - } - - // Start by decomposing the Markov automaton into its MECs. - storm::storage::MaximalEndComponentDecomposition mecDecomposition(this->getModel()); - - // Get some data members for convenience. - typename storm::storage::SparseMatrix const& transitionMatrix = this->getModel().getTransitionMatrix(); - std::vector const& nondeterministicChoiceIndices = this->getModel().getNondeterministicChoiceIndices(); - - // Now start with compute the long-run average for all end components in isolation. - std::vector lraValuesForEndComponents; - - // While doing so, we already gather some information for the following steps. 
- std::vector stateToMecIndexMap(this->getModel().getNumberOfStates()); - storm::storage::BitVector statesInMecs(this->getModel().getNumberOfStates()); - - for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) { - storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex]; - - // Gather information for later use. - for (auto const& stateChoicesPair : mec) { - uint_fast64_t state = stateChoicesPair.first; - - statesInMecs.set(state); - stateToMecIndexMap[state] = currentMecIndex; - } - - // Compute the LRA value for the current MEC. - lraValuesForEndComponents.push_back(this->computeLraForMaximalEndComponent(min, transitionMatrix, nondeterministicChoiceIndices, this->getModel().getMarkovianStates(), this->getModel().getExitRates(), goalStates, mec, currentMecIndex)); - } - - // For fast transition rewriting, we build some auxiliary data structures. - storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs; - uint_fast64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits(); - uint_fast64_t lastStateNotInMecs = 0; - uint_fast64_t numberOfStatesNotInMecs = 0; - std::vector statesNotInMecsBeforeIndex; - statesNotInMecsBeforeIndex.reserve(this->getModel().getNumberOfStates()); - for (auto state : statesNotContainedInAnyMec) { - while (lastStateNotInMecs <= state) { - statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs); - ++lastStateNotInMecs; - } - ++numberOfStatesNotInMecs; - } - - // Finally, we are ready to create the SSP matrix and right-hand side of the SSP. - std::vector b; - typename storm::storage::SparseMatrixBuilder sspMatrixBuilder(0, 0, 0, true, numberOfStatesNotInMecs + mecDecomposition.size() + 1); - - // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications). 
- uint_fast64_t currentChoice = 0; - for (auto state : statesNotContainedInAnyMec) { - sspMatrixBuilder.newRowGroup(currentChoice); - - for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) { - std::vector auxiliaryStateToProbabilityMap(mecDecomposition.size()); - b.push_back(storm::utility::constantZero()); - - for (auto element : transitionMatrix.getRow(choice)) { - if (statesNotContainedInAnyMec.get(element.first)) { - // If the target state is not contained in an MEC, we can copy over the entry. - sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.first], element.second); - } else { - // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector - // so that we are able to write the cumulative probability to the MEC into the matrix. - auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.first]] += element.second; - } - } - - // Now insert all (cumulative) probability values that target an MEC. - for (uint_fast64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) { - if (auxiliaryStateToProbabilityMap[mecIndex] != 0) { - sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]); - } - } - } - } - - // Now we are ready to construct the choices for the auxiliary states. 
- for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) { - storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex]; - sspMatrixBuilder.newRowGroup(currentChoice); - - for (auto const& stateChoicesPair : mec) { - uint_fast64_t state = stateChoicesPair.first; - boost::container::flat_set const& choicesInMec = stateChoicesPair.second; - - for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) { - std::vector auxiliaryStateToProbabilityMap(mecDecomposition.size()); - - // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state. - if (choicesInMec.find(choice) == choicesInMec.end()) { - b.push_back(storm::utility::constantZero()); - - for (auto element : transitionMatrix.getRow(choice)) { - if (statesNotContainedInAnyMec.get(element.first)) { - // If the target state is not contained in an MEC, we can copy over the entry. - sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.first], element.second); - } else { - // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector - // so that we are able to write the cumulative probability to the MEC into the matrix. - auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.first]] += element.second; - } - } - - // Now insert all (cumulative) probability values that target an MEC. - for (uint_fast64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) { - if (auxiliaryStateToProbabilityMap[targetMecIndex] != 0) { - // If the target MEC is the same as the current one, instead of adding a transition, we need to add the weighted reward - // to the right-hand side vector of the SSP. 
- if (mecIndex == targetMecIndex) { - b.back() += auxiliaryStateToProbabilityMap[mecIndex] * lraValuesForEndComponents[mecIndex]; - } else { - // Otherwise, we add a transition to the auxiliary state that is associated with the target MEC. - sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]); - } - } - } - - ++currentChoice; - } - } - } - - // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC. - ++currentChoice; - b.push_back(lraValuesForEndComponents[mecIndex]); - } - - // Finalize the matrix and solve the corresponding system of equations. - storm::storage::SparseMatrix sspMatrix = sspMatrixBuilder.build(currentChoice + 1); - - std::vector x(numberOfStatesNotInMecs + mecDecomposition.size()); - nondeterministicLinearEquationSolver->solveEquationSystem(min, sspMatrix, x, b); - - // Prepare result vector. - std::vector result(this->getModel().getNumberOfStates()); - - // Set the values for states not contained in MECs. - storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, x); - - // Set the values for all states in MECs. - for (auto state : statesInMecs) { - result[state] = lraValuesForEndComponents[stateToMecIndexMap[state]]; - } - - return result; - } - - std::vector checkExpectedTime(bool min, storm::storage::BitVector const& goalStates) const { - // Reduce the problem of computing the expected time to computing expected rewards where the rewards - // for all probabilistic states are zero and the reward values of Markovian states is 1. - std::vector rewardValues(this->getModel().getNumberOfStates(), storm::utility::constantZero()); - storm::utility::vector::setVectorValues(rewardValues, this->getModel().getMarkovianStates(), storm::utility::constantOne()); - return this->computeExpectedRewards(min, goalStates, rewardValues); - } - - protected: - /*! 
- * Computes the long-run average value for the given maximal end component of a Markov automaton. - * - * @param min Sets whether the long-run average is to be minimized or maximized. - * @param transitionMatrix The transition matrix of the underlying Markov automaton. - * @param nondeterministicChoiceIndices A vector indicating at which row the choice of a given state begins. - * @param markovianStates A bit vector storing all markovian states. - * @param exitRates A vector with exit rates for all states. Exit rates of probabilistic states are assumed to be zero. - * @param goalStates A bit vector indicating which states are to be considered as goal states. - * @param mec The maximal end component to consider for computing the long-run average. - * @param mecIndex The index of the MEC. - * @return The long-run average of being in a goal state for the given MEC. - */ - static ValueType computeLraForMaximalEndComponent(bool min, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& nondeterministicChoiceIndices, storm::storage::BitVector const& markovianStates, std::vector const& exitRates, storm::storage::BitVector const& goalStates, storm::storage::MaximalEndComponent const& mec, uint_fast64_t mecIndex = 0) { - std::shared_ptr solver = storm::utility::solver::getLpSolver("LRA for MEC"); - solver->setModelSense(min ? storm::solver::LpSolver::MAXIMIZE : storm::solver::LpSolver::MINIMIZE); - - // First, we need to create the variables for the problem. - std::map stateToVariableIndexMap; - for (auto const& stateChoicesPair : mec) { - stateToVariableIndexMap[stateChoicesPair.first] = solver->createContinuousVariable("x" + std::to_string(stateChoicesPair.first), storm::solver::LpSolver::UNBOUNDED, 0, 0, 0); - } - uint_fast64_t lraValueVariableIndex = solver->createContinuousVariable("k", storm::solver::LpSolver::UNBOUNDED, 0, 0, 1); - solver->update(); - - // Now we encode the problem as constraints. 
- std::vector variables; - std::vector coefficients; - for (auto const& stateChoicesPair : mec) { - uint_fast64_t state = stateChoicesPair.first; - - // Now, based on the type of the state, create a suitable constraint. - if (markovianStates.get(state)) { - variables.clear(); - coefficients.clear(); - - variables.push_back(stateToVariableIndexMap.at(state)); - coefficients.push_back(1); - - for (auto element : transitionMatrix.getRow(nondeterministicChoiceIndices[state])) { - variables.push_back(stateToVariableIndexMap.at(element.first)); - coefficients.push_back(-element.second); - } - - variables.push_back(lraValueVariableIndex); - coefficients.push_back(storm::utility::constantOne() / exitRates[state]); - - solver->addConstraint("state" + std::to_string(state), variables, coefficients, min ? storm::solver::LpSolver::LESS_EQUAL : storm::solver::LpSolver::GREATER_EQUAL, goalStates.get(state) ? storm::utility::constantOne() / exitRates[state] : storm::utility::constantZero()); - } else { - // For probabilistic states, we want to add the constraint x_s <= sum P(s, a, s') * x_s' where a is the current action - // and the sum ranges over all states s'. - for (auto choice : stateChoicesPair.second) { - variables.clear(); - coefficients.clear(); - - variables.push_back(stateToVariableIndexMap.at(state)); - coefficients.push_back(1); - - for (auto element : transitionMatrix.getRow(choice)) { - variables.push_back(stateToVariableIndexMap.at(element.first)); - coefficients.push_back(-element.second); - } - - solver->addConstraint("state" + std::to_string(state), variables, coefficients, min ? storm::solver::LpSolver::LESS_EQUAL : storm::solver::LpSolver::GREATER_EQUAL, storm::utility::constantZero()); - } - } - } - - solver->optimize(); - return solver->getContinuousValue(lraValueVariableIndex); - } - - /*! - * Computes the expected reward that is gained from each state before entering any of the goal states. 
- * - * @param min Indicates whether minimal or maximal rewards are to be computed. - * @param goalStates The goal states that define until which point rewards are gained. - * @param stateRewards A vector that defines the reward gained in each state. For probabilistic states, this is an instantaneous reward - * that is fully gained and for Markovian states the actually gained reward is dependent on the expected time to stay in the - * state, i.e. it is gouverned by the exit rate of the state. - * @return A vector that contains the expected reward for each state of the model. - */ - std::vector computeExpectedRewards(bool min, storm::storage::BitVector const& goalStates, std::vector const& stateRewards) const { - // Check whether the automaton is closed. - if (!this->getModel().isClosed()) { - throw storm::exceptions::InvalidArgumentException() << "Unable to compute expected time on non-closed Markov automaton."; - } - - // First, we need to check which states have infinite expected time (by definition). - storm::storage::BitVector infinityStates; - if (min) { - // If we need to compute the minimum expected times, we have to set the values of those states to infinity that, under all schedulers, - // reach a bottom SCC without a goal state. - - // So we start by computing all bottom SCCs without goal states. - storm::storage::StronglyConnectedComponentDecomposition sccDecomposition(this->getModel(), ~goalStates, true, true); - - // Now form the union of all these SCCs. - storm::storage::BitVector unionOfNonGoalBSccs(this->getModel().getNumberOfStates()); - for (auto const& scc : sccDecomposition) { - for (auto state : scc) { - unionOfNonGoalBSccs.set(state); - } - } - - // Finally, if this union is non-empty, compute the states such that all schedulers reach some state of the union. 
- if (!unionOfNonGoalBSccs.empty()) { - infinityStates = storm::utility::graph::performProbGreater0A(this->getModel().getTransitionMatrix(), this->getModel().getNondeterministicChoiceIndices(), this->getModel().getBackwardTransitions(), storm::storage::BitVector(this->getModel().getNumberOfStates(), true), unionOfNonGoalBSccs); - } else { - // Otherwise, we have no infinity states. - infinityStates = storm::storage::BitVector(this->getModel().getNumberOfStates()); - } - } else { - // If we maximize the property, the expected time of a state is infinite, if an end-component without any goal state is reachable. - - // So we start by computing all MECs that have no goal state. - storm::storage::MaximalEndComponentDecomposition mecDecomposition(this->getModel(), ~goalStates); - - // Now we form the union of all states in these end components. - storm::storage::BitVector unionOfNonGoalMaximalEndComponents(this->getModel().getNumberOfStates()); - for (auto const& mec : mecDecomposition) { - for (auto const& stateActionPair : mec) { - unionOfNonGoalMaximalEndComponents.set(stateActionPair.first); - } - } - - if (!unionOfNonGoalMaximalEndComponents.empty()) { - // Now we need to check for which states there exists a scheduler that reaches one of the previously computed states. - infinityStates = storm::utility::graph::performProbGreater0E(this->getModel().getTransitionMatrix(), this->getModel().getNondeterministicChoiceIndices(), this->getModel().getBackwardTransitions(), storm::storage::BitVector(this->getModel().getNumberOfStates(), true), unionOfNonGoalMaximalEndComponents); - } else { - // Otherwise, we have no infinity states. - infinityStates = storm::storage::BitVector(this->getModel().getNumberOfStates()); - } - } - - // Now we identify the states for which values need to be computed. - storm::storage::BitVector maybeStates = ~(goalStates | infinityStates); - - // Then, we can eliminate the rows and columns for all states whose values are already known to be 0. 
- std::vector x(maybeStates.getNumberOfSetBits()); - storm::storage::SparseMatrix submatrix = this->getModel().getTransitionMatrix().getSubmatrix(true, maybeStates, maybeStates); - - // Now prepare the expected reward values for all states so they can be used as the right-hand side of the equation system. - std::vector rewardValues(stateRewards); - for (auto state : this->getModel().getMarkovianStates()) { - rewardValues[state] = rewardValues[state] / this->getModel().getExitRates()[state]; - } - - // Finally, prepare the actual right-hand side. - std::vector b(submatrix.getRowCount()); - storm::utility::vector::selectVectorValuesRepeatedly(b, maybeStates, this->getModel().getNondeterministicChoiceIndices(), rewardValues); - - // Solve the corresponding system of equations. - std::shared_ptr> nondeterministiclinearEquationSolver = storm::utility::solver::getNondeterministicLinearEquationSolver(); - nondeterministiclinearEquationSolver->solveEquationSystem(min, submatrix, x, b); - - // Create resulting vector. - std::vector result(this->getModel().getNumberOfStates()); - - // Set values of resulting vector according to previous result and return the result. - storm::utility::vector::setVectorValues(result, maybeStates, x); - storm::utility::vector::setVectorValues(result, goalStates, storm::utility::constantZero()); - storm::utility::vector::setVectorValues(result, infinityStates, storm::utility::constantInfinity()); - - return result; - } - - /*! - * A solver that is used for solving systems of linear equations that are the result of nondeterministic choices. 
- */ - std::shared_ptr> nondeterministicLinearEquationSolver; - }; - } - } -} +} // namespace csl +} // namespace modelchecker +} // namespace storm #endif /* STORM_MODELCHECKER_CSL_SPARSEMARKOVAUTOMATONCSLMODELCHECKER_H_ */ diff --git a/src/parser/CslParser.cpp b/src/parser/CslParser.cpp index efca04e02..52c4012aa 100644 --- a/src/parser/CslParser.cpp +++ b/src/parser/CslParser.cpp @@ -9,6 +9,11 @@ #include "src/utility/OsDetection.h" #include "src/utility/constants.h" +// The action class headers. +#include "src/formula/Actions/AbstractAction.h" +#include "src/formula/Actions/MinMaxAction.h" +#include "src/formula/Actions/RangeAction.h" + // If the parser fails due to ill-formed data, this exception is thrown. #include "src/exceptions/WrongFormatException.h" @@ -38,7 +43,7 @@ namespace storm { namespace parser { template -struct CslGrammar : qi::grammar*(), Skipper > { +struct CslGrammar : qi::grammar*(), Skipper > { CslGrammar() : CslGrammar::base_type(start) { //This block contains helper rules that may be used several times freeIdentifierName = qi::lexeme[qi::alpha >> *(qi::alnum | qi::char_('_'))]; @@ -81,16 +86,6 @@ struct CslGrammar : qi::grammar> qi::lit("=") >> qi::lit("?") >> qi::lit("[") >> pathFormula >> qi::lit("]"))[qi::_val = - phoenix::new_ >(qi::_1)]; - probabilisticNoBoundOperator.name("no bound operator"); - steadyStateNoBoundOperator = (qi::lit("S") >> qi::lit("=") >> qi::lit("?") >> qi::lit("[") >> stateFormula >> qi::lit("]"))[qi::_val = - phoenix::new_ >(qi::_1)]; - steadyStateNoBoundOperator.name("no bound operator"); - //This block defines rules for parsing probabilistic path formulas pathFormula = (timeBoundedEventually | eventually | globally | next | timeBoundedUntil | until); pathFormula.name("path formula"); @@ -125,16 +120,61 @@ struct CslGrammar : qi::grammar>(phoenix::bind(&storm::property::csl::AbstractStateFormula::clone, phoenix::bind(&std::shared_ptr>::get, qi::_a)), qi::_2)]; until.name("path formula (for probabilistic 
operator)"); - formula = (noBoundOperator | stateFormula); + formula = (pathFormula | stateFormula); formula.name("CSL formula"); - start = (((formula) > (comment | qi::eps))[qi::_val = qi::_1] | - comment - ) > qi::eoi; - start.name("CSL formula"); + //This block defines rules for parsing formulas with noBoundOperators + noBoundOperator = (probabilisticNoBoundOperator | steadyStateNoBoundOperator); + noBoundOperator.name("no bound operator"); + probabilisticNoBoundOperator = + (qi::lit("P") >> qi::lit("min") >> qi::lit("=") >> qi::lit("?") >> qi::lit("[") >> pathFormula >> qi::lit("]"))[qi::_val = + phoenix::new_>(qi::_1, true)] | + (qi::lit("P") >> qi::lit("max") >> qi::lit("=") >> qi::lit("?") >> qi::lit("[") >> pathFormula >> qi::lit("]"))[qi::_val = + phoenix::new_>(qi::_1, false)] | + (qi::lit("P") >> qi::lit("=") >> qi::lit("?") >> qi::lit("[") >> pathFormula >> qi::lit("]"))[qi::_val = + phoenix::new_>(qi::_1)]; + probabilisticNoBoundOperator.name("no bound operator"); + steadyStateNoBoundOperator = (qi::lit("S") >> qi::lit("=") >> qi::lit("?") >> qi::lit("[") >> stateFormula >> qi::lit("]"))[qi::_val = + phoenix::new_>(qi::_1)]; + steadyStateNoBoundOperator.name("no bound operator"); + + minMaxAction = qi::lit("minmax") >> qi::lit(",") >> ( + qi::lit("min")[qi::_val = + phoenix::new_>(true)] | + qi::lit("max")[qi::_val = + phoenix::new_>(false)]); + minMaxAction.name("minmax action for the formula filter"); + + rangeAction = (qi::lit("range") >> qi::lit(",") >> qi::uint_ >> qi::lit(",") >> qi::uint_)[qi::_val = + phoenix::new_>(qi::_1, qi::_2)]; + rangeAction.name("range action for the formula filter"); + + abstractAction = (rangeAction | minMaxAction) >> (qi::eps | qi::lit(",")); + abstractAction.name("filter action"); + + filter = (qi::lit("filter") >> qi::lit("[") >> +abstractAction >> qi::lit("]") >> qi::lit("(") >> formula >> qi::lit(")"))[qi::_val = + phoenix::new_>(qi::_2, qi::_1)] | + (formula)[qi::_val = + phoenix::new_>(qi::_1)] | + 
(noBoundOperator)[qi::_val = + qi::_1]; + filter.name("CSL formula filter"); + + start = (((filter) > (comment | qi::eps))[qi::_val = qi::_1] | comment ) > qi::eoi; + start.name("CSL formula filter"); } - qi::rule*(), Skipper> start; + qi::rule*(), Skipper> start; + qi::rule*(), Skipper> filter; + + qi::rule*(), Skipper> noBoundOperator; + qi::rule*(), Skipper> probabilisticNoBoundOperator; + qi::rule*(), Skipper> steadyStateNoBoundOperator; + + qi::rule*(), Skipper> abstractAction; + qi::rule*(), Skipper> rangeAction; + qi::rule*(), Skipper> minMaxAction; + qi::rule*(), Skipper> formula; qi::rule*(), Skipper> comment; @@ -148,10 +188,6 @@ struct CslGrammar : qi::grammar*(), Skipper> probabilisticBoundOperator; qi::rule*(), Skipper> steadyStateBoundOperator; - qi::rule*(), Skipper> noBoundOperator; - qi::rule*(), Skipper> probabilisticNoBoundOperator; - qi::rule*(), Skipper> steadyStateNoBoundOperator; - qi::rule*(), Skipper> pathFormula; qi::rule*(), Skipper> timeBoundedEventually; qi::rule*(), Skipper> eventually; @@ -166,7 +202,7 @@ struct CslGrammar : qi::grammar* CslParser(std::string formulaString) { +storm::property::csl::CslFilter* CslParser(std::string formulaString) { // Prepare iterators to input. BaseIteratorType stringIteratorBegin = formulaString.begin(); BaseIteratorType stringIteratorEnd = formulaString.end(); @@ -175,7 +211,7 @@ storm::property::csl::AbstractCslFormula* CslParser(std::string formulaS // Prepare resulting intermediate representation of input.
- storm::property::csl::AbstractCslFormula* result_pointer = nullptr; + storm::property::csl::CslFilter* result_pointer = nullptr; CslGrammar grammar; diff --git a/src/parser/CslParser.h b/src/parser/CslParser.h index 2ef4c64e3..179cde297 100644 --- a/src/parser/CslParser.h +++ b/src/parser/CslParser.h @@ -9,6 +9,7 @@ #define STORM_PARSER_CSLPARSER_H_ #include "src/formula/Csl.h" +#include "src/formula/Csl/CslFilter.h" #include namespace storm { @@ -23,7 +24,7 @@ namespace parser { * @param formulaString The string representation of the formula * @throw wrongFormatException If the input could not be parsed successfully */ -storm::property::csl::AbstractCslFormula* CslParser(std::string formulaString); +storm::property::csl::CslFilter* CslParser(std::string formulaString); /*! * Struct for the CSL grammar, that Boost::Spirit uses to parse the formulas.