#include "src/modelchecker/abstraction/GameBasedMdpModelChecker.h" #include "src/modelchecker/results/SymbolicQuantitativeCheckResult.h" #include "src/models/symbolic/StandardRewardModel.h" #include "src/storage/expressions/ExpressionManager.h" #include "src/storage/dd/DdManager.h" #include "src/abstraction/prism/PrismMenuGameAbstractor.h" #include "src/logic/FragmentSpecification.h" #include "src/utility/macros.h" #include "src/exceptions/NotSupportedException.h" #include "src/exceptions/InvalidPropertyException.h" #include "src/modelchecker/results/CheckResult.h" namespace storm { namespace modelchecker { namespace detail { template GameProb01Result::GameProb01Result(storm::utility::graph::GameProb01Result const& prob0Min, storm::utility::graph::GameProb01Result const& prob1Min, storm::utility::graph::GameProb01Result const& prob0Max, storm::utility::graph::GameProb01Result const& prob1Max) : min(std::make_pair(prob0Min, prob1Min)), max(std::make_pair(prob0Max, prob1Max)) { // Intentionally left empty. } } template GameBasedMdpModelChecker::GameBasedMdpModelChecker(storm::prism::Program const& program, std::shared_ptr const& smtSolverFactory) : originalProgram(program), smtSolverFactory(smtSolverFactory) { STORM_LOG_THROW(program.getModelType() == storm::prism::Program::ModelType::DTMC || program.getModelType() == storm::prism::Program::ModelType::MDP, storm::exceptions::NotSupportedException, "Currently only DTMCs/MDPs are supported by the game-based model checker."); // Start by preparing the program. That is, we flatten the modules if there is more than one. if (originalProgram.getNumberOfModules() > 1) { preprocessedProgram = originalProgram.flattenModules(this->smtSolverFactory); } else { preprocessedProgram = originalProgram; } } template GameBasedMdpModelChecker::~GameBasedMdpModelChecker() { // Intentionally left empty. 
        }
        
        template<storm::dd::DdType Type, typename ValueType>
        bool GameBasedMdpModelChecker<Type, ValueType>::canHandle(CheckTask<storm::logic::Formula> const& checkTask) const {
            storm::logic::Formula const& formula = checkTask.getFormula();
            storm::logic::FragmentSpecification fragment = storm::logic::reachability();
            return formula.isInFragment(fragment) && checkTask.isOnlyInitialStatesRelevantSet();
        }
        
        template<storm::dd::DdType Type, typename ValueType>
        std::unique_ptr<CheckResult> GameBasedMdpModelChecker<Type, ValueType>::computeUntilProbabilities(CheckTask<storm::logic::UntilFormula> const& checkTask) {
            storm::logic::UntilFormula const& pathFormula = checkTask.getFormula();
            return performGameBasedAbstractionRefinement(CheckTask<storm::logic::Formula>(pathFormula), getExpression(pathFormula.getLeftSubformula()), getExpression(pathFormula.getRightSubformula()));
        }
        
        template<storm::dd::DdType Type, typename ValueType>
        std::unique_ptr<CheckResult> GameBasedMdpModelChecker<Type, ValueType>::computeReachabilityProbabilities(CheckTask<storm::logic::EventuallyFormula> const& checkTask) {
            storm::logic::EventuallyFormula const& pathFormula = checkTask.getFormula();
            return performGameBasedAbstractionRefinement(checkTask.substituteFormula<storm::logic::Formula>(pathFormula), originalProgram.getManager().boolean(true), getExpression(pathFormula.getSubformula()));
        }
        
        // Checks the given value against the threshold of the bound and returns whether the bound is satisfied.
        template<typename ValueType>
        bool getResultConsideringBound(ValueType const& value, storm::logic::Bound<ValueType> const& bound) {
            if (storm::logic::isLowerBound(bound.comparisonType)) {
                if (storm::logic::isStrict(bound.comparisonType)) {
                    return value > bound.threshold;
                } else {
                    return value >= bound.threshold;
                }
            } else {
                if (storm::logic::isStrict(bound.comparisonType)) {
                    return value < bound.threshold;
                } else {
                    return value <= bound.threshold;
                }
            }
        }
        
        // Turns the outcome of a qualitative check (probability 0 or 1 for all initial states) into a check result.
        template<storm::dd::DdType Type, typename ValueType>
        std::unique_ptr<CheckResult> getResultAfterQualitativeCheck(CheckTask<storm::logic::Formula> const& checkTask, storm::dd::DdManager<Type> const& ddManager, storm::dd::Bdd<Type> const& reachableStates, storm::dd::Bdd<Type> const& initialStates, bool prob0) {
            if (checkTask.isBoundSet()) {
                bool result = getResultConsideringBound(prob0 ? storm::utility::zero<ValueType>() : storm::utility::one<ValueType>(), checkTask.getBound());
                return std::make_unique<SymbolicQualitativeCheckResult<Type>>(reachableStates, initialStates, result ? initialStates : ddManager.getBddZero());
            } else {
                return std::make_unique<SymbolicQuantitativeCheckResult<Type>>(reachableStates, initialStates, prob0 ? ddManager.template getAddZero<ValueType>() : initialStates.template toAdd<ValueType>());
            }
        }
        
        template<storm::dd::DdType Type, typename ValueType>
        std::unique_ptr<CheckResult> GameBasedMdpModelChecker<Type, ValueType>::performGameBasedAbstractionRefinement(CheckTask<storm::logic::Formula> const& checkTask, storm::expressions::Expression const& constraintExpression, storm::expressions::Expression const& targetStateExpression) {
            STORM_LOG_THROW(checkTask.isOnlyInitialStatesRelevantSet(), storm::exceptions::InvalidPropertyException, "The game-based abstraction refinement model checker can only compute the result for the initial states.");
            
            // Optimization: do not compute both bounds if not necessary (e.g. if a bound is given and already exceeded).
            
            // Set up the initial predicates.
            std::vector<storm::expressions::Expression> initialPredicates;
            initialPredicates.push_back(targetStateExpression);
            if (!constraintExpression.isTrue() && !constraintExpression.isFalse()) {
                initialPredicates.push_back(constraintExpression);
            }
            
            // Derive the optimization direction for player 1 (assuming menu-game abstraction).
            storm::OptimizationDirection player1Direction = checkTask.isOptimizationDirectionSet() ? checkTask.getOptimizationDirection() : storm::OptimizationDirection::Maximize;
            
            storm::abstraction::prism::PrismMenuGameAbstractor<Type, ValueType> abstractor(preprocessedProgram, initialPredicates, smtSolverFactory);
            
            // 1. build the initial abstraction based on the constraint expression (if not 'true') and the target state expression.
            storm::abstraction::MenuGame<Type, ValueType> game = abstractor.abstract();
            STORM_LOG_DEBUG("Initial abstraction has " << game.getNumberOfStates() << " (player 1) states and " << game.getNumberOfTransitions() << " transitions.");
            
            // 2. compute all states with probability 0/1 wrt. the two different player 2 goals (min/max).
            detail::GameProb01Result<Type> prob01 = computeProb01States(player1Direction, game, constraintExpression, targetStateExpression);
            
            // 3. compute the states for which we already know the result and the states for which there is more work to be done.
            storm::dd::Bdd<Type> maybeMin = !(prob01.min.first.states || prob01.min.second.states) && game.getReachableStates();
            storm::dd::Bdd<Type> maybeMax = !(prob01.max.first.states || prob01.max.second.states) && game.getReachableStates();
            
            // 4. if the initial states are not maybe states, we can either return the result or need to refine at this point.
            storm::dd::Bdd<Type> initialMaybeStates = game.getInitialStates() && (maybeMin || maybeMax);
            if (initialMaybeStates.isZero()) {
                // In this case, we know the result for the initial states for both player 2 minimizing and maximizing.
                
                // If the result is 0 independent of the player 2 strategy, then we know the final result and can return it.
                storm::dd::Bdd<Type> tmp = game.getInitialStates() && prob01.min.first.states && prob01.max.first.states;
                if (tmp == game.getInitialStates()) {
                    return getResultAfterQualitativeCheck<Type, ValueType>(checkTask, game.getManager(), game.getReachableStates(), game.getInitialStates(), true);
                }
                
                // If the result is 1 independent of the player 2 strategy, then we know the final result and can return it.
                tmp = game.getInitialStates() && prob01.min.second.states && prob01.max.second.states;
                if (tmp == game.getInitialStates()) {
                    return getResultAfterQualitativeCheck<Type, ValueType>(checkTask, game.getManager(), game.getReachableStates(), game.getInitialStates(), false);
                }
                
                // If we get here, the initial states were all identified as prob0/1 states, but the value (0 or 1)
                // depends on whether player 2 is minimizing or maximizing. Therefore, we need to find a place to refine.
            }
            
            // std::unique_ptr<storm::utility::solver::SymbolicGameSolverFactory<Type, ValueType>> gameSolverFactory = std::make_unique<storm::utility::solver::SymbolicGameSolverFactory<Type, ValueType>>();
            // gameSolverFactory->create(game.getTransitionMatrix(), game.getReachableStates());
            // storm::dd::Add<Type, ValueType> const& A, storm::dd::Bdd<Type> const& allRows, std::set<storm::expressions::Variable> const& rowMetaVariables, std::set<storm::expressions::Variable> const& columnMetaVariables, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs, std::set<storm::expressions::Variable> const& player1Variables, std::set<storm::expressions::Variable> const& player2Variables
            
            // 5. if the bounds suffice to complete the checkTask, return the result now.
            
            // 6. if the bounds do not suffice, refine the abstraction.
            
            return nullptr;
        }
        
        template<storm::dd::DdType Type, typename ValueType>
        detail::GameProb01Result<Type> GameBasedMdpModelChecker<Type, ValueType>::computeProb01States(storm::OptimizationDirection player1Direction, storm::abstraction::MenuGame<Type, ValueType> const& game, storm::expressions::Expression const& constraintExpression, storm::expressions::Expression const& targetStateExpression) {
            storm::dd::Bdd<Type> transitionMatrixBdd = game.getTransitionMatrix().toBdd();
            storm::dd::Bdd<Type> bottomStatesBdd = game.getBottomStates();
            
            storm::dd::Bdd<Type> targetStates = game.getStates(targetStateExpression);
            if (player1Direction == storm::OptimizationDirection::Minimize) {
                targetStates |= bottomStatesBdd;
            }
            
            // Start by computing the states with probability 0/1 when player 2 minimizes.
            storm::utility::graph::GameProb01Result<Type> prob0Min = storm::utility::graph::performProb0(game, transitionMatrixBdd, game.getStates(constraintExpression), targetStates, player1Direction, storm::OptimizationDirection::Minimize, true);
            storm::utility::graph::GameProb01Result<Type> prob1Min = storm::utility::graph::performProb1(game, transitionMatrixBdd, game.getStates(constraintExpression), targetStates, player1Direction, storm::OptimizationDirection::Minimize, false);
            
            // Now compute the states with probability 0/1 when player 2 maximizes.
            storm::utility::graph::GameProb01Result<Type> prob0Max = storm::utility::graph::performProb0(game, transitionMatrixBdd, game.getStates(constraintExpression), targetStates, player1Direction, storm::OptimizationDirection::Maximize, false);
            storm::utility::graph::GameProb01Result<Type> prob1Max = storm::utility::graph::performProb1(game, transitionMatrixBdd, game.getStates(constraintExpression), targetStates, player1Direction, storm::OptimizationDirection::Maximize, true);
            
            STORM_LOG_DEBUG("Min: " << prob0Min.states.getNonZeroCount() << " no states, " << prob1Min.states.getNonZeroCount() << " yes states.");
            STORM_LOG_DEBUG("Max: " << prob0Max.states.getNonZeroCount() << " no states, " << prob1Max.states.getNonZeroCount() << " yes states.");
            
            return detail::GameProb01Result<Type>(prob0Min, prob1Min, prob0Max, prob1Max);
        }
        
        template<storm::dd::DdType Type, typename ValueType>
        storm::expressions::Expression GameBasedMdpModelChecker<Type, ValueType>::getExpression(storm::logic::Formula const& formula) {
            STORM_LOG_THROW(formula.isAtomicExpressionFormula() || formula.isAtomicLabelFormula(), storm::exceptions::InvalidPropertyException, "The target states have to be given as a label or an expression.");
            storm::expressions::Expression result;
            if (formula.isAtomicLabelFormula()) {
                result = preprocessedProgram.getLabelExpression(formula.asAtomicLabelFormula().getLabel());
            } else {
                result = formula.asAtomicExpressionFormula().getExpression();
            }
            return result;
        }
        
        template class GameBasedMdpModelChecker<storm::dd::DdType::CUDD, double>;
    }
}