#include "storm/modelchecker/abstraction/AbstractAbstractionRefinementModelChecker.h"

#include <chrono>
#include <memory>
#include <utility>

#include "storm/storage/dd/DdManager.h"
#include "storm/storage/dd/Add.h"
#include "storm/storage/dd/Bdd.h"

#include "storm/logic/Formulas.h"
#include "storm/logic/FragmentSpecification.h"

#include "storm/models/symbolic/Dtmc.h"
#include "storm/models/symbolic/Mdp.h"
#include "storm/models/symbolic/StochasticTwoPlayerGame.h"
#include "storm/models/symbolic/StandardRewardModel.h"

#include "storm/modelchecker/CheckTask.h"
#include "storm/modelchecker/results/CheckResult.h"
#include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
#include "storm/modelchecker/results/SymbolicQuantitativeCheckResult.h"
#include "storm/modelchecker/prctl/helper/SymbolicDtmcPrctlHelper.h"
#include "storm/modelchecker/prctl/helper/SymbolicMdpPrctlHelper.h"

#include "storm/solver/SymbolicGameSolver.h"

#include "storm/abstraction/StateSet.h"
#include "storm/abstraction/SymbolicStateSet.h"
#include "storm/abstraction/QualitativeResultMinMax.h"
#include "storm/abstraction/SymbolicQualitativeMdpResult.h"
#include "storm/abstraction/SymbolicQualitativeMdpResultMinMax.h"
#include "storm/abstraction/SymbolicQualitativeGameResultMinMax.h"

#include "storm/settings/SettingsManager.h"
#include "storm/settings/modules/AbstractionSettings.h"

#include "storm/utility/graph.h"
#include "storm/utility/constants.h"
#include "storm/utility/macros.h"

#include "storm/exceptions/NotSupportedException.h"
#include "storm/exceptions/InvalidPropertyException.h"

namespace storm {
    namespace modelchecker {

        /// Construct the checker and read the reuse policy (qualitative / quantitative
        /// results carried over between refinement iterations) from the settings module.
        template<typename ModelType>
        AbstractAbstractionRefinementModelChecker<ModelType>::AbstractAbstractionRefinementModelChecker() : AbstractModelChecker<ModelType>(), checkTask(nullptr) {
            storm::settings::modules::AbstractionSettings::ReuseMode reuseMode = storm::settings::getModule<storm::settings::modules::AbstractionSettings>().getReuseMode();
            reuseQualitativeResults = reuseMode == storm::settings::modules::AbstractionSettings::ReuseMode::All || reuseMode == storm::settings::modules::AbstractionSettings::ReuseMode::Qualitative;
            reuseQuantitativeResults = reuseMode == storm::settings::modules::AbstractionSettings::ReuseMode::All || reuseMode == storm::settings::modules::AbstractionSettings::ReuseMode::Quantitative;
        }

        template<typename ModelType>
        AbstractAbstractionRefinementModelChecker<ModelType>::~AbstractAbstractionRefinementModelChecker() {
            // Intentionally left empty.
        }

        /// A task can be handled iff it lies in the reachability fragment (extended by
        /// reachability rewards when the concrete checker supports them) and only the
        /// initial states are relevant.
        template<typename ModelType>
        bool AbstractAbstractionRefinementModelChecker<ModelType>::canHandle(CheckTask<storm::logic::Formula, ValueType> const& checkTask) const {
            storm::logic::Formula const& formula = checkTask.getFormula();
            bool enableRewards = this->supportsReachabilityRewards();
            storm::logic::FragmentSpecification fragment = storm::logic::reachability().setRewardOperatorsAllowed(enableRewards).setReachabilityRewardFormulasAllowed(enableRewards);
            return formula.isInFragment(fragment) && checkTask.isOnlyInitialStatesRelevantSet();
        }

        /// Default: reachability rewards are unsupported; subclasses opt in by overriding.
        template<typename ModelType>
        bool AbstractAbstractionRefinementModelChecker<ModelType>::supportsReachabilityRewards() const {
            return false;
        }

        template<typename ModelType>
        std::unique_ptr<CheckResult> AbstractAbstractionRefinementModelChecker<ModelType>::computeUntilProbabilities(Environment const& env, CheckTask<storm::logic::UntilFormula, ValueType> const& checkTask) {
            this->setCheckTask(checkTask.template substituteFormula<storm::logic::Formula>(checkTask.getFormula()));
            return performAbstractionRefinement(env);
        }

        template<typename ModelType>
        std::unique_ptr<CheckResult> AbstractAbstractionRefinementModelChecker<ModelType>::computeReachabilityProbabilities(Environment const& env, CheckTask<storm::logic::EventuallyFormula, ValueType> const& checkTask) {
            // Probability queries are always admissible; no reward-support guard here.
            this->setCheckTask(checkTask.template substituteFormula<storm::logic::Formula>(checkTask.getFormula()));
            return performAbstractionRefinement(env);
        }

        template<typename ModelType>
        std::unique_ptr<CheckResult> AbstractAbstractionRefinementModelChecker<ModelType>::computeReachabilityRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::EventuallyFormula, ValueType> const& checkTask) {
            // BUGFIX: this guard was previously (incorrectly) placed in
            // computeReachabilityProbabilities, which made every probability query throw
            // for checkers without reward support while letting unsupported reward queries
            // through. It belongs here, mirroring the fragment restriction in canHandle().
            STORM_LOG_THROW(this->supportsReachabilityRewards(), storm::exceptions::NotSupportedException, "Reachability rewards are not supported by this abstraction-refinement model checker.");
            this->setCheckTask(checkTask.template substituteFormula<storm::logic::Formula>(checkTask.getFormula()));
            return performAbstractionRefinement(env);
        }

        /// Store a copy of the current check task; all later phases read it through checkTask.
        template<typename ModelType>
        void AbstractAbstractionRefinementModelChecker<ModelType>::setCheckTask(CheckTask<storm::logic::Formula, ValueType> const& checkTask) {
            this->checkTask = std::make_unique<CheckTask<storm::logic::Formula, ValueType>>(checkTask);
        }

        template<typename ModelType>
        CheckTask<storm::logic::Formula, typename AbstractAbstractionRefinementModelChecker<ModelType>::ValueType> const& AbstractAbstractionRefinementModelChecker<ModelType>::getCheckTask() const {
            return *checkTask;
        }

        template<typename ModelType>
        bool AbstractAbstractionRefinementModelChecker<ModelType>::getReuseQualitativeResults() const {
            return reuseQualitativeResults;
        }

        template<typename ModelType>
        bool AbstractAbstractionRefinementModelChecker<ModelType>::getReuseQuantitativeResults() const {
            return reuseQuantitativeResults;
        }

        /// Main CEGAR loop: repeatedly (1) obtain the current abstract model, (2) compute
        /// lower/upper bounds on it, (3) either derive a final result from the bounds or
        /// refine the abstraction and iterate.
        template<typename ModelType>
        std::unique_ptr<CheckResult> AbstractAbstractionRefinementModelChecker<ModelType>::performAbstractionRefinement(Environment const& env) {
            STORM_LOG_THROW(checkTask->isOnlyInitialStatesRelevantSet(), storm::exceptions::InvalidPropertyException, "The abstraction-refinement model checkers can only compute the result for the initial states.");

            // Notify the underlying implementation that the abstraction-refinement process is being started.
            this->initializeAbstractionRefinement();

            uint64_t iterations = 0;
            std::unique_ptr<CheckResult> result;
            auto start = std::chrono::high_resolution_clock::now();
            while (!result) {
                ++iterations;

                // Obtain the abstract model.
                auto abstractionStart = std::chrono::high_resolution_clock::now();
                std::shared_ptr<storm::models::Model<ValueType>> abstractModel = this->getAbstractModel();
                auto abstractionEnd = std::chrono::high_resolution_clock::now();
                STORM_LOG_TRACE("Model in iteration " << iterations << " has " << abstractModel->getNumberOfStates() << " states and " << abstractModel->getNumberOfTransitions() << " transitions (retrieved in " << std::chrono::duration_cast<std::chrono::milliseconds>(abstractionEnd - abstractionStart).count() << "ms).");

                // Obtain lower and upper bounds from the abstract model.
                std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> bounds = computeBounds(env, *abstractModel);

                // Try to derive the final result from the obtained bounds.
                if (bounds.first || bounds.second) {
                    result = tryToObtainResultFromBounds(*abstractModel, bounds);
                }
                if (!result) {
                    if (bounds.first && bounds.second) {
                        printBoundsInformation(bounds);
                    }

                    auto refinementStart = std::chrono::high_resolution_clock::now();
                    this->refineAbstractModel();
                    auto refinementEnd = std::chrono::high_resolution_clock::now();
                    STORM_LOG_TRACE("Refinement in iteration " << iterations << " completed in " << std::chrono::duration_cast<std::chrono::milliseconds>(refinementEnd - refinementStart).count() << "ms.");
                }
            }
            auto end = std::chrono::high_resolution_clock::now();
            STORM_LOG_TRACE("Completed abstraction-refinement (" << this->getName() << ") in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms.");

            return result;
        }

        /// Compute (lower, upper) bound results for the current abstract model.
        /// Phase (1) solves the qualitative (prob0/prob1) part, phase (2) the quantitative
        /// part; phase (2) is skipped when the qualitative answers already differ on some
        /// initial state (refinement is needed anyway) or already decide the property.
        template<typename ModelType>
        std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> AbstractAbstractionRefinementModelChecker<ModelType>::computeBounds(Environment const& env, storm::models::Model<ValueType> const& abstractModel) {
            // We go through two phases. In phase (1) we are solving the qualitative part and in phase (2) the quantitative part.
            std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> result;

            // Preparation: determine the constraint states and the target states of the reachability objective.
            std::pair<std::unique_ptr<storm::abstraction::StateSet>, std::unique_ptr<storm::abstraction::StateSet>> constraintAndTargetStates = getConstraintAndTargetStates(abstractModel);

            // Phase (1): solve qualitatively.
            lastQualitativeResults = computeQualitativeResult(env, abstractModel, *constraintAndTargetStates.first, *constraintAndTargetStates.second);

            // Check whether the answer can be given after the qualitative solution.
            result.first = checkForResultAfterQualitativeCheck(abstractModel);
            if (result.first) {
                return result;
            }

            // Check whether we should skip the quantitative solution (for example if there are initial states for which
            // the value is already known to be different at this point.
            bool doSkipQuantitativeSolution = skipQuantitativeSolution(abstractModel, *lastQualitativeResults);
            STORM_LOG_TRACE("" << (doSkipQuantitativeSolution ? "Skipping" : "Not skipping") << " quantitative solution.");

            // Phase (2): solve quantitatively.
            if (!doSkipQuantitativeSolution) {
                lastBounds = computeQuantitativeResult(env, abstractModel, *constraintAndTargetStates.first, *constraintAndTargetStates.second, *lastQualitativeResults);
                STORM_LOG_ASSERT(lastBounds.first && lastBounds.second, "Expected both bounds.");

                result = std::make_pair(lastBounds.first->clone(), lastBounds.second->clone());
                filterInitialStates(abstractModel, result);

                // Check whether the answer can be given after the quantitative solution.
                if (checkForResultAfterQuantitativeCheck(abstractModel, true, result.first->asQuantitativeCheckResult<ValueType>())) {
                    result.second = nullptr;
                } else if (checkForResultAfterQuantitativeCheck(abstractModel, false, result.second->asQuantitativeCheckResult<ValueType>())) {
                    result.first = nullptr;
                }
            }

            return result;
        }

        /// Decide whether one quantitative bound alone already answers a bounded query
        /// (e.g. a lower bound exceeding an upper threshold). Returns false when no bound
        /// is set on the task.
        template<typename ModelType>
        bool AbstractAbstractionRefinementModelChecker<ModelType>::checkForResultAfterQuantitativeCheck(storm::models::Model<ValueType> const& abstractModel, bool lowerBounds, QuantitativeCheckResult<ValueType> const& quantitativeResult) {
            bool result = false;

            if (checkTask->isBoundSet()) {
                storm::logic::ComparisonType comparisonType = checkTask->getBoundComparisonType();
                ValueType threshold = checkTask->getBoundThreshold();

                if (lowerBounds) {
                    if (storm::logic::isLowerBound(comparisonType)) {
                        ValueType minimalLowerBound = quantitativeResult.getMin();
                        result = (storm::logic::isStrict(comparisonType) && minimalLowerBound > threshold) || (!storm::logic::isStrict(comparisonType) && minimalLowerBound >= threshold);
                    } else {
                        ValueType maximalLowerBound = quantitativeResult.getMax();
                        result = (storm::logic::isStrict(comparisonType) && maximalLowerBound >= threshold) || (!storm::logic::isStrict(comparisonType) && maximalLowerBound > threshold);
                    }
                } else {
                    if (storm::logic::isLowerBound(comparisonType)) {
                        ValueType minimalUpperBound = quantitativeResult.getMin();
                        result = (storm::logic::isStrict(comparisonType) && minimalUpperBound <= threshold) || (!storm::logic::isStrict(comparisonType) && minimalUpperBound < threshold);
                    } else {
                        ValueType maximalUpperBound = quantitativeResult.getMax();
                        result = (storm::logic::isStrict(comparisonType) && maximalUpperBound < threshold) || (!storm::logic::isStrict(comparisonType) && maximalUpperBound <= threshold);
                    }
                }

                // Typo fix: "quantiative" -> "quantitative" in the trace output.
                if (result) {
                    STORM_LOG_TRACE("Found result after quantitative check.");
                } else {
                    STORM_LOG_TRACE("Did not find result after quantitative check.");
                }
            }

            return result;
        }

        template<typename ModelType>
        void AbstractAbstractionRefinementModelChecker<ModelType>::printBoundsInformation(std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>>& bounds) {
            STORM_LOG_THROW(bounds.first->isSymbolicQuantitativeCheckResult() && bounds.second->isSymbolicQuantitativeCheckResult(), storm::exceptions::NotSupportedException, "Expected symbolic bounds.");
            printBoundsInformation(bounds.first->asSymbolicQuantitativeCheckResult<DdType, ValueType>(), bounds.second->asSymbolicQuantitativeCheckResult<DdType, ValueType>());
        }

        /// Trace the current [lower, upper] interval; for multiple states, report the
        /// state with the largest difference between the two bounds.
        template<typename ModelType>
        void AbstractAbstractionRefinementModelChecker<ModelType>::printBoundsInformation(SymbolicQuantitativeCheckResult<DdType, ValueType> const& lowerBounds, SymbolicQuantitativeCheckResult<DdType, ValueType> const& upperBounds) {
            // If there is exactly one value that we stored, we print the current bounds as an interval.
            if (lowerBounds.getStates().getNonZeroCount() == 1 && upperBounds.getStates().getNonZeroCount() == 1) {
                STORM_LOG_TRACE("Obtained bounds [" << lowerBounds.getValueVector().getMax() << ", " << upperBounds.getValueVector().getMax() << "] on actual result.");
            } else {
                storm::dd::Add<DdType, ValueType> diffs = upperBounds.getValueVector() - lowerBounds.getValueVector();
                storm::dd::Bdd<DdType> maxDiffRepresentative = diffs.maxAbstractRepresentative(diffs.getContainedMetaVariables());

                std::pair<ValueType, ValueType> bounds;
                bounds.first = (lowerBounds.getValueVector() * maxDiffRepresentative.template toAdd<ValueType>()).getMax();
                bounds.second = (upperBounds.getValueVector() * maxDiffRepresentative.template toAdd<ValueType>()).getMax();

                STORM_LOG_TRACE("Largest interval over initial is [" << bounds.first << ", " << bounds.second << "], difference " << (bounds.second - bounds.first) << ".");
            }
        }

        template<typename ModelType>
        void AbstractAbstractionRefinementModelChecker<ModelType>::filterInitialStates(storm::models::Model<ValueType> const& abstractModel, std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>>& bounds) {
            STORM_LOG_THROW(abstractModel.isSymbolicModel(), storm::exceptions::NotSupportedException, "Expected symbolic model.");
            filterInitialStates(*abstractModel.template as<storm::models::symbolic::Model<DdType, ValueType>>(), bounds);
        }

        /// Restrict both bound results to the abstract model's initial states.
        template<typename ModelType>
        void AbstractAbstractionRefinementModelChecker<ModelType>::filterInitialStates(storm::models::symbolic::Model<DdType, ValueType> const& abstractModel, std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>>& bounds) {
            storm::modelchecker::SymbolicQualitativeCheckResult<DdType> initialStateFilter(abstractModel.getReachableStates(), abstractModel.getInitialStates());
            bounds.first->filter(initialStateFilter);
            bounds.second->filter(initialStateFilter);
        }

        template<typename ModelType>
        bool AbstractAbstractionRefinementModelChecker<ModelType>::skipQuantitativeSolution(storm::models::Model<ValueType> const& abstractModel, storm::abstraction::QualitativeResultMinMax const& qualitativeResults) {
            STORM_LOG_THROW(abstractModel.isSymbolicModel(), storm::exceptions::NotSupportedException, "Expected symbolic model.");
            return skipQuantitativeSolution(*abstractModel.template as<storm::models::symbolic::Model<DdType, ValueType>>(), qualitativeResults.asSymbolicQualitativeResultMinMax<DdType>());
        }

        /// Skip the quantitative phase when min/max qualitative results already disagree
        /// on some initial state: the abstraction must be refined regardless.
        template<typename ModelType>
        bool AbstractAbstractionRefinementModelChecker<ModelType>::skipQuantitativeSolution(storm::models::symbolic::Model<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicQualitativeResultMinMax<DdType> const& qualitativeResults) {
            bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward;
            if (isRewardFormula) {
                if ((abstractModel.getInitialStates() && qualitativeResults.getProb1Min().getStates()) != (abstractModel.getInitialStates() && qualitativeResults.getProb1Max().getStates())) {
                    return true;
                }
            } else {
                if ((abstractModel.getInitialStates() && qualitativeResults.getProb0Min().getStates()) != (abstractModel.getInitialStates() && qualitativeResults.getProb0Max().getStates())) {
                    return true;
                } else if ((abstractModel.getInitialStates() && qualitativeResults.getProb1Min().getStates()) != (abstractModel.getInitialStates() && qualitativeResults.getProb1Max().getStates())) {
                    return true;
                }
            }
            return false;
        }

        template<typename ModelType>
        std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> AbstractAbstractionRefinementModelChecker<ModelType>::computeQuantitativeResult(Environment const& env, storm::models::Model<ValueType> const& abstractModel, storm::abstraction::StateSet const& constraintStates, storm::abstraction::StateSet const& targetStates, storm::abstraction::QualitativeResultMinMax const& qualitativeResults) {
            STORM_LOG_ASSERT(abstractModel.isSymbolicModel(), "Expected symbolic abstract model.");
            return computeQuantitativeResult(env, *abstractModel.template as<storm::models::symbolic::Model<DdType, ValueType>>(), constraintStates.asSymbolicStateSet(), targetStates.asSymbolicStateSet(), qualitativeResults.asSymbolicQualitativeResultMinMax<DdType>());
        }

        /// Dispatch the quantitative computation by the concrete symbolic model type.
        template<typename ModelType>
        std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> AbstractAbstractionRefinementModelChecker<ModelType>::computeQuantitativeResult(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates, storm::abstraction::SymbolicQualitativeResultMinMax<DdType> const& qualitativeResults) {
            STORM_LOG_THROW(abstractModel.isOfType(storm::models::ModelType::Dtmc) || abstractModel.isOfType(storm::models::ModelType::Mdp) || abstractModel.isOfType(storm::models::ModelType::S2pg), storm::exceptions::NotSupportedException, "Abstract model type is not supported.");

            if (abstractModel.isOfType(storm::models::ModelType::Dtmc)) {
                return computeQuantitativeResult(env, *abstractModel.template as<storm::models::symbolic::Dtmc<DdType, ValueType>>(), constraintStates, targetStates, qualitativeResults);
            } else if (abstractModel.isOfType(storm::models::ModelType::Mdp)) {
                return computeQuantitativeResult(env, *abstractModel.template as<storm::models::symbolic::Mdp<DdType, ValueType>>(), constraintStates, targetStates, qualitativeResults);
            } else {
                return computeQuantitativeResult(env, *abstractModel.template as<storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType>>(), constraintStates, targetStates, qualitativeResults);
            }
        }

        /// DTMC case: there is no nondeterminism, so lower and upper bound coincide; the
        /// single result is cloned into both slots.
        template<typename ModelType>
        std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> AbstractAbstractionRefinementModelChecker<ModelType>::computeQuantitativeResult(Environment const& env, storm::models::symbolic::Dtmc<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates, storm::abstraction::SymbolicQualitativeResultMinMax<DdType> const& qualitativeResults) {
            std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> result;

            storm::dd::Bdd<DdType> maybe;
            bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward;
            if (isRewardFormula) {
                maybe = qualitativeResults.getProb1Min().getStates() && abstractModel.getReachableStates();
            } else {
                maybe = !(qualitativeResults.getProb0Min().getStates() || qualitativeResults.getProb1Min().getStates()) && abstractModel.getReachableStates();
            }

            // Seed the solver with the previous iteration's values if reuse is enabled.
            storm::dd::Add<DdType, ValueType> startValues;
            if (this->getReuseQuantitativeResults() && lastBounds.first) {
                startValues = maybe.ite(lastBounds.first->asSymbolicQuantitativeCheckResult<DdType, ValueType>().getValueVector(), abstractModel.getManager().template getAddZero<ValueType>());
            } else {
                startValues = abstractModel.getManager().template getAddZero<ValueType>();
            }

            if (isRewardFormula) {
                storm::dd::Add<DdType, ValueType> values = storm::modelchecker::helper::SymbolicDtmcPrctlHelper<DdType, ValueType>::computeReachabilityRewards(env, abstractModel, abstractModel.getTransitionMatrix(), checkTask->isRewardModelSet() ? abstractModel.getRewardModel(checkTask->getRewardModel()) : abstractModel.getRewardModel(""), maybe, targetStates.getStates(), !qualitativeResults.getProb1Min().getStates() && abstractModel.getReachableStates(), startValues);
                result.first = std::make_unique<SymbolicQuantitativeCheckResult<DdType, ValueType>>(abstractModel.getReachableStates(), values);
                result.second = result.first->clone();
            } else {
                storm::dd::Add<DdType, ValueType> values = storm::modelchecker::helper::SymbolicDtmcPrctlHelper<DdType, ValueType>::computeUntilProbabilities(env, abstractModel, abstractModel.getTransitionMatrix(), maybe, qualitativeResults.getProb1Min().getStates(), startValues);
                result.first = std::make_unique<SymbolicQuantitativeCheckResult<DdType, ValueType>>(abstractModel.getReachableStates(), values);
                result.second = result.first->clone();
            }

            return result;
        }

        /// MDP case: one direction is resolved by the abstraction player; when the
        /// abstraction is exact (player 0) a single solve suffices, otherwise both a
        /// minimizing and a maximizing solve are performed.
        template<typename ModelType>
        std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> AbstractAbstractionRefinementModelChecker<ModelType>::computeQuantitativeResult(Environment const& env, storm::models::symbolic::Mdp<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates, storm::abstraction::SymbolicQualitativeResultMinMax<DdType> const& qualitativeResults) {
            std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> result;

            storm::dd::Bdd<DdType> maybeMin;
            storm::dd::Bdd<DdType> maybeMax;
            bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward;
            if (isRewardFormula) {
                maybeMin = qualitativeResults.getProb1Min().getStates() && abstractModel.getReachableStates();
                maybeMax = qualitativeResults.getProb1Max().getStates() && abstractModel.getReachableStates();
            } else {
                maybeMin = !(qualitativeResults.getProb0Min().getStates() || qualitativeResults.getProb1Min().getStates()) && abstractModel.getReachableStates();
                maybeMax = !(qualitativeResults.getProb0Max().getStates() || qualitativeResults.getProb1Max().getStates()) && abstractModel.getReachableStates();
            }

            storm::dd::Add<DdType, ValueType> minStartValues;
            if (this->getReuseQuantitativeResults() && lastBounds.first) {
                minStartValues = maybeMin.ite(lastBounds.first->asSymbolicQuantitativeCheckResult<DdType, ValueType>().getValueVector(), abstractModel.getManager().template getAddZero<ValueType>());
            } else {
                minStartValues = abstractModel.getManager().template getAddZero<ValueType>();
            }

            uint64_t abstractionPlayer = this->getAbstractionPlayer();
            if (isRewardFormula) {
                result.first = storm::modelchecker::helper::SymbolicMdpPrctlHelper<DdType, ValueType>::computeReachabilityRewards(env, abstractionPlayer == 1 ? storm::OptimizationDirection::Minimize : checkTask->getOptimizationDirection(), abstractModel, abstractModel.getTransitionMatrix(), abstractModel.getTransitionMatrix().notZero(), checkTask->isRewardModelSet() ? abstractModel.getRewardModel(checkTask->getRewardModel()) : abstractModel.getRewardModel(""), maybeMin, targetStates.getStates(), !qualitativeResults.getProb1Min().getStates() && abstractModel.getReachableStates(), minStartValues);

                if (abstractionPlayer == 0) {
                    result.second = result.first->clone();
                } else {
                    // NOTE(review): the maximizing pass also passes maybeMin (not maybeMax)
                    // as the maybe-state set — verify against upstream whether this is
                    // intentional; behavior preserved here.
                    result.second = storm::modelchecker::helper::SymbolicMdpPrctlHelper<DdType, ValueType>::computeReachabilityRewards(env, storm::OptimizationDirection::Maximize, abstractModel, abstractModel.getTransitionMatrix(), abstractModel.getTransitionMatrix().notZero(), checkTask->isRewardModelSet() ? abstractModel.getRewardModel(checkTask->getRewardModel()) : abstractModel.getRewardModel(""), maybeMin, targetStates.getStates(), !qualitativeResults.getProb1Max().getStates() && abstractModel.getReachableStates(), maybeMax.ite(result.first->asSymbolicQuantitativeCheckResult<DdType, ValueType>().getValueVector(), abstractModel.getManager().template getAddZero<ValueType>()));
                }
            } else {
                result.first = storm::modelchecker::helper::SymbolicMdpPrctlHelper<DdType, ValueType>::computeUntilProbabilities(env, abstractionPlayer == 1 ? storm::OptimizationDirection::Minimize : checkTask->getOptimizationDirection(), abstractModel, abstractModel.getTransitionMatrix(), maybeMin, qualitativeResults.getProb1Min().getStates(), minStartValues);

                if (abstractionPlayer == 0) {
                    result.second = result.first->clone();
                } else {
                    result.second = storm::modelchecker::helper::SymbolicMdpPrctlHelper<DdType, ValueType>::computeUntilProbabilities(env, storm::OptimizationDirection::Maximize, abstractModel, abstractModel.getTransitionMatrix(), maybeMax, qualitativeResults.getProb1Max().getStates(), maybeMax.ite(result.first->asSymbolicQuantitativeCheckResult<DdType, ValueType>().getValueVector(), abstractModel.getManager().template getAddZero<ValueType>()));
                }
            }

            return result;
        }

        /// Stochastic two-player game case: both players' directions depend on which
        /// player is the abstraction player. Reward formulas are rejected.
        template<typename ModelType>
        std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> AbstractAbstractionRefinementModelChecker<ModelType>::computeQuantitativeResult(Environment const& env, storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates, storm::abstraction::SymbolicQualitativeResultMinMax<DdType> const& qualitativeResults) {
            std::pair<std::unique_ptr<CheckResult>, std::unique_ptr<CheckResult>> result;

            storm::dd::Bdd<DdType> maybeMin;
            storm::dd::Bdd<DdType> maybeMax;
            bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward;
            if (!isRewardFormula) {
                maybeMin = !(qualitativeResults.getProb0Min().getStates() || qualitativeResults.getProb1Min().getStates()) && abstractModel.getReachableStates();
                maybeMax = !(qualitativeResults.getProb0Max().getStates() || qualitativeResults.getProb1Max().getStates()) && abstractModel.getReachableStates();
            }

            storm::dd::Add<DdType, ValueType> minStartValues;
            if (this->getReuseQuantitativeResults() && lastBounds.first) {
                minStartValues = maybeMin.ite(lastBounds.first->asSymbolicQuantitativeCheckResult<DdType, ValueType>().getValueVector(), abstractModel.getManager().template getAddZero<ValueType>());
            } else {
                minStartValues = abstractModel.getManager().template getAddZero<ValueType>();
            }

            if (isRewardFormula) {
                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Reward properties are not supported for abstract stochastic games.");
            } else {
                result.first = computeReachabilityProbabilitiesHelper(env, abstractModel, this->getAbstractionPlayer() == 1 ? storm::OptimizationDirection::Minimize : checkTask->getOptimizationDirection(), this->getAbstractionPlayer() == 2 ? storm::OptimizationDirection::Minimize : checkTask->getOptimizationDirection(), maybeMin, qualitativeResults.getProb1Min().getStates(), minStartValues);
                // NOTE(review): the maximizing pass passes maybeMin (not maybeMax) as the
                // maybe-state set while seeding start values from maybeMax — verify against
                // upstream whether this is intentional; behavior preserved here.
                result.second = computeReachabilityProbabilitiesHelper(env, abstractModel, this->getAbstractionPlayer() == 1 ? storm::OptimizationDirection::Maximize : checkTask->getOptimizationDirection(), this->getAbstractionPlayer() == 2 ? storm::OptimizationDirection::Maximize : checkTask->getOptimizationDirection(), maybeMin, qualitativeResults.getProb1Max().getStates(), maybeMax.ite(result.first->asSymbolicQuantitativeCheckResult<DdType, ValueType>().getValueVector(), abstractModel.getManager().template getAddZero<ValueType>()));
            }

            return result;
        }

        /// Set up and solve the symbolic equation system for reachability probabilities
        /// on a stochastic two-player game, restricted to the maybe states.
        template<typename ModelType>
        std::unique_ptr<CheckResult> AbstractAbstractionRefinementModelChecker<ModelType>::computeReachabilityProbabilitiesHelper(Environment const& env, storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType> const& abstractModel, storm::OptimizationDirection const& player1Direction, storm::OptimizationDirection const& player2Direction, storm::dd::Bdd<DdType> const& maybeStates, storm::dd::Bdd<DdType> const& prob1States, storm::dd::Add<DdType, ValueType> const& startValues) {
            // Typo fix: "quantative" -> "quantitative" in the trace output.
            STORM_LOG_TRACE("Performing quantitative solution step. Player 1: " << player1Direction << ", player 2: " << player2Direction << ".");

            // Compute the ingredients of the equation system.
            storm::dd::Add<DdType, ValueType> maybeStatesAdd = maybeStates.template toAdd<ValueType>();
            storm::dd::Add<DdType, ValueType> submatrix = maybeStatesAdd * abstractModel.getTransitionMatrix();
            storm::dd::Add<DdType, ValueType> prob1StatesAsColumn = prob1States.template toAdd<ValueType>().swapVariables(abstractModel.getRowColumnMetaVariablePairs());
            storm::dd::Add<DdType, ValueType> subvector = submatrix * prob1StatesAsColumn;
            subvector = subvector.sumAbstract(abstractModel.getColumnVariables());

            // Cut away all columns targeting non-maybe states.
            submatrix *= maybeStatesAdd.swapVariables(abstractModel.getRowColumnMetaVariablePairs());

            // Cut the starting vector to the maybe states of this query.
            storm::dd::Add<DdType, ValueType> startVector = maybeStates.ite(startValues, abstractModel.getManager().template getAddZero<ValueType>());

            // Create the solver and solve the equation system.
            storm::solver::SymbolicGameSolverFactory<DdType, ValueType> solverFactory;
            std::unique_ptr<storm::solver::SymbolicGameSolver<DdType, ValueType>> solver = solverFactory.create(submatrix, maybeStates, abstractModel.getIllegalPlayer1Mask(), abstractModel.getIllegalPlayer2Mask(), abstractModel.getRowVariables(), abstractModel.getColumnVariables(), abstractModel.getRowColumnMetaVariablePairs(), abstractModel.getPlayer1Variables(), abstractModel.getPlayer2Variables());
            auto values = solver->solveGame(env, player1Direction, player2Direction, startVector, subvector);

            return std::make_unique<storm::modelchecker::SymbolicQuantitativeCheckResult<DdType, ValueType>>(abstractModel.getReachableStates(), prob1States.template toAdd<ValueType>() + values);
        }

        template<typename ModelType>
        std::unique_ptr<storm::abstraction::QualitativeResultMinMax> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResult(Environment const& env, storm::models::Model<ValueType> const& abstractModel, storm::abstraction::StateSet const& constraintStates, storm::abstraction::StateSet const& targetStates) {
            STORM_LOG_ASSERT(abstractModel.isSymbolicModel(), "Expected symbolic abstract model.");
            return computeQualitativeResult(env, *abstractModel.template as<storm::models::symbolic::Model<DdType, ValueType>>(), constraintStates.asSymbolicStateSet(), targetStates.asSymbolicStateSet());
        }

        /// Dispatch the qualitative computation by the concrete symbolic model type.
        template<typename ModelType>
        std::unique_ptr<storm::abstraction::QualitativeResultMinMax> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResult(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates) {
            STORM_LOG_THROW(abstractModel.isOfType(storm::models::ModelType::Dtmc) || abstractModel.isOfType(storm::models::ModelType::Mdp) || abstractModel.isOfType(storm::models::ModelType::S2pg), storm::exceptions::NotSupportedException, "Expected discrete-time abstract model.");

            if (abstractModel.isOfType(storm::models::ModelType::Dtmc)) {
                return computeQualitativeResult(env, *abstractModel.template as<storm::models::symbolic::Dtmc<DdType, ValueType>>(), constraintStates, targetStates);
            } else if (abstractModel.isOfType(storm::models::ModelType::Mdp)) {
                return computeQualitativeResult(env, *abstractModel.template as<storm::models::symbolic::Mdp<DdType, ValueType>>(), constraintStates, targetStates);
            } else {
                return computeQualitativeResult(env, *abstractModel.template as<storm::models::symbolic::StochasticTwoPlayerGame<DdType, ValueType>>(), constraintStates, targetStates);
            }
        }

        /// DTMC case: prob0/prob1 analysis; min and max coincide (no nondeterminism).
        template<typename ModelType>
        std::unique_ptr<storm::abstraction::QualitativeResultMinMax> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResult(Environment const& env, storm::models::symbolic::Dtmc<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates) {
            STORM_LOG_DEBUG("Computing qualitative solution for DTMC.");
            std::unique_ptr<storm::abstraction::SymbolicQualitativeMdpResultMinMax<DdType>> result = std::make_unique<storm::abstraction::SymbolicQualitativeMdpResultMinMax<DdType>>();

            auto start = std::chrono::high_resolution_clock::now();
            bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward;
            storm::dd::Bdd<DdType> transitionMatrixBdd = abstractModel.getTransitionMatrix().notZero();
            if (isRewardFormula) {
                auto prob1 = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
                result->prob1Min = result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob1);
            } else {
                auto prob01 = storm::utility::graph::performProb01(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
                result->prob0Min = result->prob0Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.first);
                result->prob1Min = result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.second);
            }
            auto end = std::chrono::high_resolution_clock::now();

            auto timeInMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
            STORM_LOG_DEBUG("Computed qualitative solution in " << timeInMilliseconds << "ms.");

            return result;
        }

        /// MDP case: prob0/prob1 analysis for both optimization directions. Depending on
        /// the abstraction player and the task's direction, only one direction may need
        /// to be computed (the other is copied). When reuse is enabled, the previous
        /// iteration's prob1(min) states seed the fixed-point computations.
        template<typename ModelType>
        std::unique_ptr<storm::abstraction::QualitativeResultMinMax> AbstractAbstractionRefinementModelChecker<ModelType>::computeQualitativeResult(Environment const& env, storm::models::symbolic::Mdp<DdType, ValueType> const& abstractModel, storm::abstraction::SymbolicStateSet<DdType> const& constraintStates, storm::abstraction::SymbolicStateSet<DdType> const& targetStates) {
            STORM_LOG_DEBUG("Computing qualitative solution for MDP.");
            std::unique_ptr<storm::abstraction::SymbolicQualitativeMdpResultMinMax<DdType>> result = std::make_unique<storm::abstraction::SymbolicQualitativeMdpResultMinMax<DdType>>();

            auto start = std::chrono::high_resolution_clock::now();
            bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward;
            storm::dd::Bdd<DdType> transitionMatrixBdd = abstractModel.getTransitionMatrix().notZero();
            uint64_t abstractionPlayer = this->getAbstractionPlayer();
            if (this->getReuseQualitativeResults()) {
                if (isRewardFormula) {
                    bool computedMin = false;
                    if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Minimize) {
                        auto states = storm::utility::graph::performProb1E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), lastQualitativeResults ? lastQualitativeResults->asSymbolicQualitativeResultMinMax<DdType>().getProb1Min().getStates() : storm::utility::graph::performProbGreater0E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
                        result->prob1Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
                        computedMin = true;
                    }
                    if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Maximize) {
                        auto states = storm::utility::graph::performProb1A(abstractModel, transitionMatrixBdd, targetStates.getStates(), storm::utility::graph::performProbGreater0A(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
                        result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
                        if (!computedMin) {
                            result->prob1Min = result->prob1Max;
                        }
                    } else {
                        result->prob1Max = result->prob1Min;
                    }
                } else {
                    bool computedMax = false;
                    if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Maximize) {
                        auto states = storm::utility::graph::performProb0A(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
                        result->prob0Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
                        states = storm::utility::graph::performProb1E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), lastQualitativeResults ? lastQualitativeResults->asSymbolicQualitativeResultMinMax<DdType>().getProb1Min().getStates() : storm::utility::graph::performProbGreater0E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
                        result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
                        computedMax = true;
                    }
                    if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Minimize) {
                        auto states = storm::utility::graph::performProb1A(abstractModel, transitionMatrixBdd, lastQualitativeResults ? lastQualitativeResults->asSymbolicQualitativeResultMinMax<DdType>().getProb1Min().getStates() : targetStates.getStates(), storm::utility::graph::performProbGreater0A(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
                        result->prob1Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);

                        states = storm::utility::graph::performProb0E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
                        result->prob0Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(states);
                        if (!computedMax) {
                            result->prob0Max = result->prob0Min;
                            result->prob1Max = result->prob1Min;
                        }
                    } else {
                        result->prob0Min = result->prob0Max;
                        result->prob1Min = result->prob1Max;
                    }
                }
            } else {
                if (isRewardFormula) {
                    bool computedMin = false;
                    if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Minimize) {
                        auto prob1 = storm::utility::graph::performProb1E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), storm::utility::graph::performProbGreater0E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
                        result->prob1Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob1);
                        computedMin = true;
                    }
                    if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Maximize) {
                        auto prob1 = storm::utility::graph::performProb1A(abstractModel, transitionMatrixBdd, targetStates.getStates(), storm::utility::graph::performProbGreater0A(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates()));
                        result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob1);
                        if (!computedMin) {
                            result->prob1Min = result->prob1Max;
                        }
                    } else {
                        result->prob1Max = result->prob1Min;
                    }
                } else {
                    bool computedMin = false;
                    if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Minimize) {
                        auto prob01 = storm::utility::graph::performProb01Min(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
                        result->prob0Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.first);
                        result->prob1Min = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.second);
                        computedMin = true;
                    }
                    if (abstractionPlayer == 1 || checkTask->getOptimizationDirection() == storm::OptimizationDirection::Maximize) {
                        auto prob01 = storm::utility::graph::performProb01Max(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates());
                        result->prob0Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.first);
                        result->prob1Max = storm::abstraction::SymbolicQualitativeMdpResult<DdType>(prob01.second);
                        if (!computedMin) {
                            result->prob0Min = result->prob0Max;
                            result->prob1Min = result->prob1Max;
                        }
                    } else {
                        result->prob0Max = result->prob0Min;
                        result->prob1Max = result->prob1Min;
                    }
                }
            }
            auto end = std::chrono::high_resolution_clock::now();

            auto timeInMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
            if (isRewardFormula) {
                STORM_LOG_TRACE("Min: " << result->getProb1Min().getStates().getNonZeroCount() << " states with probability 1.");
                STORM_LOG_TRACE("Max: " << result->getProb1Max().getStates().getNonZeroCount() << " states with probability 1.");
            } else {
                STORM_LOG_TRACE("Min: " << result->getProb0Min().getStates().getNonZeroCount() << " states with probability 0, " << result->getProb1Min().getStates().getNonZeroCount() << " states with probability 1.");
                STORM_LOG_TRACE("Max: " << result->getProb0Max().getStates().getNonZeroCount() << " states with probability 0, " << result->getProb1Max().getStates().getNonZeroCount() << " states with probability 1.");
            }
            STORM_LOG_DEBUG("Computed qualitative solution in " << timeInMilliseconds << "ms.");

            return result;
        }

        // NOTE(review): the following definition is truncated at the end of this chunk
        // (it continues beyond the visible source); its opening fragment is left
        // byte-identical so the continuation still matches.
        template std::unique_ptr AbstractAbstractionRefinementModelChecker::computeQualitativeResult(Environment const& env, storm::models::symbolic::StochasticTwoPlayerGame
// computeQualitativeResult (stochastic two-player game overload), continued from
// the parameter list on the previous line. Resolves which player carries the
// abstraction nondeterminism (getAbstractionPlayer) and the optimization
// direction of the model's own nondeterminism, converts the transition-matrix
// ADD to a BDD (toBdd()), and then either reuses previous qualitative results
// (computeQualitativeResultReuse) or runs the game-based performProb0/performProb1
// precomputations for the Min and Max direction combinations from scratch.
// requiresSchedulers additionally requests strategy synthesis from the graph
// algorithms (passed twice: once per player).
const& abstractModel, storm::abstraction::SymbolicStateSet const& constraintStates, storm::abstraction::SymbolicStateSet const& targetStates) { STORM_LOG_DEBUG("Computing qualitative solution for S2PG."); std::unique_ptr> result; // Obtain the player optimization directions. uint64_t abstractionPlayer = this->getAbstractionPlayer(); // Obtain direction for player 1 (model nondeterminism). storm::OptimizationDirection modelNondeterminismDirection = this->getCheckTask().getOptimizationDirection(); // Convert the transition matrix to a BDD to use it in the qualitative algorithms. storm::dd::Bdd transitionMatrixBdd = abstractModel.getTransitionMatrix().toBdd(); // Remembers whether we need to synthesize schedulers. bool requiresSchedulers = this->requiresSchedulerSynthesis(); auto start = std::chrono::high_resolution_clock::now(); if (this->getReuseQualitativeResults()) { result = computeQualitativeResultReuse(abstractModel, transitionMatrixBdd, constraintStates, targetStates, abstractionPlayer, modelNondeterminismDirection, requiresSchedulers); } else { result = std::make_unique>(); result->prob0Min = storm::utility::graph::performProb0(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, abstractionPlayer == 2 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers); result->prob1Min = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, abstractionPlayer == 2 ? 
// (cont.) the Maximize instances of performProb0/performProb1: whichever player
// carries the abstraction nondeterminism gets the Min/Max direction being
// computed, while the other player follows the model's own direction. The
// STORM_LOG_TRACE lines afterwards report the number of 'no' (prob0) and 'yes'
// (prob1) player-1 states for both directions, ordering the printed direction
// pair according to which player is the abstraction player.
storm::OptimizationDirection::Minimize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers); result->prob0Max = storm::utility::graph::performProb0(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Maximize : modelNondeterminismDirection, abstractionPlayer == 2 ? storm::OptimizationDirection::Maximize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers); result->prob1Max = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Maximize : modelNondeterminismDirection, abstractionPlayer == 2 ? storm::OptimizationDirection::Maximize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers); } STORM_LOG_TRACE("Qualitative precomputation completed."); if (abstractionPlayer == 1) { STORM_LOG_TRACE("[" << storm::OptimizationDirection::Minimize << ", " << modelNondeterminismDirection << "]: " << result->prob0Min.player1States.getNonZeroCount() << " 'no', " << result->prob1Min.player1States.getNonZeroCount() << " 'yes'."); STORM_LOG_TRACE("[" << storm::OptimizationDirection::Maximize << ", " << modelNondeterminismDirection << "]: " << result->prob0Max.player1States.getNonZeroCount() << " 'no', " << result->prob1Max.player1States.getNonZeroCount() << " 'yes'."); } else { STORM_LOG_TRACE("[" << modelNondeterminismDirection << ", " << storm::OptimizationDirection::Minimize << "]: " << result->prob0Min.player1States.getNonZeroCount() << " 'no', " << result->prob1Min.player1States.getNonZeroCount() << " 'yes'."); STORM_LOG_TRACE("[" << modelNondeterminismDirection << ", " << storm::OptimizationDirection::Maximize << "]: " << result->prob0Max.player1States.getNonZeroCount() << " 'no', " << result->prob1Max.player1States.getNonZeroCount() << " 'yes'."); } auto end = std::chrono::high_resolution_clock::now(); 
// (cont.) elapsed-time logging ends the S2PG overload.
//
// computeQualitativeResultReuse: reuse-aware qualitative computation for games.
// It orders the four prob0/prob1 computations by the model nondeterminism
// direction so that cheaper MDP-style precomputations (performProb1A /
// performProb1E) can seed the more expensive game fixpoints through their
// optional candidate-set argument. The branch that begins below handles
// modelNondeterminismDirection == Minimize: (1) prob0 min/min via the game
// algorithm, then (2) an MDP prob1A over the candidates not in prob0Min ...
auto timeInMilliseconds = std::chrono::duration_cast(end - start).count(); STORM_LOG_DEBUG("Computed qualitative solution in " << timeInMilliseconds << "ms."); return result; } template std::unique_ptr::DdType>> AbstractAbstractionRefinementModelChecker::computeQualitativeResultReuse(storm::models::symbolic::StochasticTwoPlayerGame const& abstractModel, storm::dd::Bdd const& transitionMatrixBdd, storm::abstraction::SymbolicStateSet const& constraintStates, storm::abstraction::SymbolicStateSet const& targetStates, uint64_t abstractionPlayer, storm::OptimizationDirection const& modelNondeterminismDirection, bool requiresSchedulers) { std::unique_ptr> result = std::make_unique>(); // Depending on the model nondeterminism direction, we choose a different order of operations. if (modelNondeterminismDirection == storm::OptimizationDirection::Minimize) { // (1) min/min: compute prob0 using the game functions result->prob0Min = storm::utility::graph::performProb0(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, requiresSchedulers, requiresSchedulers); // (2) min/min: compute prob1 using the MDP functions storm::dd::Bdd candidates = abstractModel.getReachableStates() && !result->getProb0Min().getStates(); storm::dd::Bdd prob1MinMinMdp = storm::utility::graph::performProb1A(abstractModel, transitionMatrixBdd, lastQualitativeResults ? 
// (cont.) ... seeded with the previous iteration's prob1Min states when
// available, else with the target states. Then (3) game prob1 min/min using that
// MDP result as candidate set (boost::make_optional), and (4)+(5) the mixed
// direction combination, where step (5) seeds performProb1 with the previous
// iteration's prob1Max states — justified by the embedded original comment that
// upper-bound values can only decrease over refinement iterations.
lastQualitativeResults->asSymbolicQualitativeResultMinMax().getProb1Min().getStates() : targetStates.getStates(), candidates); // (3) min/min: compute prob1 using the game functions result->prob1Min = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), storm::OptimizationDirection::Minimize, storm::OptimizationDirection::Minimize, requiresSchedulers, requiresSchedulers, boost::make_optional(prob1MinMinMdp)); // (4) min/max, max/min: compute prob 0 using the game functions result->prob0Max = storm::utility::graph::performProb0(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Maximize : modelNondeterminismDirection, abstractionPlayer == 2 ? storm::OptimizationDirection::Maximize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers); // (5) min/max, max/min: compute prob 1 using the game functions // We know that only previous prob1 states can now be prob 1 states again, because the upper bound // values can only decrease over iterations. result->prob1Max = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Maximize : modelNondeterminismDirection, abstractionPlayer == 2 ? storm::OptimizationDirection::Maximize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers, lastQualitativeResults ? 
// (cont.) Maximize branch, mirrored: (1) prob0 max/max; (2) MDP performProb1E
// over candidates that are reachable and not prob0Max, further restricted by the
// previous iteration's prob1Max states when reuse is active; (3) game prob1
// max/max seeded with that MDP result; (4) prob0 for the mixed combination ...
lastQualitativeResults->asSymbolicQualitativeResultMinMax().getProb1Max().getStates() : boost::optional>()); } else { // (1) max/max: compute prob0 using the game functions result->prob0Max = storm::utility::graph::performProb0(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, requiresSchedulers, requiresSchedulers); // (2) max/max: compute prob1 using the MDP functions, reuse prob1 states of last iteration to constrain the candidate states. storm::dd::Bdd candidates = abstractModel.getReachableStates() && !result->getProb0Max().getStates(); if (this->getReuseQualitativeResults() && lastQualitativeResults) { candidates &= lastQualitativeResults->asSymbolicQualitativeResultMinMax().getProb1Max().getStates(); } storm::dd::Bdd prob1MaxMaxMdp = storm::utility::graph::performProb1E(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), candidates); // (3) max/max: compute prob1 using the game functions, reuse prob1 states from the MDP precomputation result->prob1Max = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), storm::OptimizationDirection::Maximize, storm::OptimizationDirection::Maximize, requiresSchedulers, requiresSchedulers, boost::make_optional(prob1MaxMaxMdp)); // (4) max/min, min/max: compute prob0 using the game functions result->prob0Min = storm::utility::graph::performProb0(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, abstractionPlayer == 2 ? 
// (cont.) ... and (5) the mixed prob1, again seeded with prob1MaxMaxMdp as the
// candidate set. NOTE(review): using the max/max MDP prob1 set as candidates for
// a min-direction game computation relies on it over-approximating that result —
// confirm against the Storm sources.
//
// checkForResultAfterQualitativeCheck (generic model wrapper): asserts the model
// is symbolic and dispatches to the symbolic overload, which can short-circuit
// the quantitative phase. For reachability *rewards* an answer exists already if
// no initial state lies in prob1Min (all initial states are "infinity" states,
// per the original comment embedded below) ...
storm::OptimizationDirection::Minimize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers); // (5) max/min:, max/min compute prob1 using the game functions, use prob1 from max/max as the candidate set result->prob1Min = storm::utility::graph::performProb1(abstractModel, transitionMatrixBdd, constraintStates.getStates(), targetStates.getStates(), abstractionPlayer == 1 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, abstractionPlayer == 2 ? storm::OptimizationDirection::Minimize : modelNondeterminismDirection, requiresSchedulers, requiresSchedulers, boost::make_optional(prob1MaxMaxMdp)); } return result; } template std::unique_ptr AbstractAbstractionRefinementModelChecker::checkForResultAfterQualitativeCheck(storm::models::Model const& abstractModel) { STORM_LOG_THROW(abstractModel.isSymbolicModel(), storm::exceptions::NotSupportedException, "Expected symbolic model."); return checkForResultAfterQualitativeCheck(*abstractModel.template as>()); } template std::unique_ptr AbstractAbstractionRefinementModelChecker::checkForResultAfterQualitativeCheck(storm::models::symbolic::Model const& abstractModel) { std::unique_ptr result; auto const& symbolicQualitativeResultMinMax = lastQualitativeResults->asSymbolicQualitativeResultMinMax(); bool isRewardFormula = checkTask->getFormula().isEventuallyFormula() && checkTask->getFormula().asEventuallyFormula().getContext() == storm::logic::FormulaContext::Reward; if (isRewardFormula) { // In the reachability reward case, we can give an answer if all initial states of the system are infinity // states in the min result. 
// (cont.) ... in which case an ADD assigning infinity to the initial states is
// returned. For reachability *probabilities*, a definitive result exists when all
// initial states are prob1 under Min (answer: constant one) or prob0 under Max
// (answer: constant zero). Otherwise a qualitative answer can still settle
// threshold bounds 0 and 1: if the bound threshold is 0 and some initial state
// has positive minimal probability, the synthetic value 0.5 below merely encodes
// "strictly between 0 and 1" for the bound comparison — it is not a computed
// probability (presumably consumed only by the bound check — TODO confirm).
if ((abstractModel.getInitialStates() && !symbolicQualitativeResultMinMax.getProb1Min().getStates()) == abstractModel.getInitialStates()) { result = std::make_unique>(abstractModel.getReachableStates(), abstractModel.getInitialStates(), abstractModel.getInitialStates().ite(abstractModel.getManager().getConstant(storm::utility::infinity()), abstractModel.getManager().template getAddZero())); } } else { // In the reachability probability case, we can give the answer if all initial states are prob1 states // in the min result or if all initial states are prob0 in the max case. // Furthermore, we can give the answer if there are initial states with probability > 0 in the min case // and the probability bound was 0 or if there are initial states with probability < 1 in the max case // and the probability bound was 1. if ((abstractModel.getInitialStates() && symbolicQualitativeResultMinMax.getProb1Min().getStates()) == abstractModel.getInitialStates()) { result = std::make_unique>(abstractModel.getReachableStates(), abstractModel.getInitialStates(), abstractModel.getManager().template getAddOne()); } else if ((abstractModel.getInitialStates() && symbolicQualitativeResultMinMax.getProb0Max().getStates()) == abstractModel.getInitialStates()) { result = std::make_unique>(abstractModel.getReachableStates(), abstractModel.getInitialStates(), abstractModel.getManager().template getAddZero()); } else if (checkTask->isBoundSet() && checkTask->getBoundThreshold() == storm::utility::zero() && (abstractModel.getInitialStates() && symbolicQualitativeResultMinMax.getProb0Min().getStates()) != abstractModel.getInitialStates()) { result = std::make_unique>(abstractModel.getReachableStates(), abstractModel.getInitialStates(), (abstractModel.getInitialStates() && symbolicQualitativeResultMinMax.getProb0Min().getStates()).ite(abstractModel.getManager().template getConstant(0.5), abstractModel.getManager().template getAddZero())); } else if (checkTask->isBoundSet() && 
// (cont.) the bound-1 case symmetrically marks non-prob1 initial states with 0.5
// and adds 1 on the prob1Max states. Whether a result was found is traced.
//
// tryToObtainResultFromBounds: if only one bound was computed, it is returned
// (std::move out of the pair); if both exist and agree up to the configured
// precision (boundsAreSufficientlyClose), their average becomes the final result
// and the abstract model's statistics are printed to stdout.
//
// boundsAreSufficientlyClose: compares the symbolic value vectors of the lower
// and upper bound with equalModuloPrecision, using the precision from the
// abstraction settings module (last argument false — NOTE(review): presumably
// selects absolute rather than relative comparison; confirm the parameter's
// meaning in the ADD API).
checkTask->getBoundThreshold() == storm::utility::one() && (abstractModel.getInitialStates() && symbolicQualitativeResultMinMax.getProb1Max().getStates()) != abstractModel.getInitialStates()) { result = std::make_unique>(abstractModel.getReachableStates(), abstractModel.getInitialStates(), (abstractModel.getInitialStates() && symbolicQualitativeResultMinMax.getProb1Max().getStates()).ite(abstractModel.getManager().template getConstant(0.5), abstractModel.getManager().template getAddZero()) + symbolicQualitativeResultMinMax.getProb1Max().getStates().template toAdd()); } } if (result) { STORM_LOG_TRACE("Found result after qualitative check."); } else { STORM_LOG_TRACE("Did not find result after qualitative check."); } return result; } template std::unique_ptr AbstractAbstractionRefinementModelChecker::tryToObtainResultFromBounds(storm::models::Model const& abstractModel, std::pair, std::unique_ptr>& bounds) { std::unique_ptr result; if (bounds.first == nullptr || bounds.second == nullptr) { STORM_LOG_ASSERT(bounds.first || bounds.second, "Expected at least one bound."); if (bounds.first) { return std::move(bounds.first); } else { return std::move(bounds.second); } } else { if (boundsAreSufficientlyClose(bounds)) { STORM_LOG_TRACE("Obtained bounds are sufficiently close."); result = getAverageOfBounds(bounds); } } if (result) { abstractModel.printModelInformationToStream(std::cout); } return result; } template bool AbstractAbstractionRefinementModelChecker::boundsAreSufficientlyClose(std::pair, std::unique_ptr> const& bounds) { STORM_LOG_ASSERT(bounds.first->isSymbolicQuantitativeCheckResult(), "Expected symbolic quantitative check result."); storm::modelchecker::SymbolicQuantitativeCheckResult const& lowerBounds = bounds.first->asSymbolicQuantitativeCheckResult(); STORM_LOG_ASSERT(bounds.second->isSymbolicQuantitativeCheckResult(), "Expected symbolic quantitative check result."); storm::modelchecker::SymbolicQuantitativeCheckResult const& upperBounds = 
// (cont.) getAverageOfBounds: builds (lower + upper) / 2 over the lower bound's
// state set; the divisor 2 is created via convertNumber from the string "2.0"
// (presumably to stay exact for non-double value types — TODO confirm). The file
// closes with the explicit template instantiations for the supported DD-library /
// value-type combinations and the namespace closers.
bounds.second->asSymbolicQuantitativeCheckResult(); return lowerBounds.getValueVector().equalModuloPrecision(upperBounds.getValueVector(), storm::settings::getModule().getPrecision(), false); } template std::unique_ptr AbstractAbstractionRefinementModelChecker::getAverageOfBounds(std::pair, std::unique_ptr> const& bounds) { STORM_LOG_ASSERT(bounds.first->isSymbolicQuantitativeCheckResult(), "Expected symbolic quantitative check result."); storm::modelchecker::SymbolicQuantitativeCheckResult const& lowerBounds = bounds.first->asSymbolicQuantitativeCheckResult(); STORM_LOG_ASSERT(bounds.second->isSymbolicQuantitativeCheckResult(), "Expected symbolic quantitative check result."); storm::modelchecker::SymbolicQuantitativeCheckResult const& upperBounds = bounds.second->asSymbolicQuantitativeCheckResult(); return std::make_unique>(lowerBounds.getReachableStates(), lowerBounds.getStates(), (lowerBounds.getValueVector() + upperBounds.getValueVector()) / lowerBounds.getValueVector().getDdManager().getConstant(storm::utility::convertNumber(std::string("2.0")))); } template class AbstractAbstractionRefinementModelChecker>; template class AbstractAbstractionRefinementModelChecker>; template class AbstractAbstractionRefinementModelChecker>; template class AbstractAbstractionRefinementModelChecker>; } }