From c7aec92dc90bb205a0f433ddfcadd7b1406a49dd Mon Sep 17 00:00:00 2001 From: TimQu Date: Fri, 5 Apr 2019 16:40:42 +0200 Subject: [PATCH] modelchecker: Added support for non-trivial reward accumulations for Sparse/Hybrid/Dd engines. --- .../csl/HybridCtmcCslModelChecker.cpp | 13 ++-- .../csl/SparseCtmcCslModelChecker.cpp | 16 +++-- .../SparseMarkovAutomatonCslModelChecker.cpp | 14 ++-- .../prctl/HybridDtmcPrctlModelChecker.cpp | 12 ++-- .../prctl/HybridMdpPrctlModelChecker.cpp | 9 ++- .../prctl/SparseDtmcPrctlModelChecker.cpp | 16 +++-- .../prctl/SparseMdpPrctlModelChecker.cpp | 16 +++-- .../prctl/SymbolicDtmcPrctlModelChecker.cpp | 9 ++- .../prctl/SymbolicMdpPrctlModelChecker.cpp | 9 ++- src/storm/utility/FilteredRewardModel.h | 64 +++++++++++++++++++ 10 files changed, 139 insertions(+), 39 deletions(-) create mode 100644 src/storm/utility/FilteredRewardModel.h diff --git a/src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp b/src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp index 6e41f2782..b6d65edf0 100644 --- a/src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp +++ b/src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp @@ -10,6 +10,7 @@ #include "storm/storage/dd/DdManager.h" #include "storm/storage/dd/Add.h" #include "storm/storage/dd/Bdd.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/logic/FragmentSpecification.h" @@ -31,7 +32,7 @@ namespace storm { template::SupportsExponential, int>::type> bool HybridCtmcCslModelChecker::canHandleImplementation(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - return formula.isInFragment(storm::logic::csrl().setGloballyFormulasAllowed(false).setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setTimeAllowed(true).setTimeOperatorsAllowed(true)); + return formula.isInFragment(storm::logic::csrl().setGloballyFormulasAllowed(false).setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setTimeAllowed(true).setTimeOperatorsAllowed(true).setRewardAccumulationAllowed(true)); } template @@ -66,8 +67,8 @@ namespace storm { storm::logic::EventuallyFormula const& eventuallyFormula = checkTask.getFormula(); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); SymbolicQualitativeCheckResult const& subResult = subResultPointer->asSymbolicQualitativeCheckResult(); - - return storm::modelchecker::helper::HybridCtmcCslHelper::computeReachabilityRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::HybridCtmcCslHelper::computeReachabilityRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), rewardModel.get(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); } template @@ -117,7 +118,8 @@ namespace storm { storm::logic::CumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula(); STORM_LOG_THROW(rewardPathFormula.getTimeBoundReference().isTimeBound(), storm::exceptions::NotImplementedException, "Currently step-bounded and reward-bounded properties on CTMCs are not supported."); - return storm::modelchecker::helper::HybridCtmcCslHelper::computeCumulativeRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), rewardPathFormula.getNonStrictBound()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::HybridCtmcCslHelper::computeCumulativeRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), rewardModel.get(), rewardPathFormula.getNonStrictBound()); } template @@ -131,7 +133,8 @@ namespace storm { template std::unique_ptr HybridCtmcCslModelChecker::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask const& checkTask) { - return storm::modelchecker::helper::HybridCtmcCslHelper::computeLongRunAverageRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel("")); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::HybridCtmcCslHelper::computeLongRunAverageRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), rewardModel.get()); } // Explicitly instantiate the model checker. 
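The same refactoring recurs in every checker this patch touches: the inline ternary that picked either the named reward model or the unnamed default is replaced by a single call to storm::utility::createFilteredRewardModel, which additionally drops reward types that the formula's reward accumulation excludes. A minimal caller-side sketch of that contract, with illustrative names (computeSomething, CheckerType, and HelperType are not part of the patch):

    // The wrapper may own a filtered copy of the reward model, so keep it alive
    // on the stack for as long as the reference obtained via get() is in use.
    template<typename CheckerType, typename CheckTaskType, typename HelperType>
    auto computeSomething(CheckerType const& checker, CheckTaskType const& checkTask, HelperType&& helper) {
        auto rewardModel = storm::utility::createFilteredRewardModel(checker.getModel(), checkTask);
        return helper(rewardModel.get()); // pass the reference; never let it outlive rewardModel
    }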
diff --git a/src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp b/src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp index cf2aa5bcb..d084a42e4 100644 --- a/src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp +++ b/src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp @@ -9,6 +9,7 @@ #include "storm/utility/vector.h" #include "storm/utility/graph.h" #include "storm/utility/solver.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h" #include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h" @@ -37,7 +38,7 @@ namespace storm { template::SupportsExponential, int>::type> bool SparseCtmcCslModelChecker::canHandleImplementation(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - return formula.isInFragment(storm::logic::csrl().setGloballyFormulasAllowed(false).setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setTimeAllowed(true).setTotalRewardFormulasAllowed(true)); + return formula.isInFragment(storm::logic::csrl().setGloballyFormulasAllowed(false).setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setTimeAllowed(true).setTotalRewardFormulasAllowed(true).setRewardAccumulationAllowed(true)); } template @@ -103,7 +104,8 @@ namespace storm { std::unique_ptr SparseCtmcCslModelChecker::computeCumulativeRewards(Environment const& env, storm::logic::RewardMeasureType, CheckTask const& checkTask) { storm::logic::CumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula(); STORM_LOG_THROW(rewardPathFormula.getTimeBoundReference().isTimeBound(), storm::exceptions::NotImplementedException, "Currently step-bounded and reward-bounded properties on CTMCs are not supported."); - std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeCumulativeRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), rewardPathFormula.getNonStrictBound()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeCumulativeRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), rewardModel.get(), rewardPathFormula.getNonStrictBound()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } @@ -112,14 +114,15 @@ namespace storm { storm::logic::EventuallyFormula const& eventuallyFormula = checkTask.getFormula(); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult(); - - std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeReachabilityRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRateVector(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeReachabilityRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRateVector(), rewardModel.get(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } template std::unique_ptr SparseCtmcCslModelChecker::computeTotalRewards(Environment const& env, storm::logic::RewardMeasureType, CheckTask const& checkTask) { - std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeTotalRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRateVector(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), checkTask.isQualitativeSet()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeTotalRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRateVector(), rewardModel.get(), checkTask.isQualitativeSet()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } @@ -137,7 +140,8 @@ namespace storm { template std::unique_ptr SparseCtmcCslModelChecker::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask const& checkTask) { storm::storage::SparseMatrix probabilityMatrix = storm::modelchecker::helper::SparseCtmcCslHelper::computeProbabilityMatrix(this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector()); - std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), probabilityMatrix, checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), &this->getModel().getExitRateVector()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), probabilityMatrix, rewardModel.get(), &this->getModel().getExitRateVector()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } diff --git a/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp b/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp index 3a21d36db..7da489e01 100644 --- a/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp +++ b/src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp @@ -6,6 +6,7 @@ #include "storm/models/sparse/StandardRewardModel.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/utility/macros.h" #include "storm/settings/SettingsManager.h" @@ -35,7 +36,7 @@ namespace storm { template::SupportsExponential, int>::type> bool SparseMarkovAutomatonCslModelChecker::canHandleImplementation(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - if(formula.isInFragment(storm::logic::csl().setGloballyFormulasAllowed(false).setNextFormulasAllowed(false).setRewardOperatorsAllowed(true).setReachabilityRewardFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setTimeAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setLongRunAverageRewardFormulasAllowed(true))) { + if(formula.isInFragment(storm::logic::csl().setGloballyFormulasAllowed(false).setNextFormulasAllowed(false).setRewardOperatorsAllowed(true).setReachabilityRewardFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setTimeAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setLongRunAverageRewardFormulasAllowed(true).setRewardAccumulationAllowed(true))) { return true; } else { // Check whether we consider a multi-objective formula @@ -105,8 +106,9 @@ namespace storm { STORM_LOG_THROW(this->getModel().isClosed(), storm::exceptions::InvalidPropertyException, "Unable to compute reachability rewards in non-closed Markov automaton."); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult(); - - std::vector result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeReachabilityRewards(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector()); + + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeReachabilityRewards(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), rewardModel.get(), subResult.getTruthValuesVector()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(result))); } @@ -115,7 +117,8 @@ namespace storm { std::unique_ptr SparseMarkovAutomatonCslModelChecker::computeTotalRewards(Environment const& env, storm::logic::RewardMeasureType, CheckTask const& checkTask) { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); STORM_LOG_THROW(this->getModel().isClosed(), storm::exceptions::InvalidPropertyException, "Unable to compute total rewards in non-closed Markov automaton."); - std::vector result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeTotalRewards(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel("")); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeTotalRewards(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), rewardModel.get()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(result))); } @@ -136,7 +139,8 @@ namespace storm { std::unique_ptr SparseMarkovAutomatonCslModelChecker::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask const& checkTask) { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); STORM_LOG_THROW(this->getModel().isClosed(), storm::exceptions::InvalidPropertyException, "Unable to compute long run average rewards in non-closed Markov automaton."); - std::vector result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getUniqueRewardModel()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), rewardModel.get()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(result))); } diff --git a/src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp index 0654a9788..ecd57182f 100644 --- a/src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp +++ b/src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp @@ -8,6 +8,7 @@ #include "storm/utility/macros.h" #include "storm/utility/graph.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/models/symbolic/StandardRewardModel.h" @@ -33,7 +34,7 @@ namespace storm { template bool HybridDtmcPrctlModelChecker::canHandle(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - return formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true)); + return formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true).setRewardAccumulationAllowed(true)); } template @@ -78,7 +79,8 @@ namespace storm { std::unique_ptr HybridDtmcPrctlModelChecker::computeCumulativeRewards(Environment const& env, storm::logic::RewardMeasureType, CheckTask const& checkTask) { storm::logic::CumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula(); STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound."); - return storm::modelchecker::helper::HybridDtmcPrctlHelper::computeCumulativeRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), rewardPathFormula.getNonStrictBound()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::HybridDtmcPrctlHelper::computeCumulativeRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get(), rewardPathFormula.getNonStrictBound()); } template @@ -93,7 +95,8 @@ namespace storm { storm::logic::EventuallyFormula const& eventuallyFormula = checkTask.getFormula(); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); SymbolicQualitativeCheckResult const& subResult = subResultPointer->asSymbolicQualitativeCheckResult(); - return storm::modelchecker::helper::HybridDtmcPrctlHelper::computeReachabilityRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::HybridDtmcPrctlHelper::computeReachabilityRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); } template @@ -117,7 +120,8 @@ namespace storm { template std::unique_ptr HybridDtmcPrctlModelChecker::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask const& checkTask) { - return storm::modelchecker::helper::HybridDtmcPrctlHelper::computeLongRunAverageRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel("")); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::HybridDtmcPrctlHelper::computeLongRunAverageRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get()); } template class HybridDtmcPrctlModelChecker>; diff --git a/src/storm/modelchecker/prctl/HybridMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/HybridMdpPrctlModelChecker.cpp index 9af47c767..270e99c47 100644 --- a/src/storm/modelchecker/prctl/HybridMdpPrctlModelChecker.cpp +++ b/src/storm/modelchecker/prctl/HybridMdpPrctlModelChecker.cpp @@ -19,6 +19,7 @@ #include "storm/modelchecker/multiobjective/multiObjectiveModelChecking.h" #include "storm/solver/MinMaxLinearEquationSolver.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/settings/modules/GeneralSettings.h" @@ -39,7 +40,7 @@ namespace storm { template bool HybridMdpPrctlModelChecker::canHandle(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - if (formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(false).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true))) { + if (formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(false).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true).setRewardAccumulationAllowed(true))) { return true; } else { // Check whether we consider a multi-objective formula @@ -98,7 +99,8 @@ namespace storm { storm::logic::CumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula(); STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound."); - return storm::modelchecker::helper::HybridMdpPrctlHelper::computeCumulativeRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), rewardPathFormula.getNonStrictBound()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::HybridMdpPrctlHelper::computeCumulativeRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get(), rewardPathFormula.getNonStrictBound()); } template @@ -115,7 +117,8 @@ namespace storm { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); SymbolicQualitativeCheckResult const& subResult = subResultPointer->asSymbolicQualitativeCheckResult(); - return storm::modelchecker::helper::HybridMdpPrctlHelper::computeReachabilityRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::HybridMdpPrctlHelper::computeReachabilityRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); } template diff --git a/src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp index 7c11c31b8..4a1c22ee9 100644 --- a/src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp +++ b/src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp @@ -4,6 +4,7 @@ #include #include "storm/utility/macros.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h" #include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h" @@ -33,7 +34,7 @@ namespace storm { template bool SparseDtmcPrctlModelChecker::canHandle(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - if (formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setConditionalProbabilityFormulasAllowed(true).setConditionalRewardFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setOnlyEventuallyFormuluasInConditionalFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true))) { + if 
(formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setConditionalProbabilityFormulasAllowed(true).setConditionalRewardFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setOnlyEventuallyFormuluasInConditionalFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true).setRewardAccumulationAllowed(true))) { return true; } else if (formula.isInFragment(storm::logic::quantiles())) { if (this->getModel().getInitialStates().getNumberOfSetBits() > 1) return false; @@ -101,6 +102,7 @@ namespace storm { storm::logic::CumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula(); if (rewardPathFormula.isMultiDimensional() || rewardPathFormula.getTimeBoundReference().isRewardBound()) { STORM_LOG_THROW(checkTask.isOnlyInitialStatesRelevantSet(), storm::exceptions::InvalidOperationException, "Checking reward bounded cumulative reward formulas can only be done for the initial states of the model."); + STORM_LOG_THROW(!checkTask.getFormula().hasRewardAccumulation(), storm::exceptions::InvalidOperationException, "Checking reward bounded cumulative reward formulas is not supported if reward accumulations are given."); storm::logic::OperatorInformation opInfo; if (checkTask.isBoundSet()) { opInfo.bound = checkTask.getBound(); @@ -110,7 +112,8 @@ namespace storm { return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } else { STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound."); - std::vector numericResult = storm::modelchecker::helper::SparseDtmcPrctlHelper::computeCumulativeRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), rewardPathFormula.getNonStrictBound()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseDtmcPrctlHelper::computeCumulativeRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), rewardModel.get(), rewardPathFormula.getNonStrictBound()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } } @@ -128,7 +131,8 @@ namespace storm { storm::logic::EventuallyFormula const& eventuallyFormula = checkTask.getFormula(); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult(); - std::vector numericResult = storm::modelchecker::helper::SparseDtmcPrctlHelper::computeReachabilityRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector(), checkTask.isQualitativeSet(), checkTask.getHint()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseDtmcPrctlHelper::computeReachabilityRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet(), checkTask.getHint()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } @@ -143,7 +147,8 @@ namespace storm { template std::unique_ptr SparseDtmcPrctlModelChecker::computeTotalRewards(Environment const& env, storm::logic::RewardMeasureType, CheckTask const& checkTask) { - std::vector numericResult = storm::modelchecker::helper::SparseDtmcPrctlHelper::computeTotalRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), checkTask.isQualitativeSet(), checkTask.getHint()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseDtmcPrctlHelper::computeTotalRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isQualitativeSet(), checkTask.getHint()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } @@ -158,7 +163,8 @@ namespace storm { template std::unique_ptr SparseDtmcPrctlModelChecker::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask const& checkTask) { - std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), nullptr); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), rewardModel.get(), nullptr); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } diff --git a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp index bd2dfd326..f12262945 100644 --- a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp +++ b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp @@ -4,6 +4,7 @@ #include "storm/utility/macros.h" #include "storm/utility/vector.h" #include "storm/utility/graph.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h" #include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h" @@ -40,7 +41,7 @@ namespace storm { template bool SparseMdpPrctlModelChecker::canHandle(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - if (formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setConditionalProbabilityFormulasAllowed(true).setOnlyEventuallyFormuluasInConditionalFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true))) { + if (formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(true).setLongRunAverageProbabilitiesAllowed(true).setConditionalProbabilityFormulasAllowed(true).setOnlyEventuallyFormuluasInConditionalFormulasAllowed(true).setTotalRewardFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true).setRewardAccumulationAllowed(true))) { return true; } else if (formula.isInFragment(storm::logic::multiObjective().setCumulativeRewardFormulasAllowed(true).setTimeBoundedCumulativeRewardFormulasAllowed(true).setStepBoundedCumulativeRewardFormulasAllowed(true).setRewardBoundedCumulativeRewardFormulasAllowed(true).setTimeBoundedUntilFormulasAllowed(true).setStepBoundedUntilFormulasAllowed(true).setRewardBoundedUntilFormulasAllowed(true).setMultiDimensionalBoundedUntilFormulasAllowed(true).setMultiDimensionalCumulativeRewardFormulasAllowed(true))) { // Check whether we consider a multi-objective formula @@ -140,6 +141,7 @@ namespace storm { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); if (rewardPathFormula.isMultiDimensional() || rewardPathFormula.getTimeBoundReference().isRewardBound()) { STORM_LOG_THROW(checkTask.isOnlyInitialStatesRelevantSet(), storm::exceptions::InvalidOperationException, "Checking reward bounded cumulative 
reward formulas can only be done for the initial states of the model."); + STORM_LOG_THROW(!checkTask.getFormula().hasRewardAccumulation(), storm::exceptions::InvalidOperationException, "Checking reward bounded cumulative reward formulas is not supported if reward accumulations are given."); STORM_LOG_WARN_COND(!checkTask.isQualitativeSet(), "Checking reward bounded until formulas is not optimized w.r.t. qualitative queries"); storm::logic::OperatorInformation opInfo(checkTask.getOptimizationDirection()); if (checkTask.isBoundSet()) { @@ -151,7 +153,8 @@ namespace storm { return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } else { STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound."); - std::vector numericResult = storm::modelchecker::helper::SparseMdpPrctlHelper::computeCumulativeRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), rewardPathFormula.getNonStrictBound()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector numericResult = storm::modelchecker::helper::SparseMdpPrctlHelper::computeCumulativeRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), rewardModel.get(), rewardPathFormula.getNonStrictBound()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); } } @@ -171,7 +174,8 @@ namespace storm { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult(); - auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper::computeReachabilityRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector(), checkTask.isQualitativeSet(), checkTask.isProduceSchedulersSet(), checkTask.getHint()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper::computeReachabilityRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet(), checkTask.isProduceSchedulersSet(), checkTask.getHint()); std::unique_ptr result(new ExplicitQuantitativeCheckResult(std::move(ret.values))); if (checkTask.isProduceSchedulersSet() && ret.scheduler) { result->asExplicitQuantitativeCheckResult().setScheduler(std::move(ret.scheduler)); @@ -196,7 +200,8 @@ namespace storm { template std::unique_ptr SparseMdpPrctlModelChecker::computeTotalRewards(Environment const& env, storm::logic::RewardMeasureType, CheckTask const& checkTask) { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); - auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper::computeTotalRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), checkTask.isQualitativeSet(), checkTask.isProduceSchedulersSet(), checkTask.getHint()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper::computeTotalRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isQualitativeSet(), checkTask.isProduceSchedulersSet(), checkTask.getHint()); std::unique_ptr result(new ExplicitQuantitativeCheckResult(std::move(ret.values))); if (checkTask.isProduceSchedulersSet() && ret.scheduler) { result->asExplicitQuantitativeCheckResult().setScheduler(std::move(ret.scheduler)); @@ -217,7 +222,8 @@ namespace storm { template std::unique_ptr SparseMdpPrctlModelChecker::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask const& checkTask) { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); - std::vector result = storm::modelchecker::helper::SparseMdpPrctlHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getUniqueRewardModel()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + std::vector result = storm::modelchecker::helper::SparseMdpPrctlHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get()); return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(result))); } diff --git a/src/storm/modelchecker/prctl/SymbolicDtmcPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SymbolicDtmcPrctlModelChecker.cpp index a6a099833..8b0040b2f 100644 --- a/src/storm/modelchecker/prctl/SymbolicDtmcPrctlModelChecker.cpp +++ b/src/storm/modelchecker/prctl/SymbolicDtmcPrctlModelChecker.cpp @@ -5,6 +5,7 @@ #include "storm/storage/dd/Add.h" #include "storm/utility/macros.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/models/symbolic/StandardRewardModel.h" @@ -30,7 +31,7 @@ namespace storm { template bool SymbolicDtmcPrctlModelChecker::canHandle(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - return formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(false).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true)); + return formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(false).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true).setRewardAccumulationAllowed(true)); } template @@ -79,7 +80,8 @@ namespace storm { std::unique_ptr SymbolicDtmcPrctlModelChecker::computeCumulativeRewards(Environment const& env, storm::logic::RewardMeasureType, CheckTask const& checkTask) { storm::logic::CumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula(); STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound."); - storm::dd::Add numericResult = storm::modelchecker::helper::SymbolicDtmcPrctlHelper::computeCumulativeRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), rewardPathFormula.getNonStrictBound()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + storm::dd::Add numericResult = storm::modelchecker::helper::SymbolicDtmcPrctlHelper::computeCumulativeRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get(), rewardPathFormula.getNonStrictBound()); return std::unique_ptr>(new SymbolicQuantitativeCheckResult(this->getModel().getReachableStates(), numericResult)); } @@ -96,7 +98,8 @@ namespace storm { storm::logic::EventuallyFormula const& eventuallyFormula = checkTask.getFormula(); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); SymbolicQualitativeCheckResult const& subResult = subResultPointer->asSymbolicQualitativeCheckResult(); - storm::dd::Add numericResult = storm::modelchecker::helper::SymbolicDtmcPrctlHelper::computeReachabilityRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + storm::dd::Add numericResult = storm::modelchecker::helper::SymbolicDtmcPrctlHelper::computeReachabilityRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet()); return std::make_unique>(this->getModel().getReachableStates(), numericResult); } diff --git a/src/storm/modelchecker/prctl/SymbolicMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SymbolicMdpPrctlModelChecker.cpp index 713ce3295..a08546669 100644 --- a/src/storm/modelchecker/prctl/SymbolicMdpPrctlModelChecker.cpp +++ b/src/storm/modelchecker/prctl/SymbolicMdpPrctlModelChecker.cpp @@ -11,6 +11,7 @@ #include "storm/utility/macros.h" #include "storm/utility/graph.h" +#include "storm/utility/FilteredRewardModel.h" #include "storm/settings/modules/GeneralSettings.h" @@ -30,7 +31,7 @@ namespace storm { template bool SymbolicMdpPrctlModelChecker::canHandle(CheckTask const& checkTask) const { storm::logic::Formula const& formula = checkTask.getFormula(); - return formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(false).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true)); + return formula.isInFragment(storm::logic::prctl().setLongRunAverageRewardFormulasAllowed(false).setTimeOperatorsAllowed(true).setReachbilityTimeFormulasAllowed(true).setRewardAccumulationAllowed(true)); } template @@ -80,7 +81,8 @@ namespace storm { storm::logic::CumulativeRewardFormula const& rewardPathFormula = checkTask.getFormula(); STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidArgumentException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); STORM_LOG_THROW(rewardPathFormula.hasIntegerBound(), storm::exceptions::InvalidPropertyException, "Formula needs to have a discrete time bound."); - return storm::modelchecker::helper::SymbolicMdpPrctlHelper::computeCumulativeRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), rewardPathFormula.getNonStrictBound()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::SymbolicMdpPrctlHelper::computeCumulativeRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get(), rewardPathFormula.getNonStrictBound()); } template @@ -97,7 +99,8 @@ namespace storm { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); std::unique_ptr subResultPointer = this->check(env, eventuallyFormula.getSubformula()); SymbolicQualitativeCheckResult const& subResult = subResultPointer->asSymbolicQualitativeCheckResult(); - return storm::modelchecker::helper::SymbolicMdpPrctlHelper::computeReachabilityRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), checkTask.isRewardModelSet() ? 
this->getModel().getRewardModel(checkTask.getRewardModel()) : this->getModel().getRewardModel(""), subResult.getTruthValuesVector()); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + return storm::modelchecker::helper::SymbolicMdpPrctlHelper::computeReachabilityRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get(), subResult.getTruthValuesVector()); } template diff --git a/src/storm/utility/FilteredRewardModel.h b/src/storm/utility/FilteredRewardModel.h new file mode 100644 index 000000000..32dc13521 --- /dev/null +++ b/src/storm/utility/FilteredRewardModel.h @@ -0,0 +1,64 @@
+#pragma once
+
+#include <memory>
+
+#include "storm/utility/macros.h"
+#include "storm/exceptions/NotSupportedException.h"
+
+namespace storm {
+    namespace utility {
+
+        /*
+         * This class wraps around a reward model and filters out the reward types (state, state-action, and transition rewards)
+         * that are not selected. If no filtering is necessary, only a reference to the base reward model is kept.
+         */
+        template<typename RewardModelType>
+        class FilteredRewardModel {
+        public:
+            FilteredRewardModel(RewardModelType const& baseRewardModel, bool enableStateRewards, bool enableStateActionRewards, bool enableTransitionRewards) {
+                if ((baseRewardModel.hasStateRewards() && !enableStateRewards) || (baseRewardModel.hasStateActionRewards() && !enableStateActionRewards) || (baseRewardModel.hasTransitionRewards() && !enableTransitionRewards)) {
+                    // One of the available reward types needs to be deleted.
+                    typename std::remove_const<typename std::remove_reference<decltype(baseRewardModel.getOptionalStateRewardVector())>::type>::type stateRewards;
+                    if (enableStateRewards) {
+                        stateRewards = baseRewardModel.getOptionalStateRewardVector();
+                    }
+                    typename std::remove_const<typename std::remove_reference<decltype(baseRewardModel.getOptionalStateActionRewardVector())>::type>::type stateActionRewards;
+                    if (enableStateActionRewards) {
+                        stateActionRewards = baseRewardModel.getOptionalStateActionRewardVector();
+                    }
+                    typename std::remove_const<typename std::remove_reference<decltype(baseRewardModel.getOptionalTransitionRewardMatrix())>::type>::type transitionRewards;
+                    if (enableTransitionRewards) {
+                        transitionRewards = baseRewardModel.getOptionalTransitionRewardMatrix();
+                    }
+                    localRewardModel = std::unique_ptr<RewardModelType>(new RewardModelType(stateRewards, stateActionRewards, transitionRewards));
+                    rewardModel = localRewardModel.get();
+                } else {
+                    rewardModel = &baseRewardModel;
+                }
+            }
+
+            RewardModelType const& get() const {
+                return *rewardModel;
+            }
+        private:
+            std::unique_ptr<RewardModelType> localRewardModel;
+            RewardModelType const* rewardModel;
+        };
+
+        template<typename ModelType, typename CheckTaskType>
+        FilteredRewardModel<typename ModelType::RewardModelType> createFilteredRewardModel(ModelType const& model, CheckTaskType const& checkTask) {
+            auto const& baseRewardModel = checkTask.isRewardModelSet() ? model.getRewardModel(checkTask.getRewardModel()) : model.getUniqueRewardModel();
+            if (checkTask.getFormula().hasRewardAccumulation()) {
+                auto const& acc = checkTask.getFormula().getRewardAccumulation();
+                STORM_LOG_THROW(model.isDiscreteTimeModel() || !acc.isExitSet() || !baseRewardModel.hasStateRewards(), storm::exceptions::NotSupportedException, "Exit rewards for continuous time models are not supported.");
+                // Check which of the available reward types are enabled by the accumulation.
+                bool enableStateRewards = model.isDiscreteTimeModel() ? acc.isExitSet() : acc.isTimeSet();
+                bool enableStateActionRewards = acc.isStepsSet();
+                bool enableTransitionRewards = acc.isStepsSet();
+                return FilteredRewardModel<typename ModelType::RewardModelType>(baseRewardModel, enableStateRewards, enableStateActionRewards, enableTransitionRewards);
+            } else {
+                return FilteredRewardModel<typename ModelType::RewardModelType>(baseRewardModel, true, true, true);
+            }
+        }
+    }
+}
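To illustrate the alias-or-copy behavior of the new wrapper, here is a self-contained toy program. MockRewardModel is hypothetical and uses std::optional where Storm's reward models use boost::optional, and it omits transition rewards, but the control flow mirrors FilteredRewardModel above:

    #include <iostream>
    #include <memory>
    #include <optional>
    #include <vector>

    // Hypothetical stand-in for Storm's StandardRewardModel.
    struct MockRewardModel {
        std::optional<std::vector<double>> stateRewards;
        std::optional<std::vector<double>> stateActionRewards;
        bool hasStateRewards() const { return stateRewards.has_value(); }
        bool hasStateActionRewards() const { return stateActionRewards.has_value(); }
    };

    // Mirrors the wrapper's core idea: alias the base model when nothing
    // needs to be dropped, otherwise own a filtered copy.
    class Filtered {
    public:
        Filtered(MockRewardModel const& base, bool enableState, bool enableStateAction) {
            if ((base.hasStateRewards() && !enableState) || (base.hasStateActionRewards() && !enableStateAction)) {
                auto copy = std::make_unique<MockRewardModel>();
                if (enableState) { copy->stateRewards = base.stateRewards; }
                if (enableStateAction) { copy->stateActionRewards = base.stateActionRewards; }
                local = std::move(copy);
                model = local.get();
            } else {
                model = &base; // cheap path: no copy, just a reference
            }
        }
        MockRewardModel const& get() const { return *model; }
    private:
        std::unique_ptr<MockRewardModel> local;
        MockRewardModel const* model;
    };

    int main() {
        MockRewardModel base;
        base.stateRewards = std::vector<double>{1.0, 2.0};
        base.stateActionRewards = std::vector<double>{0.5, 0.5};
        Filtered unfiltered(base, true, true); // nothing dropped: aliases base
        Filtered stepsOnly(base, false, true); // e.g. accumulation [steps]: state rewards dropped
        std::cout << std::boolalpha
                  << unfiltered.get().hasStateRewards() << ' '        // true
                  << stepsOnly.get().hasStateRewards() << ' '         // false
                  << stepsOnly.get().hasStateActionRewards() << '\n'; // true
    }

The aliasing path matters for performance: formulas without a reward accumulation (the common case) incur no copy of potentially large reward vectors or matrices.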