Using the new helper for MDP LRA properties.

Branch: tempestpy_adaptions
Tim Quatmann, 4 years ago
Commit: 3c84e68216

1 changed file with 15 additions and 8 deletions:
  src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp

@@ -15,6 +15,7 @@
 #include "storm/models/sparse/StandardRewardModel.h"
 #include "storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h"
+#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
 #include "storm/modelchecker/prctl/helper/rewardbounded/QuantileHelper.h"
 #include "storm/modelchecker/multiobjective/multiObjectiveModelChecking.h"
@@ -224,10 +225,13 @@ namespace storm {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
             ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
-            auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), subResult.getTruthValuesVector(), checkTask.isProduceSchedulersSet());
-            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
-            if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
-                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
+            helper.setOptimizationDirection(checkTask.getOptimizationDirection());
+            helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
+            auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
+            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
+            if (checkTask.isProduceSchedulersSet()) {
+                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
             }
             return result;
         }
@@ -236,10 +240,13 @@ namespace storm {
         std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isProduceSchedulersSet());
-            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
-            if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
-                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+            storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
+            helper.setOptimizationDirection(checkTask.getOptimizationDirection());
+            helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
+            auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
+            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
+            if (checkTask.isProduceSchedulersSet()) {
+                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
             }
             return result;
         }
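Both hunks apply the same refactoring: the old single static call to SparseMdpPrctlHelper, which bundled values and scheduler into one return object, is replaced by a stateful SparseNondeterministicInfiniteHorizonHelper that is configured first, computes the values, and hands out the scheduler in a separate step. A minimal sketch of that pattern as it appears above; it assumes the member context of SparseMdpPrctlModelChecker (this->getModel(), env, checkTask), and `psiStates` is a hypothetical goal-state bit vector standing in for `subResult.getTruthValuesVector()`:

    // Construct the helper from the forward and backward transition relations,
    // then configure optimization direction and scheduler production up front.
    storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(
            this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
    helper.setOptimizationDirection(checkTask.getOptimizationDirection());
    helper.setProduceScheduler(checkTask.isProduceSchedulersSet());

    // Compute the LRA values first; extractScheduler() returns the choices
    // recorded during that computation, so it must be called afterwards.
    auto values = helper.computeLongRunAverageProbabilities(env, psiStates);
    std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
    if (checkTask.isProduceSchedulersSet()) {
        result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(
                std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
    }

The reward variant in the second hunk is identical except that computeLongRunAverageRewards(env, rewardModel.get()) replaces the probability call, which is what lets both LRA call sites share one helper.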
