From b5566fa861d8b57342101f03403135f68c942880 Mon Sep 17 00:00:00 2001
From: TimQu
Date: Mon, 25 Jun 2018 11:22:02 +0200
Subject: [PATCH] more on total reward formulas for mdps

---
 src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.h | 1 +
 .../modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp    | 8 +++++---
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.h b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.h
index 5ea9a010a..5667549e3 100644
--- a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.h
+++ b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.h
@@ -27,6 +27,7 @@ namespace storm {
             virtual std::unique_ptr<CheckResult> computeConditionalProbabilities(Environment const& env, CheckTask<storm::logic::ConditionalFormula, ValueType> const& checkTask) override;
             virtual std::unique_ptr<CheckResult> computeCumulativeRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::CumulativeRewardFormula, ValueType> const& checkTask) override;
             virtual std::unique_ptr<CheckResult> computeInstantaneousRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::InstantaneousRewardFormula, ValueType> const& checkTask) override;
+            virtual std::unique_ptr<CheckResult> computeTotalRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::TotalRewardFormula, ValueType> const& checkTask) override;
             virtual std::unique_ptr<CheckResult> computeReachabilityRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::EventuallyFormula, ValueType> const& checkTask) override;
             virtual std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) override;
             virtual std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) override;
diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
index e2ce02120..4b29d2324 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
@@ -846,7 +846,7 @@ namespace storm {
             template<typename ValueType>
             MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint) {
                 // Reduce to reachability rewards
-                if (goal.minimize) {
+                if (goal.minimize()) {
                     STORM_LOG_ERROR_COND(!produceScheduler, "Can not produce scheduler for this property (functionality not implemented");
                     // Identify the states from which no reward can be collected under some scheduler
                     storm::storage::BitVector choicesWithoutReward = rewardModel.getChoicesWithZeroReward(transitionMatrix);
@@ -883,7 +883,7 @@ namespace storm {
                     return computeReachabilityRewardsHelper(env, std::move(goal), ecElimResult.matrix, ecElimResult.matrix.transpose(true),
                         [&] (uint_fast64_t rowCount, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& maybeStates) {
                             std::vector<ValueType> result;
-                            std::vector<ValueType> oldChoiceRewards = rewardModel.getTotalRewardVector(rowCount, transitionMatrix);
+                            std::vector<ValueType> oldChoiceRewards = rewardModel.getTotalRewardVector(transitionMatrix);
                             result.reserve(rowCount);
                             for (uint64_t newState : maybeStates) {
                                 for (uint64_t newChoice = transitionMatrix.getRowGroupIndices()[newState]; newChoice < transitionMatrix.getRowGroupIndices()[newState + 1]; ++newChoice) {
@@ -904,7 +904,7 @@ namespace storm {
                         [&] () {
                             storm::storage::BitVector newChoicesWithoutReward(ecElimResult.matrix.getRowGroupCount(), false);
                             for (uint64_t newChoice = 0; newChoice < ecElimResult.matrix.getRowCount(); ++newChoice) {
-                                if (choicesWithoutReward.get(ecElimResult.newToOldChoiceMapping[newChoice])) {
+                                if (choicesWithoutReward.get(ecElimResult.newToOldRowMapping[newChoice])) {
                                     newChoicesWithoutReward.set(newChoice);
                                 }
                             }
@@ -1761,6 +1761,7 @@ namespace storm {
             template std::vector<double> SparseMdpPrctlHelper<double>::computeInstantaneousRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, uint_fast64_t stepCount);
             template std::vector<double> SparseMdpPrctlHelper<double>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, uint_fast64_t stepBound);
             template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
+            template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
             template std::vector<double> SparseMdpPrctlHelper<double>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel);
             template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
             template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
@@ -1771,6 +1772,7 @@ namespace storm {
             template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeInstantaneousRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, uint_fast64_t stepCount);
             template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, uint_fast64_t stepBound);
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
+            template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
             template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel);
             template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
             template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
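Note on the @@ -883,7 and @@ -904,7 hunks: both fixes concern how per-choice data of the
original MDP is carried over to the end-component quotient in ecElimResult. Choices of
the quotient are rows of ecElimResult.matrix, and newToOldRowMapping translates each new
row back to the original row whose reward (or zero-reward flag) it inherits. The sketch
below shows that lookup in isolation; it is plain self-contained C++ with illustrative
names and data, not Storm's API.

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Rewards of the original model, one entry per choice (matrix row).
    std::vector<double> oldChoiceRewards = {1.0, 0.0, 2.5, 0.0, 4.0};

    // For every row of the quotient matrix, the original row it stems from
    // (the role played by ecElimResult.newToOldRowMapping in the patch).
    std::vector<std::uint64_t> newToOldRowMapping = {0, 2, 4};

    // Row-group layout of the quotient: state s owns the rows in
    // [rowGroupIndices[s], rowGroupIndices[s + 1]).
    std::vector<std::uint64_t> rowGroupIndices = {0, 2, 3};

    // Quotient states whose value is still unknown ("maybe" states).
    std::vector<bool> maybeStates = {true, true};

    // Assemble the reward vector of the reachability-reward subproblem by
    // looking up each new choice's reward at its original row.
    std::vector<double> result;
    for (std::uint64_t state = 0; state < maybeStates.size(); ++state) {
        if (!maybeStates[state]) {
            continue;
        }
        for (std::uint64_t choice = rowGroupIndices[state]; choice < rowGroupIndices[state + 1]; ++choice) {
            result.push_back(oldChoiceRewards[newToOldRowMapping[choice]]);
        }
    }

    // Prints: 1 2.5 4
    for (double reward : result) {
        std::cout << reward << ' ';
    }
    std::cout << '\n';
    return 0;
}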