From 009cee1c258db53d3890ceb7ed57bbe1cef7179b Mon Sep 17 00:00:00 2001 From: Tim Quatmann Date: Wed, 31 Jul 2019 18:44:18 +0200 Subject: [PATCH 1/5] Implemented scheduler extraction for LRA properties for MDP. --- .../prctl/SparseMdpPrctlModelChecker.cpp | 16 ++- .../prctl/helper/SparseMdpPrctlHelper.cpp | 115 +++++++++++++++--- .../prctl/helper/SparseMdpPrctlHelper.h | 8 +- 3 files changed, 114 insertions(+), 25 deletions(-) diff --git a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp index 79e0a433b..11821a518 100644 --- a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp +++ b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp @@ -215,16 +215,24 @@ namespace storm { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); std::unique_ptr subResultPointer = this->check(env, stateFormula); ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult(); - std::vector numericResult = storm::modelchecker::helper::SparseMdpPrctlHelper::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), subResult.getTruthValuesVector()); - return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(numericResult))); + auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), subResult.getTruthValuesVector(), checkTask.isProduceSchedulersSet()); + std::unique_ptr result(new ExplicitQuantitativeCheckResult(std::move(ret.values))); + if (checkTask.isProduceSchedulersSet() && ret.scheduler) { + result->asExplicitQuantitativeCheckResult().setScheduler(std::move(ret.scheduler)); + } + return result; } template std::unique_ptr SparseMdpPrctlModelChecker::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask const& checkTask) { STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); - std::vector result = storm::modelchecker::helper::SparseMdpPrctlHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get()); - return std::unique_ptr(new ExplicitQuantitativeCheckResult(std::move(result))); + auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isProduceSchedulersSet()); + std::unique_ptr result(new ExplicitQuantitativeCheckResult(std::move(ret.values))); + if (checkTask.isProduceSchedulersSet() && ret.scheduler) { + result->asExplicitQuantitativeCheckResult().setScheduler(std::move(ret.scheduler)); + } + return result; } template diff --git 
a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp index 9e036a383..bb6b77170 100644 --- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp +++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp @@ -1206,7 +1206,7 @@ namespace storm { } template - std::vector SparseMdpPrctlHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& psiStates) { + MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler) { // If there are no goal states, we avoid the computation and directly return zero. if (psiStates.empty()) { @@ -1223,15 +1223,20 @@ namespace storm { std::vector stateRewards(psiStates.size(), storm::utility::zero()); storm::utility::vector::setVectorValues(stateRewards, psiStates, storm::utility::one()); storm::models::sparse::StandardRewardModel rewardModel(std::move(stateRewards)); - return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel); + return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel, produceScheduler); } template template - std::vector SparseMdpPrctlHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, RewardModelType const& rewardModel) { + MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler) { uint64_t numberOfStates = transitionMatrix.getRowGroupCount(); + std::unique_ptr> scheduler; + if (produceScheduler) { + scheduler = std::make_unique>(numberOfStates); + } + // Start by decomposing the MDP into its MECs. storm::storage::MaximalEndComponentDecomposition mecDecomposition(transitionMatrix, backwardTransitions); @@ -1253,7 +1258,7 @@ namespace storm { for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) { storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex]; - lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec); + lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec, scheduler); // Gather information for later use. for (auto const& stateChoicesPair : mec) { @@ -1312,6 +1317,8 @@ namespace storm { } } + std::vector> sspMecChoicesToOriginalMap; // for scheduler extraction + // Now we are ready to construct the choices for the auxiliary states. 
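                // Recap of the construction around this loop: the SSP ("stochastic shortest path") matrix
                // has one row group per state that lies outside of every MEC plus one auxiliary row group per MEC.
                // An auxiliary state offers (i) every choice with which the corresponding MEC can be left and
                // (ii) one additional choice that stays inside the MEC; the latter has an empty row and collects
                // the MEC's LRA value as a one-time reward via the right-hand side vector b.
                // sspMecChoicesToOriginalMap records the original state/choice behind each of these rows so that
                // a scheduler computed for the SSP can later be translated back to the input MDP.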
for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) { storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex]; @@ -1345,6 +1352,9 @@ namespace storm { } } + if (produceScheduler) { + sspMecChoicesToOriginalMap.emplace_back(state, choice - nondeterministicChoiceIndices[state]); + } ++currentChoice; } } @@ -1353,6 +1363,10 @@ namespace storm { // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC. ++currentChoice; b.push_back(lraValuesForEndComponents[mecIndex]); + if (produceScheduler) { + // Insert some invalid values + sspMecChoicesToOriginalMap.emplace_back(std::numeric_limits::max(), std::numeric_limits::max()); + } } // Finalize the matrix and solve the corresponding system of equations. @@ -1360,7 +1374,7 @@ namespace storm { // Check for requirements of the solver. storm::solver::GeneralMinMaxLinearEquationSolverFactory minMaxLinearEquationSolverFactory; - storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction()); + storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction(), false, produceScheduler); requirements.clearBounds(); STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked."); @@ -1372,6 +1386,7 @@ namespace storm { solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end())); solver->setHasUniqueSolution(); solver->setHasNoEndComponents(); + solver->setTrackScheduler(produceScheduler); solver->setRequirementsChecked(); solver->solveEquations(underlyingSolverEnvironment, sspResult, b); @@ -1386,25 +1401,74 @@ namespace storm { result[state] = sspResult[firstAuxiliaryStateIndex + stateToMecIndexMap[state]]; } - return result; + if (produceScheduler && solver->hasScheduler()) { + // Translate result for ssp matrix to original model + auto const& sspChoices = solver->getSchedulerChoices(); + uint64_t sspState = 0; + for (auto state : statesNotContainedInAnyMec) { + scheduler->setChoice(sspChoices[sspState], state); + ++sspState; + } + // The other sspStates correspond to MECS in the original system. + uint_fast64_t rowOffset = sspMatrix.getRowGroupIndices()[sspState]; + for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) { + // Obtain the state and choice of the original model to which the selected choice corresponds. + auto const& originalStateChoice = sspMecChoicesToOriginalMap[sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState] - rowOffset]; + // Check if the best choice is to stay in this MEC + if (originalStateChoice.first == std::numeric_limits::max()) { + STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC."); + // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMaximalEndComponent. + } else { + // The best choice is to leave this MEC via the selected state and choice. 
+ scheduler->setChoice(originalStateChoice.second, originalStateChoice.first); + // The remaining states in this MEC need to reach this state with probability 1. + storm::storage::BitVector exitStateAsBitVector(transitionMatrix.getRowGroupCount(), false); + exitStateAsBitVector.set(originalStateChoice.first, true); + storm::storage::BitVector otherStatesAsBitVector(transitionMatrix.getRowGroupCount(), false); + for (auto const& stateChoices : mecDecomposition[mecIndex]) { + if (stateChoices.first != originalStateChoice.first) { + otherStatesAsBitVector.set(stateChoices.first, true); + } + } + storm::utility::graph::computeSchedulerProb1E(otherStatesAsBitVector, transitionMatrix, backwardTransitions, otherStatesAsBitVector, exitStateAsBitVector, *scheduler); + } + ++sspState; + } + assert(sspState == sspMatrix.getRowGroupCount()); + } else { + STORM_LOG_ERROR_COND(!produceScheduler, "Requested to produce a scheduler, but no scheduler was generated."); + } + + return MDPSparseModelCheckingHelperReturnType(std::move(result), std::move(scheduler)); } template template - ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) { + ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler) { // If the mec only consists of a single state, we compute the LRA value directly if (++mec.begin() == mec.end()) { uint64_t state = mec.begin()->first; auto choiceIt = mec.begin()->second.begin(); ValueType result = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix); + uint_fast64_t bestChoice = *choiceIt; for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) { + ValueType choiceValue = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix); if (storm::solver::minimize(dir)) { - result = std::min(result, rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix)); + if (result > choiceValue) { + result = std::move(choiceValue); + bestChoice = *choiceIt; + } } else { - result = std::max(result, rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix)); + if (result < choiceValue) { + result = std::move(choiceValue); + bestChoice = *choiceIt; + } } } + if (scheduler) { + scheduler->setChoice(*choiceIt - transitionMatrix.getRowGroupIndices()[state], state); + } return result; } @@ -1417,10 +1481,11 @@ namespace storm { STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method."); method = storm::solver::LraMethod::ValueIteration; } + STORM_LOG_ERROR_COND(scheduler == nullptr || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. 
Try value-iteration."); if (method == storm::solver::LraMethod::LinearProgramming) { return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, rewardModel, mec); } else if (method == storm::solver::LraMethod::ValueIteration) { - return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec); + return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec, scheduler); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique."); } @@ -1428,7 +1493,7 @@ namespace storm { template template - ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) { + ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler) { // Initialize data about the mec storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false); @@ -1520,6 +1585,22 @@ namespace storm { break; } } + + if (scheduler) { + std::vector localMecChoices(mecTransitions.getRowGroupCount(), 0); + multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x, &localMecChoices); + auto localMecChoiceIt = localMecChoices.begin(); + for (auto const& mecState : mecStates) { + // Get the choice index of the selected mec choice with respect to the global transition matrix. + uint_fast64_t globalChoice = mecChoices.getNextSetIndex(transitionMatrix.getRowGroupIndices()[mecState]); + for (uint_fast64_t i = 0; i < *localMecChoiceIt; ++i) { + globalChoice = mecChoices.getNextSetIndex(globalChoice + 1); + } + STORM_LOG_ASSERT(globalChoice < transitionMatrix.getRowGroupIndices()[mecState + 1], "Invalid global choice for mec state."); + scheduler->setChoice(globalChoice - transitionMatrix.getRowGroupIndices()[mecState], mecState); + ++localMecChoiceIt; + } + } return (maxDiff + minDiff) / (storm::utility::convertNumber(2.0) * scalingFactor); } @@ -1717,9 +1798,9 @@ namespace storm { template std::vector SparseMdpPrctlHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, uint_fast64_t stepBound); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint); - template std::vector SparseMdpPrctlHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, 
storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel); - template double SparseMdpPrctlHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); - template double SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); + template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, bool produceScheduler); + template double SparseMdpPrctlHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler); + template double SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler); template double SparseMdpPrctlHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); #ifdef STORM_HAVE_CARL @@ -1728,9 +1809,9 @@ namespace storm { template std::vector SparseMdpPrctlHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, uint_fast64_t stepBound); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint); - template std::vector SparseMdpPrctlHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel); - template storm::RationalNumber 
SparseMdpPrctlHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); - template storm::RationalNumber SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); + template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, bool produceScheduler); + template storm::RationalNumber SparseMdpPrctlHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler); + template storm::RationalNumber SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler); template storm::RationalNumber SparseMdpPrctlHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); #endif } diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h index 0be4ad060..441e6ce41 100644 --- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h +++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h @@ -67,11 +67,11 @@ namespace storm { static std::vector computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& intervalRewardModel, bool lowerBoundOfIntervals, storm::storage::BitVector const& targetStates, bool qualitative); #endif - static std::vector computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& psiStates); + static MDPSparseModelCheckingHelperReturnType computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler); template - static std::vector computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, RewardModelType const& rewardModel); + static MDPSparseModelCheckingHelperReturnType 
computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler); static std::unique_ptr computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates); @@ -79,9 +79,9 @@ namespace storm { static MDPSparseModelCheckingHelperReturnType computeReachabilityRewardsHelper(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::function(uint_fast64_t, storm::storage::SparseMatrix const&, storm::storage::BitVector const&)> const& totalStateRewardVectorGetter, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, std::function const& zeroRewardStatesGetter, std::function const& zeroRewardChoicesGetter, ModelCheckerHint const& hint = ModelCheckerHint()); template - static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec); + static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler); template - static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec); + static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler); template static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec); From 72425ec1b2f3da16ff7c7fd3cc3f5d404495f17c Mon Sep 17 00:00:00 2001 From: Tim Quatmann Date: Wed, 31 Jul 2019 18:45:01 +0200 Subject: [PATCH 2/5] CLI: Added an option to export the produced scheduler to a file. 
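For orientation, the pieces added in this patch combine roughly as follows when Storm is driven through its C++ API instead of the CLI. This is only a sketch: `sparseModel` (a sparse MDP), `formula`, and the concrete value type `double` are assumptions, and the calls mirror those used in model-handling.h and the new helper in storm/api/export.h.

    // Ask the model checker to produce a scheduler and export it afterwards (sketch).
    auto task = storm::api::createTask<double>(formula, true);  // second argument: only initial states are relevant
    task.setProduceSchedulers(true);
    std::unique_ptr<storm::modelchecker::CheckResult> result = storm::api::verifyWithSparseEngine(sparseModel, task);
    if (result && result->asExplicitQuantitativeCheckResult<double>().hasScheduler()) {
        auto const& scheduler = result->asExplicitQuantitativeCheckResult<double>().getScheduler();
        storm::api::exportScheduler(sparseModel, scheduler, "scheduler.txt");
    }

On the command line, the same behaviour is triggered with the new option `--exportscheduler <filename>`.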
--- src/storm-cli-utilities/model-handling.h | 17 ++++++++++++++++- src/storm/api/export.h | 10 ++++++++++ src/storm/settings/modules/IOSettings.cpp | 10 ++++++++++ src/storm/settings/modules/IOSettings.h | 11 +++++++++++ 4 files changed, 47 insertions(+), 1 deletion(-) diff --git a/src/storm-cli-utilities/model-handling.h b/src/storm-cli-utilities/model-handling.h index 155707f10..fd88471c5 100644 --- a/src/storm-cli-utilities/model-handling.h +++ b/src/storm-cli-utilities/model-handling.h @@ -748,10 +748,14 @@ namespace storm { template void verifyWithSparseEngine(std::shared_ptr const& model, SymbolicInput const& input) { auto sparseModel = model->as>(); + auto const& ioSettings = storm::settings::getModule(); verifyProperties(input, - [&sparseModel] (std::shared_ptr const& formula, std::shared_ptr const& states) { + [&sparseModel,&ioSettings] (std::shared_ptr const& formula, std::shared_ptr const& states) { bool filterForInitialStates = states->isInitialFormula(); auto task = storm::api::createTask(formula, filterForInitialStates); + if (ioSettings.isExportSchedulerSet()) { + task.setProduceSchedulers(true); + } std::unique_ptr result = storm::api::verifyWithSparseEngine(sparseModel, task); std::unique_ptr filter; @@ -764,6 +768,17 @@ namespace storm { result->filter(filter->asQualitativeCheckResult()); } return result; + }, + [&sparseModel,&ioSettings] (std::unique_ptr const& result) { + if (ioSettings.isExportSchedulerSet()) { + if (result->template asExplicitQuantitativeCheckResult().hasScheduler()) { + auto const& scheduler = result->template asExplicitQuantitativeCheckResult().getScheduler(); + STORM_PRINT_AND_LOG("Exporting scheduler to '" << ioSettings.getExportSchedulerFilename()) + storm::api::exportScheduler(sparseModel, scheduler, ioSettings.getExportSchedulerFilename()); + } else { + STORM_LOG_ERROR("Scheduler requested but could not be generated."); + } + } }); } diff --git a/src/storm/api/export.h b/src/storm/api/export.h index f1342ef5a..f3064b538 100644 --- a/src/storm/api/export.h +++ b/src/storm/api/export.h @@ -6,6 +6,7 @@ #include "storm/utility/DDEncodingExporter.h" #include "storm/utility/file.h" #include "storm/utility/macros.h" +#include "storm/storage/Scheduler.h" namespace storm { @@ -42,5 +43,14 @@ namespace storm { void exportSymbolicModelAsDot(std::shared_ptr> const& model, std::string const& filename) { model->writeDotToFile(filename); } + + template + void exportScheduler(std::shared_ptr> const& model, storm::storage::Scheduler const& scheduler, std::string const& filename) { + std::ofstream stream; + storm::utility::openFile(filename, stream); + scheduler.printToStream(stream, model); + storm::utility::closeFile(stream); + } + } } diff --git a/src/storm/settings/modules/IOSettings.cpp b/src/storm/settings/modules/IOSettings.cpp index 43bf02306..04c7d4256 100644 --- a/src/storm/settings/modules/IOSettings.cpp +++ b/src/storm/settings/modules/IOSettings.cpp @@ -23,6 +23,7 @@ namespace storm { const std::string IOSettings::exportJaniDotOptionName = "exportjanidot"; const std::string IOSettings::exportCdfOptionName = "exportcdf"; const std::string IOSettings::exportCdfOptionShortName = "cdf"; + const std::string IOSettings::exportSchedulerOptionName = "exportscheduler"; const std::string IOSettings::explicitOptionName = "explicit"; const std::string IOSettings::explicitOptionShortName = "exp"; const std::string IOSettings::explicitDrnOptionName = "explicit-drn"; @@ -55,6 +56,7 @@ namespace storm { this->addOption(storm::settings::OptionBuilder(moduleName, 
exportJaniDotOptionName, "", "If given, the loaded jani model will be written to the specified file in the dot format.").setIsAdvanced() .addArgument(storm::settings::ArgumentBuilder::createStringArgument("filename", "The name of the file to which the model is to be written.").build()).build()); this->addOption(storm::settings::OptionBuilder(moduleName, exportCdfOptionName, false, "Exports the cumulative density function for reward bounded properties into a .csv file.").setIsAdvanced().setShortName(exportCdfOptionShortName).addArgument(storm::settings::ArgumentBuilder::createStringArgument("directory", "A path to an existing directory where the cdf files will be stored.").build()).build()); + this->addOption(storm::settings::OptionBuilder(moduleName, exportSchedulerOptionName, false, "Exports the choices of an optimal scheduler to the given file (if supported by engine).").setIsAdvanced().addArgument(storm::settings::ArgumentBuilder::createStringArgument("filename", "The output file.").build()).build()); this->addOption(storm::settings::OptionBuilder(moduleName, exportExplicitOptionName, "", "If given, the loaded model will be written to the specified file in the drn format.") .addArgument(storm::settings::ArgumentBuilder::createStringArgument("filename", "the name of the file to which the model is to be writen.").build()).build()); this->addOption(storm::settings::OptionBuilder(moduleName, exportDdOptionName, "", "If given, the loaded model will be written to the specified file in the drdd format.") @@ -148,6 +150,14 @@ namespace storm { return result; } + bool IOSettings::isExportSchedulerSet() const { + return this->getOption(exportSchedulerOptionName).getHasOptionBeenSet(); + } + + std::string IOSettings::getExportSchedulerFilename() const { + return this->getOption(exportSchedulerOptionName).getArgumentByName("filename").getValueAsString(); + } + bool IOSettings::isExplicitSet() const { return this->getOption(explicitOptionName).getHasOptionBeenSet(); } diff --git a/src/storm/settings/modules/IOSettings.h b/src/storm/settings/modules/IOSettings.h index e4ff6adf2..96b419b67 100644 --- a/src/storm/settings/modules/IOSettings.h +++ b/src/storm/settings/modules/IOSettings.h @@ -89,6 +89,16 @@ namespace storm { */ std::string getExportCdfDirectory() const; + /*! + * Retrieves whether an optimal scheduler is to be exported + */ + bool isExportSchedulerSet() const; + + /*! + * Retrieves a filename to which an optimal scheduler will be exported. + */ + std::string getExportSchedulerFilename() const; + /*! * Retrieves whether the explicit option was set. * @@ -325,6 +335,7 @@ namespace storm { static const std::string exportDdOptionName; static const std::string exportCdfOptionName; static const std::string exportCdfOptionShortName; + static const std::string exportSchedulerOptionName; static const std::string explicitOptionName; static const std::string explicitOptionShortName; static const std::string explicitDrnOptionName; From 8a23197a77f62b7e3139541ea7fa3ce10205007f Mon Sep 17 00:00:00 2001 From: Tim Quatmann Date: Wed, 31 Jul 2019 19:02:36 +0200 Subject: [PATCH 3/5] Fix for LRA scheduler generation. 
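The defect addressed here sits in the single-state MEC shortcut of computeLraForMaximalEndComponent: the loop over the state's choices tracks the best choice in `bestChoice`, but the scheduler entry was written from `*choiceIt`, and after the loop that iterator has already run past the end of the choice set. The corrected logic corresponds to the following sketch (names are taken from the helper; the surrounding context is assumed):

    // Pick the best choice of the single MEC state and record it in the scheduler.
    uint64_t state = mec.begin()->first;
    uint_fast64_t bestChoice = *mec.begin()->second.begin();
    ValueType bestValue = rewardModel.getTotalStateActionReward(state, bestChoice, transitionMatrix);
    for (auto choice : mec.begin()->second) {
        ValueType value = rewardModel.getTotalStateActionReward(state, choice, transitionMatrix);
        if (storm::solver::minimize(dir) ? value < bestValue : value > bestValue) {
            bestValue = value;
            bestChoice = choice;
        }
    }
    if (scheduler) {
        // The scheduler stores choices as offsets within the state's row group.
        scheduler->setChoice(bestChoice - transitionMatrix.getRowGroupIndices()[state], state);
    }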
---
 src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
index bb6b77170..f970244d1 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
@@ -1467,7 +1467,7 @@ namespace storm {
                 }
             }
             if (scheduler) {
-                scheduler->setChoice(*choiceIt - transitionMatrix.getRowGroupIndices()[state], state);
+                scheduler->setChoice(bestChoice - transitionMatrix.getRowGroupIndices()[state], state);
             }
             return result;
         }

From a47945a93184893d6c3f81acfa6b2a269f072294 Mon Sep 17 00:00:00 2001
From: Tim Quatmann
Date: Wed, 31 Jul 2019 19:03:02 +0200
Subject: [PATCH 4/5] Cleaner output when exporting schedulers

---
 src/storm-cli-utilities/model-handling.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/storm-cli-utilities/model-handling.h b/src/storm-cli-utilities/model-handling.h
index fd88471c5..f4f133b8a 100644
--- a/src/storm-cli-utilities/model-handling.h
+++ b/src/storm-cli-utilities/model-handling.h
@@ -773,7 +773,7 @@ namespace storm {
                     if (ioSettings.isExportSchedulerSet()) {
                         if (result->template asExplicitQuantitativeCheckResult().hasScheduler()) {
                             auto const& scheduler = result->template asExplicitQuantitativeCheckResult().getScheduler();
-                            STORM_PRINT_AND_LOG("Exporting scheduler to '" << ioSettings.getExportSchedulerFilename())
+                            STORM_PRINT_AND_LOG("Exporting scheduler ... ")
                             storm::api::exportScheduler(sparseModel, scheduler, ioSettings.getExportSchedulerFilename());
                         } else {
                             STORM_LOG_ERROR("Scheduler requested but could not be generated.");
                         }
                     }
                 });
         }

From b896726c4a5d7bd751a35c23dab52c33bf91a98f Mon Sep 17 00:00:00 2001
From: Tim Quatmann
Date: Wed, 31 Jul 2019 19:03:20 +0200
Subject: [PATCH 5/5] Include choice labels in exported scheduler.
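When the model provides a choice labeling, the exported scheduler now prints the labels of each selected choice next to its index. The labels live on global choice (row) indices, so the lookup adds the chosen local index to the state's row-group offset, roughly as in this sketch (`model`, `state`, and `localChoice` are placeholders; `boost::join` comes from the Boost string algorithms):

    // Append the labels of the selected choice, e.g. "3 {done, reset}".
    if (model->hasChoiceLabeling()) {
        uint_fast64_t globalRow = model->getTransitionMatrix().getRowGroupIndices()[state] + localChoice;
        auto labels = model->getChoiceLabeling().getLabelsOfChoice(globalRow);
        out << " {" << boost::join(labels, ", ") << "}";
    }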
---
 src/storm/storage/Scheduler.cpp | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/storm/storage/Scheduler.cpp b/src/storm/storage/Scheduler.cpp
index ed118b4ca..fa40732c6 100644
--- a/src/storm/storage/Scheduler.cpp
+++ b/src/storm/storage/Scheduler.cpp
@@ -3,6 +3,7 @@
 #include "storm/utility/macros.h"
 #include "storm/exceptions/NotImplementedException.h"
+#include

 namespace storm {
     namespace storage {
@@ -125,6 +126,7 @@ namespace storm {
            STORM_LOG_THROW(model == nullptr || model->getNumberOfStates() == schedulerChoices.front().size(), storm::exceptions::InvalidOperationException, "The given model is not compatible with this scheduler.");

            bool const stateValuationsGiven = model != nullptr && model->hasStateValuations();
+           bool const choiceLabelsGiven = model != nullptr && model->hasChoiceLabeling();
            bool const choiceOriginsGiven = model != nullptr && model->hasChoiceOrigins();
            uint_fast64_t widthOfStates = std::to_string(schedulerChoices.front().size()).length();
            if (stateValuationsGiven) {
@@ -181,6 +183,10 @@
                    } else {
                        out << choice.getDeterministicChoice();
                    }
+                   if (choiceLabelsGiven) {
+                       auto choiceLabels = model->getChoiceLabeling().getLabelsOfChoice(model->getTransitionMatrix().getRowGroupIndices()[state] + choice.getDeterministicChoice());
+                       out << " {" << boost::join(choiceLabels, ", ") << "}";
+                   }
                } else {
                    bool firstChoice = true;
                    for (auto const& choiceProbPair : choice.getChoiceAsDistribution()) {
@@ -195,6 +201,10 @@
                        } else {
                            out << choiceProbPair.first;
                        }
+                       if (choiceLabelsGiven) {
+                           auto choiceLabels = model->getChoiceLabeling().getLabelsOfChoice(model->getTransitionMatrix().getRowGroupIndices()[state] + choice.getDeterministicChoice());
+                           out << " {" << boost::join(choiceLabels, ", ") << "}";
+                       }
                        out << ")";
                    }
                }
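Taken together, scheduler extraction for long-run average properties proceeds in two phases: value iteration inside each MEC (or a direct choice comparison for single-state MECs) fixes the optimal in-MEC choices and yields the MECs' LRA values, and the min-max solver for the induced SSP, with setTrackScheduler enabled, fixes the choices of the remaining states. Translating the SSP scheduler back, a state outside of all MECs simply copies its choice; for a MEC, either the "stay" choice was optimal and the in-MEC choices are kept, or the chosen exit state and choice are installed and computeSchedulerProb1E makes the other MEC states reach that exit with probability one. From the command line, the resulting scheduler can then be written to a file, for example (sketch; the model and property files are placeholders):

    storm --prism model.nm --prop properties.pctl --exportscheduler scheduler.txt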