From 009cee1c258db53d3890ceb7ed57bbe1cef7179b Mon Sep 17 00:00:00 2001
From: Tim Quatmann
Date: Wed, 31 Jul 2019 18:44:18 +0200
Subject: [PATCH] Implemented scheduler extraction for LRA properties for MDP.

---
 .../prctl/SparseMdpPrctlModelChecker.cpp      |  16 ++-
 .../prctl/helper/SparseMdpPrctlHelper.cpp     | 115 +++++++++++++++---
 .../prctl/helper/SparseMdpPrctlHelper.h       |   8 +-
 3 files changed, 114 insertions(+), 25 deletions(-)

diff --git a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
index 79e0a433b..11821a518 100644
--- a/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
+++ b/src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
@@ -215,16 +215,24 @@ namespace storm {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
             ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
-            std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), subResult.getTruthValuesVector());
-            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
+            auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), subResult.getTruthValuesVector(), checkTask.isProduceSchedulersSet());
+            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
+            if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
+                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+            }
+            return result;
         }
 
         template<typename SparseMdpModelType>
         std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
             STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
             auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
-            std::vector<ValueType> result = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get());
-            return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(result)));
+            auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isProduceSchedulersSet());
+            std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
+            if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
+                result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+            }
+            return result;
         }
 
         template<typename SparseMdpModelType>
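The checker-level change above only forwards checkTask.isProduceSchedulersSet() to the helper and attaches the returned scheduler to the quantitative check result. As an illustrative sketch of how a client could exercise this path — `mdp` and `formula` are placeholder inputs, and while the calls mirror APIs used in this patch, the fragment is an assumption-laden example rather than code from the repository:

    #include "storm/environment/Environment.h"
    #include "storm/modelchecker/prctl/SparseMdpPrctlModelChecker.h"

    // Illustrative only: request an LRA scheduler and read it back.
    void exampleQuery(storm::models::sparse::Mdp<double> const& mdp, storm::logic::Formula const& formula) {
        storm::Environment env;
        storm::modelchecker::SparseMdpPrctlModelChecker<storm::models::sparse::Mdp<double>> checker(mdp);
        storm::modelchecker::CheckTask<storm::logic::Formula, double> task(formula, true);
        task.setProduceSchedulers(true);  // enables the new produceScheduler code path
        std::unique_ptr<storm::modelchecker::CheckResult> result = checker.check(env, task);
        auto const& quantitative = result->asExplicitQuantitativeCheckResult<double>();
        if (quantitative.hasScheduler()) {
            storm::storage::Scheduler<double> const& scheduler = quantitative.getScheduler();
            // scheduler.getChoice(state) now yields the LRA-optimal choice per state.
        }
    }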
diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
index 9e036a383..bb6b77170 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
@@ -1206,7 +1206,7 @@ namespace storm {
             }
 
             template<typename ValueType>
-            std::vector<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates) {
+            MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler) {
 
                 // If there are no goal states, we avoid the computation and directly return zero.
                 if (psiStates.empty()) {
@@ -1223,15 +1223,20 @@ namespace storm {
                 std::vector<ValueType> stateRewards(psiStates.size(), storm::utility::zero<ValueType>());
                 storm::utility::vector::setVectorValues(stateRewards, psiStates, storm::utility::one<ValueType>());
                 storm::models::sparse::StandardRewardModel<ValueType> rewardModel(std::move(stateRewards));
-                return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel);
+                return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel, produceScheduler);
             }
 
             template<typename ValueType>
             template<typename RewardModelType>
-            std::vector<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel) {
+            MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler) {
 
                 uint64_t numberOfStates = transitionMatrix.getRowGroupCount();
 
+                std::unique_ptr<storm::storage::Scheduler<ValueType>> scheduler;
+                if (produceScheduler) {
+                    scheduler = std::make_unique<storm::storage::Scheduler<ValueType>>(numberOfStates);
+                }
+
                 // Start by decomposing the MDP into its MECs.
                 storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);
@@ -1253,7 +1258,7 @@ namespace storm {
                 for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
                     storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];
 
-                    lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec);
+                    lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec, scheduler);
 
                     // Gather information for later use.
                     for (auto const& stateChoicesPair : mec) {
@@ -1312,6 +1317,8 @@ namespace storm {
                     }
                 }
 
+                std::vector<std::pair<uint64_t, uint64_t>> sspMecChoicesToOriginalMap; // for scheduler extraction
+
                 // Now we are ready to construct the choices for the auxiliary states.
                 for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
                     storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
@@ -1345,6 +1352,9 @@ namespace storm {
                                 }
                             }
 
+                            if (produceScheduler) {
+                                sspMecChoicesToOriginalMap.emplace_back(state, choice - nondeterministicChoiceIndices[state]);
+                            }
                             ++currentChoice;
                         }
                     }
@@ -1353,6 +1363,10 @@ namespace storm {
                     // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
                     ++currentChoice;
                     b.push_back(lraValuesForEndComponents[mecIndex]);
+                    if (produceScheduler) {
+                        // Insert invalid values that encode the choice of staying inside the MEC.
+                        sspMecChoicesToOriginalMap.emplace_back(std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max());
+                    }
                 }
 
                 // Finalize the matrix and solve the corresponding system of equations.
@@ -1360,7 +1374,7 @@ namespace storm {
 
                 // Check for requirements of the solver.
                 storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
-                storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction());
+                storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction(), false, produceScheduler);
                 requirements.clearBounds();
                 STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
@@ -1372,6 +1386,7 @@ namespace storm {
                 solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end()));
                 solver->setHasUniqueSolution();
                 solver->setHasNoEndComponents();
+                solver->setTrackScheduler(produceScheduler);
                 solver->setRequirementsChecked();
                 solver->solveEquations(underlyingSolverEnvironment, sspResult, b);
@@ -1386,25 +1401,74 @@ namespace storm {
                     result[state] = sspResult[firstAuxiliaryStateIndex + stateToMecIndexMap[state]];
                 }
 
-                return result;
+                if (produceScheduler && solver->hasScheduler()) {
+                    // Translate the result for the ssp matrix back to the original model.
+                    auto const& sspChoices = solver->getSchedulerChoices();
+                    uint64_t sspState = 0;
+                    for (auto state : statesNotContainedInAnyMec) {
+                        scheduler->setChoice(sspChoices[sspState], state);
+                        ++sspState;
+                    }
+                    // The remaining sspStates correspond to MECs in the original system.
+                    uint_fast64_t rowOffset = sspMatrix.getRowGroupIndices()[sspState];
+                    for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
+                        // Obtain the state and choice of the original model to which the selected choice corresponds.
+                        auto const& originalStateChoice = sspMecChoicesToOriginalMap[sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState] - rowOffset];
+                        // Check if the best choice is to stay in this MEC.
+                        if (originalStateChoice.first == std::numeric_limits<uint64_t>::max()) {
+                            STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
+                            // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call to computeLraForMaximalEndComponent.
+                        } else {
+                            // The best choice is to leave this MEC via the selected state and choice.
+                            scheduler->setChoice(originalStateChoice.second, originalStateChoice.first);
+                            // The remaining states in this MEC need to reach this state with probability 1.
+                            storm::storage::BitVector exitStateAsBitVector(transitionMatrix.getRowGroupCount(), false);
+                            exitStateAsBitVector.set(originalStateChoice.first, true);
+                            storm::storage::BitVector otherStatesAsBitVector(transitionMatrix.getRowGroupCount(), false);
+                            for (auto const& stateChoices : mecDecomposition[mecIndex]) {
+                                if (stateChoices.first != originalStateChoice.first) {
+                                    otherStatesAsBitVector.set(stateChoices.first, true);
+                                }
+                            }
+                            storm::utility::graph::computeSchedulerProb1E(otherStatesAsBitVector, transitionMatrix, backwardTransitions, otherStatesAsBitVector, exitStateAsBitVector, *scheduler);
+                        }
+                        ++sspState;
+                    }
+                    assert(sspState == sspMatrix.getRowGroupCount());
+                } else {
+                    STORM_LOG_ERROR_COND(!produceScheduler, "Requested to produce a scheduler, but no scheduler was generated.");
+                }
+
+                return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
             }
 
             template<typename ValueType>
             template<typename RewardModelType>
-            ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
+            ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler) {
 
                 // If the mec only consists of a single state, we compute the LRA value directly.
                 if (++mec.begin() == mec.end()) {
                     uint64_t state = mec.begin()->first;
                     auto choiceIt = mec.begin()->second.begin();
                     ValueType result = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix);
+                    uint_fast64_t bestChoice = *choiceIt;
                     for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) {
+                        ValueType choiceValue = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix);
                         if (storm::solver::minimize(dir)) {
-                            result = std::min(result, rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix));
+                            if (result > choiceValue) {
+                                result = std::move(choiceValue);
+                                bestChoice = *choiceIt;
+                            }
                         } else {
-                            result = std::max(result, rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix));
+                            if (result < choiceValue) {
+                                result = std::move(choiceValue);
+                                bestChoice = *choiceIt;
+                            }
                         }
                     }
+                    if (scheduler) {
+                        scheduler->setChoice(bestChoice - transitionMatrix.getRowGroupIndices()[state], state);
+                    }
                     return result;
                 }
@@ -1417,10 +1481,11 @@ namespace storm {
                     STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
                     method = storm::solver::LraMethod::ValueIteration;
                 }
+                STORM_LOG_ERROR_COND(scheduler == nullptr || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
Try value-iteration."); if (method == storm::solver::LraMethod::LinearProgramming) { return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, rewardModel, mec); } else if (method == storm::solver::LraMethod::ValueIteration) { - return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec); + return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec, scheduler); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique."); } @@ -1428,7 +1493,7 @@ namespace storm { template template - ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) { + ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler) { // Initialize data about the mec storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false); @@ -1520,6 +1585,22 @@ namespace storm { break; } } + + if (scheduler) { + std::vector localMecChoices(mecTransitions.getRowGroupCount(), 0); + multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x, &localMecChoices); + auto localMecChoiceIt = localMecChoices.begin(); + for (auto const& mecState : mecStates) { + // Get the choice index of the selected mec choice with respect to the global transition matrix. + uint_fast64_t globalChoice = mecChoices.getNextSetIndex(transitionMatrix.getRowGroupIndices()[mecState]); + for (uint_fast64_t i = 0; i < *localMecChoiceIt; ++i) { + globalChoice = mecChoices.getNextSetIndex(globalChoice + 1); + } + STORM_LOG_ASSERT(globalChoice < transitionMatrix.getRowGroupIndices()[mecState + 1], "Invalid global choice for mec state."); + scheduler->setChoice(globalChoice - transitionMatrix.getRowGroupIndices()[mecState], mecState); + ++localMecChoiceIt; + } + } return (maxDiff + minDiff) / (storm::utility::convertNumber(2.0) * scalingFactor); } @@ -1717,9 +1798,9 @@ namespace storm { template std::vector SparseMdpPrctlHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, uint_fast64_t stepBound); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint); - template std::vector SparseMdpPrctlHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal&& goal, 
-            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
+            template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool produceScheduler);
+            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
+            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
             template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
 
 #ifdef STORM_HAVE_CARL
@@ -1728,9 +1809,9 @@ namespace storm {
 
             template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, uint_fast64_t stepBound);
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
-            template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel);
-            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
+            template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool produceScheduler);
+            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
+            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
             template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
 #endif
         }
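In computeLraForMaximalEndComponentVI above, the extra multiplyAndReduce call yields, for every MEC state, the position of the optimal choice among that state's MEC-internal choices; this local index is then mapped back to a row of the full transition matrix by walking the set bits of mecChoices. The following self-contained sketch shows the same index translation with plain std types — all names here are illustrative stand-ins, not Storm APIs:

    #include <cstdint>
    #include <vector>

    // Recover the global row index of a state's k-th MEC choice.
    // `rowGroupIndices` plays the role of transitionMatrix.getRowGroupIndices(),
    // and `mecChoices` the role of the mecChoices bit vector.
    uint64_t localToGlobalChoice(std::vector<uint64_t> const& rowGroupIndices,
                                 std::vector<bool> const& mecChoices,
                                 uint64_t state, uint64_t localChoice) {
        // Find the first MEC choice within this state's row group...
        uint64_t global = rowGroupIndices[state];
        while (!mecChoices[global]) {
            ++global;
        }
        // ...then skip forward over the next `localChoice` MEC choices.
        for (uint64_t i = 0; i < localChoice; ++i) {
            do {
                ++global;
            } while (!mecChoices[global]);
        }
        return global;  // the patch subtracts rowGroupIndices[state] to obtain a local offset
    }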
diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
index 0be4ad060..441e6ce41 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
@@ -67,11 +67,11 @@ namespace storm {
                 static std::vector<ValueType> computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::Interval> const& intervalRewardModel, bool lowerBoundOfIntervals, storm::storage::BitVector const& targetStates, bool qualitative);
 #endif
 
-                static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates);
+                static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler);
 
                 template<typename RewardModelType>
-                static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel);
+                static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler);
 
                 static std::unique_ptr<CheckResult> computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates);
 
@@ -79,9 +79,9 @@ namespace storm {
                 static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityRewardsHelper(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::function<std::vector<ValueType>(uint_fast64_t, storm::storage::SparseMatrix<ValueType> const&, storm::storage::BitVector const&)> const& totalStateRewardVectorGetter, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, std::function<storm::storage::BitVector()> const& zeroRewardStatesGetter, std::function<storm::storage::BitVector()> const& zeroRewardChoicesGetter, ModelCheckerHint const& hint = ModelCheckerHint());
 
                 template<typename RewardModelType>
-                static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
+                static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
 
                 template<typename RewardModelType>
-                static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
+                static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
 
                 template<typename RewardModelType>
                 static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
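A final note on the MEC case: when the SSP solution decides to leave a MEC via a particular state/choice pair, all other states of the MEC must be steered to that exit state with probability 1, which the patch delegates to storm::utility::graph::computeSchedulerProb1E. The sketch below, again with plain std types and illustrative names (a simplification that assumes every listed choice stays inside the MEC), shows why a backward BFS from the exit state suffices to produce such a scheduler:

    #include <cstdint>
    #include <queue>
    #include <vector>

    // Simplified stand-in for the prob-1 scheduler construction used above.
    // `successors[s][c]` lists the successor states of choice c of state s.
    std::vector<uint64_t> schedulerTowards(
            std::vector<std::vector<std::vector<uint64_t>>> const& successors,
            uint64_t exitState) {
        uint64_t n = successors.size();
        std::vector<uint64_t> choice(n, 0);
        std::vector<bool> done(n, false);
        done[exitState] = true;
        std::queue<uint64_t> todo;
        todo.push(exitState);
        while (!todo.empty()) {
            uint64_t target = todo.front();
            todo.pop();
            // Give every unfinished state a choice that can move to `target`.
            for (uint64_t s = 0; s < n; ++s) {
                if (done[s]) continue;
                for (uint64_t c = 0; c < successors[s].size(); ++c) {
                    for (uint64_t succ : successors[s][c]) {
                        if (succ == target) {
                            choice[s] = c;
                            done[s] = true;
                            todo.push(s);
                            break;
                        }
                    }
                    if (done[s]) break;
                }
            }
        }
        return choice;
    }

Because the MEC is strongly connected, every state eventually receives a choice, and in the Markov chain induced by these choices every bottom SCC contains the exit state; hence the exit state is reached with probability 1 from everywhere in the MEC.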