diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
index 3b69fa62d..5282f9e60 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
@@ -39,7 +39,6 @@
 #include "storm/transformer/EndComponentEliminator.h"
 
 #include "storm/environment/solver/MinMaxSolverEnvironment.h"
-#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
 
 #include "storm/exceptions/InvalidStateException.h"
 #include "storm/exceptions/InvalidPropertyException.h"
@@ -1208,464 +1207,6 @@ namespace storm {
                 return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
             }
             
-            template<typename ValueType>
-            MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler) {
-                
-                // If there are no goal states, we avoid the computation and directly return zero.
-                if (psiStates.empty()) {
-                    return std::vector<ValueType>(transitionMatrix.getRowGroupCount(), storm::utility::zero<ValueType>());
-                }
-                
-                // Likewise, if all states are goal states, we avoid the computation and directly return one.
-                if (psiStates.full()) {
-                    return std::vector<ValueType>(transitionMatrix.getRowGroupCount(), storm::utility::one<ValueType>());
-                }
-                
-                // Reduce long-run average probabilities to long-run average rewards by
-                // building a reward model that assigns a reward of one to every psi state.
-                std::vector<ValueType> stateRewards(psiStates.size(), storm::utility::zero<ValueType>());
-                storm::utility::vector::setVectorValues(stateRewards, psiStates, storm::utility::one<ValueType>());
-                storm::models::sparse::StandardRewardModel<ValueType> rewardModel(std::move(stateRewards));
-                
-                return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel, produceScheduler);
-            }
-            
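Note on the removed function above: computeLongRunAverageProbabilities is a pure reduction and computes nothing itself. The long-run average probability of being in a psi state equals the long-run average reward under an indicator reward model. As a sketch of the underlying identity in standard LRA notation (stated for orientation only, not taken from the diff):

    \mathrm{LRA}^{\mathrm{opt}}_s(\psi) \;=\; \operatorname{opt}_{\sigma}\, \liminf_{N \to \infty} \frac{1}{N}\, \mathbb{E}^{\sigma}_s\Big[\sum_{t=0}^{N-1} \mathbf{1}_{\psi}(S_t)\Big], \qquad \operatorname{opt} \in \{\sup, \inf\},

which coincides with the long-run average reward for the reward function r(s) = 1 if s is a psi state and 0 otherwise, i.e., exactly the reward model constructed above.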
-            template<typename ValueType>
-            template<typename RewardModelType>
-            MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler) {
-                
-                uint64_t numberOfStates = transitionMatrix.getRowGroupCount();
-                
-                std::unique_ptr<storm::storage::Scheduler<ValueType>> scheduler;
-                if (produceScheduler) {
-                    scheduler = std::make_unique<storm::storage::Scheduler<ValueType>>(numberOfStates);
-                }
-                
-                // Start by decomposing the MDP into its MECs.
-                storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);
-                
-                // Get some data members for convenience.
-                std::vector<uint_fast64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
-                ValueType zero = storm::utility::zero<ValueType>();
-                
-                // First, calculate the LRA values for the maximal end components.
-                storm::storage::BitVector statesInMecs(numberOfStates);
-                std::vector<uint_fast64_t> stateToMecIndexMap(transitionMatrix.getColumnCount());
-                std::vector<ValueType> lraValuesForEndComponents(mecDecomposition.size(), zero);
-                
-                auto underlyingSolverEnvironment = env;
-                if (env.solver().isForceSoundness()) {
-                    // For sound computations, the error in the MECs plus the error in the remaining system should be less than the user-defined precision.
-                    underlyingSolverEnvironment.solver().lra().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
-                    underlyingSolverEnvironment.solver().minMax().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
-                    underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
-                }
-                
-                for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];
-                    
-                    lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec, scheduler);
-                    
-                    // Gather information for later use.
-                    for (auto const& stateChoicesPair : mec) {
-                        statesInMecs.set(stateChoicesPair.first);
-                        stateToMecIndexMap[stateChoicesPair.first] = currentMecIndex;
-                    }
-                }
-                
-                // For fast transition rewriting, we build some auxiliary data structures.
-                storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
-                uint_fast64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits();
-                uint_fast64_t lastStateNotInMecs = 0;
-                uint_fast64_t numberOfStatesNotInMecs = 0;
-                std::vector<uint_fast64_t> statesNotInMecsBeforeIndex;
-                statesNotInMecsBeforeIndex.reserve(numberOfStates);
-                for (auto state : statesNotContainedInAnyMec) {
-                    while (lastStateNotInMecs <= state) {
-                        statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs);
-                        ++lastStateNotInMecs;
-                    }
-                    ++numberOfStatesNotInMecs;
-                }
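The statesNotInMecsBeforeIndex loop above is a standard prefix-count compaction: for each original state index it records how many non-MEC states precede it, which is exactly that state's column index in the reduced system built next. A minimal, self-contained sketch of the same technique (hypothetical helper for illustration, not Storm code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>
    
    // For each index i, compute how many kept indices are strictly smaller than i.
    // keep[i] == true marks a state that survives into the reduced system, so
    // result[i] is the index of state i within the reduced system (if it is kept).
    std::vector<uint64_t> prefixCounts(std::vector<bool> const& keep) {
        std::vector<uint64_t> result(keep.size(), 0);
        uint64_t seen = 0;
        for (std::size_t i = 0; i < keep.size(); ++i) {
            result[i] = seen;
            if (keep[i]) {
                ++seen;
            }
        }
        return result;
    }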
-                
-                // Finally, we are ready to create the SSP matrix and the right-hand side of the SSP.
-                std::vector<ValueType> b;
-                uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();
-                
-                typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, false, true, numberOfSspStates);
-                
-                // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications).
-                uint_fast64_t currentChoice = 0;
-                for (auto state : statesNotContainedInAnyMec) {
-                    sspMatrixBuilder.newRowGroup(currentChoice);
-                    
-                    for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) {
-                        std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
-                        b.push_back(storm::utility::zero<ValueType>());
-                        
-                        for (auto element : transitionMatrix.getRow(choice)) {
-                            if (statesNotContainedInAnyMec.get(element.getColumn())) {
-                                // If the target state is not contained in an MEC, we can copy over the entry.
-                                sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
-                            } else {
-                                // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
-                                // so that we are able to write the cumulative probability to the MEC into the matrix.
-                                auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
-                            }
-                        }
-                        
-                        // Now insert all (cumulative) probability values that target an MEC.
-                        for (uint_fast64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) {
-                            if (!storm::utility::isZero(auxiliaryStateToProbabilityMap[mecIndex])) {
-                                sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]);
-                            }
-                        }
-                    }
-                }
-                
-                std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspMecChoicesToOriginalMap; // for scheduler extraction
-                
-                // Now we are ready to construct the choices for the auxiliary states.
-                for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
-                    storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
-                    sspMatrixBuilder.newRowGroup(currentChoice);
-                    
-                    for (auto const& stateChoicesPair : mec) {
-                        uint_fast64_t state = stateChoicesPair.first;
-                        storm::storage::FlatSet<uint_fast64_t> const& choicesInMec = stateChoicesPair.second;
-                        
-                        for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) {
-                            // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
-                            if (choicesInMec.find(choice) == choicesInMec.end()) {
-                                std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
-                                b.push_back(storm::utility::zero<ValueType>());
-                                
-                                for (auto element : transitionMatrix.getRow(choice)) {
-                                    if (statesNotContainedInAnyMec.get(element.getColumn())) {
-                                        // If the target state is not contained in an MEC, we can copy over the entry.
-                                        sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
-                                    } else {
-                                        // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
-                                        // so that we are able to write the cumulative probability to the MEC into the matrix.
-                                        auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
-                                    }
-                                }
-                                
-                                // Now insert all (cumulative) probability values that target an MEC.
-                                for (uint_fast64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) {
-                                    if (!storm::utility::isZero(auxiliaryStateToProbabilityMap[targetMecIndex])) {
-                                        sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]);
-                                    }
-                                }
-                                
-                                if (produceScheduler) {
-                                    sspMecChoicesToOriginalMap.emplace_back(state, choice - nondeterministicChoiceIndices[state]);
-                                }
-                                ++currentChoice;
-                            }
-                        }
-                    }
-                    
-                    // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
-                    ++currentChoice;
-                    b.push_back(lraValuesForEndComponents[mecIndex]);
-                    if (produceScheduler) {
-                        // Insert some invalid values.
-                        sspMecChoicesToOriginalMap.emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
-                    }
-                }
-                
-                // Finalize the matrix and solve the corresponding system of equations.
-                storm::storage::SparseMatrix<ValueType> sspMatrix = sspMatrixBuilder.build(currentChoice, numberOfSspStates, numberOfSspStates);
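The matrix just built encodes a stochastic-shortest-path-style total-reward system over the reduced state space: states outside MECs keep their original choices, and each MEC M is collapsed into one auxiliary state whose choices are the MEC-exiting choices plus one extra "stay" row with an empty distribution and right-hand side lra_M. A sketch of the equations this system solves, in standard notation (orientation only, not text from the diff), writing P(M | s,a) for the aggregate probability of entering any state of M:

    x(s) = \operatorname{opt}_{a \in A(s)} \Big( \sum_{s' \text{ in no MEC}} P(s' \mid s,a)\, x(s') + \sum_{M} P(M \mid s,a)\, x(M) \Big) \quad \text{for } s \text{ in no MEC},
    x(M) = \operatorname{opt}\Big( \mathrm{lra}_M,\; \operatorname{opt}_{(s,a) \text{ exiting } M} \Big( \sum_{s' \text{ in no MEC}} P(s' \mid s,a)\, x(s') + \sum_{M'} P(M' \mid s,a)\, x(M') \Big) \Big) \quad \text{for each MEC } M.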
-                
-                // Check for requirements of the solver.
-                storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
-                storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction(), false, produceScheduler);
-                requirements.clearBounds();
-                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
-                
-                std::vector<ValueType> sspResult(numberOfSspStates);
-                goal.restrictRelevantValues(statesNotContainedInAnyMec);
-                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = storm::solver::configureMinMaxLinearEquationSolver(underlyingSolverEnvironment, std::move(goal), minMaxLinearEquationSolverFactory, sspMatrix);
-                solver->setLowerBound(storm::utility::zero<ValueType>());
-                solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end()));
-                solver->setHasUniqueSolution();
-                solver->setHasNoEndComponents();
-                solver->setTrackScheduler(produceScheduler);
-                solver->setRequirementsChecked();
-                solver->solveEquations(underlyingSolverEnvironment, sspResult, b);
-                
-                // Prepare result vector.
-                std::vector<ValueType> result(numberOfStates, zero);
-                
-                // Set the values for states not contained in MECs.
-                storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, sspResult);
-                
-                // Set the values for all states in MECs.
-                for (auto state : statesInMecs) {
-                    result[state] = sspResult[firstAuxiliaryStateIndex + stateToMecIndexMap[state]];
-                }
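The scheduler extraction that follows translates the SSP scheduler back to the original model. States outside MECs copy their selected choice directly. For an auxiliary MEC state there are two cases: if the "stay" row was chosen, the in-MEC choices were already fixed during computeLraForMaximalEndComponent; if an exiting state-choice pair (s̄, ā) was chosen, that exit choice is set at s̄, and every other state of the MEC needs a strategy that reaches s̄ almost surely, which is what the computeSchedulerProb1E call constructs. The property the constructed sub-scheduler σ guarantees (orientation only):

    \forall s \in M \setminus \{\bar{s}\} : \quad \Pr\nolimits^{\sigma}_{s}\big(\Diamond\, \bar{s}\big) = 1 .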
-                
-                if (produceScheduler && solver->hasScheduler()) {
-                    // Translate the result for the SSP matrix back to the original model.
-                    auto const& sspChoices = solver->getSchedulerChoices();
-                    uint64_t sspState = 0;
-                    for (auto state : statesNotContainedInAnyMec) {
-                        scheduler->setChoice(sspChoices[sspState], state);
-                        ++sspState;
-                    }
-                    // The other sspStates correspond to MECs in the original system.
-                    uint_fast64_t rowOffset = sspMatrix.getRowGroupIndices()[sspState];
-                    for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
-                        // Obtain the state and choice of the original model to which the selected choice corresponds.
-                        auto const& originalStateChoice = sspMecChoicesToOriginalMap[sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState] - rowOffset];
-                        // Check if the best choice is to stay in this MEC.
-                        if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
-                            STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
-                            // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMaximalEndComponent.
-                        } else {
-                            // The best choice is to leave this MEC via the selected state and choice.
-                            scheduler->setChoice(originalStateChoice.second, originalStateChoice.first);
-                            // The remaining states in this MEC need to reach this state with probability 1.
-                            storm::storage::BitVector exitStateAsBitVector(transitionMatrix.getRowGroupCount(), false);
-                            exitStateAsBitVector.set(originalStateChoice.first, true);
-                            storm::storage::BitVector otherStatesAsBitVector(transitionMatrix.getRowGroupCount(), false);
-                            for (auto const& stateChoices : mecDecomposition[mecIndex]) {
-                                if (stateChoices.first != originalStateChoice.first) {
-                                    otherStatesAsBitVector.set(stateChoices.first, true);
-                                }
-                            }
-                            storm::utility::graph::computeSchedulerProb1E(otherStatesAsBitVector, transitionMatrix, backwardTransitions, otherStatesAsBitVector, exitStateAsBitVector, *scheduler);
-                        }
-                        ++sspState;
-                    }
-                    assert(sspState == sspMatrix.getRowGroupCount());
-                } else {
-                    STORM_LOG_ERROR_COND(!produceScheduler, "Requested to produce a scheduler, but no scheduler was generated.");
-                }
-                
-                return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
-            }
-            
Try value-iteration."); - if (method == storm::solver::LraMethod::LinearProgramming) { - return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, rewardModel, mec); - } else if (method == storm::solver::LraMethod::ValueIteration) { - return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec, scheduler); - } else { - STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique."); - } - } - - template - template - ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr>& scheduler) { - - // Initialize data about the mec - storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false); - storm::storage::BitVector mecChoices(transitionMatrix.getRowCount(), false); - for (auto const& stateChoicesPair : mec) { - mecStates.set(stateChoicesPair.first); - for (auto const& choice : stateChoicesPair.second) { - mecChoices.set(choice); - } - } - - boost::container::flat_map toSubModelStateMapping; - uint64_t currState = 0; - toSubModelStateMapping.reserve(mecStates.getNumberOfSetBits()); - for (auto const& mecState : mecStates) { - toSubModelStateMapping.insert(std::pair(mecState, currState)); - ++currState; - } - - // Get a transition matrix that only considers the states and choices within the MEC - storm::storage::SparseMatrixBuilder mecTransitionBuilder(mecChoices.getNumberOfSetBits(), mecStates.getNumberOfSetBits(), 0, true, true, mecStates.getNumberOfSetBits()); - std::vector choiceRewards; - choiceRewards.reserve(mecChoices.getNumberOfSetBits()); - uint64_t currRow = 0; - ValueType selfLoopProb = storm::utility::convertNumber(env.solver().lra().getAperiodicFactor()); - ValueType scalingFactor = storm::utility::one() - selfLoopProb; - for (auto const& mecState : mecStates) { - mecTransitionBuilder.newRowGroup(currRow); - uint64_t groupStart = transitionMatrix.getRowGroupIndices()[mecState]; - uint64_t groupEnd = transitionMatrix.getRowGroupIndices()[mecState + 1]; - for (uint64_t choice = mecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = mecChoices.getNextSetIndex(choice + 1)) { - bool insertedDiagElement = false; - for (auto const& entry : transitionMatrix.getRow(choice)) { - uint64_t column = toSubModelStateMapping[entry.getColumn()]; - if (!insertedDiagElement && entry.getColumn() > mecState) { - mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb); - insertedDiagElement = true; - } - if (!insertedDiagElement && entry.getColumn() == mecState) { - mecTransitionBuilder.addNextValue(currRow, column, selfLoopProb + scalingFactor * entry.getValue()); - insertedDiagElement = true; - } else { - mecTransitionBuilder.addNextValue(currRow, column, scalingFactor * entry.getValue()); - } - } - if (!insertedDiagElement) { - mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb); - } - - // Compute the rewards obtained for this choice - choiceRewards.push_back(scalingFactor * rewardModel.getTotalStateActionReward(mecState, choice, transitionMatrix)); - - ++currRow; - } - } - auto mecTransitions = mecTransitionBuilder.build(); - STORM_LOG_ASSERT(mecTransitions.isProbabilistic(), "The MEC-Matrix is not probabilistic."); - - // start the iterations - ValueType precision = 
-                
-                // Start the iterations.
-                ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / scalingFactor;
-                bool relative = env.solver().lra().getRelativeTerminationCriterion();
-                std::vector<ValueType> x(mecTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
-                std::vector<ValueType> xPrime = x;
-                
-                auto multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, mecTransitions);
-                ValueType maxDiff, minDiff;
-                
-                uint64_t iter = 0;
-                boost::optional<uint64_t> maxIter;
-                if (env.solver().lra().isMaximalIterationCountSet()) {
-                    maxIter = env.solver().lra().getMaximalIterationCount();
-                }
-                while (!maxIter.is_initialized() || iter < maxIter.get()) {
-                    ++iter;
-                    // Compute the obtained rewards for the next step.
-                    multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x);
-                    
-                    // Update xPrime and check for convergence.
-                    // To avoid large (and numerically unstable) x-values, we subtract a reference value.
-                    auto xIt = x.begin();
-                    auto xPrimeIt = xPrime.begin();
-                    ValueType refVal = *xIt;
-                    maxDiff = *xIt - *xPrimeIt;
-                    minDiff = maxDiff;
-                    *xIt -= refVal;
-                    *xPrimeIt = *xIt;
-                    for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) {
-                        ValueType diff = *xIt - *xPrimeIt;
-                        maxDiff = std::max(maxDiff, diff);
-                        minDiff = std::min(minDiff, diff);
-                        *xIt -= refVal;
-                        *xPrimeIt = *xIt;
-                    }
-                    
-                    if ((maxDiff - minDiff) <= (relative ? (precision * minDiff) : precision)) {
-                        break;
-                    }
-                    if (storm::utility::resources::isTerminate()) {
-                        break;
-                    }
-                }
-                if (maxIter.is_initialized() && iter == maxIter.get()) {
-                    STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
-                } else {
-                    STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
-                }
-                
-                if (scheduler) {
-                    std::vector<uint_fast64_t> localMecChoices(mecTransitions.getRowGroupCount(), 0);
-                    multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x, &localMecChoices);
-                    auto localMecChoiceIt = localMecChoices.begin();
-                    for (auto const& mecState : mecStates) {
-                        // Get the choice index of the selected MEC choice with respect to the global transition matrix.
-                        uint_fast64_t globalChoice = mecChoices.getNextSetIndex(transitionMatrix.getRowGroupIndices()[mecState]);
-                        for (uint_fast64_t i = 0; i < *localMecChoiceIt; ++i) {
-                            globalChoice = mecChoices.getNextSetIndex(globalChoice + 1);
-                        }
-                        STORM_LOG_ASSERT(globalChoice < transitionMatrix.getRowGroupIndices()[mecState + 1], "Invalid global choice for MEC state.");
-                        scheduler->setChoice(globalChoice - transitionMatrix.getRowGroupIndices()[mecState], mecState);
-                        ++localMecChoiceIt;
-                    }
-                }
-                return (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0) * scalingFactor);
-            }
-            
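The stopping rule above is the classic span-seminorm criterion for average-reward value iteration. Writing d_k(s) = x_k(s) − x_{k−1}(s) for the per-state increase in step k (maxDiff and minDiff above), the gain g' of the transformed model is bracketed in every step, so the iteration stops once the bracket is tight enough and the midpoint, rescaled by 1 − δ, is reported (sketch of the standard bounds, not text from the diff):

    \min_s d_k(s) \;\le\; g' \;\le\; \max_s d_k(s), \qquad g \;\approx\; \frac{\max_s d_k(s) + \min_s d_k(s)}{2\,(1-\delta)} .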
-            template<typename ValueType>
-            template<typename RewardModelType>
-            ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
-                std::shared_ptr<storm::solver::LpSolver<ValueType>> solver = storm::utility::solver::getLpSolver<ValueType>("LRA for MEC");
-                solver->setOptimizationDirection(invert(dir));
-                
-                // First, we need to create the variables for the problem.
-                std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap;
-                for (auto const& stateChoicesPair : mec) {
-                    std::string variableName = "h" + std::to_string(stateChoicesPair.first);
-                    stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
-                }
-                storm::expressions::Variable lambda = solver->addUnboundedContinuousVariable("L", 1);
-                solver->update();
-                
-                // Now we encode the problem as constraints.
-                for (auto const& stateChoicesPair : mec) {
-                    uint_fast64_t state = stateChoicesPair.first;
-                    
-                    // Create one constraint for every choice of the state.
-                    for (auto choice : stateChoicesPair.second) {
-                        storm::expressions::Expression constraint = -lambda;
-                        
-                        for (auto element : transitionMatrix.getRow(choice)) {
-                            constraint = constraint + stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue());
-                        }
-                        typename RewardModelType::ValueType r = rewardModel.getTotalStateActionReward(state, choice, transitionMatrix);
-                        constraint = solver->getConstant(r) + constraint;
-                        
-                        if (dir == OptimizationDirection::Minimize) {
-                            constraint = stateToVariableMap.at(state) <= constraint;
-                        } else {
-                            constraint = stateToVariableMap.at(state) >= constraint;
-                        }
-                        solver->addConstraint("state" + std::to_string(state) + "," + std::to_string(choice), constraint);
-                    }
-                }
-                
-                solver->optimize();
-                return solver->getContinuousValue(lambda);
-            }
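The encoding above is the standard gain LP for a maximal end component: one unbounded "bias" variable h_s per state, plus the gain variable λ (named "L", objective coefficient 1). Note the invert(dir): for a maximizing objective, λ is minimized subject to lower-bound constraints, and symmetrically for minimization. A sketch of the program for dir = Maximize (standard formulation, stated for orientation):

    \text{minimize } \lambda \quad \text{s.t.} \quad h_s \;\ge\; r(s,a) - \lambda + \sum_{s'} P(s' \mid s,a)\, h_{s'} \qquad \text{for all states } s \text{ of } M \text{ and choices } a \text{ of } s \text{ in } M ;

for dir = Minimize the inequalities flip and λ is maximized.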
-            
             template<typename ValueType>
             std::unique_ptr<CheckResult> SparseMdpPrctlHelper<ValueType>::computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates) {
@@ -1818,10 +1359,6 @@ namespace storm {
             template std::vector<double> SparseMdpPrctlHelper<double>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, uint_fast64_t stepBound);
             template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
             template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
-            template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool produceScheduler);
-            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
-            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
-            template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
 
 #ifdef STORM_HAVE_CARL
             template class SparseMdpPrctlHelper<storm::RationalNumber>;
@@ -1829,10 +1366,6 @@ namespace storm {
             template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, uint_fast64_t stepBound);
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
             template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
-            template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool produceScheduler);
-            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
-            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
-            template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
 #endif
         }
     }
diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
index 441e6ce41..57cb1ad4d 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
@@ -67,24 +67,11 @@ namespace storm {
             static std::vector<ValueType> computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::Interval> const& intervalRewardModel, bool lowerBoundOfIntervals, storm::storage::BitVector const& targetStates, bool qualitative);
 #endif
-            
-            static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler);
-            
-            template<typename RewardModelType>
-            static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler);
-            
             static std::unique_ptr<CheckResult> computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates);
             
         private:
             static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityRewardsHelper(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::function<std::vector<ValueType>(uint_fast64_t, storm::storage::SparseMatrix<ValueType> const&, storm::storage::BitVector const&)> const& totalStateRewardVectorGetter, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, std::function<storm::storage::BitVector()> const& zeroRewardStatesGetter, std::function<storm::storage::BitVector()> const& zeroRewardChoicesGetter, ModelCheckerHint const& hint = ModelCheckerHint());
             
-            template<typename RewardModelType>
-            static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
-            
-            template<typename RewardModelType>
-            static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
-            
-            template<typename RewardModelType>
-            static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
-            
         };
     }
 }