#include "storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h" #include #include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h" #include "storm/modelchecker/hints/ExplicitModelCheckerHint.h" #include "storm/models/sparse/StandardRewardModel.h" #include "storm/storage/MaximalEndComponentDecomposition.h" #include "storm/utility/macros.h" #include "storm/utility/vector.h" #include "storm/utility/graph.h" #include "storm/storage/expressions/Variable.h" #include "storm/storage/expressions/Expression.h" #include "storm/storage/Scheduler.h" #include "storm/solver/MinMaxLinearEquationSolver.h" #include "storm/solver/LpSolver.h" #include "storm/settings/SettingsManager.h" #include "storm/settings/modules/MinMaxEquationSolverSettings.h" #include "storm/exceptions/InvalidStateException.h" #include "storm/exceptions/InvalidPropertyException.h" #include "storm/exceptions/InvalidSettingsException.h" #include "storm/exceptions/IllegalFunctionCallException.h" #include "storm/exceptions/IllegalArgumentException.h" namespace storm { namespace modelchecker { namespace helper { template std::vector SparseMdpPrctlHelper::computeBoundedUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, uint_fast64_t stepBound, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint) { std::vector result(transitionMatrix.getRowGroupCount(), storm::utility::zero()); // Determine the states that have 0 probability of reaching the target states. storm::storage::BitVector maybeStates; if (hint.isExplicitModelCheckerHint() && hint.template asExplicitModelCheckerHint().getComputeOnlyMaybeStates()) { maybeStates = hint.template asExplicitModelCheckerHint().getMaybeStates(); } else { if (dir == OptimizationDirection::Minimize) { maybeStates = storm::utility::graph::performProbGreater0A(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, phiStates, psiStates, true, stepBound); } else { maybeStates = storm::utility::graph::performProbGreater0E(backwardTransitions, phiStates, psiStates, true, stepBound); } maybeStates &= ~psiStates; STORM_LOG_INFO("Found " << maybeStates.getNumberOfSetBits() << " 'maybe' states."); } if (!maybeStates.empty()) { // We can eliminate the rows and columns from the original transition probability matrix that have probability 0. storm::storage::SparseMatrix submatrix = transitionMatrix.getSubmatrix(true, maybeStates, maybeStates, false); std::vector b = transitionMatrix.getConstrainedRowGroupSumVector(maybeStates, psiStates); // Create the vector with which to multiply. std::vector subresult(maybeStates.getNumberOfSetBits()); std::unique_ptr> solver = minMaxLinearEquationSolverFactory.create(std::move(submatrix)); solver->repeatedMultiply(dir, subresult, &b, stepBound); // Set the values of the resulting vector accordingly. 
                    storm::utility::vector::setVectorValues(result, maybeStates, subresult);
                }
                storm::utility::vector::setVectorValues(result, psiStates, storm::utility::one<ValueType>());

                return result;
            }

            template<typename ValueType>
            std::vector<ValueType> SparseMdpPrctlHelper<ValueType>::computeNextProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& nextStates, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
                // Create the vector with which to multiply and initialize it correctly.
                std::vector<ValueType> result(transitionMatrix.getRowGroupCount());
                storm::utility::vector::setVectorValues(result, nextStates, storm::utility::one<ValueType>());

                std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(transitionMatrix);
                solver->repeatedMultiply(dir, result, nullptr, 1);

                return result;
            }

            template<typename ValueType>
            MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeUntilProbabilities(storm::solver::SolveGoal const& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint) {
                STORM_LOG_THROW(!(qualitative && produceScheduler), storm::exceptions::InvalidSettingsException, "Cannot produce scheduler when performing qualitative model checking only.");

                std::vector<ValueType> result(transitionMatrix.getRowGroupCount(), storm::utility::zero<ValueType>());

                // We need to identify the maybe states (states which have a probability for satisfying the until formula
                // that is strictly between 0 and 1) and the states that satisfy the formula with probability 1 and 0, respectively.
                storm::storage::BitVector maybeStates, statesWithProbability1, statesWithProbability0;

                if (hint.isExplicitModelCheckerHint() && hint.template asExplicitModelCheckerHint<ValueType>().getComputeOnlyMaybeStates()) {
                    maybeStates = hint.template asExplicitModelCheckerHint<ValueType>().getMaybeStates();

                    // Treat the states with probability one
                    std::vector<ValueType> const& resultsForNonMaybeStates = hint.template asExplicitModelCheckerHint<ValueType>().getResultHint();
                    statesWithProbability1 = storm::storage::BitVector(maybeStates.size(), false);
                    statesWithProbability0 = storm::storage::BitVector(maybeStates.size(), false);
                    storm::storage::BitVector nonMaybeStates = ~maybeStates;
                    for (auto const& state : nonMaybeStates) {
                        if (storm::utility::isOne(resultsForNonMaybeStates[state])) {
                            statesWithProbability1.set(state, true);
                            result[state] = storm::utility::one<ValueType>();
                        } else {
                            STORM_LOG_THROW(storm::utility::isZero(resultsForNonMaybeStates[state]), storm::exceptions::IllegalArgumentException, "Expected that the result hint specifies probabilities in {0,1} for non-maybe states");
                            statesWithProbability0.set(state, true);
                        }
                    }
                } else {
                    // Get all states that have probability 0 and 1 of satisfying the until-formula.
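                    // Illustration (added, not part of the original Storm code): this qualitative precomputation is
                    // purely graph-based. performProb01Min / performProb01Max return the sets of states whose optimal
                    // (minimal respectively maximal) probability of satisfying the until-formula is exactly 0
                    // respectively exactly 1, determined by reachability analysis alone, without solving any equation
                    // system. Only the remaining 'maybe' states require the numeric solve further below.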
std::pair statesWithProbability01; if (goal.minimize()) { statesWithProbability01 = storm::utility::graph::performProb01Min(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, phiStates, psiStates); } else { statesWithProbability01 = storm::utility::graph::performProb01Max(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, phiStates, psiStates); } statesWithProbability0 = std::move(statesWithProbability01.first); statesWithProbability1 = std::move(statesWithProbability01.second); maybeStates = ~(statesWithProbability0 | statesWithProbability1); STORM_LOG_INFO("Found " << statesWithProbability0.getNumberOfSetBits() << " 'no' states."); STORM_LOG_INFO("Found " << statesWithProbability1.getNumberOfSetBits() << " 'yes' states."); STORM_LOG_INFO("Found " << maybeStates.getNumberOfSetBits() << " 'maybe' states."); // Set values of resulting vector that are known exactly. storm::utility::vector::setVectorValues(result, statesWithProbability0, storm::utility::zero()); storm::utility::vector::setVectorValues(result, statesWithProbability1, storm::utility::one()); } // If requested, we will produce a scheduler. std::unique_ptr> scheduler; if (produceScheduler) { scheduler = std::make_unique>(transitionMatrix.getRowGroupCount()); } // Check whether we need to compute exact probabilities for some states. if (qualitative) { // Set the values for all maybe-states to 0.5 to indicate that their probability values are neither 0 nor 1. storm::utility::vector::setVectorValues(result, maybeStates, storm::utility::convertNumber(0.5)); } else { if (!maybeStates.empty()) { // In this case we have have to compute the probabilities. // First, we can eliminate the rows and columns from the original transition probability matrix for states // whose probabilities are already known. storm::storage::SparseMatrix submatrix = transitionMatrix.getSubmatrix(true, maybeStates, maybeStates, false); // Prepare the right-hand side of the equation system. For entry i this corresponds to // the accumulated probability of going from state i to some 'yes' state. std::vector b = transitionMatrix.getConstrainedRowGroupSumVector(maybeStates, statesWithProbability1); // obtain hint information if possible bool skipEcWithinMaybeStatesCheck = goal.minimize() || (hint.isExplicitModelCheckerHint() && hint.asExplicitModelCheckerHint().getNoEndComponentsInMaybeStates()); std::pair>, boost::optional>> hintInformation = extractHintInformationForMaybeStates(transitionMatrix, backwardTransitions, maybeStates, boost::none, hint, skipEcWithinMaybeStatesCheck); // Now compute the results for the maybeStates std::pair, boost::optional>> resultForMaybeStates = computeValuesOnlyMaybeStates(goal, submatrix, b, produceScheduler, minMaxLinearEquationSolverFactory, std::move(hintInformation.first), std::move(hintInformation.second), storm::utility::zero(), storm::utility::one()); // Set values of resulting vector according to result. storm::utility::vector::setVectorValues(result, maybeStates, resultForMaybeStates.first); if (produceScheduler) { std::vector const& subChoices = resultForMaybeStates.second.get(); auto subChoiceIt = subChoices.begin(); for (auto maybeState : maybeStates) { scheduler->setChoice(*subChoiceIt, maybeState); ++subChoiceIt; } assert(subChoiceIt == subChoices.end()); } } } // Finally, if we need to produce a scheduler, we also need to figure out the parts of the scheduler for // the states with probability 1 or 0 (depending on whether we maximize or minimize). 
// We also need to define some arbitrary choice for the remaining states to obtain a fully defined scheduler. if (produceScheduler) { if (goal.minimize()) { storm::utility::graph::computeSchedulerProb0E(statesWithProbability0, transitionMatrix, *scheduler); for (auto const& prob1State : statesWithProbability1) { scheduler->setChoice(0, prob1State); } } else { storm::utility::graph::computeSchedulerProb1E(statesWithProbability1, transitionMatrix, backwardTransitions, phiStates, psiStates, *scheduler); for (auto const& prob0State : statesWithProbability0) { scheduler->setChoice(0, prob0State); } } } STORM_LOG_ASSERT((!produceScheduler && !scheduler) || (!scheduler->isPartialScheduler() && scheduler->isDeterministicScheduler() && scheduler->isMemorylessScheduler()), "Unexpected format of obtained scheduler."); return MDPSparseModelCheckingHelperReturnType(std::move(result), std::move(scheduler)); } template std::pair>, boost::optional>> SparseMdpPrctlHelper::extractHintInformationForMaybeStates(storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& maybeStates, boost::optional const& selectedChoices, ModelCheckerHint const& hint, bool skipECWithinMaybeStatesCheck) { // Scheduler hint boost::optional> subSchedulerChoices; if (hint.isExplicitModelCheckerHint() && hint.template asExplicitModelCheckerHint().hasSchedulerHint()) { auto const& schedulerHint = hint.template asExplicitModelCheckerHint().getSchedulerHint(); std::vector hintChoices; // the scheduler hint is only applicable if it induces no BSCC consisting of maybestates bool hintApplicable; if (!skipECWithinMaybeStatesCheck) { hintChoices.reserve(maybeStates.size()); for (uint_fast64_t state = 0; state < maybeStates.size(); ++state) { hintChoices.push_back(schedulerHint.getChoice(state).getDeterministicChoice()); } hintApplicable = storm::utility::graph::performProb1(transitionMatrix.transposeSelectedRowsFromRowGroups(hintChoices), maybeStates, ~maybeStates).full(); } else { hintApplicable = true; } if (hintApplicable) { // Compute the hint w.r.t. 
the given subsystem hintChoices.clear(); hintChoices.reserve(maybeStates.getNumberOfSetBits()); for (auto const& state : maybeStates) { uint_fast64_t hintChoice = schedulerHint.getChoice(state).getDeterministicChoice(); if (selectedChoices) { uint_fast64_t firstChoice = transitionMatrix.getRowGroupIndices()[state]; uint_fast64_t lastChoice = firstChoice + hintChoice; hintChoice = 0; for (uint_fast64_t choice = selectedChoices->getNextSetIndex(firstChoice); choice < lastChoice; choice = selectedChoices->getNextSetIndex(choice + 1)) { ++hintChoice; } } hintChoices.push_back(hintChoice); } subSchedulerChoices = std::move(hintChoices); } } // Solution value hint boost::optional> subValues; // The result hint is only applicable if there are no End Components consisting of maybestates if (hint.isExplicitModelCheckerHint() && hint.template asExplicitModelCheckerHint().hasResultHint() && (skipECWithinMaybeStatesCheck || subSchedulerChoices || storm::utility::graph::performProb1A(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, maybeStates, ~maybeStates).full())) { subValues = storm::utility::vector::filterVector(hint.template asExplicitModelCheckerHint().getResultHint(), maybeStates); } return std::make_pair(std::move(subValues), std::move(subSchedulerChoices)); } template std::pair, boost::optional>> SparseMdpPrctlHelper::computeValuesOnlyMaybeStates(storm::solver::SolveGoal const& goal, storm::storage::SparseMatrix const& submatrix, std::vector const& b, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, boost::optional>&& hintValues, boost::optional>&& hintChoices, boost::optional const& lowerResultBound, boost::optional const& upperResultBound) { // Set up the solver std::unique_ptr> solver = storm::solver::configureMinMaxLinearEquationSolver(goal, minMaxLinearEquationSolverFactory, submatrix); if (lowerResultBound) { solver->setLowerBound(lowerResultBound.get()); } if (upperResultBound) { solver->setUpperBound(upperResultBound.get()); } if (hintChoices) { solver->setSchedulerHint(std::move(hintChoices.get())); } solver->setTrackScheduler(produceScheduler); // Initialize the solution vector. std::vector x = hintValues ? std::move(hintValues.get()) : std::vector(submatrix.getRowGroupCount(), lowerResultBound ? lowerResultBound.get() : storm::utility::zero()); // Solve the corresponding system of equations. 
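// Illustration (added, not part of the original Storm code): the equation system handed to the solver is the
// restriction of the Bellman equations to the maybe states,
//
//   x(s) = opt_a ( b(s, a) + sum_{s'} A(s, a, s') * x(s') ),
//
// with 'opt' being min or max according to the solve goal. The optional result bounds and the scheduler/value
// hints configured above only serve as a warm start for the solver; they do not change the solution that is
// computed.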
solver->solveEquations(x, b); // If requested, a scheduler was produced if (produceScheduler) { return std::pair, boost::optional>>(std::move(x), std::move(solver->getSchedulerChoices())); } else { return std::pair, boost::optional>>(std::move(x), boost::none); } } template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint) { storm::solver::SolveGoal goal(dir); return std::move(computeUntilProbabilities(goal, transitionMatrix, backwardTransitions, phiStates, psiStates, qualitative, produceScheduler, minMaxLinearEquationSolverFactory, hint)); } template std::vector SparseMdpPrctlHelper::computeGloballyProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& psiStates, bool qualitative, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, bool useMecBasedTechnique) { if (useMecBasedTechnique) { storm::storage::MaximalEndComponentDecomposition mecDecomposition(transitionMatrix, backwardTransitions, psiStates); storm::storage::BitVector statesInPsiMecs(transitionMatrix.getRowGroupCount()); for (auto const& mec : mecDecomposition) { for (auto const& stateActionsPair : mec) { statesInPsiMecs.set(stateActionsPair.first, true); } } return std::move(computeUntilProbabilities(dir, transitionMatrix, backwardTransitions, psiStates, statesInPsiMecs, qualitative, false, minMaxLinearEquationSolverFactory).values); } else { std::vector result = computeUntilProbabilities(dir == OptimizationDirection::Minimize ? OptimizationDirection::Maximize : OptimizationDirection::Minimize, transitionMatrix, backwardTransitions, storm::storage::BitVector(transitionMatrix.getRowGroupCount(), true), ~psiStates, qualitative, false, minMaxLinearEquationSolverFactory).values; for (auto& element : result) { element = storm::utility::one() - element; } return std::move(result); } } template template std::vector SparseMdpPrctlHelper::computeInstantaneousRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, uint_fast64_t stepCount, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { // Only compute the result if the model has a state-based reward this->getModel(). STORM_LOG_THROW(rewardModel.hasStateRewards(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula."); // Initialize result to state rewards of the this->getModel(). 
std::vector result(rewardModel.getStateRewardVector()); std::unique_ptr> solver = minMaxLinearEquationSolverFactory.create(transitionMatrix); solver->repeatedMultiply(dir, result, nullptr, stepCount); return result; } template template std::vector SparseMdpPrctlHelper::computeCumulativeRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, uint_fast64_t stepBound, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { // Only compute the result if the model has at least one reward this->getModel(). STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula."); // Compute the reward vector to add in each step based on the available reward models. std::vector totalRewardVector = rewardModel.getTotalRewardVector(transitionMatrix); // Initialize result to the zero vector. std::vector result(transitionMatrix.getRowGroupCount(), storm::utility::zero()); std::unique_ptr> solver = minMaxLinearEquationSolverFactory.create(transitionMatrix); solver->repeatedMultiply(dir, result, &totalRewardVector, stepBound); return result; } template template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, RewardModelType const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint) { // Only compute the result if the model has at least one reward this->getModel(). STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula."); return computeReachabilityRewardsHelper(storm::solver::SolveGoal(dir), transitionMatrix, backwardTransitions, [&rewardModel] (uint_fast64_t rowCount, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::BitVector const& maybeStates) { return rewardModel.getTotalRewardVector(rowCount, transitionMatrix, maybeStates); }, targetStates, qualitative, produceScheduler, minMaxLinearEquationSolverFactory, hint); } template template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(storm::solver::SolveGoal const& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, RewardModelType const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint) { // Only compute the result if the model has at least one reward this->getModel(). STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. 
Skipping formula."); return computeReachabilityRewardsHelper(goal, transitionMatrix, backwardTransitions, [&rewardModel] (uint_fast64_t rowCount, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::BitVector const& maybeStates) { return rewardModel.getTotalRewardVector(rowCount, transitionMatrix, maybeStates); }, targetStates, qualitative, produceScheduler, minMaxLinearEquationSolverFactory, hint); } #ifdef STORM_HAVE_CARL template std::vector SparseMdpPrctlHelper::computeReachabilityRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& intervalRewardModel, bool lowerBoundOfIntervals, storm::storage::BitVector const& targetStates, bool qualitative, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { // Only compute the result if the reward model is not empty. STORM_LOG_THROW(!intervalRewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula."); return computeReachabilityRewardsHelper(storm::solver::SolveGoal(dir), transitionMatrix, backwardTransitions, \ [&] (uint_fast64_t rowCount, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::BitVector const& maybeStates) { std::vector result; result.reserve(rowCount); std::vector subIntervalVector = intervalRewardModel.getTotalRewardVector(rowCount, transitionMatrix, maybeStates); for (auto const& interval : subIntervalVector) { result.push_back(lowerBoundOfIntervals ? interval.lower() : interval.upper()); } return result; }, \ targetStates, qualitative, false, minMaxLinearEquationSolverFactory).values; } template<> std::vector SparseMdpPrctlHelper::computeReachabilityRewards(OptimizationDirection, storm::storage::SparseMatrix const&, storm::storage::SparseMatrix const&, storm::models::sparse::StandardRewardModel const&, bool, storm::storage::BitVector const&, bool, storm::solver::MinMaxLinearEquationSolverFactory const&) { STORM_LOG_THROW(false, storm::exceptions::IllegalFunctionCallException, "Computing reachability rewards is unsupported for this data type."); } #endif template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewardsHelper(storm::solver::SolveGoal const& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::function(uint_fast64_t, storm::storage::SparseMatrix const&, storm::storage::BitVector const&)> const& totalStateRewardVectorGetter, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint) { std::vector result(transitionMatrix.getRowGroupCount(), storm::utility::zero()); std::vector const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices(); // Determine which states have a reward that is infinity or less than infinity. 
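// Illustration (added, not part of the original Storm code): the expected reward of a state is finite exactly
// if the target states are reached with probability one, under every scheduler in the maximizing case
// (performProb1A) and under at least one scheduler in the minimizing case (performProb1E). The code below
// therefore computes the corresponding probability-1 set on the full state space and complements it to obtain
// the 'infinity' states.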
storm::storage::BitVector maybeStates, infinityStates; if (hint.isExplicitModelCheckerHint() && hint.template asExplicitModelCheckerHint().getComputeOnlyMaybeStates()) { maybeStates = hint.template asExplicitModelCheckerHint().getMaybeStates(); infinityStates = ~(maybeStates | targetStates); } else { storm::storage::BitVector trueStates(transitionMatrix.getRowGroupCount(), true); if (goal.minimize()) { STORM_LOG_WARN("Results of reward computation may be too low, because of zero-reward loops."); infinityStates = storm::utility::graph::performProb1E(transitionMatrix, nondeterministicChoiceIndices, backwardTransitions, trueStates, targetStates); } else { infinityStates = storm::utility::graph::performProb1A(transitionMatrix, nondeterministicChoiceIndices, backwardTransitions, trueStates, targetStates); } infinityStates.complement(); maybeStates = ~(targetStates | infinityStates); STORM_LOG_INFO("Found " << infinityStates.getNumberOfSetBits() << " 'infinity' states."); STORM_LOG_INFO("Found " << targetStates.getNumberOfSetBits() << " 'target' states."); STORM_LOG_INFO("Found " << maybeStates.getNumberOfSetBits() << " 'maybe' states."); } storm::utility::vector::setVectorValues(result, infinityStates, storm::utility::infinity()); // If requested, we will produce a scheduler. std::unique_ptr> scheduler; if (produceScheduler) { scheduler = std::make_unique>(transitionMatrix.getRowGroupCount()); } // Check whether we need to compute exact rewards for some states. if (qualitative) { STORM_LOG_INFO("The rewards for the initial states were determined in a preprocessing step. No exact rewards were computed."); // Set the values for all maybe-states to 1 to indicate that their reward values // are neither 0 nor infinity. storm::utility::vector::setVectorValues(result, maybeStates, storm::utility::one()); } else { if (!maybeStates.empty()) { // In this case we have to compute the reward values for the remaining states. // Prepare matrix and vector for the equation system. storm::storage::SparseMatrix submatrix; std::vector b; // Remove rows and columns from the original transition probability matrix for states whose reward values are already known. 
// If there are infinity states, we additionaly have to remove choices of maybeState that lead to infinity boost::optional selectedChoices; // if not given, all maybeState choices are selected if (infinityStates.empty()) { submatrix = transitionMatrix.getSubmatrix(true, maybeStates, maybeStates, false); b = totalStateRewardVectorGetter(submatrix.getRowCount(), transitionMatrix, maybeStates); } else { selectedChoices = transitionMatrix.getRowFilter(maybeStates, ~infinityStates); submatrix = transitionMatrix.getSubmatrix(false, *selectedChoices, maybeStates, false); b = totalStateRewardVectorGetter(transitionMatrix.getRowCount(), transitionMatrix, storm::storage::BitVector(transitionMatrix.getRowGroupCount(), true)); storm::utility::vector::filterVectorInPlace(b, *selectedChoices); } // obtain hint information if possible bool skipEcWithinMaybeStatesCheck = !goal.minimize() || (hint.isExplicitModelCheckerHint() && hint.asExplicitModelCheckerHint().getNoEndComponentsInMaybeStates()); std::pair>, boost::optional>> hintInformation = extractHintInformationForMaybeStates(transitionMatrix, backwardTransitions, maybeStates, selectedChoices, hint, skipEcWithinMaybeStatesCheck); // Now compute the results for the maybeStates std::pair, boost::optional>> resultForMaybeStates = computeValuesOnlyMaybeStates(goal, submatrix, b, produceScheduler, minMaxLinearEquationSolverFactory, std::move(hintInformation.first), std::move(hintInformation.second), storm::utility::zero()); // Set values of resulting vector according to result. storm::utility::vector::setVectorValues(result, maybeStates, resultForMaybeStates.first); if (produceScheduler) { std::vector const& subChoices = resultForMaybeStates.second.get(); auto subChoiceIt = subChoices.begin(); if (selectedChoices) { for (auto maybeState : maybeStates) { // find the rowindex that corresponds to the selected row of the submodel uint_fast64_t firstRowIndex = transitionMatrix.getRowGroupIndices()[maybeState]; uint_fast64_t selectedRowIndex = selectedChoices->getNextSetIndex(firstRowIndex); for (uint_fast64_t choice = 0; choice < *subChoiceIt; ++choice) { selectedRowIndex = selectedChoices->getNextSetIndex(selectedRowIndex + 1); } scheduler->setChoice(selectedRowIndex - firstRowIndex, maybeState); ++subChoiceIt; } } else { for (auto maybeState : maybeStates) { scheduler->setChoice(*subChoiceIt, maybeState); ++subChoiceIt; } } assert(subChoiceIt == subChoices.end()); } } } // Finally, if we need to produce a scheduler, we also need to figure out the parts of the scheduler for // the states with reward infinity. 
Moreover, we have to set some arbitrary choice for the remaining states // to obtain a fully defined scheduler if (produceScheduler) { if (!goal.minimize()) { storm::utility::graph::computeSchedulerProb0E(infinityStates, transitionMatrix, *scheduler); } else { for (auto const& state : infinityStates) { scheduler->setChoice(0, state); } } for (auto const& state : targetStates) { scheduler->setChoice(0, state); } } STORM_LOG_ASSERT((!produceScheduler && !scheduler) || (!scheduler->isPartialScheduler() && scheduler->isDeterministicScheduler() && scheduler->isMemorylessScheduler()), "Unexpected format of obtained scheduler."); return MDPSparseModelCheckingHelperReturnType(std::move(result), std::move(scheduler)); } template std::vector SparseMdpPrctlHelper::computeLongRunAverageProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { // If there are no goal states, we avoid the computation and directly return zero. if (psiStates.empty()) { return std::vector(transitionMatrix.getRowGroupCount(), storm::utility::zero()); } // Likewise, if all bits are set, we can avoid the computation and set. if (psiStates.full()) { return std::vector(transitionMatrix.getRowGroupCount(), storm::utility::one()); } // Reduce long run average probabilities to long run average rewards by // building a reward model assigning one reward to every psi state std::vector stateRewards(psiStates.size(), storm::utility::zero()); storm::utility::vector::setVectorValues(stateRewards, psiStates, storm::utility::one()); storm::models::sparse::StandardRewardModel rewardModel(std::move(stateRewards)); return computeLongRunAverageRewards(dir, transitionMatrix, backwardTransitions, rewardModel, minMaxLinearEquationSolverFactory); } template template std::vector SparseMdpPrctlHelper::computeLongRunAverageRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, RewardModelType const& rewardModel, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { uint64_t numberOfStates = transitionMatrix.getRowGroupCount(); // Start by decomposing the MDP into its MECs. storm::storage::MaximalEndComponentDecomposition mecDecomposition(transitionMatrix, backwardTransitions); // Get some data members for convenience. std::vector const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices(); ValueType zero = storm::utility::zero(); //first calculate LRA for the Maximal End Components. storm::storage::BitVector statesInMecs(numberOfStates); std::vector stateToMecIndexMap(transitionMatrix.getColumnCount()); std::vector lraValuesForEndComponents(mecDecomposition.size(), zero); for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) { storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex]; lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(dir, transitionMatrix, rewardModel, mec, minMaxLinearEquationSolverFactory); // Gather information for later use. for (auto const& stateChoicesPair : mec) { statesInMecs.set(stateChoicesPair.first); stateToMecIndexMap[stateChoicesPair.first] = currentMecIndex; } } // For fast transition rewriting, we build some auxiliary data structures. 
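// Illustration (added, not part of the original Storm code): the construction below reduces the long-run
// average problem to a stochastic shortest path (SSP) problem. Its state space consists of one copy of every
// state that lies outside of all MECs plus one auxiliary state per MEC. Choices of non-MEC states are copied,
// with transitions into a MEC redirected to that MEC's auxiliary state; each auxiliary state keeps the
// MEC-leaving choices of its member states and additionally receives one extra choice whose one-step reward is
// the LRA value of the MEC (lraValuesForEndComponents), i.e. the value of staying inside the MEC forever.
// Solving the resulting min/max equation system then yields the LRA values of all original states.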
storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs; uint_fast64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits(); uint_fast64_t lastStateNotInMecs = 0; uint_fast64_t numberOfStatesNotInMecs = 0; std::vector statesNotInMecsBeforeIndex; statesNotInMecsBeforeIndex.reserve(numberOfStates); for (auto state : statesNotContainedInAnyMec) { while (lastStateNotInMecs <= state) { statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs); ++lastStateNotInMecs; } ++numberOfStatesNotInMecs; } // Finally, we are ready to create the SSP matrix and right-hand side of the SSP. std::vector b; uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size(); typename storm::storage::SparseMatrixBuilder sspMatrixBuilder(0, numberOfSspStates, 0, false, true, numberOfSspStates); // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications). uint_fast64_t currentChoice = 0; for (auto state : statesNotContainedInAnyMec) { sspMatrixBuilder.newRowGroup(currentChoice); for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) { std::vector auxiliaryStateToProbabilityMap(mecDecomposition.size()); b.push_back(storm::utility::zero()); for (auto element : transitionMatrix.getRow(choice)) { if (statesNotContainedInAnyMec.get(element.getColumn())) { // If the target state is not contained in an MEC, we can copy over the entry. sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue()); } else { // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector // so that we are able to write the cumulative probability to the MEC into the matrix. auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue(); } } // Now insert all (cumulative) probability values that target an MEC. for (uint_fast64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) { if (auxiliaryStateToProbabilityMap[mecIndex] != 0) { sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]); } } } } // Now we are ready to construct the choices for the auxiliary states. for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) { storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex]; sspMatrixBuilder.newRowGroup(currentChoice); for (auto const& stateChoicesPair : mec) { uint_fast64_t state = stateChoicesPair.first; boost::container::flat_set const& choicesInMec = stateChoicesPair.second; for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) { // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state. if (choicesInMec.find(choice) == choicesInMec.end()) { std::vector auxiliaryStateToProbabilityMap(mecDecomposition.size()); b.push_back(storm::utility::zero()); for (auto element : transitionMatrix.getRow(choice)) { if (statesNotContainedInAnyMec.get(element.getColumn())) { // If the target state is not contained in an MEC, we can copy over the entry. 
sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue()); } else { // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector // so that we are able to write the cumulative probability to the MEC into the matrix. auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue(); } } // Now insert all (cumulative) probability values that target an MEC. for (uint_fast64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) { if (auxiliaryStateToProbabilityMap[targetMecIndex] != 0) { sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]); } } ++currentChoice; } } } // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC. ++currentChoice; b.push_back(lraValuesForEndComponents[mecIndex]); } // Finalize the matrix and solve the corresponding system of equations. storm::storage::SparseMatrix sspMatrix = sspMatrixBuilder.build(currentChoice, numberOfSspStates, numberOfSspStates); std::vector sspResult(numberOfSspStates); std::unique_ptr> solver = minMaxLinearEquationSolverFactory.create(std::move(sspMatrix)); solver->solveEquations(dir, sspResult, b); // Prepare result vector. std::vector result(numberOfStates, zero); // Set the values for states not contained in MECs. storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, sspResult); // Set the values for all states in MECs. for (auto state : statesInMecs) { result[state] = sspResult[firstAuxiliaryStateIndex + stateToMecIndexMap[state]]; } return result; } template template ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponent(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { // If the mec only consists of a single state, we compute the LRA value directly if (++mec.begin() == mec.end()) { uint64_t state = mec.begin()->first; auto choiceIt = mec.begin()->second.begin(); ValueType result = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix); for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) { if (storm::solver::minimize(dir)) { result = std::min(result, rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix)); } else { result = std::max(result, rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix)); } } return result; } // Solve MEC with the method specified in the settings storm::solver::LraMethod method = storm::settings::getModule().getLraMethod(); if (method == storm::solver::LraMethod::LinearProgramming) { return computeLraForMaximalEndComponentLP(dir, transitionMatrix, rewardModel, mec); } else if (method == storm::solver::LraMethod::ValueIteration) { return computeLraForMaximalEndComponentVI(dir, transitionMatrix, rewardModel, mec, minMaxLinearEquationSolverFactory); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique."); } } template template ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& 
mec, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { // Initialize data about the mec storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false); storm::storage::BitVector mecChoices(transitionMatrix.getRowCount(), false); for (auto const& stateChoicesPair : mec) { mecStates.set(stateChoicesPair.first); for (auto const& choice : stateChoicesPair.second) { mecChoices.set(choice); } } boost::container::flat_map toSubModelStateMapping; uint64_t currState = 0; toSubModelStateMapping.reserve(mecStates.getNumberOfSetBits()); for (auto const& mecState : mecStates) { toSubModelStateMapping.insert(std::pair(mecState, currState)); ++currState; } // Get a transition matrix that only considers the states and choices within the MEC storm::storage::SparseMatrixBuilder mecTransitionBuilder(mecChoices.getNumberOfSetBits(), mecStates.getNumberOfSetBits(), 0, true, true, mecStates.getNumberOfSetBits()); std::vector choiceRewards; choiceRewards.reserve(mecChoices.getNumberOfSetBits()); uint64_t currRow = 0; ValueType selfLoopProb = storm::utility::convertNumber(0.1); // todo try other values ValueType scalingFactor = storm::utility::one() - selfLoopProb; for (auto const& mecState : mecStates) { mecTransitionBuilder.newRowGroup(currRow); uint64_t groupStart = transitionMatrix.getRowGroupIndices()[mecState]; uint64_t groupEnd = transitionMatrix.getRowGroupIndices()[mecState + 1]; for (uint64_t choice = mecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = mecChoices.getNextSetIndex(choice + 1)) { bool insertedDiagElement = false; for (auto const& entry : transitionMatrix.getRow(choice)) { uint64_t column = toSubModelStateMapping[entry.getColumn()]; if (!insertedDiagElement && entry.getColumn() > mecState) { mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb); insertedDiagElement = true; } if (!insertedDiagElement && entry.getColumn() == mecState) { mecTransitionBuilder.addNextValue(currRow, column, selfLoopProb + scalingFactor * entry.getValue()); insertedDiagElement = true; } else { mecTransitionBuilder.addNextValue(currRow, column, scalingFactor * entry.getValue()); } } if (!insertedDiagElement) { mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb); } // Compute the rewards obtained for this choice choiceRewards.push_back(scalingFactor * rewardModel.getTotalStateActionReward(mecState, choice, transitionMatrix)); ++currRow; } } auto mecTransitions = mecTransitionBuilder.build(); STORM_LOG_ASSERT(mecTransitions.isProbabilistic(), "The MEC-Matrix is not probabilistic."); // start the iterations ValueType precision = storm::utility::convertNumber(storm::settings::getModule().getPrecision()); std::vector x(mecTransitions.getRowGroupCount(), storm::utility::zero()); std::vector xPrime = x; auto solver = minMaxLinearEquationSolverFactory.create(std::move(mecTransitions)); solver->setCachingEnabled(true); ValueType maxDiff, minDiff; while (true) { // Compute the obtained rewards for the next step solver->repeatedMultiply(dir, x, &choiceRewards, 1); // update xPrime and check for convergence // to avoid large (and numerically unstable) x-values, we substract a reference value. 
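// Illustration (added, not part of the original Storm code): this is a relative value iteration. Convergence
// is measured via the span of the difference vector: with diff = x - xPrime taken before the reference value
// is subtracted, the loop terminates once
//
//   maxDiff - minDiff < precision,
//
// and (maxDiff + minDiff) / 2, divided by scalingFactor to undo the self-loop perturbation introduced when
// building the MEC matrix, is returned as the long-run average value of the MEC.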
auto xIt = x.begin(); auto xPrimeIt = xPrime.begin(); ValueType refVal = *xIt; maxDiff = *xIt - *xPrimeIt; minDiff = maxDiff; *xIt -= refVal; *xPrimeIt = *xIt; for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) { ValueType diff = *xIt - *xPrimeIt; maxDiff = std::max(maxDiff, diff); minDiff = std::min(minDiff, diff); *xIt -= refVal; *xPrimeIt = *xIt; } if ((maxDiff - minDiff) < precision) { break; } } return (maxDiff + minDiff) / (storm::utility::convertNumber(2.0) * scalingFactor); } template template ValueType SparseMdpPrctlHelper::computeLraForMaximalEndComponentLP(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) { std::shared_ptr solver = storm::utility::solver::getLpSolver("LRA for MEC"); solver->setOptimizationDirection(invert(dir)); // First, we need to create the variables for the problem. std::map stateToVariableMap; for (auto const& stateChoicesPair : mec) { std::string variableName = "h" + std::to_string(stateChoicesPair.first); stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName); } storm::expressions::Variable lambda = solver->addUnboundedContinuousVariable("L", 1); solver->update(); // Now we encode the problem as constraints. for (auto const& stateChoicesPair : mec) { uint_fast64_t state = stateChoicesPair.first; // Now, based on the type of the state, create a suitable constraint. for (auto choice : stateChoicesPair.second) { storm::expressions::Expression constraint = -lambda; for (auto element : transitionMatrix.getRow(choice)) { constraint = constraint + stateToVariableMap.at(element.getColumn()) * solver->getConstant(storm::utility::convertNumber(element.getValue())); } typename RewardModelType::ValueType r = rewardModel.getTotalStateActionReward(state, choice, transitionMatrix); constraint = solver->getConstant(storm::utility::convertNumber(r)) + constraint; if (dir == OptimizationDirection::Minimize) { constraint = stateToVariableMap.at(state) <= constraint; } else { constraint = stateToVariableMap.at(state) >= constraint; } solver->addConstraint("state" + std::to_string(state) + "," + std::to_string(choice), constraint); } } solver->optimize(); return storm::utility::convertNumber(solver->getContinuousValue(lambda)); } template std::unique_ptr SparseMdpPrctlHelper::computeConditionalProbabilities(OptimizationDirection dir, storm::storage::sparse::state_type initialState, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { // For the max-case, we can simply take the given target states. For the min-case, however, we need to // find the MECs of non-target states and make them the new target states. storm::storage::BitVector fixedTargetStates; if (dir == OptimizationDirection::Maximize) { fixedTargetStates = targetStates; } else { fixedTargetStates = storm::storage::BitVector(targetStates.size()); storm::storage::MaximalEndComponentDecomposition mecDecomposition(transitionMatrix, backwardTransitions, ~targetStates); for (auto const& mec : mecDecomposition) { for (auto const& stateActionsPair : mec) { fixedTargetStates.set(stateActionsPair.first); } } } // We solve the max-case and later adjust the result if the optimization direction was to minimize. 
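// Illustration (added, not part of the original Storm code): the construction below first computes the two
// unconstrained maximal reachability probabilities Pr^max_s(F condition) and Pr^max_s(F target'), where
// target' is the (possibly MEC-adjusted) target set from above. The model is then rebuilt over the relevant
// states together with three fresh states (goal, stop, fail) and a restart mechanism for the 'problematic'
// states that can avoid both sets. The conditional probability is finally obtained as an ordinary reachability
// probability of the new goal state; in the minimizing case the returned value is one minus that probability.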
storm::storage::BitVector initialStatesBitVector(transitionMatrix.getRowGroupCount()); initialStatesBitVector.set(initialState); storm::storage::BitVector allStates(initialStatesBitVector.size(), true); std::vector conditionProbabilities = std::move(computeUntilProbabilities(OptimizationDirection::Maximize, transitionMatrix, backwardTransitions, allStates, conditionStates, false, false, minMaxLinearEquationSolverFactory).values); // If the conditional probability is undefined for the initial state, we return directly. if (storm::utility::isZero(conditionProbabilities[initialState])) { return std::unique_ptr(new ExplicitQuantitativeCheckResult(initialState, storm::utility::infinity())); } std::vector targetProbabilities = std::move(computeUntilProbabilities(OptimizationDirection::Maximize, transitionMatrix, backwardTransitions, allStates, fixedTargetStates, false, false, minMaxLinearEquationSolverFactory).values); storm::storage::BitVector statesWithProbabilityGreater0E(transitionMatrix.getRowGroupCount(), true); storm::storage::sparse::state_type state = 0; for (auto const& element : conditionProbabilities) { if (storm::utility::isZero(element)) { statesWithProbabilityGreater0E.set(state, false); } ++state; } // Determine those states that need to be equipped with a restart mechanism. storm::storage::BitVector problematicStates = storm::utility::graph::performProb0E(transitionMatrix, transitionMatrix.getRowGroupIndices(), backwardTransitions, allStates, conditionStates | fixedTargetStates); // Otherwise, we build the transformed MDP. storm::storage::BitVector relevantStates = storm::utility::graph::getReachableStates(transitionMatrix, initialStatesBitVector, allStates, conditionStates | fixedTargetStates); std::vector numberOfStatesBeforeRelevantStates = relevantStates.getNumberOfSetBitsBeforeIndices(); storm::storage::sparse::state_type newGoalState = relevantStates.getNumberOfSetBits(); storm::storage::sparse::state_type newStopState = newGoalState + 1; storm::storage::sparse::state_type newFailState = newStopState + 1; // Build the transitions of the (relevant) states of the original model. storm::storage::SparseMatrixBuilder builder(0, newFailState + 1, 0, true, true); uint_fast64_t currentRow = 0; for (auto state : relevantStates) { builder.newRowGroup(currentRow); if (fixedTargetStates.get(state)) { builder.addNextValue(currentRow, newGoalState, conditionProbabilities[state]); if (!storm::utility::isZero(conditionProbabilities[state])) { builder.addNextValue(currentRow, newFailState, storm::utility::one() - conditionProbabilities[state]); } ++currentRow; } else if (conditionStates.get(state)) { builder.addNextValue(currentRow, newGoalState, targetProbabilities[state]); if (!storm::utility::isZero(targetProbabilities[state])) { builder.addNextValue(currentRow, newStopState, storm::utility::one() - targetProbabilities[state]); } ++currentRow; } else { for (uint_fast64_t row = transitionMatrix.getRowGroupIndices()[state]; row < transitionMatrix.getRowGroupIndices()[state + 1]; ++row) { for (auto const& successorEntry : transitionMatrix.getRow(row)) { builder.addNextValue(currentRow, numberOfStatesBeforeRelevantStates[successorEntry.getColumn()], successorEntry.getValue()); } ++currentRow; } if (problematicStates.get(state)) { builder.addNextValue(currentRow, numberOfStatesBeforeRelevantStates[initialState], storm::utility::one()); ++currentRow; } } } // Now build the transitions of the newly introduced states. 
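// Illustration (added, not part of the original Storm code): the three fresh states get one choice each; goal
// and stop are absorbing (self-loop with probability one), while fail moves back to the copy of the initial
// state with probability one, realizing the restart. Only the new goal state is marked as a target for the
// final reachability query.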
builder.newRowGroup(currentRow); builder.addNextValue(currentRow, newGoalState, storm::utility::one()); ++currentRow; builder.newRowGroup(currentRow); builder.addNextValue(currentRow, newStopState, storm::utility::one()); ++currentRow; builder.newRowGroup(currentRow); builder.addNextValue(currentRow, numberOfStatesBeforeRelevantStates[initialState], storm::utility::one()); ++currentRow; // Finally, build the matrix and dispatch the query as a reachability query. storm::storage::BitVector newGoalStates(newFailState + 1); newGoalStates.set(newGoalState); storm::storage::SparseMatrix newTransitionMatrix = builder.build(); storm::storage::SparseMatrix newBackwardTransitions = newTransitionMatrix.transpose(true); std::vector goalProbabilities = std::move(computeUntilProbabilities(OptimizationDirection::Maximize, newTransitionMatrix, newBackwardTransitions, storm::storage::BitVector(newFailState + 1, true), newGoalStates, false, false, minMaxLinearEquationSolverFactory).values); return std::unique_ptr(new ExplicitQuantitativeCheckResult(initialState, dir == OptimizationDirection::Maximize ? goalProbabilities[numberOfStatesBeforeRelevantStates[initialState]] : storm::utility::one() - goalProbabilities[numberOfStatesBeforeRelevantStates[initialState]])); } template class SparseMdpPrctlHelper; template std::vector SparseMdpPrctlHelper::computeInstantaneousRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, uint_fast64_t stepCount, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template std::vector SparseMdpPrctlHelper::computeCumulativeRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, uint_fast64_t stepBound, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(storm::solver::SolveGoal const& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint); template std::vector SparseMdpPrctlHelper::computeLongRunAverageRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template double SparseMdpPrctlHelper::computeLraForMaximalEndComponent(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel 
const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template double SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template double SparseMdpPrctlHelper::computeLraForMaximalEndComponentLP(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); #ifdef STORM_HAVE_CARL template class SparseMdpPrctlHelper; template std::vector SparseMdpPrctlHelper::computeInstantaneousRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, uint_fast64_t stepCount, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template std::vector SparseMdpPrctlHelper::computeCumulativeRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, uint_fast64_t stepBound, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint); template MDPSparseModelCheckingHelperReturnType SparseMdpPrctlHelper::computeReachabilityRewards(storm::solver::SolveGoal const& goal, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint); template std::vector SparseMdpPrctlHelper::computeLongRunAverageRewards(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::models::sparse::StandardRewardModel const& rewardModel, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template storm::RationalNumber SparseMdpPrctlHelper::computeLraForMaximalEndComponent(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template storm::RationalNumber SparseMdpPrctlHelper::computeLraForMaximalEndComponentVI(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec, 
storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory); template storm::RationalNumber SparseMdpPrctlHelper::computeLraForMaximalEndComponentLP(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); #endif } } }
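// -------------------------------------------------------------------------------------------------
// Illustrative usage sketch (added, not part of the original Storm code; kept as a comment on purpose): how a
// caller might invoke this helper on a tiny two-state MDP. The matrix builder arguments and the
// GeneralMinMaxLinearEquationSolverFactory type are assumptions based on the interfaces used above.
//
//   storm::storage::SparseMatrixBuilder<double> builder(3, 2, 4, false, true, 2);
//   builder.newRowGroup(0);                          // state 0 has two choices
//   builder.addNextValue(0, 0, 0.5);                 // choice 0: stay in state 0 with 0.5 ...
//   builder.addNextValue(0, 1, 0.5);                 // ... or move to state 1 with 0.5
//   builder.addNextValue(1, 1, 1.0);                 // choice 1: move to state 1 with certainty
//   builder.newRowGroup(2);                          // state 1 has a single self-loop choice
//   builder.addNextValue(2, 1, 1.0);
//   storm::storage::SparseMatrix<double> transitions = builder.build();
//
//   storm::storage::BitVector phi(2, true), psi(2, false);
//   psi.set(1);
//
//   storm::solver::GeneralMinMaxLinearEquationSolverFactory<double> factory;
//   auto res = storm::modelchecker::helper::SparseMdpPrctlHelper<double>::computeUntilProbabilities(
//                  storm::OptimizationDirection::Maximize, transitions, transitions.transpose(true),
//                  phi, psi, false, false, factory);
//   // For this toy model, res.values[0] == 1.0 and res.values[1] == 1.0.
// -------------------------------------------------------------------------------------------------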