diff --git a/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp b/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp index d09a0e58a..4b27befa7 100644 --- a/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp +++ b/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp @@ -77,73 +77,19 @@ namespace storm { } - template - std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& psiStates) { - - // Convert this query to an instance for the sparse engine. - storm::utility::Stopwatch conversionWatch(true); - // Create ODD for the translation. - storm::dd::Odd odd = model.getReachableStates().createOdd(); - storm::storage::SparseMatrix explicitTransitionMatrix = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd); - std::vector explicitExitRateVector = exitRateVector.toVector(odd); - conversionWatch.stop(); - STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms."); - - auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, dir, explicitTransitionMatrix, explicitTransitionMatrix.transpose(true), explicitExitRateVector, markovianStates.toVector(odd), psiStates.toVector(odd)); - return std::unique_ptr(new HybridQuantitativeCheckResult(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero(), model.getReachableStates(), std::move(odd), std::move(explicitResult))); - - } - - template - std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel) { - - // Convert this query to an instance for the sparse engine. - storm::utility::Stopwatch conversionWatch(true); - // Create ODD for the translation. 
- storm::dd::Odd odd = model.getReachableStates().createOdd(); - std::vector explicitExitRateVector = exitRateVector.toVector(odd); - storm::storage::SparseMatrix explicitTransitionMatrix; - boost::optional> optionalStateRewards, optionalStateActionRewards; - if (rewardModel.hasStateRewards()) { - optionalStateRewards = rewardModel.getStateRewardVector().toVector(odd); - } - if (rewardModel.hasStateActionRewards()) { - auto matrixRewards = transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), model.getNondeterminismVariables(), odd, odd); - explicitTransitionMatrix = std::move(matrixRewards.first); - optionalStateActionRewards = std::move(matrixRewards.second); - } else { - explicitTransitionMatrix = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd); - } - STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine."); - storm::models::sparse::StandardRewardModel explicitRewardModel(optionalStateRewards, optionalStateActionRewards); - conversionWatch.stop(); - STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms."); - - auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, dir, explicitTransitionMatrix, explicitTransitionMatrix.transpose(true), explicitExitRateVector, markovianStates.toVector(odd), explicitRewardModel); - return std::unique_ptr(new HybridQuantitativeCheckResult(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero(), model.getReachableStates(), std::move(odd), std::move(explicitResult))); - - } - // Explicit instantiations. // Cudd, double. template std::unique_ptr HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel, storm::dd::Bdd const& targetStates, bool qualitative); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& phiStates, storm::dd::Bdd const& psiStates, bool qualitative, double lowerBound, double upperBound); - template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& psiStates); - template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel); // Sylvan, double. 
template std::unique_ptr HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel, storm::dd::Bdd const& targetStates, bool qualitative); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& phiStates, storm::dd::Bdd const& psiStates, bool qualitative, double lowerBound, double upperBound); - template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& psiStates); - template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel); // Sylvan, rational number. template std::unique_ptr HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel, storm::dd::Bdd const& targetStates, bool qualitative); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& phiStates, storm::dd::Bdd const& psiStates, bool qualitative, double lowerBound, double upperBound); - template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& psiStates); - template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel); - } } diff --git a/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h b/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h index 45a82c0ff..092aeeb06 100644 --- a/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h +++ 
b/src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h @@ -27,13 +27,6 @@ namespace storm { template::SupportsExponential, int>::type = 0> static std::unique_ptr computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& phiStates, storm::dd::Bdd const& psiStates, bool qualitative, double lowerBound, double upperBound); - template - static std::unique_ptr computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& psiStates); - - template - static std::unique_ptr computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel); - - }; } diff --git a/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp b/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp index 6647d83a6..bdd1f544b 100644 --- a/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp +++ b/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp @@ -717,198 +717,6 @@ namespace storm { return SparseMdpPrctlHelper::computeReachabilityRewards(env, dir, transitionMatrix, backwardTransitions, scaledRewardModel, psiStates, false, produceScheduler); } - template - std::vector SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates) { - - uint64_t numberOfStates = transitionMatrix.getRowGroupCount(); - - // If there are no goal states, we avoid the computation and directly return zero. - if (psiStates.empty()) { - return std::vector(numberOfStates, storm::utility::zero()); - } - - // Likewise, if all bits are set, we can avoid the computation and set. - if (psiStates.full()) { - return std::vector(numberOfStates, storm::utility::one()); - } - - // Otherwise, reduce the long run average probabilities to long run average rewards. - // Every Markovian goal state gets reward one. 
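For context on the reduction described above: the removed computeLongRunAverageProbabilities turns the long-run average probability of being in a psi state into a long-run average reward query by giving every Markovian goal state reward one (probabilistic states can stay at zero, since no time passes in them). A minimal standalone sketch of building that reward vector, using plain STL containers instead of storm::storage::BitVector and StandardRewardModel; the function name is made up for illustration:

#include <cstddef>
#include <vector>

// Reward 1 per time unit in Markovian psi states, 0 elsewhere. The resulting
// vector is what the removed code wraps into a StandardRewardModel and hands
// to the reward-based LRA routine.
std::vector<double> lraProbabilityAsRewardQuery(std::vector<bool> const& markovianStates,
                                                std::vector<bool> const& psiStates) {
    std::vector<double> stateRewards(markovianStates.size(), 0.0);
    for (std::size_t state = 0; state < markovianStates.size(); ++state) {
        if (markovianStates[state] && psiStates[state]) {
            stateRewards[state] = 1.0;
        }
    }
    return stateRewards;
}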
- std::vector stateRewards(transitionMatrix.getRowGroupCount(), storm::utility::zero()); - storm::utility::vector::setVectorValues(stateRewards, markovianStates & psiStates, storm::utility::one()); - storm::models::sparse::StandardRewardModel rewardModel(std::move(stateRewards)); - - return computeLongRunAverageRewards(env, dir, transitionMatrix, backwardTransitions, exitRateVector, markovianStates, rewardModel); - - } - - template - std::vector SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel) { - - uint64_t numberOfStates = transitionMatrix.getRowGroupCount(); - - // Start by decomposing the Markov automaton into its MECs. - storm::storage::MaximalEndComponentDecomposition mecDecomposition(transitionMatrix, backwardTransitions); - - // Get some data members for convenience. - std::vector const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices(); - - // Now start by computing the long-run average for all end components in isolation. - std::vector lraValuesForEndComponents; - - // While doing so, we already gather some information for the following steps. - std::vector stateToMecIndexMap(numberOfStates); - storm::storage::BitVector statesInMecs(numberOfStates); - - auto underlyingSolverEnvironment = env; - if (env.solver().isForceSoundness()) { - // For sound computations, the error in the MECs plus the error in the remaining system should be less than the user-defined precision. - underlyingSolverEnvironment.solver().minMax().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber(2)); - underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion()); - underlyingSolverEnvironment.solver().lra().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber(2)); - } - - for (uint64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) { - storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex]; - - // Gather information for later use. - for (auto const& stateChoicesPair : mec) { - uint64_t state = stateChoicesPair.first; - - statesInMecs.set(state); - stateToMecIndexMap[state] = currentMecIndex; - } - - // Compute the LRA value for the current MEC. - lraValuesForEndComponents.push_back(computeLraForMaximalEndComponent(underlyingSolverEnvironment, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec)); - } - - // For fast transition rewriting, we build some auxiliary data structures. - storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs; - uint64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits(); - uint64_t lastStateNotInMecs = 0; - uint64_t numberOfStatesNotInMecs = 0; - std::vector statesNotInMecsBeforeIndex; - statesNotInMecsBeforeIndex.reserve(numberOfStates); - for (auto state : statesNotContainedInAnyMec) { - while (lastStateNotInMecs <= state) { - statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs); - ++lastStateNotInMecs; - } - ++numberOfStatesNotInMecs; - } - uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size(); - - // Finally, we are ready to create the SSP matrix and right-hand side of the SSP.
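The SSP construction that follows can be summarized as a sketch of the equation system being assembled (written for a maximizing query; r_i is the LRA value already computed for MEC M_i, and P(s,a,M_i) = \sum_{s' \in M_i} P(s,a,s') is the cumulative probability of entering M_i, which is exactly what auxiliaryStateToProbabilityMap accumulates):

\[ x_s = \max_{a \in Act(s)} \Big[ \sum_{s' \notin \text{MECs}} P(s,a,s')\, x_{s'} + \sum_i P(s,a,M_i)\, x_{M_i} \Big] \qquad \text{for } s \text{ not contained in any MEC,} \]
\[ x_{M_i} = \max\Big( r_i,\; \max_{s \in M_i,\ a \text{ leaving } M_i} \Big[ \sum_{s' \notin \text{MECs}} P(s,a,s')\, x_{s'} + \sum_j P(s,a,M_j)\, x_{M_j} \Big] \Big). \]

The right-hand-side vector b is zero for every copied choice and carries r_i for the single extra choice of the auxiliary state representing M_i, i.e. the option to stay in the MEC and collect its LRA value.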
- std::vector b; - typename storm::storage::SparseMatrixBuilder sspMatrixBuilder(0, numberOfSspStates , 0, false, true, numberOfSspStates); - - // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications). - uint64_t currentChoice = 0; - for (auto state : statesNotContainedInAnyMec) { - sspMatrixBuilder.newRowGroup(currentChoice); - - for (uint64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) { - std::vector auxiliaryStateToProbabilityMap(mecDecomposition.size()); - b.push_back(storm::utility::zero()); - - for (auto element : transitionMatrix.getRow(choice)) { - if (statesNotContainedInAnyMec.get(element.getColumn())) { - // If the target state is not contained in an MEC, we can copy over the entry. - sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue()); - } else { - // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector - // so that we are able to write the cumulative probability to the MEC into the matrix. - auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue(); - } - } - - // Now insert all (cumulative) probability values that target an MEC. - for (uint64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) { - if (auxiliaryStateToProbabilityMap[mecIndex] != 0) { - sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]); - } - } - } - } - - // Now we are ready to construct the choices for the auxiliary states. - for (uint64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) { - storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex]; - sspMatrixBuilder.newRowGroup(currentChoice); - - for (auto const& stateChoicesPair : mec) { - uint64_t state = stateChoicesPair.first; - storm::storage::FlatSet const& choicesInMec = stateChoicesPair.second; - - for (uint64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) { - - // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state. - if (choicesInMec.find(choice) == choicesInMec.end()) { - std::vector auxiliaryStateToProbabilityMap(mecDecomposition.size()); - b.push_back(storm::utility::zero()); - - for (auto element : transitionMatrix.getRow(choice)) { - if (statesNotContainedInAnyMec.get(element.getColumn())) { - // If the target state is not contained in an MEC, we can copy over the entry. - sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue()); - } else { - // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector - // so that we are able to write the cumulative probability to the MEC into the matrix. - auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue(); - } - } - - // Now insert all (cumulative) probability values that target an MEC. 
- for (uint64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) { - if (auxiliaryStateToProbabilityMap[targetMecIndex] != 0) { - sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]); - } - } - - ++currentChoice; - } - } - } - - // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC. - ++currentChoice; - b.push_back(lraValuesForEndComponents[mecIndex]); - } - - // Finalize the matrix and solve the corresponding system of equations. - storm::storage::SparseMatrix sspMatrix = sspMatrixBuilder.build(currentChoice, numberOfSspStates, numberOfSspStates); - - std::vector x(numberOfSspStates); - - // Check for requirements of the solver. - storm::solver::GeneralMinMaxLinearEquationSolverFactory minMaxLinearEquationSolverFactory; - storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, dir); - requirements.clearBounds(); - STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked."); - - std::unique_ptr> solver = minMaxLinearEquationSolverFactory.create(underlyingSolverEnvironment, sspMatrix); - solver->setHasUniqueSolution(); - solver->setHasNoEndComponents(); - solver->setLowerBound(storm::utility::zero()); - solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end())); - solver->setRequirementsChecked(); - solver->solveEquations(underlyingSolverEnvironment, dir, x, b); - - // Prepare result vector. - std::vector result(numberOfStates); - - // Set the values for states not contained in MECs. - storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, x); - - // Set the values for all states in MECs. - for (auto state : statesInMecs) { - result[state] = x[firstAuxiliaryStateIndex + stateToMecIndexMap[state]]; - } - - return result; - } - template MDPSparseModelCheckingHelperReturnType SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler) { @@ -922,268 +730,6 @@ namespace storm { return SparseMdpPrctlHelper::computeReachabilityRewards(env, dir, transitionMatrix, backwardTransitions, rewardModel, psiStates, false, produceScheduler); } - template - ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) { - - // If the mec only consists of a single state, we compute the LRA value directly - if (++mec.begin() == mec.end()) { - uint64_t state = mec.begin()->first; - STORM_LOG_THROW(markovianStates.get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported."); - ValueType result = rewardModel.hasStateRewards() ? 
rewardModel.getStateReward(state) : storm::utility::zero(); - if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) { - STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has nondeterministic behavior."); - uint64_t choice = *mec.begin()->second.begin(); - result += exitRateVector[state] * rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero()); - } - return result; - } - - // Solve MEC with the method specified in the settings - storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod(); - if ((storm::NumberTraits::IsExact || env.solver().isForceExact()) && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::LinearProgramming) { - STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method."); - method = storm::solver::LraMethod::LinearProgramming; - } else if (env.solver().isForceSoundness() && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) { - STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method."); - method = storm::solver::LraMethod::ValueIteration; - } - if (method == storm::solver::LraMethod::LinearProgramming) { - return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec); - } else if (method == storm::solver::LraMethod::ValueIteration) { - return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec); - } else { - STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique."); - } - } - - template - ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) { - std::unique_ptr> lpSolverFactory(new storm::utility::solver::LpSolverFactory()); - std::unique_ptr> solver = lpSolverFactory->create("LRA for MEC"); - solver->setOptimizationDirection(invert(dir)); - - // First, we need to create the variables for the problem. - std::map stateToVariableMap; - for (auto const& stateChoicesPair : mec) { - std::string variableName = "x" + std::to_string(stateChoicesPair.first); - stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName); - } - storm::expressions::Variable k = solver->addUnboundedContinuousVariable("k", storm::utility::one()); - solver->update(); - - // Now we encode the problem as constraints. - std::vector const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices(); - for (auto const& stateChoicesPair : mec) { - uint64_t state = stateChoicesPair.first; - - // Now, based on the type of the state, create a suitable constraint. 
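As I read the removed computeLraForMaximalEndComponentLP, the LP it assembles is roughly the following (shown for a maximizing query; E(s) is the exit rate, r(s) the state reward, r(s,a) the action reward, and transition rewards are ruled out by the caller). The solver direction is inverted, so k is minimized, and its optimum is the maximal long-run average reward of the MEC:

\[ \text{minimize } k \quad \text{subject to} \]
\[ x_s + \frac{k}{E(s)} \;\ge\; \frac{r(s)}{E(s)} + r(s,a_s) + \sum_{s'} P(s,a_s,s')\, x_{s'} \qquad \text{for every Markovian state } s \text{ with its unique choice } a_s, \]
\[ x_s \;\ge\; r(s,a) + \sum_{s'} P(s,a,s')\, x_{s'} \qquad \text{for every probabilistic state } s \text{ and each of its choices } a, \]

with all x_s unbounded. For a minimizing query the inequalities are flipped and k is maximized.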
- if (markovianStates.get(state)) { - STORM_LOG_ASSERT(stateChoicesPair.second.size() == 1, "Markovian state " << state << " is not deterministic: It has " << stateChoicesPair.second.size() << " choices."); - uint64_t choice = *stateChoicesPair.second.begin(); - - storm::expressions::Expression constraint = stateToVariableMap.at(state); - - for (auto element : transitionMatrix.getRow(nondeterministicChoiceIndices[state])) { - constraint = constraint - stateToVariableMap.at(element.getColumn()) * solver->getManager().rational((element.getValue())); - } - - constraint = constraint + solver->getManager().rational(storm::utility::one() / exitRateVector[state]) * k; - - storm::expressions::Expression rightHandSide = solver->getManager().rational(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, (ValueType) (storm::utility::one() / exitRateVector[state]))); - if (dir == OptimizationDirection::Minimize) { - constraint = constraint <= rightHandSide; - } else { - constraint = constraint >= rightHandSide; - } - solver->addConstraint("state" + std::to_string(state), constraint); - } else { - // For probabilistic states, we want to add the constraint x_s <= sum P(s, a, s') * x_s' where a is the current action - // and the sum ranges over all states s'. - for (auto choice : stateChoicesPair.second) { - storm::expressions::Expression constraint = stateToVariableMap.at(state); - - for (auto element : transitionMatrix.getRow(choice)) { - constraint = constraint - stateToVariableMap.at(element.getColumn()) * solver->getManager().rational(element.getValue()); - } - - storm::expressions::Expression rightHandSide = solver->getManager().rational(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero())); - if (dir == OptimizationDirection::Minimize) { - constraint = constraint <= rightHandSide; - } else { - constraint = constraint >= rightHandSide; - } - solver->addConstraint("state" + std::to_string(state), constraint); - } - } - } - - solver->optimize(); - return solver->getContinuousValue(k); - } - - template - ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) { - - // Initialize data about the mec - - storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false); - storm::storage::BitVector mecChoices(transitionMatrix.getRowCount(), false); - for (auto const& stateChoicesPair : mec) { - mecStates.set(stateChoicesPair.first); - for (auto const& choice : stateChoicesPair.second) { - mecChoices.set(choice); - } - } - storm::storage::BitVector markovianMecStates = mecStates & markovianStates; - storm::storage::BitVector probabilisticMecStates = mecStates & ~markovianStates; - storm::storage::BitVector probabilisticMecChoices = transitionMatrix.getRowFilter(probabilisticMecStates) & mecChoices; - STORM_LOG_THROW(!markovianMecStates.empty(), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. 
Computation of Long Run Average values not supported."); - bool hasProbabilisticStates = !probabilisticMecStates.empty(); - // Get the uniformization rate - - ValueType uniformizationRate = storm::utility::vector::max_if(exitRateVector, markovianMecStates); - // To ensure that the model is aperiodic, we need to make sure that every Markovian state gets a self loop. - // Hence, we increase the uniformization rate a little. - uniformizationRate *= (storm::utility::one() + storm::utility::convertNumber(env.solver().lra().getAperiodicFactor())); - - // Get the transitions of the submodel, that is - // * a matrix aMarkovian with all (uniformized) transitions from Markovian mec states to all Markovian mec states. - // * a matrix aMarkovianToProbabilistic with all (uniformized) transitions from Markovian mec states to all probabilistic mec states. - // * a matrix aProbabilistic with all transitions from probabilistic mec states to other probabilistic mec states. - // * a matrix aProbabilisticToMarkovian with all transitions from probabilistic mec states to all Markovian mec states. - typename storm::storage::SparseMatrix aMarkovian = transitionMatrix.getSubmatrix(true, markovianMecStates, markovianMecStates, true); - typename storm::storage::SparseMatrix aMarkovianToProbabilistic, aProbabilistic, aProbabilisticToMarkovian; - if (hasProbabilisticStates) { - aMarkovianToProbabilistic = transitionMatrix.getSubmatrix(true, markovianMecStates, probabilisticMecStates); - aProbabilistic = transitionMatrix.getSubmatrix(false, probabilisticMecChoices, probabilisticMecStates); - aProbabilisticToMarkovian = transitionMatrix.getSubmatrix(false, probabilisticMecChoices, markovianMecStates); - } - - // The matrices with transitions from Markovian states need to be uniformized. 
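Uniformization as performed in the loop below, for reference: with \lambda = (1 + \text{aperiodicFactor}) \cdot \max_s E(s) (the inflation guarantees a self-loop at every Markovian state and hence aperiodicity), a Markovian state s with exit rate E(s) gets the discrete-time transition probabilities

\[ \hat P(s,s') = \frac{E(s)}{\lambda}\, P(s,s') \quad \text{for } s' \ne s, \qquad \hat P(s,s) = 1 - \frac{E(s)}{\lambda}\,\big(1 - P(s,s)\big), \]

and its reward per uniformization step becomes, roughly and ignoring transition rewards, r(s)/\lambda + (E(s)/\lambda)\, r(s,a_s), matching the scaling factors passed to getTotalStateActionReward further down.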
- uint64_t subState = 0; - for (auto state : markovianMecStates) { - ValueType uniformizationFactor = exitRateVector[state] / uniformizationRate; - if (hasProbabilisticStates) { - for (auto& entry : aMarkovianToProbabilistic.getRow(subState)) { - entry.setValue(entry.getValue() * uniformizationFactor); - } - } - for (auto& entry : aMarkovian.getRow(subState)) { - if (entry.getColumn() == subState) { - entry.setValue(storm::utility::one() - uniformizationFactor * (storm::utility::one() - entry.getValue())); - } else { - entry.setValue(entry.getValue() * uniformizationFactor); - } - } - ++subState; - } - - // Compute the rewards obtained in a single uniformization step - - std::vector markovianChoiceRewards; - markovianChoiceRewards.reserve(aMarkovian.getRowCount()); - for (auto const& state : markovianMecStates) { - ValueType stateRewardScalingFactor = storm::utility::one() / uniformizationRate; - ValueType actionRewardScalingFactor = exitRateVector[state] / uniformizationRate; - assert(transitionMatrix.getRowGroupSize(state) == 1); - uint64_t choice = transitionMatrix.getRowGroupIndices()[state]; - markovianChoiceRewards.push_back(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, stateRewardScalingFactor, actionRewardScalingFactor)); - } - - std::vector probabilisticChoiceRewards; - if (hasProbabilisticStates) { - probabilisticChoiceRewards.reserve(aProbabilistic.getRowCount()); - for (auto const& state : probabilisticMecStates) { - uint64_t groupStart = transitionMatrix.getRowGroupIndices()[state]; - uint64_t groupEnd = transitionMatrix.getRowGroupIndices()[state + 1]; - for (uint64_t choice = probabilisticMecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = probabilisticMecChoices.getNextSetIndex(choice + 1)) { - probabilisticChoiceRewards.push_back(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero())); - } - } - } - - // start the iterations - - ValueType precision = storm::utility::convertNumber(env.solver().lra().getPrecision()) / uniformizationRate; - bool relative = env.solver().lra().getRelativeTerminationCriterion(); - std::vector v(aMarkovian.getRowCount(), storm::utility::zero()); - std::vector w = v; - std::vector x, b; - auto solverEnv = env; - std::unique_ptr> solver; - if (hasProbabilisticStates) { - if (env.solver().isForceSoundness()) { - // To get correct results, the inner equation systems are solved exactly. - // TODO investigate how an error would propagate - solverEnv.solver().setForceExact(true); - } - - x.resize(aProbabilistic.getRowGroupCount(), storm::utility::zero()); - b = probabilisticChoiceRewards; - - solver = setUpProbabilisticStatesSolver(solverEnv, dir, aProbabilistic); - } - - uint64_t iter = 0; - boost::optional maxIter; - if (env.solver().lra().isMaximalIterationCountSet()) { - maxIter = env.solver().lra().getMaximalIterationCount(); - } - while (!maxIter.is_initialized() || iter < maxIter.get()) { - ++iter; - // Compute the expected total rewards for the probabilistic states - if (hasProbabilisticStates) { - if (solver) { - solver->solveEquations(solverEnv, dir, x, b); - } else { - storm::utility::vector::reduceVectorMinOrMax(dir, b, x, aProbabilistic.getRowGroupIndices()); - } - } - // now compute the values for the markovian states. 
We also keep track of the maximal and minimal difference between two values (for convergence checking) - auto vIt = v.begin(); - uint64_t row = 0; - ValueType newValue = markovianChoiceRewards[row] + aMarkovian.multiplyRowWithVector(row, w); - if (hasProbabilisticStates) { - newValue += aMarkovianToProbabilistic.multiplyRowWithVector(row, x); - } - ValueType maxDiff = newValue - *vIt; - ValueType minDiff = maxDiff; - *vIt = newValue; - for (++vIt, ++row; row < aMarkovian.getRowCount(); ++vIt, ++row) { - newValue = markovianChoiceRewards[row] + aMarkovian.multiplyRowWithVector(row, w); - if (hasProbabilisticStates) { - newValue += aMarkovianToProbabilistic.multiplyRowWithVector(row, x); - } - ValueType diff = newValue - *vIt; - maxDiff = std::max(maxDiff, diff); - minDiff = std::min(minDiff, diff); - *vIt = newValue; - } - - // Check for convergence - if ((maxDiff - minDiff) <= (relative ? (precision * (v.front() + minDiff)) : precision)) { - break; - } - if (storm::utility::resources::isTerminate()) { - break; - } - - // update the rhs of the MinMax equation system - ValueType referenceValue = v.front(); - storm::utility::vector::applyPointwise(v, w, [&referenceValue] (ValueType const& v_i) -> ValueType { return v_i - referenceValue; }); - if (hasProbabilisticStates) { - aProbabilisticToMarkovian.multiplyWithVector(w, b); - storm::utility::vector::addVectors(b, probabilisticChoiceRewards, b); - } - } - if (maxIter.is_initialized() && iter == maxIter.get()) { - STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations."); - } else { - STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations."); - } - return v.front() * uniformizationRate; - } - template std::vector SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::pair const& boundsPair); template MDPSparseModelCheckingHelperReturnType SparseMarkovAutomatonCslHelper::computeUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, bool produceScheduler); @@ -1192,18 +738,8 @@ namespace storm { template MDPSparseModelCheckingHelperReturnType SparseMarkovAutomatonCslHelper::computeTotalRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel, bool produceScheduler); - template std::vector SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates); - - template std::vector SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& 
transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel); - template MDPSparseModelCheckingHelperReturnType SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler); - template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); - - template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); - - template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); - template std::vector SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, storm::solver::SolveGoal&& goal, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::pair const& boundsPair); template MDPSparseModelCheckingHelperReturnType SparseMarkovAutomatonCslHelper::computeUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, bool produceScheduler); @@ -1212,18 +748,7 @@ namespace storm { template MDPSparseModelCheckingHelperReturnType SparseMarkovAutomatonCslHelper::computeTotalRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel, bool produceScheduler); - template std::vector SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates); - - template std::vector 
SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel); - template MDPSparseModelCheckingHelperReturnType SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler); - - template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); - - template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); - - template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel const& rewardModel, storm::storage::MaximalEndComponent const& mec); - } } } diff --git a/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h b/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h index ed0dfddef..e52017aed 100644 --- a/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h +++ b/src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h @@ -34,37 +34,9 @@ namespace storm { template static MDPSparseModelCheckingHelperReturnType computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::BitVector const& psiStates, bool produceScheduler); - template - static std::vector computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates); - - template - static std::vector computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel); - template static 
MDPSparseModelCheckingHelperReturnType computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::SparseMatrix const& backwardTransitions, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler); - private: - /*! - * Computes the long-run average value for the given maximal end component of a Markov automaton. - * - * Implementations are based on Linear Programming (LP) and Value Iteration (VI). - * - * @param dir Sets whether the long-run average is to be minimized or maximized. - * @param transitionMatrix The transition matrix of the underlying Markov automaton. - * @param markovianStates A bit vector storing all Markovian states. - * @param exitRateVector A vector with exit rates for all states. Exit rates of probabilistic states are - * assumed to be zero. - * @param rewardModel The considered reward model. - * @param mec The maximal end component to consider for computing the long-run average. - * @return The long-run average value of the given MEC (with respect to the given reward model). - */ - template - static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec); - template - static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec); - template - static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec); };
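A closing note on the removed computeLraForMaximalEndComponentVI: it performs a relative value iteration on the uniformized MEC. Writing \hat r and \hat P for the uniformized rewards and probabilities, MS/PS for the Markovian/probabilistic MEC states, and \hat s for the first Markovian state, one iteration computes, sketched for maximization,

\[ x_k = \text{optimal expected total reward collected in PS before re-entering MS, given terminal values } w_k, \]
\[ v_{k+1}(s) = \hat r(s) + \sum_{s' \in PS} \hat P(s,s')\, x_k(s') + \sum_{s' \in MS} \hat P(s,s')\, w_k(s'), \qquad w_{k+1} = v_{k+1} - v_{k+1}(\hat s). \]

The loop stops once the span \max_s \big(v_{k+1}(s) - v_k(s)\big) - \min_s \big(v_{k+1}(s) - v_k(s)\big) drops below the precision threshold (or its relative variant, if requested) and returns v(\hat s) scaled by the uniformization rate, i.e. the gain per uniformization step converted back into a rate per time unit.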