#include "storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h"

#include "storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h"
#include "storm/modelchecker/prctl/helper/HybridMdpPrctlHelper.h"

#include "storm/storage/dd/DdManager.h"
#include "storm/storage/dd/Add.h"
#include "storm/storage/dd/Bdd.h"

#include "storm/utility/macros.h"
#include "storm/utility/graph.h"
#include "storm/utility/constants.h"

#include "storm/solver/SolveGoal.h"

#include "storm/models/symbolic/StandardRewardModel.h"

#include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
#include "storm/modelchecker/results/SymbolicQuantitativeCheckResult.h"
#include "storm/modelchecker/results/HybridQuantitativeCheckResult.h"
#include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h"

#include "storm/utility/Stopwatch.h"

#include "storm/exceptions/InvalidOperationException.h"
#include "storm/exceptions/NotSupportedException.h"

namespace storm {
namespace modelchecker {
namespace helper {

// Discretizes a Markov automaton reward model in place: on Markovian states the rate-based state
// reward is divided by the exit rate (i.e. multiplied with the expected sojourn time 1/E(s)),
// on probabilistic states it is set to zero. Only state rewards are touched here.
// NOTE(review): extraction stripped all template argument lists in this file; they were
// reconstructed from the visible residue — confirm against the project header.
template<storm::dd::DdType DdType, typename ValueType>
void discretizeRewardModel(typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType& rewardModel, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& markovianStates) {
    if (rewardModel.hasStateRewards()) {
        rewardModel.getStateRewardVector() *= markovianStates.ite(exitRateVector.getDdManager().template getAddOne<ValueType>() / exitRateVector, exitRateVector.getDdManager().template getAddZero<ValueType>());
    }
}

// Computes expected reachability rewards for a symbolic Markov automaton by discretizing the
// reward model (see above) and delegating to the hybrid MDP helper on the embedded MDP.
template<storm::dd::DdType DdType, typename ValueType>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel, storm::dd::Bdd<DdType> const& targetStates, bool qualitative) {
    auto discretizedRewardModel = rewardModel;
    discretizeRewardModel(discretizedRewardModel, exitRateVector, markovianStates);
    return HybridMdpPrctlHelper<DdType, ValueType>::computeReachabilityRewards(env, dir, model, transitionMatrix, discretizedRewardModel, targetStates, qualitative);
}

// Computes time-bounded until probabilities for value types that support exponentials (e.g.
// double). Trivial time bounds are answered symbolically; otherwise the query is converted to an
// explicit (sparse) instance and solved by the sparse Markov automaton helper.
template<storm::dd::DdType DdType, typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& phiStates, storm::dd::Bdd<DdType> const& psiStates, bool qualitative, double lowerBound, double upperBound) {
    // If the time bounds are [0, inf], we rather call untimed reachability.
    if (storm::utility::isZero(lowerBound) && upperBound == storm::utility::infinity<double>()) {
        return storm::modelchecker::helper::HybridMdpPrctlHelper<DdType, ValueType>::computeUntilProbabilities(env, dir, model, transitionMatrix, phiStates, psiStates, qualitative);
    }

    // If the interval is of the form [0,0], we can return the result directly
    if (storm::utility::isZero(upperBound)) {
        // In this case, the interval is of the form [0, 0]: the probability is 1 exactly on psi states.
        return std::unique_ptr<CheckResult>(new SymbolicQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), psiStates.template toAdd<ValueType>()));
    }

    // If we reach this point, we convert this query to an instance for the sparse engine.
    storm::utility::Stopwatch conversionWatch(true);

    // Create ODD for the translation.
    storm::dd::Odd odd = model.getReachableStates().createOdd();

    storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd);
    std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
    conversionWatch.stop();
    STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");

    auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(env, storm::solver::SolveGoal<ValueType>(dir), explicitTransitionMatrix, explicitExitRateVector, markovianStates.toVector(odd), phiStates.toVector(odd), psiStates.toVector(odd), {lowerBound, upperBound});

    // Wrap the explicit vector in a hybrid result; the symbolic part is empty (zero BDD/ADD).
    return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(explicitResult)));
}

// Overload selected for value types without exponentials (e.g. exact rational arithmetic):
// time-bounded until is not computable there, so this always throws.
template<storm::dd::DdType DdType, typename ValueType, typename std::enable_if<!storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const&, OptimizationDirection, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const&, storm::dd::Add<DdType, ValueType> const&, storm::dd::Bdd<DdType> const&, storm::dd::Add<DdType, ValueType> const&, storm::dd::Bdd<DdType> const&, storm::dd::Bdd<DdType> const&, bool, double, double) {
    STORM_LOG_THROW(false, storm::exceptions::InvalidOperationException, "Computing bounded until probabilities is unsupported for this value type.");
}

// Computes long-run average probabilities by translating the symbolic model to an explicit
// representation and invoking the sparse Markov automaton helper.
template<storm::dd::DdType DdType, typename ValueType>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates) {
    // Convert this query to an instance for the sparse engine.
    storm::utility::Stopwatch conversionWatch(true);

    // Create ODD for the translation.
    storm::dd::Odd odd = model.getReachableStates().createOdd();

    storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd);
    std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
    conversionWatch.stop();
    STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");

    // The sparse helper also needs the backward transitions (transpose with row groups kept).
    auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, dir, explicitTransitionMatrix, explicitTransitionMatrix.transpose(true), explicitExitRateVector, markovianStates.toVector(odd), psiStates.toVector(odd));

    return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(explicitResult)));
}

// Computes long-run average rewards by translating the symbolic model and reward model to
// explicit representations and invoking the sparse Markov automaton helper. Transition rewards
// are rejected explicitly.
template<storm::dd::DdType DdType, typename ValueType>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel) {
    // Convert this query to an instance for the sparse engine.
    storm::utility::Stopwatch conversionWatch(true);

    // Create ODD for the translation.
    storm::dd::Odd odd = model.getReachableStates().createOdd();

    std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
    storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
    boost::optional<std::vector<ValueType>> optionalStateRewards, optionalStateActionRewards;
    if (rewardModel.hasStateRewards()) {
        optionalStateRewards = rewardModel.getStateRewardVector().toVector(odd);
    }
    if (rewardModel.hasStateActionRewards()) {
        // Translate matrix and action-reward vector together so both use the same row grouping.
        auto matrixRewards = transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), model.getNondeterminismVariables(), odd, odd);
        explicitTransitionMatrix = std::move(matrixRewards.first);
        optionalStateActionRewards = std::move(matrixRewards.second);
    } else {
        explicitTransitionMatrix = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd);
    }
    STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine.");
    storm::models::sparse::StandardRewardModel<ValueType> explicitRewardModel(optionalStateRewards, optionalStateActionRewards);
    conversionWatch.stop();
    STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");

    auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, dir, explicitTransitionMatrix, explicitTransitionMatrix.transpose(true), explicitExitRateVector, markovianStates.toVector(odd), explicitRewardModel);

    return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(explicitResult)));
}

// Explicit instantiations.

// Cudd, double.
template std::unique_ptr HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel, storm::dd::Bdd const& targetStates, bool qualitative); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& phiStates, storm::dd::Bdd const& psiStates, bool qualitative, double lowerBound, double upperBound); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& psiStates); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel); // Sylvan, double. 
template std::unique_ptr HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel, storm::dd::Bdd const& targetStates, bool qualitative); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& phiStates, storm::dd::Bdd const& psiStates, bool qualitative, double lowerBound, double upperBound); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& psiStates); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel); // Sylvan, rational number. 
template std::unique_ptr HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel, storm::dd::Bdd const& targetStates, bool qualitative); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& phiStates, storm::dd::Bdd const& psiStates, bool qualitative, double lowerBound, double upperBound); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, storm::dd::Bdd const& psiStates); template std::unique_ptr HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector, typename storm::models::symbolic::Model::RewardModelType const& rewardModel); } } }