diff --git a/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp b/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp index 8cec970c6..b2833ee78 100644 --- a/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp +++ b/src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp @@ -5,6 +5,8 @@ #include "storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h" #include "storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h" #include "storm/modelchecker/prctl/helper/HybridMdpPrctlHelper.h" +#include "storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h" +#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h" #include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h" @@ -100,20 +102,23 @@ namespace storm { template std::unique_ptr HybridMarkovAutomatonCslModelChecker::computeLongRunAverageProbabilities(Environment const& env, CheckTask const& checkTask) { - storm::logic::StateFormula const& stateFormula = checkTask.getFormula(); - std::unique_ptr subResultPointer = this->check(env, stateFormula); - SymbolicQualitativeCheckResult const& subResult = subResultPointer->asSymbolicQualitativeCheckResult(); - STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); - - return storm::modelchecker::helper::HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector(), subResult.getTruthValuesVector()); - + storm::logic::StateFormula const& stateFormula = checkTask.getFormula(); + std::unique_ptr subResultPointer = this->check(env, stateFormula); + SymbolicQualitativeCheckResult const& subResult = 
subResultPointer->asSymbolicQualitativeCheckResult(); + STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); + + storm::modelchecker::helper::HybridNondeterministicInfiniteHorizonHelper helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector()); + storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel()); + return helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector()); } template std::unique_ptr HybridMarkovAutomatonCslModelChecker::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask const& checkTask) { - STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); - auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); - return storm::modelchecker::helper::HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector(), rewardModel.get()); + STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model."); + auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask); + storm::modelchecker::helper::HybridNondeterministicInfiniteHorizonHelper helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), 
this->getModel().getExitRateVector()); + storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel()); + return helper.computeLongRunAverageRewards(env, rewardModel.get()); } // Explicitly instantiate the model checker. diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp new file mode 100644 index 000000000..b9868f5a9 --- /dev/null +++ b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.cpp @@ -0,0 +1,99 @@ +#include "HybridNondeterministicInfiniteHorizonHelper.h" + +#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h" +#include "storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h" + +#include "storm/utility/macros.h" + +#include "storm/exceptions/NotSupportedException.h" + +namespace storm { + namespace modelchecker { + namespace helper { + + template + HybridNondeterministicInfiniteHorizonHelper::HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel const& model, storm::dd::Add const& transitionMatrix) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr) { + // Intentionally left empty. + } + + template + HybridNondeterministicInfiniteHorizonHelper::HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRateVector) { + // Intentionally left empty. 
+ } + + template + std::unique_ptr> HybridNondeterministicInfiniteHorizonHelper::computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd const& psiStates) { + // Convert this query to an instance for the sparse engine. + // Create ODD for the translation. + storm::dd::Odd odd = _model.getReachableStates().createOdd(); + storm::storage::SparseMatrix explicitTransitionMatrix = _transitionMatrix.toMatrix(_model.getNondeterminismVariables(), odd, odd); + std::unique_ptr> sparseHelper; + std::vector explicitExitRateVector; + storm::storage::BitVector explicitMarkovianStates; + if (isContinuousTime()) { + explicitExitRateVector = _exitRates->toVector(odd); + explicitMarkovianStates = _markovianStates->toVector(odd); + sparseHelper = std::make_unique>(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector); + } else { + sparseHelper = std::make_unique>(explicitTransitionMatrix); + } + storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*sparseHelper, *this, [&odd](storm::dd::Bdd const& s){ return s.toVector(odd); }); + STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction not supported in Hybrid engine."); + auto explicitResult = sparseHelper->computeLongRunAverageProbabilities(env, psiStates.toVector(odd)); + return std::make_unique>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero(), _model.getReachableStates(), std::move(odd), std::move(explicitResult)); + } + + template + std::unique_ptr> HybridNondeterministicInfiniteHorizonHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel const& rewardModel) { + // Convert this query to an instance for the sparse engine. + // Create ODD for the translation. 
+ storm::dd::Odd odd = _model.getReachableStates().createOdd(); + + // Create matrix and reward vectors + storm::storage::SparseMatrix explicitTransitionMatrix; + std::vector explicitStateRewards, explicitActionRewards; + if (rewardModel.hasStateRewards()) { + explicitStateRewards = rewardModel.getStateRewardVector().toVector(odd); + } + if (rewardModel.hasStateActionRewards()) { + // Matrix and action-based vector have to be produced at the same time to guarantee the correct order + auto matrixRewards = _transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), _model.getNondeterminismVariables(), odd, odd); + explicitTransitionMatrix = std::move(matrixRewards.first); + explicitActionRewards = std::move(matrixRewards.second); + } else { + // Translate matrix only + explicitTransitionMatrix = _transitionMatrix.toMatrix(_model.getNondeterminismVariables(), odd, odd); + } + STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine."); + + // Create remaining components and helper + std::vector explicitExitRateVector; + storm::storage::BitVector explicitMarkovianStates; + std::unique_ptr> sparseHelper; + if (isContinuousTime()) { + explicitExitRateVector = _exitRates->toVector(odd); + explicitMarkovianStates = _markovianStates->toVector(odd); + sparseHelper = std::make_unique>(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector); + } else { + sparseHelper = std::make_unique>(explicitTransitionMatrix); + } + storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*sparseHelper, *this, [&odd](storm::dd::Bdd const& s){ return s.toVector(odd); }); + + STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction not supported in Hybrid engine."); + auto explicitResult = sparseHelper->computeLongRunAverageValues(env, rewardModel.hasStateRewards() ? 
&explicitStateRewards : nullptr, rewardModel.hasStateActionRewards() ? &explicitActionRewards : nullptr); + return std::make_unique>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero(), _model.getReachableStates(), std::move(odd), std::move(explicitResult)); + } + + template + bool HybridNondeterministicInfiniteHorizonHelper::isContinuousTime() const { + STORM_LOG_ASSERT((_markovianStates == nullptr) == (_exitRates == nullptr), "Inconsistent information given: Have Markovian states but no exit rates (or vice versa)." ); + return _markovianStates != nullptr; + } + + template class HybridNondeterministicInfiniteHorizonHelper; + template class HybridNondeterministicInfiniteHorizonHelper; + template class HybridNondeterministicInfiniteHorizonHelper; + + } + } +} \ No newline at end of file diff --git a/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h new file mode 100644 index 000000000..d6a67089b --- /dev/null +++ b/src/storm/modelchecker/helper/infinitehorizon/HybridNondeterministicInfiniteHorizonHelper.h @@ -0,0 +1,64 @@ +#pragma once +#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h" + +#include "storm/modelchecker/results/HybridQuantitativeCheckResult.h" + +#include "storm/models/symbolic/NondeterministicModel.h" +#include "storm/models/symbolic/StandardRewardModel.h" + +#include "storm/storage/dd/DdManager.h" +#include "storm/storage/dd/Add.h" +#include "storm/storage/dd/Bdd.h" + +namespace storm { + class Environment; + + namespace modelchecker { + namespace helper { + + /*! + * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system. + */ + template + class HybridNondeterministicInfiniteHorizonHelper : public SingleValueModelCheckerHelper { + + public: + /*! 
+ * Initializes the helper for a discrete time (i.e. MDP) + */ + HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel const& model, storm::dd::Add const& transitionMatrix); + + /*! + * Initializes the helper for a continuous time (i.e. MA) + */ + HybridNondeterministicInfiniteHorizonHelper(storm::models::symbolic::NondeterministicModel const& model, storm::dd::Add const& transitionMatrix, storm::dd::Bdd const& markovianStates, storm::dd::Add const& exitRateVector); + + /*! + * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState + * @return a value for each state + */ + std::unique_ptr> computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd const& psiStates); + + /*! + * Computes the long run average rewards, i.e., the average reward collected per time unit + * @return a value for each state + */ + std::unique_ptr> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel const& rewardModel); + + protected: + + /*! + * @return true iff this is a computation on a continuous time model (i.e. MA) + */ + bool isContinuousTime() const; + + + private: + storm::models::symbolic::NondeterministicModel const& _model; + storm::dd::Add const& _transitionMatrix; + storm::dd::Bdd const* _markovianStates; + storm::dd::Add const* _exitRates; + }; + } + } +}