// NOTE(review): this file had been mangled by a markup-stripping step that removed every
// `<...>` template construct and collapsed all lines. The template headers, template
// arguments, and explicit instantiations below are reconstructed from usage context
// (e.g. `Nondeterministic` is used as a bool, `ValueType` as the matrix entry type).
// The instantiation argument lists are the conventional Storm set — TODO confirm against upstream.

#include "SparseInfiniteHorizonHelper.h"

#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"

#include "storm/models/sparse/StandardRewardModel.h"

#include "storm/storage/SparseMatrix.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/storage/StronglyConnectedComponentDecomposition.h"

#include "storm/solver/MinMaxLinearEquationSolver.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
#include "storm/solver/LpSolver.h"

#include "storm/utility/SignalHandler.h"
#include "storm/utility/solver.h"
#include "storm/utility/vector.h"

#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/environment/solver/MinMaxSolverEnvironment.h"

#include "storm/exceptions/UnmetRequirementException.h"

namespace storm {
namespace modelchecker {
namespace helper {

/// Constructs the helper for a discrete-time model (no Markovian states, no exit rates).
/// @param transitionMatrix the (row-grouped) transition matrix of the model; must outlive this helper.
template<typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(
    storm::storage::SparseMatrix<ValueType> const& transitionMatrix)
    : _transitionMatrix(transitionMatrix),
      _markovianStates(nullptr),
      _exitRates(nullptr),
      _backwardTransitions(nullptr),
      _longRunComponentDecomposition(nullptr) {
    // Intentionally left empty.
}

/// Constructs the helper for a continuous-time model with both Markovian and probabilistic
/// states (Markov automaton style).
/// @param transitionMatrix the transition matrix; must outlive this helper.
/// @param markovianStates the states with exponentially distributed sojourn times.
/// @param exitRates the exit rate of each state; must outlive this helper.
template<typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(
    storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates,
    std::vector<ValueType> const& exitRates)
    : _transitionMatrix(transitionMatrix),
      _markovianStates(&markovianStates),
      _exitRates(&exitRates),
      _backwardTransitions(nullptr),
      _longRunComponentDecomposition(nullptr) {
    // Intentionally left empty.
}

/// Constructs the helper for a continuous-time model in which every state is Markovian
/// (CTMC style), so only exit rates are needed.
/// @param transitionMatrix the transition matrix; must outlive this helper.
/// @param exitRates the exit rate of each state; must outlive this helper.
template<typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(
    storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates)
    : _transitionMatrix(transitionMatrix),
      _markovianStates(nullptr),
      _exitRates(&exitRates),
      _backwardTransitions(nullptr),
      _longRunComponentDecomposition(nullptr) {
    // Intentionally left empty.
}

/// Provides pre-computed backward transitions so the helper does not have to compute them itself.
/// Only stores a pointer; the caller keeps ownership and must keep the matrix alive.
template<typename ValueType, bool Nondeterministic>
void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(
    storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
    STORM_LOG_WARN_COND(_backwardTransitions == nullptr,
                        "Backwards transitions were provided but they were already computed or provided before.");
    _backwardTransitions = &backwardTransitions;
}

/// Provides a pre-computed long-run component decomposition (MECs or BSCCs).
/// Only stores a pointer; the caller keeps ownership and must keep the decomposition alive.
template<typename ValueType, bool Nondeterministic>
void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(
    storm::storage::Decomposition<LongRunComponentType> const& decomposition) {
    STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr,
                        "Long Run Component Decomposition was provided but it was already computed or provided before.");
    _longRunComponentDecomposition = &decomposition;
}

/// Computes long-run average probabilities, i.e. the long-run fraction of time spent in psi states.
/// Implemented as an LRA reward computation with state value 1 on psi states, 0 elsewhere,
/// and action value 0 everywhere.
template<typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(
    Environment const& env, storm::storage::BitVector const& psiStates) {
    return computeLongRunAverageValues(
        env,
        [&psiStates](uint64_t stateIndex) {
            return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>();
        },
        [](uint64_t) { return storm::utility::zero<ValueType>(); });
}

/// Computes long-run average rewards for the given reward model by translating its
/// state / state-action / transition rewards into value-getter functions.
template<typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(
    Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
    ValueGetter stateRewardsGetter;
    if (rewardModel.hasStateRewards()) {
        stateRewardsGetter = [&rewardModel](uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
    } else {
        stateRewardsGetter = [](uint64_t) { return storm::utility::zero<ValueType>(); };
    }
    ValueGetter actionRewardsGetter;
    if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
        if (rewardModel.hasTransitionRewards()) {
            // Transition rewards have to be weighted with the transition probabilities, which
            // getStateActionAndTransitionReward does using the transition matrix.
            actionRewardsGetter = [&](uint64_t globalChoiceIndex) {
                return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, this->_transitionMatrix);
            };
        } else {
            actionRewardsGetter = [&](uint64_t globalChoiceIndex) {
                return rewardModel.getStateActionReward(globalChoiceIndex);
            };
        }
    } else {
        actionRewardsGetter = [](uint64_t) { return storm::utility::zero<ValueType>(); };
    }
    return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
}

/// Convenience overload taking optional raw value vectors (either may be nullptr, meaning
/// "all zero") and wrapping them into value-getter functions.
template<typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(
    Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
    ValueGetter stateValuesGetter;
    if (stateValues) {
        stateValuesGetter = [&stateValues](uint64_t stateIndex) { return (*stateValues)[stateIndex]; };
    } else {
        stateValuesGetter = [](uint64_t) { return storm::utility::zero<ValueType>(); };
    }
    ValueGetter actionValuesGetter;
    if (actionValues) {
        actionValuesGetter = [&actionValues](uint64_t globalChoiceIndex) { return (*actionValues)[globalChoiceIndex]; };
    } else {
        actionValuesGetter = [](uint64_t) { return storm::utility::zero<ValueType>(); };
    }
    return computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter);
}

/// Main entry point: computes the long-run average value at every state.
/// @param env solver environment (precision, solver selection, soundness).
/// @param stateRewardsGetter maps a state index to the value collected while residing there.
/// @param actionRewardsGetter maps a global choice index to the value collected when taking it.
/// @return the LRA value for each state of the model.
template<typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(
    Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter) {
    // We will compute the long run average value for each MEC individually and then set-up an
    // Equation system to compute the value also at non-mec states.
    // For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of
    // Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13

    // Prepare an environment for the underlying solvers.
    auto underlyingSolverEnvironment = env;
    if (env.solver().isForceSoundness()) {
        // For sound computations, the error in the MECS plus the error in the remaining system
        // should not exceed the user defined precision, so each part gets half of it.
        storm::RationalNumber newPrecision =
            env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2);
        underlyingSolverEnvironment.solver().minMax().setPrecision(newPrecision);
        underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(
            env.solver().lra().getRelativeTerminationCriterion());
        underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(
            newPrecision, env.solver().lra().getRelativeTerminationCriterion());
        underlyingSolverEnvironment.solver().lra().setPrecision(newPrecision);
    }

    // If requested, allocate memory for the choices made
    if (Nondeterministic && this->isProduceSchedulerSet()) {
        if (!_producedOptimalChoices.is_initialized()) {
            _producedOptimalChoices.emplace();
        }
        _producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
    }
    STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(),
                     "Scheduler production enabled for deterministic model.");

    // Decompose the model to their bottom components (MECS or BSCCS)
    createDecomposition();

    // Compute the long-run average for all components in isolation.
    std::vector<ValueType> componentLraValues;
    componentLraValues.reserve(_longRunComponentDecomposition->size());
    for (auto const& c : *_longRunComponentDecomposition) {
        componentLraValues.push_back(
            computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c));
    }

    // Solve the resulting SSP where end components are collapsed into single auxiliary states
    return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues);
}

/// @return true iff the model is a continuous-time model (exit rates were given).
/// Also sanity-checks that Markovian states never come without exit rates.
template<typename ValueType, bool Nondeterministic>
bool SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const {
    STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr),
                     "Inconsistent information given: Have Markovian states but no exit rates.");
    return _exitRates != nullptr;
}

// Explicit template instantiations (reconstructed; the mangled original showed five
// instantiations with their argument lists stripped — TODO confirm against upstream).
template class SparseInfiniteHorizonHelper<double, true>;
template class SparseInfiniteHorizonHelper<storm::RationalNumber, true>;

template class SparseInfiniteHorizonHelper<double, false>;
template class SparseInfiniteHorizonHelper<storm::RationalNumber, false>;
template class SparseInfiniteHorizonHelper<storm::RationalFunction, false>;

}  // namespace helper
}  // namespace modelchecker
}  // namespace storm