55 changed files with 3078 additions and 1811 deletions
  25  resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h
  12  src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp
  25  src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp
  17  src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp
  25  src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp
  55  src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp
   6  src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h
  54  src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp
   7  src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h
 562  src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
  21  src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h
 475  src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp
  28  src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h
  47  src/storm/modelchecker/helper/ModelCheckerHelper.cpp
  74  src/storm/modelchecker/helper/ModelCheckerHelper.h
  98  src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
 111  src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
 147  src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
  87  src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h
 431  src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
  75  src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
 161  src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
 140  src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h
 478  src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
 101  src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
  27  src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h
 491  src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
 149  src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
  46  src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h
  46  src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h
  12  src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp
  21  src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp
  26  src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
  26  src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp
   4  src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h
  15  src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp
   6  src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h
 467  src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
  13  src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
  12  src/storm/models/sparse/Ctmc.cpp
   6  src/storm/models/sparse/Ctmc.h
  33  src/storm/models/sparse/StandardRewardModel.cpp
   9  src/storm/models/sparse/StandardRewardModel.h
   4  src/storm/models/symbolic/Ctmc.cpp
   6  src/storm/models/symbolic/Ctmc.h
  12  src/storm/solver/GmmxxMultiplier.cpp
   4  src/storm/storage/MaximalEndComponent.cpp
   5  src/storm/storage/MaximalEndComponent.h
  86  src/storm/storage/SparseMatrix.cpp
  13  src/storm/storage/SparseMatrix.h
   1  src/storm/storage/dd/DdType.h
   5  src/storm/utility/vector.h
   7  src/test/storm/modelchecker/csl/LraCtmcCslModelCheckerTest.cpp
   2  src/test/storm/modelchecker/csl/MarkovAutomatonCslModelCheckerTest.cpp
  73  src/test/storm/storage/SparseMatrixTest.cpp

src/storm/modelchecker/helper/ModelCheckerHelper.cpp
@@ -0,0 +1,47 @@
#include "ModelCheckerHelper.h"

#include "storm/utility/macros.h"

namespace storm {
    namespace modelchecker {
        namespace helper {

            template <typename ValueType, storm::dd::DdType DdType>
            void ModelCheckerHelper<ValueType, DdType>::setRelevantStates(StateSet const& relevantStates) {
                _relevantStates = relevantStates;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            void ModelCheckerHelper<ValueType, DdType>::clearRelevantStates() {
                _relevantStates = boost::none;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            bool ModelCheckerHelper<ValueType, DdType>::hasRelevantStates() const {
                return _relevantStates.is_initialized();
            }

            template <typename ValueType, storm::dd::DdType DdType>
            boost::optional<typename ModelCheckerHelper<ValueType, DdType>::StateSet> const& ModelCheckerHelper<ValueType, DdType>::getOptionalRelevantStates() const {
                return _relevantStates;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            typename ModelCheckerHelper<ValueType, DdType>::StateSet const& ModelCheckerHelper<ValueType, DdType>::getRelevantStates() const {
                STORM_LOG_ASSERT(hasRelevantStates(), "Retrieving relevant states although none have been set.");
                return _relevantStates.get();
            }

            template class ModelCheckerHelper<double, storm::dd::DdType::None>;
            template class ModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::None>;
            template class ModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::None>;

            template class ModelCheckerHelper<double, storm::dd::DdType::Sylvan>;
            template class ModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::Sylvan>;
            template class ModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::Sylvan>;

            template class ModelCheckerHelper<double, storm::dd::DdType::CUDD>;

        }
    }
}

src/storm/modelchecker/helper/ModelCheckerHelper.h
@@ -0,0 +1,74 @@
#pragma once

#include <type_traits>
#include <boost/optional.hpp>

#include "storm/storage/dd/DdType.h"
#include "storm/storage/dd/Bdd.h"

#include "storm/storage/BitVector.h"

namespace storm {
    namespace modelchecker {
        namespace helper {

            /*!
             * Helper class for solving a model checking query.
             * @tparam VT The value type of a single value.
             * @tparam DdType The used library for Dds (or None in case of a sparse representation).
             */
            template <typename VT, storm::dd::DdType DdType = storm::dd::DdType::None>
            class ModelCheckerHelper {
            public:
                typedef VT ValueType;

                ModelCheckerHelper() = default;
                virtual ~ModelCheckerHelper() = default;

                /*!
                 * Identifies a subset of the model states.
                 */
                using StateSet = typename std::conditional<DdType == storm::dd::DdType::None, storm::storage::BitVector, storm::dd::Bdd<DdType>>::type;

                /*!
                 * Sets relevant states.
                 * If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
                 * In this case, an arbitrary result can be set to non-relevant states.
                 */
                void setRelevantStates(StateSet const& relevantStates);

                /*!
                 * Clears the relevant states.
                 * If no relevant states are set, it is assumed that a result is required for all (initial and non-initial) states.
                 */
                void clearRelevantStates();

                /*!
                 * @return true if there are relevant states set.
                 * If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
                 * In this case, an arbitrary result can be set to non-relevant states.
                 */
                bool hasRelevantStates() const;

                /*!
                 * @return relevant states (if there are any) or boost::none (otherwise).
                 * If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
                 * In this case, an arbitrary result can be set to non-relevant states.
                 */
                boost::optional<StateSet> const& getOptionalRelevantStates() const;

                /*!
                 * @pre Relevant states have to be set before calling this.
                 * @return the relevant states. Should only be called if there are any.
                 * If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
                 * In this case, an arbitrary result can be set to non-relevant states.
                 */
                StateSet const& getRelevantStates() const;

            private:
                boost::optional<StateSet> _relevantStates;
            };
        }
    }
}
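
For the sparse engine, StateSet resolves to storm::storage::BitVector, so restricting a helper to a few states of interest might look like the following minimal sketch (the free function and the choice of state 0 are hypothetical, purely for illustration):

    // Minimal sketch (sparse case, DdType::None): only the result for state 0 needs to be correct.
    void restrictToInitialState(storm::modelchecker::helper::ModelCheckerHelper<double>& helper, uint64_t numberOfStates) {
        storm::storage::BitVector relevant(numberOfStates);  // all bits start out false
        relevant.set(0);                                      // hypothetical: state 0 is the single initial state
        helper.setRelevantStates(relevant);
    }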

src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
@@ -0,0 +1,98 @@
#include "SingleValueModelCheckerHelper.h"

namespace storm {
    namespace modelchecker {
        namespace helper {

            template <typename ValueType, storm::dd::DdType DdType>
            SingleValueModelCheckerHelper<ValueType, DdType>::SingleValueModelCheckerHelper() : _produceScheduler(false) {
                // Intentionally left empty
            }

            template <typename ValueType, storm::dd::DdType DdType>
            void SingleValueModelCheckerHelper<ValueType, DdType>::setOptimizationDirection(storm::solver::OptimizationDirection const& direction) {
                _optimizationDirection = direction;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            void SingleValueModelCheckerHelper<ValueType, DdType>::clearOptimizationDirection() {
                _optimizationDirection = boost::none;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            bool SingleValueModelCheckerHelper<ValueType, DdType>::isOptimizationDirectionSet() const {
                return _optimizationDirection.is_initialized();
            }

            template <typename ValueType, storm::dd::DdType DdType>
            storm::solver::OptimizationDirection const& SingleValueModelCheckerHelper<ValueType, DdType>::getOptimizationDirection() const {
                STORM_LOG_ASSERT(isOptimizationDirectionSet(), "Requested optimization direction but none was set.");
                return _optimizationDirection.get();
            }

            template <typename ValueType, storm::dd::DdType DdType>
            bool SingleValueModelCheckerHelper<ValueType, DdType>::minimize() const {
                return storm::solver::minimize(getOptimizationDirection());
            }

            template <typename ValueType, storm::dd::DdType DdType>
            bool SingleValueModelCheckerHelper<ValueType, DdType>::maximize() const {
                return storm::solver::maximize(getOptimizationDirection());
            }

            template <typename ValueType, storm::dd::DdType DdType>
            boost::optional<storm::solver::OptimizationDirection> SingleValueModelCheckerHelper<ValueType, DdType>::getOptionalOptimizationDirection() const {
                return _optimizationDirection;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            void SingleValueModelCheckerHelper<ValueType, DdType>::setValueThreshold(storm::logic::ComparisonType const& comparisonType, ValueType const& threshold) {
                _valueThreshold = std::make_pair(comparisonType, threshold);
            }

            template <typename ValueType, storm::dd::DdType DdType>
            void SingleValueModelCheckerHelper<ValueType, DdType>::clearValueThreshold() {
                _valueThreshold = boost::none;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            bool SingleValueModelCheckerHelper<ValueType, DdType>::isValueThresholdSet() const {
                return _valueThreshold.is_initialized();
            }

            template <typename ValueType, storm::dd::DdType DdType>
            storm::logic::ComparisonType const& SingleValueModelCheckerHelper<ValueType, DdType>::getValueThresholdComparisonType() const {
                STORM_LOG_ASSERT(isValueThresholdSet(), "Value Threshold comparison type was requested but not set before.");
                return _valueThreshold->first;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            ValueType const& SingleValueModelCheckerHelper<ValueType, DdType>::getValueThresholdValue() const {
                STORM_LOG_ASSERT(isValueThresholdSet(), "Value Threshold value was requested but not set before.");
                return _valueThreshold->second;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            void SingleValueModelCheckerHelper<ValueType, DdType>::setProduceScheduler(bool value) {
                _produceScheduler = value;
            }

            template <typename ValueType, storm::dd::DdType DdType>
            bool SingleValueModelCheckerHelper<ValueType, DdType>::isProduceSchedulerSet() const {
                return _produceScheduler;
            }

            template class SingleValueModelCheckerHelper<double, storm::dd::DdType::None>;
            template class SingleValueModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::None>;
            template class SingleValueModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::None>;

            template class SingleValueModelCheckerHelper<double, storm::dd::DdType::Sylvan>;
            template class SingleValueModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::Sylvan>;
            template class SingleValueModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::Sylvan>;

            template class SingleValueModelCheckerHelper<double, storm::dd::DdType::CUDD>;

        }
    }
}

src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
@@ -0,0 +1,111 @@
#pragma once

#include "ModelCheckerHelper.h"

#include "storm/solver/OptimizationDirection.h"
#include "storm/logic/ComparisonType.h"

namespace storm {
    namespace modelchecker {
        namespace helper {

            /*!
             * Helper for model checking queries where we are interested in (optimizing) a single value per state.
             * @tparam ValueType The type of a value
             * @tparam DdType The used library for Dds (or None in case of a sparse representation)
             */
            template <typename ValueType, storm::dd::DdType DdType = storm::dd::DdType::None>
            class SingleValueModelCheckerHelper : public ModelCheckerHelper<ValueType, DdType> {
            public:

                SingleValueModelCheckerHelper();

                /*!
                 * Sets the optimization direction, i.e., whether we want to minimize or maximize the value for each state.
                 * Has no effect for models without nondeterminism.
                 * Has to be set if there is nondeterminism in the model.
                 */
                void setOptimizationDirection(storm::solver::OptimizationDirection const& direction);

                /*!
                 * Clears the optimization direction if it was set before.
                 */
                void clearOptimizationDirection();

                /*!
                 * @return true if there is an optimization direction set
                 */
                bool isOptimizationDirectionSet() const;

                /*!
                 * @pre An optimization direction has to be set before calling this.
                 * @return the optimization direction.
                 */
                storm::solver::OptimizationDirection const& getOptimizationDirection() const;

                /*!
                 * @pre An optimization direction has to be set before calling this.
                 * @return true iff the optimization goal is to minimize the value for each state
                 */
                bool minimize() const;

                /*!
                 * @pre An optimization direction has to be set before calling this.
                 * @return true iff the optimization goal is to maximize the value for each state
                 */
                bool maximize() const;

                /*!
                 * @return The optimization direction (if it was set)
                 */
                boost::optional<storm::solver::OptimizationDirection> getOptionalOptimizationDirection() const;

                /*!
                 * Sets a goal threshold for the value at each state. If such a threshold is set, it is assumed that we are only interested
                 * in the satisfaction of the threshold. Setting this allows the helper to compute values only up to the precision
                 * where satisfaction of the threshold can be decided.
                 * @param comparisonType The relation used when comparing computed values (left hand side) with the given threshold value (right hand side).
                 * @param thresholdValue The value used on the right hand side of the comparison relation.
                 */
                void setValueThreshold(storm::logic::ComparisonType const& comparisonType, ValueType const& thresholdValue);

                /*!
                 * Clears the value threshold if it was set before.
                 */
                void clearValueThreshold();

                /*!
                 * @return true, if a value threshold has been set.
                 */
                bool isValueThresholdSet() const;

                /*!
                 * @pre A value threshold has to be set before calling this.
                 * @return The relation used when comparing computed values (left hand side) with the specified threshold value (right hand side).
                 */
                storm::logic::ComparisonType const& getValueThresholdComparisonType() const;

                /*!
                 * @pre A value threshold has to be set before calling this.
                 * @return The value used on the right hand side of the comparison relation.
                 */
                ValueType const& getValueThresholdValue() const;

                /*!
                 * Sets whether an optimal scheduler shall be constructed during the computation.
                 */
                void setProduceScheduler(bool value);

                /*!
                 * @return whether an optimal scheduler shall be constructed during the computation
                 */
                bool isProduceSchedulerSet() const;

            private:
                boost::optional<storm::solver::OptimizationDirection> _optimizationDirection;
                boost::optional<std::pair<storm::logic::ComparisonType, ValueType>> _valueThreshold;
                bool _produceScheduler;
            };
        }
    }
}
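
Putting the setters together, a checker handling a threshold query could configure the helper roughly as follows; the concrete direction, comparison, and bound are made-up illustrations, not values taken from this changeset:

    // Illustrative configuration for a "maximize, compare against >= 0.9" query with scheduler extraction.
    void configureForThresholdQuery(storm::modelchecker::helper::SingleValueModelCheckerHelper<double>& helper) {
        helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
        helper.setValueThreshold(storm::logic::ComparisonType::GreaterEqual, 0.9);  // only satisfaction of the bound matters
        helper.setProduceScheduler(true);                                           // also extract an optimal scheduler
    }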

src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
@@ -0,0 +1,147 @@
#include "HybridInfiniteHorizonHelper.h"

#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h"

#include "storm/models/symbolic/NondeterministicModel.h"

#include "storm/storage/SparseMatrix.h"

#include "storm/utility/macros.h"

#include "storm/exceptions/NotSupportedException.h"

namespace storm {
    namespace modelchecker {
        namespace helper {

            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr) {
                STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template Parameter does not match model type.");
            }

            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRateVector) {
                STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template Parameter does not match model type.");
            }

            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(&exitRateVector) {
                STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template Parameter does not match model type.");
            }

            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates) {
                // Convert this query to an instance for the sparse engine.
                // Create ODD for the translation.
                storm::dd::Odd odd = _model.getReachableStates().createOdd();
                // Translate all required components
                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
                if (Nondeterministic) {
                    explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
                } else {
                    explicitTransitionMatrix = _transitionMatrix.toMatrix(odd, odd);
                }
                std::vector<ValueType> explicitExitRateVector;
                storm::storage::BitVector explicitMarkovianStates;
                if (isContinuousTime()) {
                    explicitExitRateVector = _exitRates->toVector(odd);
                    if (_markovianStates) {
                        explicitMarkovianStates = _markovianStates->toVector(odd);
                    }
                }
                auto sparseHelper = createSparseHelper(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector, odd);
                auto explicitResult = sparseHelper->computeLongRunAverageProbabilities(env, psiStates.toVector(odd));
                return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
            }

            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel) {
                // Convert this query to an instance for the sparse engine.
                // Create ODD for the translation.
                storm::dd::Odd odd = _model.getReachableStates().createOdd();

                // Translate all required components
                // Transitions and rewards
                storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
                std::vector<ValueType> explicitStateRewards, explicitActionRewards;
                if (rewardModel.hasStateRewards()) {
                    explicitStateRewards = rewardModel.getStateRewardVector().toVector(odd);
                }
                if (Nondeterministic && rewardModel.hasStateActionRewards()) {
                    // Matrix and action-based vector have to be produced at the same time to guarantee the correct order
                    auto matrixRewards = _transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
                    explicitTransitionMatrix = std::move(matrixRewards.first);
                    explicitActionRewards = std::move(matrixRewards.second);
                } else {
                    // Translate matrix only
                    if (Nondeterministic) {
                        explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
                    } else {
                        explicitTransitionMatrix = _transitionMatrix.toMatrix(odd, odd);
                    }
                    if (rewardModel.hasStateActionRewards()) {
                        // For deterministic models we can translate the action rewards easily
                        explicitActionRewards = rewardModel.getStateActionRewardVector().toVector(odd);
                    }
                }
                STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine.");
                // Continuous time information
                std::vector<ValueType> explicitExitRateVector;
                storm::storage::BitVector explicitMarkovianStates;
                if (isContinuousTime()) {
                    explicitExitRateVector = _exitRates->toVector(odd);
                    if (_markovianStates) {
                        explicitMarkovianStates = _markovianStates->toVector(odd);
                    }
                }
                auto sparseHelper = createSparseHelper(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector, odd);
                auto explicitResult = sparseHelper->computeLongRunAverageValues(env, rewardModel.hasStateRewards() ? &explicitStateRewards : nullptr, rewardModel.hasStateActionRewards() ? &explicitActionRewards : nullptr);
                return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
            }

            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            bool HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::isContinuousTime() const {
                STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates.");
                return _exitRates != nullptr;
            }

            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            template <bool N, std::enable_if_t<N, int>>
            std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const {
                std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> result;
                if (isContinuousTime()) {
                    result = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix, markovianStates, exitRates);
                } else {
                    result = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix);
                }
                storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*result, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
                STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction not supported in Hybrid engine.");
                return result;
            }

            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            template <bool N, std::enable_if_t<!N, int>>
            std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const {
                std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> result;
                if (isContinuousTime()) {
                    result = std::make_unique<storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix, exitRates);
                } else {
                    result = std::make_unique<storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix);
                }
                storm::modelchecker::helper::setInformationFromOtherHelperDeterministic(*result, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
                return result;
            }

            template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::CUDD, false>;
            template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::CUDD, true>;
            template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::Sylvan, false>;
            template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::Sylvan, true>;
            template class HybridInfiniteHorizonHelper<storm::RationalNumber, storm::dd::DdType::Sylvan, false>;
            template class HybridInfiniteHorizonHelper<storm::RationalNumber, storm::dd::DdType::Sylvan, true>;
            template class HybridInfiniteHorizonHelper<storm::RationalFunction, storm::dd::DdType::Sylvan, false>;

        }
    }
}
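
Both compute methods above follow the same translate-then-delegate pattern: build an ODD over the reachable states, use it to turn the symbolic matrix and vectors into explicit ones, run the sparse helper, and wrap the explicit result back into a HybridQuantitativeCheckResult. A condensed sketch of that pattern for the deterministic case; the free function name and its parameters are assumptions made only for this illustration:

    // Condensed sketch of the translation used above (deterministic case).
    template<storm::dd::DdType DdType, typename ValueType>
    std::vector<ValueType> lraViaSparseEngine(storm::models::symbolic::Model<DdType, ValueType> const& model,
                                              storm::dd::Add<DdType, ValueType> const& transitionAdd,
                                              storm::dd::Bdd<DdType> const& psiBdd,
                                              storm::Environment const& env) {
        storm::dd::Odd odd = model.getReachableStates().createOdd();                        // fixes the symbolic-to-explicit state order
        storm::storage::SparseMatrix<ValueType> matrix = transitionAdd.toMatrix(odd, odd);  // symbolic -> explicit matrix
        storm::storage::BitVector psi = psiBdd.toVector(odd);                               // same ODD => consistent state indices
        storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(matrix);
        return helper.computeLongRunAverageProbabilities(env, psi);                         // explicit result, ordered by the ODD
    }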

src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h
@@ -0,0 +1,87 @@
#pragma once

#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"

#include "storm/modelchecker/results/HybridQuantitativeCheckResult.h"

#include "storm/models/symbolic/Model.h"
#include "storm/models/symbolic/StandardRewardModel.h"

#include "storm/storage/dd/DdManager.h"
#include "storm/storage/dd/Add.h"
#include "storm/storage/dd/Bdd.h"

namespace storm {
    class Environment;

    namespace storage {
        template <typename ValueType> class SparseMatrix;
        class BitVector;
    }

    namespace modelchecker {
        namespace helper {

            template <typename ValueType, bool Nondeterministic> class SparseInfiniteHorizonHelper;

            /*!
             * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
             */
            template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
            class HybridInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType, DdType> {

            public:
                /*!
                 * Initializes the helper for a discrete time model (MDP or DTMC)
                 */
                HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix);

                /*!
                 * Initializes the helper for a Markov Automaton
                 */
                HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& _exitRates);

                /*!
                 * Initializes the helper for a CTMC
                 * @note The transition matrix must be probabilistic
                 */
                HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& _exitRates);

                /*!
                 * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
                 * @return a value for each state
                 */
                std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates);

                /*!
                 * Computes the long run average rewards, i.e., the average reward collected per time unit
                 * @return a value for each state
                 */
                std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel);

            protected:

                /*!
                 * @return true iff this is a computation on a continuous time model (i.e. MA or CTMC)
                 */
                bool isContinuousTime() const;

                /*!
                 * @return a sparse infinite horizon helper with the provided explicit model information.
                 * @param exitRates exit rates (ignored for discrete time models)
                 * @param markovianStates Markovian states (ignored for non-MA models)
                 */
                template <bool N = Nondeterministic, std::enable_if_t<N, int> = 0>
                std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const;
                template <bool N = Nondeterministic, std::enable_if_t<!N, int> = 0>
                std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const;

            private:
                storm::models::symbolic::Model<DdType, ValueType> const& _model;
                storm::dd::Add<DdType, ValueType> const& _transitionMatrix;
                storm::dd::Bdd<DdType> const* _markovianStates;
                storm::dd::Add<DdType, ValueType> const* _exitRates;
            };
        }
    }
}
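
The three constructors correspond to DTMC/MDP, MA, and CTMC inputs. As a hedged sketch, a symbolic CTMC checker could instantiate the deterministic variant like this; probabilityMatrix and exitRateVector are assumed to have been derived from the CTMC's rate matrix by the caller, and the wrapper function itself is hypothetical:

    // Illustrative sketch for a symbolic CTMC (Nondeterministic = false); all inputs are assumed to exist.
    template<storm::dd::DdType DdType, typename ValueType>
    std::unique_ptr<storm::modelchecker::HybridQuantitativeCheckResult<DdType, ValueType>> checkLraCtmc(
            storm::Environment const& env,
            storm::models::symbolic::Ctmc<DdType, ValueType> const& ctmc,
            storm::dd::Add<DdType, ValueType> const& probabilityMatrix,
            storm::dd::Add<DdType, ValueType> const& exitRateVector,
            storm::dd::Bdd<DdType> const& psiStates) {
        storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(ctmc, probabilityMatrix, exitRateVector);
        return helper.computeLongRunAverageProbabilities(env, psiStates);
    }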

src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
@@ -0,0 +1,431 @@
#include "SparseDeterministicInfiniteHorizonHelper.h"

#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"

#include "storm/storage/SparseMatrix.h"
#include "storm/storage/StronglyConnectedComponentDecomposition.h"
#include "storm/storage/Scheduler.h"

#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
#include "storm/solver/LpSolver.h"

#include "storm/utility/SignalHandler.h"
#include "storm/utility/solver.h"
#include "storm/utility/vector.h"

#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/environment/solver/TopologicalSolverEnvironment.h"

#include "storm/exceptions/UnmetRequirementException.h"
#include "storm/exceptions/NotSupportedException.h"

namespace storm {
    namespace modelchecker {
        namespace helper {

            template <typename ValueType>
            SparseDeterministicInfiniteHorizonHelper<ValueType>::SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : SparseInfiniteHorizonHelper<ValueType, false>(transitionMatrix) {
                // Intentionally left empty.
            }

            template <typename ValueType>
            SparseDeterministicInfiniteHorizonHelper<ValueType>::SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates) : SparseInfiniteHorizonHelper<ValueType, false>(transitionMatrix, exitRates) {
                // For the CTMC case we assert that the caller actually provided the probabilistic transitions
                STORM_LOG_ASSERT(this->_transitionMatrix.isProbabilistic(), "Non-probabilistic transitions");
            }

            template <typename ValueType>
            void SparseDeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() {
                if (this->_longRunComponentDecomposition == nullptr) {
                    // The decomposition has not been provided or computed, yet.
                    this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::StronglyConnectedComponentDecomposition<ValueType>>(this->_transitionMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
                    this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
                }
            }
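
            // Note: for a deterministic model the long-run components are exactly the bottom SCCs.
            // With probability one the chain eventually enters some BSCC and never leaves it again, so the
            // LRA value of every state is a weighted combination of the BSCC values; the weights (reachability
            // probabilities) are accounted for by the SSP system built in buildAndSolveSsp below.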

            template <typename ValueType>
            ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& component) {
                // For deterministic models, we compute the LRA for a BSCC.

                STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");

                auto trivialResult = computeLraForTrivialBscc(env, stateValueGetter, actionValueGetter, component);
                if (trivialResult.first) {
                    return trivialResult.second;
                }

                // Solve the nontrivial BSCC with the method specified in the settings.
                storm::solver::LraMethod method = env.solver().lra().getDetLraMethod();
                if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isDetLraMethodSetFromDefault() && method == storm::solver::LraMethod::ValueIteration) {
                    method = storm::solver::LraMethod::GainBiasEquations;
                    STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
                } else if (env.solver().isForceSoundness() && env.solver().lra().isDetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
                    method = storm::solver::LraMethod::ValueIteration;
                    STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
                }
                STORM_LOG_TRACE("Computing LRA for BSCC of size " << component.size() << " using '" << storm::solver::toString(method) << "'.");
                if (method == storm::solver::LraMethod::ValueIteration) {
                    return computeLraForBsccVi(env, stateValueGetter, actionValueGetter, component);
                } else if (method == storm::solver::LraMethod::LraDistributionEquations) {
                    // We only need the first element of the pair as the LRA distribution is not relevant at this point.
                    return computeLraForBsccLraDistr(env, stateValueGetter, actionValueGetter, component).first;
                }
                STORM_LOG_WARN_COND(method == storm::solver::LraMethod::GainBiasEquations, "Unsupported LRA method selected. Defaulting to " << storm::solver::toString(storm::solver::LraMethod::GainBiasEquations) << ".");
                // We don't need the bias values.
                return computeLraForBsccGainBias(env, stateValueGetter, actionValueGetter, component).first;
            }

            template <typename ValueType>
            std::pair<bool, ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& component) {

                // For deterministic models, we can catch the case where all values are the same. This includes the special case where the BSCC consists of just one state.
                bool first = true;
                ValueType val = storm::utility::zero<ValueType>();
                for (auto const& element : component) {
                    auto state = internal::getComponentElementState(element);
                    STORM_LOG_ASSERT(state == *internal::getComponentElementChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
                    ValueType curr = stateValueGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionValueGetter(state) : actionValueGetter(state));
                    if (first) {
                        val = curr;
                        first = false;
                    } else if (val != curr) {
                        return {false, storm::utility::zero<ValueType>()};
                    }
                }
                // All values are the same.
                return {true, val};
            }

            template <>
            storm::RationalFunction SparseDeterministicInfiniteHorizonHelper<storm::RationalFunction>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& bscc) {
                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The requested method for LRA computation is not supported for parametric models.");
            }

            template <typename ValueType>
            ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& bscc) {

                // Collect parameters of the computation.
                ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());

                // Now create a helper and perform the algorithm.
                if (this->isContinuousTime()) {
                    // We assume a CTMC (with deterministic timed states and no instant states).
                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs> viHelper(bscc, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
                    return viHelper.performValueIteration(env, stateValueGetter, actionValueGetter, this->_exitRates);
                } else {
                    // We assume a DTMC (with deterministic timed states and no instant states).
                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs> viHelper(bscc, this->_transitionMatrix, aperiodicFactor);
                    return viHelper.performValueIteration(env, stateValueGetter, actionValueGetter);
                }
            }

            template <typename ValueType>
            std::pair<ValueType, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccGainBias(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc) {
                // We build the equation system as in Line 3 of Algorithm 3 from
                // Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017)
                // https://doi.org/10.1007/978-3-319-68167-2_25
                // The first variable corresponds to the gain of the BSCC whereas the subsequent variables yield the bias for each state s_1, s_2, ....
                // No bias variable for s_0 is needed since it is always set to zero, yielding an nxn equation system matrix.
                // To make this work for a CTMC, we could uniformize the model. This preserves LRA and ensures that we can compute the
                // LRA as for a DTMC (the sojourn time in each state is the same). If we then multiply the equations with the uniformization rate,
                // the uniformization rate cancels out. Hence, we obtain the equation system below.

                // Get a mapping from global state indices to local ones.
                std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
                uint64_t localIndex = 0;
                for (auto const& globalIndex : bscc) {
                    toLocalIndexMap[globalIndex] = localIndex;
                    ++localIndex;
                }

                // Prepare an environment for the underlying equation solver.
                auto subEnv = env;
                if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
                    // The topological solver does not make any sense since the BSCC is connected.
                    subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
                }
                subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());

                // Build the equation system matrix and vector.
                storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
                bool isEquationSystemFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
                storm::storage::SparseMatrixBuilder<ValueType> builder(bscc.size(), bscc.size());
                std::vector<ValueType> eqSysVector;
                eqSysVector.reserve(bscc.size());
                // The first row asserts that the weighted bias variables and the reward at s_0 sum up to the gain.
                uint64_t row = 0;
                ValueType entryValue;
                for (auto const& globalState : bscc) {
                    ValueType rateAtState = this->_exitRates ? (*this->_exitRates)[globalState] : storm::utility::one<ValueType>();
                    // Coefficient for the gain variable
                    if (isEquationSystemFormat) {
                        // '1-0' in row 0 and -(-1) in other rows
                        builder.addNextValue(row, 0, storm::utility::one<ValueType>());
                    } else if (row > 0) {
                        // No coefficient in row 0, otherwise subtract the gain.
                        builder.addNextValue(row, 0, -storm::utility::one<ValueType>());
                    }
                    // Compute the weighted sum over the successor states. As this is a BSCC, each successor state will again be in the BSCC.
                    if (row > 0) {
                        if (isEquationSystemFormat) {
                            builder.addDiagonalEntry(row, rateAtState);
                        } else if (!storm::utility::isOne(rateAtState)) {
                            builder.addDiagonalEntry(row, storm::utility::one<ValueType>() - rateAtState);
                        }
                    }
                    for (auto const& entry : this->_transitionMatrix.getRow(globalState)) {
                        uint64_t col = toLocalIndexMap[entry.getColumn()];
                        if (col == 0) {
                            // Skip the transition to state s_0. This corresponds to setting the bias of s_0 to zero.
                            continue;
                        }
                        entryValue = entry.getValue() * rateAtState;
                        if (isEquationSystemFormat) {
                            entryValue = -entryValue;
                        }
                        builder.addNextValue(row, col, entryValue);
                    }
                    eqSysVector.push_back(stateValuesGetter(globalState) + rateAtState * actionValuesGetter(globalState));
                    ++row;
                }

                // Create a linear equation solver.
                auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
                // Check solver requirements.
                auto requirements = solver->getRequirements(subEnv);
                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
                // Todo: Find bounds on the bias variables. Just inserting the maximal value from the vector probably does not work.

                std::vector<ValueType> eqSysSol(bscc.size(), storm::utility::zero<ValueType>());
                // Take the mean of the rewards as an initial guess for the gain.
                //eqSysSol.front() = std::accumulate(eqSysVector.begin(), eqSysVector.end(), storm::utility::zero<ValueType>()) / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size());
                solver->solveEquations(subEnv, eqSysSol, eqSysVector);

                ValueType gain = eqSysSol.front();
                // Insert the bias value for state s_0.
                eqSysSol.front() = storm::utility::zero<ValueType>();
                // Return the gain and the bias values.
                return std::pair<ValueType, std::vector<ValueType>>(std::move(gain), std::move(eqSysSol));
            }
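
            // Worked example (for intuition only, not part of the algorithm): consider a DTMC BSCC {s_0, s_1} with
            // P(s_0,s_1) = P(s_1,s_0) = 1 and rewards r(s_0) = 2, r(s_1) = 0. With bias(s_0) fixed to zero, the
            // equations "gain + bias(s) = r(s) + sum over s' of P(s,s') * bias(s')" become
            //     g       = 2 + b_1
            //     g + b_1 = 0
            // which yields g = 1 and b_1 = -1; the gain g = 1 matches the long-run average reward (2 + 0) / 2.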

            template <typename ValueType>
            std::pair<ValueType, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccLraDistr(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc) {

                // Let A be an auxiliary matrix with A[s,s] = R(s,s) - r(s) and A[s,s'] = R(s,s') for s,s' in the BSCC and s != s'.
                // We build and solve the equation system for
                // x*A=0 & x_0+...+x_n=1  <=>  A^t*x=0=x-x & x_0+...+x_n=1  <=>  (1+A^t)*x = x & 1-x_0-...-x_{n-1}=x_n
                // Then, x[i] will be the fraction of the time we are in state i.

                // The equation system below assumes that the BSCC consists of more than one state, so handle the single-state case separately.
                if (bscc.size() == 1) {
                    ValueType lraValue = stateValuesGetter(*bscc.begin()) + (this->isContinuousTime() ? (*this->_exitRates)[*bscc.begin()] * actionValuesGetter(*bscc.begin()) : actionValuesGetter(*bscc.begin()));
                    return { lraValue, {storm::utility::one<ValueType>()} };
                }

                // Prepare an environment for the underlying linear equation solver.
                auto subEnv = env;
                if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
                    // The topological solver does not make any sense since the BSCC is connected.
                    subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
                }
                subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());

                // Get a mapping from global state indices to local ones as well as a bitvector containing states within the BSCC.
                std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
                storm::storage::BitVector bsccStates(this->_transitionMatrix.getRowCount(), false);
                uint64_t localIndex = 0;
                for (auto const& globalIndex : bscc) {
                    bsccStates.set(globalIndex, true);
                    toLocalIndexMap[globalIndex] = localIndex;
                    ++localIndex;
                }

                // Build the auxiliary matrix A.
                auto auxMatrix = this->_transitionMatrix.getSubmatrix(false, bsccStates, bsccStates, true); // add diagonal entries!
                uint64_t row = 0;
                for (auto const& globalIndex : bscc) {
                    ValueType rateAtState = this->_exitRates ? (*this->_exitRates)[globalIndex] : storm::utility::one<ValueType>();
                    for (auto& entry : auxMatrix.getRow(row)) {
                        if (entry.getColumn() == row) {
                            // This value is non-zero since we have a BSCC with more than one state.
                            entry.setValue(rateAtState * (entry.getValue() - storm::utility::one<ValueType>()));
                        } else if (this->isContinuousTime()) {
                            entry.setValue(entry.getValue() * rateAtState);
                        }
                    }
                    ++row;
                }
                assert(row == auxMatrix.getRowCount());

                // We need to consider A^t. This will not delete diagonal entries since they are non-zero.
                auxMatrix = auxMatrix.transpose();

                // Check whether we need the fixpoint characterization.
                storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
                bool isFixpointFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem;
                if (isFixpointFormat) {
                    // Add a 1 on the diagonal.
                    for (row = 0; row < auxMatrix.getRowCount(); ++row) {
                        for (auto& entry : auxMatrix.getRow(row)) {
                            if (entry.getColumn() == row) {
                                entry.setValue(storm::utility::one<ValueType>() + entry.getValue());
                            }
                        }
                    }
                }

                // We now build the equation system matrix.
                // We can drop the last row of A and add ones in this row instead to assert that the variables sum up to one.
                // Phase 1: replace the existing entries of the last row with ones.
                uint64_t col = 0;
                uint64_t lastRow = auxMatrix.getRowCount() - 1;
                for (auto& entry : auxMatrix.getRow(lastRow)) {
                    entry.setColumn(col);
                    if (isFixpointFormat) {
                        if (col == lastRow) {
                            entry.setValue(storm::utility::zero<ValueType>());
                        } else {
                            entry.setValue(-storm::utility::one<ValueType>());
                        }
                    } else {
                        entry.setValue(storm::utility::one<ValueType>());
                    }
                    ++col;
                }
                storm::storage::SparseMatrixBuilder<ValueType> builder(std::move(auxMatrix));
                for (; col <= lastRow; ++col) {
                    if (isFixpointFormat) {
                        if (col != lastRow) {
                            builder.addNextValue(lastRow, col, -storm::utility::one<ValueType>());
                        }
                    } else {
                        builder.addNextValue(lastRow, col, storm::utility::one<ValueType>());
                    }
                }

                std::vector<ValueType> bsccEquationSystemRightSide(bscc.size(), storm::utility::zero<ValueType>());
                bsccEquationSystemRightSide.back() = storm::utility::one<ValueType>();

                // Create a linear equation solver.
                auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
                solver->setBounds(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>());
                // Check solver requirements.
                auto requirements = solver->getRequirements(subEnv);
                requirements.clearLowerBounds();
                requirements.clearUpperBounds();
                STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");

                std::vector<ValueType> lraDistr(bscc.size(), storm::utility::one<ValueType>() / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size()));
                solver->solveEquations(subEnv, lraDistr, bsccEquationSystemRightSide);

                // Calculate the final LRA value.
                ValueType result = storm::utility::zero<ValueType>();
                auto solIt = lraDistr.begin();
                for (auto const& globalState : bscc) {
                    if (this->isContinuousTime()) {
                        result += (*solIt) * (stateValuesGetter(globalState) + (*this->_exitRates)[globalState] * actionValuesGetter(globalState));
                    } else {
                        result += (*solIt) * (stateValuesGetter(globalState) + actionValuesGetter(globalState));
                    }
                    ++solIt;
                }
                assert(solIt == lraDistr.end());

                return std::pair<ValueType, std::vector<ValueType>>(std::move(result), std::move(lraDistr));
            }
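
            // Worked example (for intuition only): for the DTMC BSCC {s_0, s_1} with P(s_0,s_1) = P(s_1,s_0) = 1,
            // the matrix A is [[-1, 1], [1, -1]]. Solving x*A = 0 together with x_0 + x_1 = 1 yields
            // x = (1/2, 1/2), i.e. half of the time is spent in each state, and the LRA value is
            // 0.5 * r(s_0) + 0.5 * r(s_1).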

            template <typename ValueType>
            std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem) {

                // Create SSP Matrix.
                // In contrast to the version for nondeterministic models, we eliminate the auxiliary states representing each BSCC on the fly.

                // Probability mass that would lead to a BSCC will be considered in the rhs of the equation system.
                auto sspMatrix = this->_transitionMatrix.getSubmatrix(false, statesNotInComponent, statesNotInComponent, asEquationSystem);
                if (asEquationSystem) {
                    sspMatrix.convertToEquationSystem();
                }

                // Create the SSP right-hand side.
                std::vector<ValueType> rhs;
                rhs.reserve(sspMatrix.getRowCount());
                for (auto const& state : statesNotInComponent) {
                    ValueType stateValue = storm::utility::zero<ValueType>();
                    for (auto const& transition : this->_transitionMatrix.getRow(state)) {
                        if (!statesNotInComponent.get(transition.getColumn())) {
                            // This transition leads to a BSCC!
                            stateValue += transition.getValue() * bsccLraValues[inputStateToBsccIndexMap[transition.getColumn()]];
                        }
                    }
                    rhs.push_back(std::move(stateValue));
                }

                return std::make_pair(std::move(sspMatrix), std::move(rhs));
            }
|||
|
|||
template <typename ValueType> |
|||
std::vector<ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) { |
|||
STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet."); |
|||
|
|||
// For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
|
|||
// which redirects all transitions leading to a former BSCC state to a new (imaginary) auxiliary state.
|
|||
// Each auxiliary state gets assigned the value of that BSCC and we compute expected rewards (aka stochastic shortest path, SSP) on that new system.
|
|||
// For efficiency reasons, we actually build the system where the auxiliary states are already eliminated.
|
|||
|
|||
// First gather the states that are part of a component
|
|||
// and create a mapping from states that lie in a component to the corresponding component index.
|
|||
storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount()); |
|||
std::vector<uint64_t> stateIndexMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max()); |
|||
for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) { |
|||
for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) { |
|||
uint64_t state = internal::getComponentElementState(element); |
|||
statesInComponents.set(state); |
|||
stateIndexMap[state] = currentComponentIndex; |
|||
} |
|||
} |
|||
// Map the non-component states to their index in the SSP. Note that the order of these states will be preserved.
|
|||
uint64_t numberOfNonComponentStates = 0; |
|||
storm::storage::BitVector statesNotInComponent = ~statesInComponents; |
|||
for (auto const& nonComponentState : statesNotInComponent) { |
|||
stateIndexMap[nonComponentState] = numberOfNonComponentStates; |
|||
++numberOfNonComponentStates; |
|||
} |
|||
|
|||
// The next step is to create the equation system solving the SSP (unless the whole system consists of BSCCs)
|
|||
std::vector<ValueType> sspValues; |
|||
if (numberOfNonComponentStates > 0) { |
|||
storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory; |
|||
bool isEqSysFormat = linearEquationSolverFactory.getEquationProblemFormat(env) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem; |
|||
auto sspMatrixVector = buildSspMatrixVector(componentLraValues, stateIndexMap, statesNotInComponent, isEqSysFormat); |
|||
std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> solver = linearEquationSolverFactory.create(env, sspMatrixVector.first); |
|||
auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end()); |
|||
solver->setBounds(*lowerUpperBounds.first, *lowerUpperBounds.second); |
|||
// Check solver requirements
|
|||
auto requirements = solver->getRequirements(env); |
|||
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked."); |
|||
sspValues.assign(sspMatrixVector.first.getRowCount(), (*lowerUpperBounds.first + *lowerUpperBounds.second) / storm::utility::convertNumber<ValueType,uint64_t>(2)); |
|||
solver->solveEquations(env, sspValues, sspMatrixVector.second); |
|||
} |
|||
|
|||
// Prepare result vector.
|
|||
std::vector<ValueType> result(this->_transitionMatrix.getRowGroupCount()); |
|||
for (uint64_t state = 0; state < stateIndexMap.size(); ++state) { |
|||
if (statesNotInComponent.get(state)) { |
|||
result[state] = sspValues[stateIndexMap[state]]; |
|||
} else { |
|||
result[state] = componentLraValues[stateIndexMap[state]]; |
|||
} |
|||
} |
|||
return result; |
|||
} |
|||
|
|||
template class SparseDeterministicInfiniteHorizonHelper<double>; |
|||
template class SparseDeterministicInfiniteHorizonHelper<storm::RationalNumber>; |
|||
template class SparseDeterministicInfiniteHorizonHelper<storm::RationalFunction>; |
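// A possible usage (a sketch only: 'dtmc', 'env' and 'psiStates' are assumed to exist in the calling
// context; the member functions used are the ones declared in SparseDeterministicInfiniteHorizonHelper.h
// and SparseInfiniteHorizonHelper.h):
//
//     SparseDeterministicInfiniteHorizonHelper<double> helper(dtmc.getTransitionMatrix());
//     std::vector<double> lra = helper.computeLongRunAverageProbabilities(env, psiStates);
//     // lra[s] is the long-run fraction of time spent in psiStates when starting from state s.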
|||
|
|||
} |
|||
} |
|||
} |
@ -0,0 +1,75 @@ |
|||
#pragma once |
|||
#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h" |
|||
|
|||
|
|||
namespace storm { |
|||
|
|||
namespace modelchecker { |
|||
namespace helper { |
|||
|
|||
/*! |
|||
* Helper class for model checking queries that depend on the long run behavior of the (deterministic) system.
|||
* @tparam ValueType the type a value can have |
|||
*/ |
|||
template <typename ValueType> |
|||
class SparseDeterministicInfiniteHorizonHelper : public SparseInfiniteHorizonHelper<ValueType, false> { |
|||
|
|||
public: |
|||
/*! |
|||
* Function mapping from indices to values |
|||
*/ |
|||
typedef typename SparseInfiniteHorizonHelper<ValueType, true>::ValueGetter ValueGetter; |
|||
|
|||
/*! |
|||
* Initializes the helper for a discrete time model (i.e. DTMC) |
|||
*/ |
|||
SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix); |
|||
|
|||
/*! |
|||
* Initializes the helper for a continuous time model (i.e. CTMC) |
|||
* @note The transition matrix shall be probabilistic (i.e. the rows sum up to one) |
|||
*/ |
|||
SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates); |
|||
|
|||
/*! |
|||
* @param stateValuesGetter a function returning a value for a given state index |
|||
* @param actionValuesGetter a function returning a value for a given (global) choice index |
|||
* @return the (unique) optimal LRA value for the given component. |
|||
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected. |
|||
*/ |
|||
virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& component) override; |
|||
|
|||
protected: |
|||
|
|||
virtual void createDecomposition() override; |
|||
|
|||
std::pair<bool, ValueType> computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc); |
|||
|
|||
/*! |
|||
* As computeLraForComponent but uses value iteration as a solution method (independent of what is set in env) |
|||
*/ |
|||
ValueType computeLraForBsccVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc); |
|||
|
|||
/*! |
|||
* As computeLraForComponent but solves a linear equation system encoding gain and bias (independent of what is set in env) |
|||
* @see Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017), https://doi.org/10.1007/978-3-319-68167-2_25 |
|||
*/ |
|||
std::pair<ValueType, std::vector<ValueType>> computeLraForBsccGainBias(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc); |
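// For reference, a sketch of the equation system behind the gain/bias approach (discrete-time case):
// for a BSCC with transition probabilities P and per-state values r, the gain g (the LRA value) and a
// bias vector h solve
//     g + h(s) = r(s) + sum_{s'} P(s, s') * h(s')    for every state s of the BSCC,
// together with one normalizing equation such as h(s0) = 0 for a fixed reference state s0.
// The continuous-time variant additionally accounts for the exit rates.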
|||
/*! |
|||
* As computeLraForComponent but solves a linear equation system encoding the long run average (steady state) distribution (independent of what is set in env)
|||
*/ |
|||
std::pair<ValueType, std::vector<ValueType>> computeLraForBsccLraDistr(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc); |
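// The distribution-based variant instead solves for the steady-state distribution pi of the BSCC
// (pi = pi * P, sum_s pi(s) = 1) and returns sum_s pi(s) * value(s). A toy two-state illustration
// (closed form for a 2x2 chain; purely illustrative numbers):
//
//     double a = 0.5, b = 0.2;                        // P(0 -> 1) and P(1 -> 0)
//     double pi0 = b / (a + b), pi1 = a / (a + b);
//     std::vector<double> value = {1.0, 0.0};
//     double lra = pi0 * value[0] + pi1 * value[1];   // == 2.0 / 7.0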
|||
|
|||
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem); |
|||
|
|||
/*! |
|||
* @return Lra values for each state |
|||
*/ |
|||
virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) override; |
|||
|
|||
}; |
|||
|
|||
|
|||
} |
|||
} |
|||
} |
@ -0,0 +1,161 @@ |
|||
#include "SparseInfiniteHorizonHelper.h"
|
|||
|
|||
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
|
|||
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
|
|||
|
|||
#include "storm/models/sparse/StandardRewardModel.h"
|
|||
|
|||
#include "storm/storage/SparseMatrix.h"
|
|||
#include "storm/storage/MaximalEndComponentDecomposition.h"
|
|||
#include "storm/storage/StronglyConnectedComponentDecomposition.h"
|
|||
|
|||
#include "storm/solver/MinMaxLinearEquationSolver.h"
|
|||
#include "storm/solver/LinearEquationSolver.h"
|
|||
#include "storm/solver/Multiplier.h"
|
|||
#include "storm/solver/LpSolver.h"
|
|||
|
|||
#include "storm/utility/SignalHandler.h"
|
|||
#include "storm/utility/solver.h"
|
|||
#include "storm/utility/vector.h"
|
|||
|
|||
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
|
|||
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
|
|||
|
|||
#include "storm/exceptions/UnmetRequirementException.h"
|
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace helper { |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) { |
|||
// Intentionally left empty.
|
|||
} |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRates), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) { |
|||
// Intentionally left empty.
|
|||
} |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(&exitRates), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) { |
|||
// Intentionally left empty.
|
|||
} |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) { |
|||
STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or provided before."); |
|||
_backwardTransitions = &backwardTransitions; |
|||
} |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition) { |
|||
STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr, "Long Run Component Decomposition was provided but it was already computed or provided before."); |
|||
_longRunComponentDecomposition = &decomposition; |
|||
} |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) { |
|||
return computeLongRunAverageValues(env, |
|||
[&psiStates] (uint64_t stateIndex) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); }, |
|||
[] (uint64_t) { return storm::utility::zero<ValueType>(); } |
|||
); |
|||
} |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) { |
|||
ValueGetter stateRewardsGetter; |
|||
if (rewardModel.hasStateRewards()) { |
|||
stateRewardsGetter = [&rewardModel] (uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); }; |
|||
} else { |
|||
stateRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); }; |
|||
} |
|||
ValueGetter actionRewardsGetter; |
|||
if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) { |
|||
if (rewardModel.hasTransitionRewards()) { |
|||
actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, this->_transitionMatrix); }; |
|||
} else { |
|||
actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionReward(globalChoiceIndex); }; |
|||
} |
|||
} else { |
|||
actionRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); }; |
|||
} |
|||
|
|||
return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter); |
|||
} |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) { |
|||
ValueGetter stateValuesGetter; |
|||
if (stateValues) { |
|||
stateValuesGetter = [&stateValues] (uint64_t stateIndex) { return (*stateValues)[stateIndex]; }; |
|||
} else { |
|||
stateValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); }; |
|||
} |
|||
ValueGetter actionValuesGetter; |
|||
if (actionValues) { |
|||
actionValuesGetter = [&actionValues] (uint64_t globalChoiceIndex) { return (*actionValues)[globalChoiceIndex]; }; |
|||
} else { |
|||
actionValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); }; |
|||
} |
|||
|
|||
return computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter); |
|||
|
|||
} |
|||
|
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter) { |
|||
// We will compute the long run average value for each MEC individually and then set up an equation system to compute the value at non-MEC states as well.
|
|||
// For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
|
|||
|
|||
// Prepare an environment for the underlying solvers.
|
|||
auto underlyingSolverEnvironment = env; |
|||
if (env.solver().isForceSoundness()) { |
|||
// For sound computations, the error in the MECs plus the error in the remaining system should not exceed the user-defined precision.
|
|||
storm::RationalNumber newPrecision = env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2); |
|||
underlyingSolverEnvironment.solver().minMax().setPrecision(newPrecision); |
|||
underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion()); |
|||
underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(newPrecision, env.solver().lra().getRelativeTerminationCriterion()); |
|||
underlyingSolverEnvironment.solver().lra().setPrecision(newPrecision); |
|||
} |
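// For instance, with a requested precision of 1e-6 both phases are carried out with precision 5e-7,
// so the accumulated error of (per-component LRA values) + (SSP solution) stays within the
// user-provided bound: 5e-7 + 5e-7 = 1e-6.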
|||
|
|||
// If requested, allocate memory for the choices made
|
|||
if (Nondeterministic && this->isProduceSchedulerSet()) { |
|||
if (!_producedOptimalChoices.is_initialized()) { |
|||
_producedOptimalChoices.emplace(); |
|||
} |
|||
_producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount()); |
|||
} |
|||
STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model."); |
|||
|
|||
// Decompose the model into its bottom components (MECs or BSCCs)
|
|||
createDecomposition(); |
|||
|
|||
// Compute the long-run average for all components in isolation.
|
|||
std::vector<ValueType> componentLraValues; |
|||
componentLraValues.reserve(_longRunComponentDecomposition->size()); |
|||
for (auto const& c : *_longRunComponentDecomposition) { |
|||
componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c)); |
|||
} |
|||
|
|||
// Solve the resulting SSP where end components are collapsed into single auxiliary states
|
|||
return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues); |
|||
} |
|||
|
|||
template <typename ValueType, bool Nondeterministic> |
|||
bool SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const { |
|||
STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates." ); |
|||
return _exitRates != nullptr; |
|||
} |
|||
|
|||
template class SparseInfiniteHorizonHelper<double, true>; |
|||
template class SparseInfiniteHorizonHelper<storm::RationalNumber, true>; |
|||
|
|||
template class SparseInfiniteHorizonHelper<double, false>; |
|||
template class SparseInfiniteHorizonHelper<storm::RationalNumber, false>; |
|||
template class SparseInfiniteHorizonHelper<storm::RationalFunction, false>; |
|||
|
|||
} |
|||
} |
|||
} |
@ -0,0 +1,140 @@ |
|||
#pragma once |
|||
#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h" |
|||
|
|||
#include "storm/storage/MaximalEndComponent.h" |
|||
#include "storm/storage/StronglyConnectedComponent.h" |
|||
#include "storm/storage/Decomposition.h" |
|||
#include "storm/storage/SparseMatrix.h" |
|||
|
|||
namespace storm { |
|||
class Environment; |
|||
|
|||
namespace models { |
|||
namespace sparse { |
|||
template <typename VT> class StandardRewardModel; |
|||
} |
|||
} |
|||
|
|||
namespace modelchecker { |
|||
namespace helper { |
|||
|
|||
/*! |
|||
* Helper class for model checking queries that depend on the long run behavior of the (potentially nondeterministic) system.
|||
* @tparam ValueType the type a value can have |
|||
* @tparam Nondeterministic true if there is nondeterminism in the Model (MDP or MA) |
|||
*/ |
|||
template <typename ValueType, bool Nondeterministic> |
|||
class SparseInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType> { |
|||
|
|||
public: |
|||
|
|||
/*! |
|||
* The type of a component in which the system resides in the long run (BSCC for deterministic models, MEC for nondeterministic models) |
|||
*/ |
|||
using LongRunComponentType = typename std::conditional<Nondeterministic, storm::storage::MaximalEndComponent, storm::storage::StronglyConnectedComponent>::type; |
|||
|
|||
/*! |
|||
* Function mapping from indices to values |
|||
*/ |
|||
typedef std::function<ValueType(uint64_t)> ValueGetter; |
|||
|
|||
/*! |
|||
* Initializes the helper for a discrete time (i.e. DTMC, MDP) |
|||
*/ |
|||
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix); |
|||
|
|||
/*! |
|||
* Initializes the helper for continuous time (i.e. MA) |
|||
*/ |
|||
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates); |
|||
|
|||
/*! |
|||
* Initializes the helper for continuous time (i.e. CTMC) |
|||
*/ |
|||
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates); |
|||
|
|||
/*! |
|||
* Provides backward transitions that can be used during the computation. |
|||
* Providing them is optional. If they are not provided, they will be computed internally.
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the backward transitions remains valid.
|||
*/ |
|||
void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardsTransitions); |
|||
|
|||
/*! |
|||
* Provides the decomposition into long run components (BSCCs/MECs) that can be used during the computation. |
|||
* Providing the decomposition is optional. If it is not provided, it will be computed internally.
|||
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid. |
|||
*/ |
|||
void provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition); |
|||
|
|||
/*! |
|||
* Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState |
|||
* @return a value for each state |
|||
*/ |
|||
std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates); |
|||
|
|||
/*! |
|||
* Computes the long run average rewards, i.e., the average reward collected per time unit |
|||
* @return a value for each state |
|||
*/ |
|||
std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel); |
|||
|
|||
/*! |
|||
* Computes the long run average value given the provided state and action-based rewards. |
|||
* @param stateValues a vector containing a value for every state |
|||
* @param actionValues a vector containing a value for every choice |
|||
* @return a value for each state |
|||
*/ |
|||
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues = nullptr, std::vector<ValueType> const* actionValues = nullptr); |
|||
|
|||
/*! |
|||
* Computes the long run average value given the provided state and action based rewards |
|||
* @param stateValuesGetter a function returning a value for a given state index |
|||
* @param actionValuesGetter a function returning a value for a given (global) choice index |
|||
* @return a value for each state |
|||
*/ |
|||
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter); |
|||
|
|||
/*! |
|||
* @param stateValuesGetter a function returning a value for a given state index |
|||
* @param actionValuesGetter a function returning a value for a given (global) choice index |
|||
* @return the (unique) optimal LRA value for the given component. |
|||
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected. |
|||
*/ |
|||
virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& component) = 0; |
|||
|
|||
protected: |
|||
|
|||
/*! |
|||
* @return true iff this is a computation on a continuous time model (i.e. CTMC, MA) |
|||
*/ |
|||
bool isContinuousTime() const; |
|||
|
|||
/*! |
|||
* @post _longRunComponentDecomposition points to a decomposition of the long run components (MECs, BSCCs) |
|||
*/ |
|||
virtual void createDecomposition() = 0; |
|||
|
|||
/*! |
|||
* @pre if scheduler production is enabled and Nondeterministic is true, a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components. |
|||
* @return Lra values for each state |
|||
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values. |
|||
*/ |
|||
virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) = 0; |
|||
|
|||
storm::storage::SparseMatrix<ValueType> const& _transitionMatrix; |
|||
storm::storage::BitVector const* _markovianStates; |
|||
std::vector<ValueType> const* _exitRates; |
|||
|
|||
storm::storage::SparseMatrix<ValueType> const* _backwardTransitions; |
|||
storm::storage::Decomposition<LongRunComponentType> const* _longRunComponentDecomposition; |
|||
std::unique_ptr<storm::storage::SparseMatrix<ValueType>> _computedBackwardTransitions; |
|||
std::unique_ptr<storm::storage::Decomposition<LongRunComponentType>> _computedLongRunComponentDecomposition; |
|||
|
|||
boost::optional<std::vector<uint64_t>> _producedOptimalChoices; |
|||
}; |
|||
|
|||
|
|||
} |
|||
} |
|||
} |
@ -0,0 +1,478 @@ |
|||
#include "SparseNondeterministicInfiniteHorizonHelper.h"
|
|||
|
|||
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
|
|||
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
|
|||
|
|||
#include "storm/storage/SparseMatrix.h"
|
|||
#include "storm/storage/MaximalEndComponentDecomposition.h"
|
|||
#include "storm/storage/Scheduler.h"
|
|||
|
|||
#include "storm/solver/MinMaxLinearEquationSolver.h"
|
|||
#include "storm/solver/Multiplier.h"
|
|||
#include "storm/solver/LpSolver.h"
|
|||
|
|||
#include "storm/utility/solver.h"
|
|||
#include "storm/utility/vector.h"
|
|||
|
|||
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
|
|||
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
|
|||
|
|||
#include "storm/exceptions/UnmetRequirementException.h"
|
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace helper { |
|||
|
|||
template <typename ValueType> |
|||
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix) { |
|||
// Intentionally left empty.
|
|||
} |
|||
|
|||
template <typename ValueType> |
|||
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix, markovianStates, exitRates) { |
|||
// Intentionally left empty.
|
|||
} |
|||
|
|||
template <typename ValueType> |
|||
std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const { |
|||
STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested."); |
|||
STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?"); |
|||
return this->_producedOptimalChoices.get(); |
|||
} |
|||
|
|||
template <typename ValueType> |
|||
std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() { |
|||
STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested."); |
|||
STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?"); |
|||
return this->_producedOptimalChoices.get(); |
|||
} |
|||
|
|||
template <typename ValueType> |
|||
storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::extractScheduler() const { |
|||
auto const& optimalChoices = getProducedOptimalChoices(); |
|||
storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size()); |
|||
for (uint64_t state = 0; state < optimalChoices.size(); ++state) { |
|||
scheduler.setChoice(optimalChoices[state], state); |
|||
} |
|||
return scheduler; |
|||
} |
|||
|
|||
template <typename ValueType> |
|||
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() { |
|||
if (this->_longRunComponentDecomposition == nullptr) { |
|||
// The decomposition has not been provided or computed, yet.
|
|||
if (this->_backwardTransitions == nullptr) { |
|||
this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true)); |
|||
this->_backwardTransitions = this->_computedBackwardTransitions.get(); |
|||
} |
|||
this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(this->_transitionMatrix, *this->_backwardTransitions); |
|||
this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get(); |
|||
} |
|||
} |
|||
|
|||
template <typename ValueType> |
|||
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) { |
|||
// For models with potential nondeterminism, we compute the LRA for a maximal end component (MEC)
|
|||
|
|||
// Allocate memory for the nondeterministic choices.
|
|||
if (this->isProduceSchedulerSet()) { |
|||
if (!this->_producedOptimalChoices.is_initialized()) { |
|||
this->_producedOptimalChoices.emplace(); |
|||
} |
|||
this->_producedOptimalChoices->resize(this->_transitionMatrix.getRowGroupCount()); |
|||
} |
|||
|
|||
auto trivialResult = this->computeLraForTrivialMec(env, stateRewardsGetter, actionRewardsGetter, component); |
|||
if (trivialResult.first) { |
|||
return trivialResult.second; |
|||
} |
|||
|
|||
// Solve nontrivial MEC with the method specified in the settings
|
|||
storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod(); |
|||
if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::LinearProgramming) { |
|||
STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method."); |
|||
method = storm::solver::LraMethod::LinearProgramming; |
|||
} else if (env.solver().isForceSoundness() && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) { |
|||
STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method."); |
|||
method = storm::solver::LraMethod::ValueIteration; |
|||
} |
|||
STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration."); |
|||
if (method == storm::solver::LraMethod::LinearProgramming) { |
|||
return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, component); |
|||
} else if (method == storm::solver::LraMethod::ValueIteration) { |
|||
return computeLraForMecVi(env, stateRewardsGetter, actionRewardsGetter, component); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique."); |
|||
} |
|||
} |
|||
|
|||
template <typename ValueType> |
|||
std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialMec(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) { |
|||
|
|||
// If the component only consists of a single state, we compute the LRA value directly
|
|||
if (component.size() == 1) { |
|||
auto const& element = *component.begin(); |
|||
uint64_t state = internal::getComponentElementState(element); |
|||
auto choiceIt = internal::getComponentElementChoicesBegin(element); |
|||
if (!this->isContinuousTime()) { |
|||
// This is an MDP.
|
|||
// Find the choice with the highest/lowest reward
|
|||
ValueType bestValue = actionRewardsGetter(*choiceIt); |
|||
uint64_t bestChoice = *choiceIt; |
|||
for (++choiceIt; choiceIt != internal::getComponentElementChoicesEnd(element); ++choiceIt) { |
|||
ValueType currentValue = actionRewardsGetter(*choiceIt); |
|||
if ((this->minimize() && currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) { |
|||
bestValue = std::move(currentValue); |
|||
bestChoice = *choiceIt; |
|||
} |
|||
} |
|||
if (this->isProduceSchedulerSet()) { |
|||
this->_producedOptimalChoices.get()[state] = bestChoice - this->_transitionMatrix.getRowGroupIndices()[state]; |
|||
} |
|||
bestValue += stateRewardsGetter(state); |
|||
return {true, bestValue}; |
|||
} else { |
|||
// In a Markov Automaton, singleton components have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
|
|||
STORM_LOG_ASSERT(this->_markovianStates != nullptr, "Nondeterministic continuous time model without Markovian states... Is this not a Markov Automaton?");
STORM_LOG_THROW(this->_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
STORM_LOG_ASSERT(internal::getComponentElementChoiceCount(element) == 1, "Markovian state has nondeterministic behavior.");
|||
if (this->isProduceSchedulerSet()) { |
|||
this->_producedOptimalChoices.get()[state] = 0; |
|||
} |
|||
ValueType result = stateRewardsGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt)); |
|||
return {true, result}; |
|||
} |
|||
} |
|||
return {false, storm::utility::zero<ValueType>()}; |
|||
} |
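// The singleton-MDP case above boils down to picking the best of the MEC's self-loop choices, e.g.
// (toy numbers, maximizing):
//
//     double stateReward = 0.5;
//     std::vector<double> choiceRewards = {1.0, 3.5, 2.0};   // rewards of the state's choices
//     double best = *std::max_element(choiceRewards.begin(), choiceRewards.end());
//     double lra = stateReward + best;                       // == 4.0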
|||
|
|||
|
|||
template <typename ValueType> |
|||
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) { |
|||
|
|||
// Collect some parameters of the computation
|
|||
ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor()); |
|||
std::vector<uint64_t>* optimalChoices = nullptr; |
|||
if (this->isProduceSchedulerSet()) { |
|||
optimalChoices = &this->_producedOptimalChoices.get(); |
|||
} |
|||
|
|||
// Now create a helper and perform the algorithm
|
|||
if (this->isContinuousTime()) { |
|||
// We assume a Markov Automaton (with deterministic timed states and nondeterministic instant states)
|
|||
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNondetIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates); |
|||
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, this->_exitRates, &this->getOptimizationDirection(), optimalChoices); |
|||
} else { |
|||
// We assume an MDP (with nondeterministic timed states and no instant states)
|
|||
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor); |
|||
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices); |
|||
} |
|||
} |
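// A stripped-down, standalone sketch of the value-iteration idea for a single (deterministic,
// aperiodic) component -- the actual LraViHelper additionally handles nondeterminism, Markovian vs.
// instant states, and the aperiodicity transformation controlled by aperiodicFactor:
//
//     std::vector<std::vector<double>> P = {{0.5, 0.5}, {0.2, 0.8}};   // toy transition probabilities
//     std::vector<double> r = {1.0, 0.0};                              // toy state values
//     std::vector<double> v(P.size(), 0.0);
//     double gain = 0.0;
//     for (uint64_t iteration = 0; iteration < 100000; ++iteration) {
//         std::vector<double> vNew(v.size());
//         for (std::size_t s = 0; s < P.size(); ++s) {
//             vNew[s] = r[s];
//             for (std::size_t t = 0; t < P.size(); ++t) { vNew[s] += P[s][t] * v[t]; }
//         }
//         double minDiff = vNew[0] - v[0], maxDiff = minDiff;          // span of the improvement
//         for (std::size_t s = 1; s < v.size(); ++s) {
//             minDiff = std::min(minDiff, vNew[s] - v[s]);
//             maxDiff = std::max(maxDiff, vNew[s] - v[s]);
//         }
//         gain = (minDiff + maxDiff) / 2.0;                            // estimate of the LRA value
//         if (maxDiff - minDiff < 1e-8) { break; }                     // the span bounds the error
//         for (auto& x : vNew) { x -= vNew[0]; }                       // keep the iterates bounded
//         v = std::move(vNew);
//     }
//     // For this chain, gain converges to 2.0 / 7.0 (the stationary probability of state 0).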
|||
|
|||
template <typename ValueType> |
|||
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) { |
|||
// Create an LP solver
|
|||
auto solver = storm::utility::solver::LpSolverFactory<ValueType>().create("LRA for MEC"); |
|||
|
|||
// Now build the LP formulation as described in:
|
|||
// Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
|
|||
solver->setOptimizationDirection(invert(this->getOptimizationDirection())); |
|||
|
|||
// Create variables
|
|||
// TODO: Investigate whether we can easily make the variables bounded
|
|||
std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap; |
|||
for (auto const& stateChoicesPair : mec) { |
|||
std::string variableName = "x" + std::to_string(stateChoicesPair.first); |
|||
stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName); |
|||
} |
|||
storm::expressions::Variable k = solver->addUnboundedContinuousVariable("k", storm::utility::one<ValueType>()); |
|||
solver->update(); |
|||
|
|||
// Add constraints.
|
|||
for (auto const& stateChoicesPair : mec) { |
|||
uint_fast64_t state = stateChoicesPair.first; |
|||
bool stateIsMarkovian = this->_markovianStates && this->_markovianStates->get(state); |
|||
|
|||
// Now create a suitable constraint for each choice
|
|||
// x_s {≤, ≥} -k/rate(s) + sum_s' P(s,act,s') * x_s' + (value(s)/rate(s) + value(s,act))
|
|||
for (auto choice : stateChoicesPair.second) { |
|||
std::vector<storm::expressions::Expression> summands; |
|||
auto matrixRow = this->_transitionMatrix.getRow(choice); |
|||
summands.reserve(matrixRow.getNumberOfEntries() + 2); |
|||
// add -k/rate(s) (only if s is either a Markovian state or we have an MDP)
|
|||
if (stateIsMarkovian) { |
|||
summands.push_back(-(k / solver->getManager().rational((*this->_exitRates)[state]))); |
|||
} else if (!this->isContinuousTime()) { |
|||
summands.push_back(-k); |
|||
} |
|||
// add sum_s' P(s,act,s') * x_s'
|
|||
for (auto const& element : matrixRow) { |
|||
summands.push_back(stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue())); |
|||
} |
|||
// add value for state and choice
|
|||
ValueType value; |
|||
if (stateIsMarkovian) { |
|||
// divide state reward with exit rate
|
|||
value = stateRewardsGetter(state) / (*this->_exitRates)[state] + actionRewardsGetter(choice); |
|||
} else if (!this->isContinuousTime()) { |
|||
// in discrete time models no scaling is needed
|
|||
value = stateRewardsGetter(state) + actionRewardsGetter(choice); |
|||
} else { |
|||
// state is a probabilistic state of a Markov automaton. The state reward is not collected
|
|||
value = actionRewardsGetter(choice); |
|||
} |
|||
summands.push_back(solver->getConstant(value)); |
|||
storm::expressions::Expression constraint; |
|||
if (this->minimize()) { |
|||
constraint = stateToVariableMap.at(state) <= storm::expressions::sum(summands); |
|||
} else { |
|||
constraint = stateToVariableMap.at(state) >= storm::expressions::sum(summands); |
|||
} |
|||
solver->addConstraint("s" + std::to_string(state) + "," + std::to_string(choice), constraint); |
|||
} |
|||
} |
|||
|
|||
solver->optimize(); |
|||
STORM_LOG_THROW(!this->isProduceSchedulerSet(), storm::exceptions::NotImplementedException, "Scheduler extraction is not yet implemented for LP based LRA method."); |
|||
return solver->getContinuousValue(k); |
|||
} |
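// Written out, the LP that is built above reads, per MEC and for a maximizing query:
//     minimize k
//     subject to, for every state s and every choice a of s in the MEC,
//         x_s >= value(s, a) - k           + sum_{s'} P(s, a, s') * x_{s'}     (discrete-time state)
//         x_s >= value(s, a) - k / rate(s) + sum_{s'} P(s, a, s') * x_{s'}     (Markovian state)
//         x_s >= value(s, a)               + sum_{s'} P(s, a, s') * x_{s'}     (probabilistic MA state)
// and the optimal k is the maximal long-run average value of the MEC; for minimizing queries the
// objective and the inequalities are flipped, as implemented above.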
|||
|
|||
/*!
|
|||
* Auxiliary function that adds the entries of the Ssp Matrix for a single choice (i.e., row) |
|||
* Transitions that lead to a Component state will be redirected to a new auxiliary state (there is one aux. state for each component). |
|||
* Transitions that don't lead to a Component state are copied (taking a state index mapping into account). |
|||
*/ |
|||
template <typename ValueType> |
|||
void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfNonComponentStates, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) { |
|||
|
|||
// As there could be multiple transitions to the same MEC, we accumulate them in this map before adding them to the matrix builder.
|
|||
std::map<uint64_t, ValueType> auxiliaryStateToProbabilityMap; |
|||
|
|||
for (auto const& transition : inputTransitionMatrix.getRow(inputMatrixChoice)) { |
|||
if (!storm::utility::isZero(transition.getValue())) { |
|||
auto const& sspTransitionTarget = inputToSspStateMap[transition.getColumn()]; |
|||
// Since the auxiliary Component states are appended at the end of the matrix, we can use this check to
|
|||
// decide whether the transition leads to a component state or not
|
|||
if (sspTransitionTarget < numberOfNonComponentStates) { |
|||
// If the target state is not contained in a component, we can copy over the entry.
|
|||
sspMatrixBuilder.addNextValue(currentSspChoice, sspTransitionTarget, transition.getValue()); |
|||
} else { |
|||
// If the target state is contained in component i, we need to add the probability to the corresponding field in the vector
|
|||
// so that we are able to write the cumulative probability to the component into the matrix later.
|
|||
auto insertionRes = auxiliaryStateToProbabilityMap.emplace(sspTransitionTarget, transition.getValue()); |
|||
if (!insertionRes.second) { |
|||
// sspTransitionTarget already existed in the map, i.e., there already was a transition to that component.
|
|||
// Hence, we add up the probabilities.
|
|||
insertionRes.first->second += transition.getValue(); |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
// Now insert all (cumulative) probability values that target a component.
|
|||
for (auto const& componentToProbEntry : auxiliaryStateToProbabilityMap) { |
|||
sspMatrixBuilder.addNextValue(currentSspChoice, componentToProbEntry.first, componentToProbEntry.second); |
|||
} |
|||
} |
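// The accumulation step above in isolation (toy data): duplicate targets are merged so that each
// column is handed to addNextValue only once and in ascending column order (std::map iterates its
// keys in ascending order, which matches what the SparseMatrixBuilder expects):
//
//     std::map<uint64_t, double> accumulated;
//     std::vector<std::pair<uint64_t, double>> transitionsIntoComponents = {{7, 0.25}, {3, 0.5}, {7, 0.25}};
//     for (auto const& entry : transitionsIntoComponents) {
//         auto insertionRes = accumulated.emplace(entry.first, entry.second);
//         if (!insertionRes.second) {
//             insertionRes.first->second += entry.second;   // target already present: add the probabilities
//         }
//     }
//     // accumulated now maps 3 -> 0.5 and 7 -> 0.5.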
|||
|
|||
template <typename ValueType> |
|||
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap) { |
|||
|
|||
auto const& choiceIndices = this->_transitionMatrix.getRowGroupIndices(); |
|||
|
|||
std::vector<ValueType> rhs; |
|||
uint64_t numberOfSspStates = numberOfNonComponentStates + this->_longRunComponentDecomposition->size(); |
|||
storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates , 0, true, true, numberOfSspStates); |
|||
// If the source state of a transition is not contained in any component, we copy its choices (and perform the necessary modifications).
|
|||
uint64_t currentSspChoice = 0; |
|||
for (auto const& nonComponentState : statesNotInComponent) { |
|||
sspMatrixBuilder.newRowGroup(currentSspChoice); |
|||
for (uint64_t choice = choiceIndices[nonComponentState]; choice < choiceIndices[nonComponentState + 1]; ++choice, ++currentSspChoice) { |
|||
rhs.push_back(storm::utility::zero<ValueType>()); |
|||
addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder); |
|||
} |
|||
} |
|||
// Now we construct the choices for the auxiliary states which reflect former Component states.
|
|||
for (uint64_t componentIndex = 0; componentIndex < this->_longRunComponentDecomposition->size(); ++componentIndex) { |
|||
auto const& component = (*this->_longRunComponentDecomposition)[componentIndex]; |
|||
sspMatrixBuilder.newRowGroup(currentSspChoice); |
|||
// For nondeterministic models it might still be that we leave the component again. This needs to be reflected in the SSP
|
|||
// by adding the "exiting" choices of the MEC to the axiliary states
|
|||
for (auto const& element : component) { |
|||
uint64_t componentState = internal::getComponentElementState(element); |
|||
for (uint64_t choice = choiceIndices[componentState]; choice < choiceIndices[componentState + 1]; ++choice) { |
|||
// If the choice is not contained in the component itself, we have to add a similar distribution to the auxiliary state.
|
|||
if (!internal::componentElementChoicesContains(element, choice)) { |
|||
rhs.push_back(storm::utility::zero<ValueType>()); |
|||
addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder); |
|||
if (sspComponentExitChoicesToOriginalMap) { |
|||
// Later we need to be able to map this choice back to the original input model
|
|||
sspComponentExitChoicesToOriginalMap->emplace_back(componentState, choice - choiceIndices[componentState]); |
|||
} |
|||
++currentSspChoice; |
|||
} |
|||
} |
|||
} |
|||
// For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the component.
|
|||
rhs.push_back(mecLraValues[componentIndex]); |
|||
if (sspComponentExitChoicesToOriginalMap) { |
|||
// Insert some invalid values so we can later detect that this choice is not an exit choice
|
|||
sspComponentExitChoicesToOriginalMap->emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max()); |
|||
} |
|||
++currentSspChoice; |
|||
} |
|||
return std::make_pair(sspMatrixBuilder.build(currentSspChoice, numberOfSspStates, numberOfSspStates), std::move(rhs)); |
|||
} |
|||
|
|||
template <typename ValueType> |
|||
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::constructOptimalChoices(std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap) { |
|||
// We first take care of the non-MEC states
|
|||
storm::utility::vector::setVectorValues(this->_producedOptimalChoices.get(), statesNotInComponent, sspChoices); |
|||
// Secondly, we consider MEC states. There are 3 cases for each MEC state:
|
|||
// 1. The SSP choices encode that we want to stay in the MEC
|
|||
// 2. The SSP choices encode that we want to leave the MEC and
|
|||
// a) we take an exit (non-MEC) choice at the given state
|
|||
// b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
|
|||
uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfNonComponentStates]; |
|||
for (auto const& mec : *this->_longRunComponentDecomposition) { |
|||
// Get the sspState of this MEC (using one representative mec state)
|
|||
auto const& sspState = inputToSspStateMap[mec.begin()->first]; |
|||
uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState]; |
|||
// Obtain the state and choice of the original model to which the selected choice corresponds.
|
|||
auto const& originalStateChoice = sspComponentExitChoicesToOriginalMap[sspChoiceIndex - exitChoiceOffset]; |
|||
// Check if we are in Case 1 or 2
|
|||
if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) { |
|||
// The optimal choice is to stay in this mec (Case 1)
|
|||
// In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForComponent.
|
|||
STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC."); |
|||
} else { |
|||
// The best choice is to leave this MEC via the selected state and choice. (Case 2)
|
|||
// Set the exit choice (Case 2.a)
|
|||
this->_producedOptimalChoices.get()[originalStateChoice.first] = originalStateChoice.second; |
|||
// The remaining states in this MEC need to reach the state with the exit choice with probability 1. (Case 2.b)
|
|||
// Perform a backwards search from the exit state, only using MEC choices
|
|||
// We start by setting an invalid choice to all remaining mec states (so that we can easily detect them as unprocessed)
|
|||
for (auto const& stateActions : mec) { |
|||
if (stateActions.first != originalStateChoice.first) { |
|||
this->_producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max(); |
|||
} |
|||
} |
|||
// Ensure that backwards transitions are available
|
|||
if (this->_backwardTransitions == nullptr) { |
|||
this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true)); |
|||
this->_backwardTransitions = this->_computedBackwardTransitions.get(); |
|||
} |
|||
// Now start a backwards DFS
|
|||
std::vector<uint64_t> stack = {originalStateChoice.first}; |
|||
while (!stack.empty()) { |
|||
uint64_t currentState = stack.back(); |
|||
stack.pop_back(); |
|||
for (auto const& backwardsTransition : this->_backwardTransitions->getRowGroup(currentState)) { |
|||
uint64_t predecessorState = backwardsTransition.getColumn(); |
|||
if (mec.containsState(predecessorState)) { |
|||
auto& selectedPredChoice = this->_producedOptimalChoices.get()[predecessorState]; |
|||
if (selectedPredChoice == std::numeric_limits<uint64_t>::max()) { |
|||
// We don't already have a choice for this predecessor.
|
|||
// We now need to check whether there is a *MEC* choice leading to currentState
|
|||
for (auto const& predChoice : mec.getChoicesForState(predecessorState)) { |
|||
for (auto const& forwardTransition : this->_transitionMatrix.getRow(predChoice)) { |
|||
if (forwardTransition.getColumn() == currentState && !storm::utility::isZero(forwardTransition.getValue())) { |
|||
// Playing this choice (infinitely often) will lead to current state (infinitely often)!
|
|||
selectedPredChoice = predChoice - this->_transitionMatrix.getRowGroupIndices()[predecessorState]; |
|||
stack.push_back(predecessorState); |
|||
break; |
|||
} |
|||
} |
|||
if (selectedPredChoice != std::numeric_limits<uint64_t>::max()) { |
|||
break; |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
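// A compact standalone sketch of the backwards search of Case 2.b (adjacency lists with toy data):
// every still-unassigned MEC state receives some choice that reaches, with positive probability, a
// state from which the exit choice is already reachable.
//
//     std::vector<std::vector<uint64_t>> successorsOfChoice = {{1}, {0}, {0, 2}};  // choice -> successors
//     std::vector<std::vector<uint64_t>> choicesOfState = {{0}, {1, 2}};           // state -> its choices
//     std::vector<std::vector<uint64_t>> predecessorsOfState = {{1}, {0}};         // state -> predecessors
//     std::vector<uint64_t> chosen(choicesOfState.size(), std::numeric_limits<uint64_t>::max());
//     uint64_t exitState = 0;
//     chosen[exitState] = 0;                                                       // the exit choice is fixed
//     std::vector<uint64_t> stack = {exitState};
//     while (!stack.empty()) {
//         uint64_t current = stack.back();
//         stack.pop_back();
//         for (uint64_t pred : predecessorsOfState[current]) {
//             if (chosen[pred] != std::numeric_limits<uint64_t>::max()) { continue; }
//             for (uint64_t choice : choicesOfState[pred]) {
//                 auto const& succs = successorsOfChoice[choice];
//                 if (std::find(succs.begin(), succs.end(), current) != succs.end()) {
//                     chosen[pred] = choice;                                        // leads towards the exit
//                     stack.push_back(pred);
//                     break;
//                 }
//             }
//         }
//     }
//     // chosen == {0, 1}: state 1 plays choice 1, which reaches state 0 and thus the exit choice.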
|||
|
|||
template <typename ValueType> |
|||
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) { |
|||
STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet."); |
|||
|
|||
// For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
|
|||
// which redirects all transitions leading to a former component state to a new auxiliary state.
|
|||
// There will be one auxiliary state for each component. These states will be appended to the end of the matrix.
|
|||
|
|||
// First gather the states that are part of a component
|
|||
// and create a mapping from states that lie in a component to the corresponding component index.
|
|||
storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount()); |
|||
std::vector<uint64_t> inputToSspStateMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max()); |
|||
for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) { |
|||
for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) { |
|||
uint64_t state = internal::getComponentElementState(element); |
|||
statesInComponents.set(state); |
|||
inputToSspStateMap[state] = currentComponentIndex; |
|||
} |
|||
} |
|||
// Now take care of the non-component states. Note that the order of these states will be preserved.
|
|||
uint64_t numberOfNonComponentStates = 0; |
|||
storm::storage::BitVector statesNotInComponent = ~statesInComponents; |
|||
for (auto const& nonComponentState : statesNotInComponent) { |
|||
inputToSspStateMap[nonComponentState] = numberOfNonComponentStates; |
|||
++numberOfNonComponentStates; |
|||
} |
|||
// Finalize the mapping for the component states, which currently still assigns component states to their component index.
|
|||
// To make sure that they point to the auxiliary states (located at the end of the SspMatrix), we need to shift them by the
|
|||
// number of states that are not in a component.
|
|||
for (auto const& mecState : statesInComponents) { |
|||
inputToSspStateMap[mecState] += numberOfNonComponentStates; |
|||
} |
|||
|
|||
// For scheduler extraction, we will need to create a mapping between choices at the auxiliary states and the
|
|||
// corresponding choices in the original model.
|
|||
std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspComponentExitChoicesToOriginalMap; |
|||
|
|||
// The next step is to create the SSP matrix and the right-hand side of the SSP.
|
|||
auto sspMatrixVector = buildSspMatrixVector(componentLraValues, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates, this->isProduceSchedulerSet() ? &sspComponentExitChoicesToOriginalMap : nullptr); |
|||
|
|||
// Set-up a solver
|
|||
storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory; |
|||
storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(env, true, true, this->getOptimizationDirection(), false, this->isProduceSchedulerSet()); |
|||
requirements.clearBounds(); |
|||
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked."); |
|||
std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrixVector.first); |
|||
solver->setHasUniqueSolution(); |
|||
solver->setHasNoEndComponents(); |
|||
solver->setTrackScheduler(this->isProduceSchedulerSet()); |
|||
auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end()); |
|||
solver->setLowerBound(*lowerUpperBounds.first); |
|||
solver->setUpperBound(*lowerUpperBounds.second); |
|||
solver->setRequirementsChecked(); |
|||
|
|||
// Solve the equation system
|
|||
std::vector<ValueType> x(sspMatrixVector.first.getRowGroupCount()); |
|||
solver->solveEquations(env, this->getOptimizationDirection(), x, sspMatrixVector.second); |
|||
|
|||
// Prepare scheduler (if requested)
|
|||
if (this->isProduceSchedulerSet() && solver->hasScheduler()) { |
|||
// Translate result for ssp matrix to original model
|
|||
constructOptimalChoices(solver->getSchedulerChoices(), sspMatrixVector.first, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates, sspComponentExitChoicesToOriginalMap); |
|||
} else { |
|||
STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated."); |
|||
} |
|||
|
|||
// Prepare result vector.
|
|||
// For efficiency reasons, we re-use the memory of our rhs for this!
|
|||
std::vector<ValueType> result = std::move(sspMatrixVector.second); |
|||
result.resize(this->_transitionMatrix.getRowGroupCount()); |
|||
result.shrink_to_fit(); |
|||
storm::utility::vector::selectVectorValues(result, inputToSspStateMap, x); |
|||
return result; |
|||
} |
|||
|
|||
template class SparseNondeterministicInfiniteHorizonHelper<double>; |
|||
template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber>; |
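// A possible usage (a sketch only: 'mdp', 'env' and 'rewardModel' are assumed to exist in the calling
// context; choosing the optimization direction and requesting scheduler production happens through the
// inherited helper interface, whose setters are not repeated here):
//
//     SparseNondeterministicInfiniteHorizonHelper<double> helper(mdp.getTransitionMatrix());
//     // ... set the optimization direction to maximize and enable scheduler production ...
//     std::vector<double> lra = helper.computeLongRunAverageRewards(env, rewardModel);
//     storm::storage::Scheduler<double> scheduler = helper.extractScheduler();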
|||
|
|||
} |
|||
} |
|||
} |
@ -0,0 +1,101 @@ |
|||
#pragma once |
|||
#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h" |
|||
|
|||
|
|||
namespace storm { |
|||
|
|||
namespace storage { |
|||
template <typename VT> class Scheduler; |
|||
} |
|||
|
|||
namespace modelchecker { |
|||
namespace helper { |
|||
|
|||
/*! |
|||
* Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system. |
|||
* @tparam ValueType the type a value can have |
|||
*/ |
|||
template <typename ValueType> |
|||
class SparseNondeterministicInfiniteHorizonHelper : public SparseInfiniteHorizonHelper<ValueType, true> { |
|||
|
|||
public: |
|||
/*! |
|||
* Function mapping from indices to values |
|||
*/ |
|||
typedef typename SparseInfiniteHorizonHelper<ValueType, true>::ValueGetter ValueGetter; |
|||
|
|||
/*! |
|||
* Initializes the helper for a discrete time model (i.e. MDP) |
|||
*/ |
|||
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix); |
|||
|
|||
/*! |
|||
* Initializes the helper for a continuous time model (i.e. MA) |
|||
*/ |
|||
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates); |
|||
|
|||
/*! |
|||
* @pre before calling this, a computation call should have been performed during which scheduler production was enabled. |
|||
* @return the produced scheduler of the most recent call. |
|||
*/ |
|||
std::vector<uint64_t> const& getProducedOptimalChoices() const; |
|||
|
|||
/*! |
|||
* @pre before calling this, a computation call should have been performed during which scheduler production was enabled. |
|||
* @return the produced scheduler of the most recent call. |
|||
*/ |
|||
std::vector<uint64_t>& getProducedOptimalChoices(); |
|||
|
|||
/*! |
|||
* @pre before calling this, a computation call should have been performed during which scheduler production was enabled. |
|||
* @return a new scheduler containing optimal choices for each state that yield the long run average values of the most recent call. |
|||
*/ |
|||
storm::storage::Scheduler<ValueType> extractScheduler() const; |
|||
|
|||
/*! |
|||
* @param stateValuesGetter a function returning a value for a given state index |
|||
* @param actionValuesGetter a function returning a value for a given (global) choice index |
|||
* @return the (unique) optimal LRA value for the given component. |
|||
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected. |
|||
*/ |
|||
virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& component) override; |
|||
|
|||
protected: |
|||
|
|||
virtual void createDecomposition() override; |
|||
|
|||
std::pair<bool, ValueType> computeLraForTrivialMec(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec); |
|||
|
|||
/*! |
|||
* As computeLraForComponent, but uses value iteration as a solution method (independent of what is set in env) |
|||
*/ |
|||
ValueType computeLraForMecVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec); |
|||
|
|||
/*! |
|||
* As computeLraForComponent, but uses linear programming as a solution method (independent of what is set in env) |
|||
* @see Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13 |
|||
*/ |
|||
ValueType computeLraForMecLp(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec); |
|||
|
|||
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap); |
|||
|
|||
/*! |
|||
* @pre a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components. |
|||
* Translates optimal choices for MECs and the SSP to the original model. |
|||
* @post getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values. |
|||
*/ |
|||
void constructOptimalChoices(std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap); |
|||
|
|||
/*! |
|||
* @pre if scheduler production is enabled a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components. |
|||
* @return LRA values for each state |
|||
* @post if scheduler production is enabled getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values. |
|||
*/ |
|||
virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) override; |
|||
|
|||
}; |
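// Illustrative usage sketch (not part of Storm's sources). The compute call below assumes that the
// base class SparseInfiniteHorizonHelper exposes computeLongRunAverageRewards with the shown
// signature; 'mdp', 'env', and 'rewardModel' are hypothetical objects of the caller.
//
//     SparseNondeterministicInfiniteHorizonHelper<double> helper(mdp.getTransitionMatrix());
//     helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
//     helper.setProduceScheduler(true);
//     std::vector<double> lraValues = helper.computeLongRunAverageRewards(env, rewardModel); // assumed API
//     storm::storage::Scheduler<double> scheduler = helper.extractScheduler();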
|||
|
|||
|
|||
} |
|||
} |
|||
} |
@ -0,0 +1,27 @@ |
|||
#pragma once |
|||
|
|||
#include "storm/storage/StronglyConnectedComponent.h" |
|||
#include "storm/storage/MaximalEndComponent.h" |
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace helper { |
|||
namespace internal { |
|||
|
|||
/// Auxiliary functions that deal with the different kinds of components (MECs on potentially nondeterministic models and BSCCs on deterministic models) |
|||
// BSCCs: |
|||
inline uint64_t getComponentElementState(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return element; } |
|||
inline uint64_t getComponentElementChoiceCount(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return 1; } // Assumes deterministic model! |
|||
inline uint64_t const* getComponentElementChoicesBegin(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element; } |
|||
inline uint64_t const* getComponentElementChoicesEnd(typename storm::storage::StronglyConnectedComponent::value_type const& element) { return &element + 1; } |
|||
inline bool componentElementChoicesContains(typename storm::storage::StronglyConnectedComponent::value_type const& element, uint64_t choice) { return element == choice; } |
|||
// MECs: |
|||
inline uint64_t getComponentElementState(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.first; } |
|||
inline uint64_t getComponentElementChoiceCount(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.size(); } |
|||
inline typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentElementChoicesBegin(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.begin(); } |
|||
inline typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentElementChoicesEnd(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) { return element.second.end(); } |
|||
inline bool componentElementChoicesContains(storm::storage::MaximalEndComponent::map_type::value_type const& element, uint64_t choice) { return element.second.contains(choice); } |
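// Example (illustrative sketch, not part of Storm's sources): thanks to these overloads, code that
// walks over a component can be written once and instantiated for BSCCs as well as MECs. The small
// helper below simply counts the total number of choices within a component.
template <typename ComponentType>
uint64_t exampleCountComponentChoices(ComponentType const& component) {
    uint64_t result = 0;
    for (auto const& element : component) {
        // One choice per state for a StronglyConnectedComponent, possibly several for a MaximalEndComponent.
        result += getComponentElementChoiceCount(element);
    }
    return result;
}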
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,491 @@ |
|||
#include "LraViHelper.h"
|
|||
|
|||
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
|
|||
|
|||
|
|||
#include "storm/storage/MaximalEndComponent.h"
|
|||
#include "storm/storage/StronglyConnectedComponent.h"
|
|||
|
|||
#include "storm/utility/graph.h"
|
|||
#include "storm/utility/vector.h"
|
|||
#include "storm/utility/macros.h"
|
|||
#include "storm/utility/SignalHandler.h"
|
|||
|
|||
#include "storm/environment/solver/SolverEnvironment.h"
|
|||
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
|
|||
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
|
|||
|
|||
|
|||
#include "storm/exceptions/UnmetRequirementException.h"
|
|||
|
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace helper { |
|||
namespace internal { |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
LraViHelper<ValueType, ComponentType, TransitionsType>::LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates, std::vector<ValueType> const* exitRates) : _component(component), _transitionMatrix(transitionMatrix), _timedStates(timedStates), _hasInstantStates(TransitionsType == LraViTransitionsType::DetTsNondetIs || TransitionsType == LraViTransitionsType::DetTsDetIs), _Tsx1IsCurrent(false) { |
|||
// Run through the component and collect some data:
|
|||
// We create two submodels, one consisting of the timed states of the component and one consisting of the instant states of the component.
|
|||
// For this, we create a state index map that points from state indices of the input model to the indices within the corresponding submodel.
|
|||
std::map<uint64_t, uint64_t> toSubModelStateMapping; |
|||
// We also obtain the state and choice counts of the two submodels
|
|||
uint64_t numTsSubModelStates(0), numTsSubModelChoices(0); |
|||
uint64_t numIsSubModelStates(0), numIsSubModelChoices(0); |
|||
// We will need to uniformize the timed MEC states by introducing a selfloop.
|
|||
// For this, we need to find a uniformization rate which will be a little higher (given by aperiodicFactor) than the maximum rate occurring in the component.
|
|||
_uniformizationRate = exitRates == nullptr ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); |
|||
// Now run over the MEC and collect the required data.
|
|||
for (auto const& element : _component) { |
|||
uint64_t componentState = getComponentElementState(element); |
|||
if (isTimedState(componentState)) { |
|||
toSubModelStateMapping.emplace(componentState, numTsSubModelStates); |
|||
++numTsSubModelStates; |
|||
numTsSubModelChoices += getComponentElementChoiceCount(element); |
|||
STORM_LOG_ASSERT(nondetTs() || getComponentElementChoiceCount(element) == 1, "Timed state has multiple choices but only a single choice was expected."); |
|||
if (exitRates) { |
|||
_uniformizationRate = std::max(_uniformizationRate, (*exitRates)[componentState]); |
|||
} |
|||
} else { |
|||
toSubModelStateMapping.emplace(componentState, numIsSubModelStates); |
|||
++numIsSubModelStates; |
|||
numIsSubModelChoices += getComponentElementChoiceCount(element); |
|||
STORM_LOG_ASSERT(nondetIs() || getComponentElementChoiceCount(element) == 1, "Instant state has multiple choices but only a single choice was expected."); |
|||
} |
|||
} |
|||
assert(numIsSubModelStates + numTsSubModelStates == _component.size()); |
|||
assert(_hasInstantStates || numIsSubModelStates == 0); |
|||
STORM_LOG_ASSERT(nondetTs() || numTsSubModelStates == numTsSubModelChoices, "Unexpected choice count of deterministic timed submodel."); |
|||
STORM_LOG_ASSERT(nondetIs() || numIsSubModelStates == numIsSubModelChoices, "Unexpected choice count of deterministic instant submodel."); |
|||
_hasInstantStates = _hasInstantStates && numIsSubModelStates > 0; |
|||
STORM_LOG_THROW(numTsSubModelStates > 0, storm::exceptions::InvalidOperationException, "Bottom Component has no timed states. Computation of Long Run Average values not supported. Is this a Markov Automaton with Zeno behavior?"); |
|||
|
|||
// We make sure that every timed state gets a selfloop to make the model aperiodic
|
|||
_uniformizationRate *= storm::utility::one<ValueType>() + aperiodicFactor; |
|||
|
|||
// Now build the timed and the instant submodels.
|
|||
// In addition, we also need the transitions between the two.
|
|||
storm::storage::SparseMatrixBuilder<ValueType> tsTransitionsBuilder(numTsSubModelChoices, numTsSubModelStates, 0, true, nondetTs(), nondetTs() ? numTsSubModelStates : 0); |
|||
storm::storage::SparseMatrixBuilder<ValueType> tsToIsTransitionsBuilder, isTransitionsBuilder, isToTsTransitionsBuilder; |
|||
if (_hasInstantStates) { |
|||
tsToIsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numTsSubModelChoices, numIsSubModelStates, 0, true, nondetTs(), nondetTs() ? numTsSubModelStates : 0); |
|||
isTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numIsSubModelChoices, numIsSubModelStates, 0, true, nondetIs(), nondetIs() ? numIsSubModelStates : 0); |
|||
isToTsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numIsSubModelChoices, numTsSubModelStates, 0, true, nondetIs(), nondetIs() ? numIsSubModelStates : 0); |
|||
_IsChoiceValues.reserve(numIsSubModelChoices); |
|||
} |
|||
ValueType uniformizationFactor = storm::utility::one<ValueType>() / _uniformizationRate; |
|||
uint64_t currTsRow = 0; |
|||
uint64_t currIsRow = 0; |
|||
for (auto const& element : _component) { |
|||
uint64_t componentState = getComponentElementState(element); |
|||
if (isTimedState(componentState)) { |
|||
// The currently processed state is timed.
|
|||
if (nondetTs()) { |
|||
tsTransitionsBuilder.newRowGroup(currTsRow); |
|||
if (_hasInstantStates) { |
|||
tsToIsTransitionsBuilder.newRowGroup(currTsRow); |
|||
} |
|||
} |
|||
// If there are exit rates, the uniformization factor needs to be updated.
|
|||
if (exitRates) { |
|||
uniformizationFactor = (*exitRates)[componentState] / _uniformizationRate; |
|||
} |
|||
// We need to uniformize which means that a diagonal entry for the selfloop will be inserted.
|
|||
ValueType selfLoopProb = storm::utility::one<ValueType>() - uniformizationFactor; |
|||
for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) { |
|||
tsTransitionsBuilder.addDiagonalEntry(currTsRow, selfLoopProb); |
|||
for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) { |
|||
uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()]; |
|||
if (isTimedState(entry.getColumn())) { |
|||
// We have a transition from a timed state to a timed state
|
|||
STORM_LOG_ASSERT(subModelColumn < numTsSubModelStates, "Invalid state for timed submodel"); |
|||
tsTransitionsBuilder.addNextValue(currTsRow, subModelColumn, uniformizationFactor * entry.getValue()); |
|||
} else { |
|||
// We have a transition from a timed to an instant state
|
|||
STORM_LOG_ASSERT(subModelColumn < numIsSubModelStates, "Invalid state for instant submodel"); |
|||
tsToIsTransitionsBuilder.addNextValue(currTsRow, subModelColumn, uniformizationFactor * entry.getValue()); |
|||
} |
|||
} |
|||
++currTsRow; |
|||
} |
|||
} else { |
|||
// The currently processed state is instant
|
|||
if (nondetIs()) { |
|||
isTransitionsBuilder.newRowGroup(currIsRow); |
|||
isToTsTransitionsBuilder.newRowGroup(currIsRow); |
|||
} |
|||
for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) { |
|||
for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) { |
|||
uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()]; |
|||
if (isTimedState(entry.getColumn())) { |
|||
// We have a transition from an instant state to a timed state
|
|||
STORM_LOG_ASSERT(subModelColumn < numTsSubModelStates, "Invalid state for timed submodel"); |
|||
isToTsTransitionsBuilder.addNextValue(currIsRow, subModelColumn, entry.getValue()); |
|||
} else { |
|||
// We have a transition from an instant to an instant state
|
|||
STORM_LOG_ASSERT(subModelColumn < numIsSubModelStates, "Invalid state for instant submodel"); |
|||
isTransitionsBuilder.addNextValue(currIsRow, subModelColumn, entry.getValue()); |
|||
} |
|||
} |
|||
++currIsRow; |
|||
} |
|||
} |
|||
} |
|||
_TsTransitions = tsTransitionsBuilder.build(); |
|||
if (_hasInstantStates) { |
|||
_TsToIsTransitions = tsToIsTransitionsBuilder.build(); |
|||
_IsTransitions = isTransitionsBuilder.build(); |
|||
_IsToTsTransitions = isToTsTransitionsBuilder.build(); |
|||
} |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
ValueType LraViHelper<ValueType, ComponentType, TransitionsType>::performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) { |
|||
initializeNewValues(stateValueGetter, actionValueGetter, exitRates); |
|||
ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()); |
|||
bool relative = env.solver().lra().getRelativeTerminationCriterion(); |
|||
boost::optional<uint64_t> maxIter; |
|||
if (env.solver().lra().isMaximalIterationCountSet()) { |
|||
maxIter = env.solver().lra().getMaximalIterationCount(); |
|||
} |
|||
|
|||
// start the iterations
|
|||
ValueType result = storm::utility::zero<ValueType>(); |
|||
uint64_t iter = 0; |
|||
while (!maxIter.is_initialized() || iter < maxIter.get()) { |
|||
++iter; |
|||
performIterationStep(env, dir); |
|||
|
|||
// Check if we are done
|
|||
auto convergenceCheckResult = checkConvergence(relative, precision); |
|||
result = convergenceCheckResult.currentValue; |
|||
if (convergenceCheckResult.isPrecisionAchieved) { |
|||
break; |
|||
} |
|||
if (storm::utility::resources::isTerminate()) { |
|||
break; |
|||
} |
|||
// If there will be a next iteration, we have to prepare it.
|
|||
prepareNextIteration(env); |
|||
|
|||
} |
|||
if (maxIter.is_initialized() && iter == maxIter.get()) { |
|||
STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations."); |
|||
} else if (storm::utility::resources::isTerminate()) { |
|||
STORM_LOG_WARN("LRA computation aborted after " << iter << " iterations."); |
|||
} else { |
|||
STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations."); |
|||
} |
|||
|
|||
if (choices) { |
|||
// We will be doing one more iteration step and track scheduler choices this time.
|
|||
prepareNextIteration(env); |
|||
performIterationStep(env, dir, choices); |
|||
} |
|||
return result; |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
void LraViHelper<ValueType, ComponentType, TransitionsType>::initializeNewValues(ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates) { |
|||
// clear potential old values and reserve enough space for new values
|
|||
_TsChoiceValues.clear(); |
|||
_TsChoiceValues.reserve(_TsTransitions.getRowCount()); |
|||
if (_hasInstantStates) { |
|||
_IsChoiceValues.clear(); |
|||
_IsChoiceValues.reserve(_IsTransitions.getRowCount()); |
|||
} |
|||
|
|||
// Set the new choice-based values
|
|||
ValueType actionRewardScalingFactor = storm::utility::one<ValueType>() / _uniformizationRate; |
|||
for (auto const& element : _component) { |
|||
uint64_t componentState = getComponentElementState(element); |
|||
if (isTimedState(componentState)) { |
|||
if (exitRates) { |
|||
actionRewardScalingFactor = (*exitRates)[componentState] / _uniformizationRate; |
|||
} |
|||
for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) { |
|||
// Compute the values obtained for this choice.
|
|||
_TsChoiceValues.push_back(stateValueGetter(componentState) / _uniformizationRate + actionValueGetter(*componentChoiceIt) * actionRewardScalingFactor); |
|||
} |
|||
} else { |
|||
for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) { |
|||
// Compute the values obtained for this choice.
|
|||
// State values do not count here since no time passes in instant states.
|
|||
_IsChoiceValues.push_back(actionValueGetter(*componentChoiceIt)); |
|||
} |
|||
} |
|||
} |
|||
|
|||
// Set-up new iteration vectors for timed states
|
|||
_Tsx1.assign(_TsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>()); |
|||
_Tsx2 = _Tsx1; |
|||
|
|||
if (_hasInstantStates) { |
|||
// Set-up vectors for storing intermediate results for instant states.
|
|||
_Isx.resize(_IsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>()); |
|||
_Isb = _IsChoiceValues; |
|||
} |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
void LraViHelper<ValueType, ComponentType, TransitionsType>::prepareSolversAndMultipliers(const Environment& env, storm::solver::OptimizationDirection const* dir) { |
|||
_TsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _TsTransitions); |
|||
if (_hasInstantStates) { |
|||
if (_IsTransitions.getNonzeroEntryCount() > 0) { |
|||
// Set-up a solver for transitions within instant states
|
|||
_IsSolverEnv = std::make_unique<storm::Environment>(env); |
|||
if (env.solver().isForceSoundness()) { |
|||
// To get correct results, the inner equation systems are solved exactly.
|
|||
// TODO investigate how an error would propagate
|
|||
_IsSolverEnv->solver().setForceExact(true); |
|||
} |
|||
bool isAcyclic = !storm::utility::graph::hasCycle(_IsTransitions); |
|||
if (isAcyclic) { |
|||
STORM_LOG_INFO("Instant transitions are acyclic."); |
|||
if (_IsSolverEnv->solver().minMax().isMethodSetFromDefault()) { |
|||
_IsSolverEnv->solver().minMax().setMethod(storm::solver::MinMaxMethod::Acyclic); |
|||
} |
|||
if (_IsSolverEnv->solver().isLinearEquationSolverTypeSetFromDefaultValue()) { |
|||
_IsSolverEnv->solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Acyclic); |
|||
} |
|||
} |
|||
if (nondetIs()) { |
|||
storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> factory; |
|||
_NondetIsSolver = factory.create(*_IsSolverEnv, _IsTransitions); |
|||
_NondetIsSolver->setHasUniqueSolution(true); // Assume non-zeno MA
|
|||
_NondetIsSolver->setHasNoEndComponents(true); // assume non-zeno MA
|
|||
_NondetIsSolver->setCachingEnabled(true); |
|||
auto req = _NondetIsSolver->getRequirements(*_IsSolverEnv, *dir); |
|||
req.clearUniqueSolution(); |
|||
if (isAcyclic) { |
|||
req.clearAcyclic(); |
|||
} |
|||
// Computing a priori lower/upper bounds is not particularly easy, as there might be selfloops with high probabilities
|
|||
// which accumulate a lot of reward. Moreover, the right-hand side of the equation system changes dynamically.
|
|||
STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "The solver requirement " << req.getEnabledRequirementsAsString() << " has not been cleared."); |
|||
_NondetIsSolver->setRequirementsChecked(true); |
|||
} else { |
|||
storm::solver::GeneralLinearEquationSolverFactory<ValueType> factory; |
|||
if (factory.getEquationProblemFormat(*_IsSolverEnv) != storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem) { |
|||
// We need to convert the transition matrix connecting instant states into the equation system format.
|
|||
// TODO: This could have been done already during construction of the matrix.
|
|||
// Insert diagonal entries.
|
|||
storm::storage::SparseMatrix<ValueType> converted(_IsTransitions, true); |
|||
// Compute A' = I - A
|
|||
converted.convertToEquationSystem(); |
|||
STORM_LOG_WARN("The selected equation solver requires to create a temporary " << converted.getDimensionsAsString()); |
|||
// Note that the solver has ownership of the converted matrix.
|
|||
_DetIsSolver = factory.create(*_IsSolverEnv, std::move(converted)); |
|||
} else { |
|||
_DetIsSolver = factory.create(*_IsSolverEnv, _IsTransitions); |
|||
} |
|||
_DetIsSolver->setCachingEnabled(true); |
|||
auto req = _DetIsSolver->getRequirements(*_IsSolverEnv); |
|||
if (isAcyclic) { |
|||
req.clearAcyclic(); |
|||
} |
|||
// A priori lower/upper bounds are hard (see MinMax version of this above)
|
|||
STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "The solver requirement " << req.getEnabledRequirementsAsString() << " has not been cleared."); |
|||
} |
|||
} |
|||
|
|||
// Set up multipliers for transitions connecting timed and instant states
|
|||
_TsToIsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _TsToIsTransitions); |
|||
_IsToTsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _IsToTsTransitions); |
|||
} |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
void LraViHelper<ValueType, ComponentType, TransitionsType>::setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToTimedStates, bool setChoiceZeroToInstantStates) const { |
|||
// Transform the local choices (within this mec) to choice indices for the input model
|
|||
uint64_t localState = 0; |
|||
for (auto const& element : _component) { |
|||
uint64_t elementState = getComponentElementState(element); |
|||
if ((setChoiceZeroToTimedStates && isTimedState(elementState)) || (setChoiceZeroToInstantStates && !isTimedState(elementState))) { |
|||
choices[elementState] = 0; |
|||
} else { |
|||
uint64_t choice = localMecChoices[localState]; |
|||
STORM_LOG_ASSERT(choice < getComponentElementChoiceCount(element), "The selected choice does not seem to exist."); |
|||
uint64_t globalChoiceIndex = *(getComponentElementChoicesBegin(element) + choice); |
|||
choices[elementState] = globalChoiceIndex - _transitionMatrix.getRowGroupIndices()[elementState]; |
|||
++localState; |
|||
} |
|||
} |
|||
STORM_LOG_ASSERT(localState == localMecChoices.size(), "Did not traverse all component states."); |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
void LraViHelper<ValueType, ComponentType, TransitionsType>::performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) { |
|||
STORM_LOG_ASSERT(!((nondetTs() || nondetIs()) && dir == nullptr), "No optimization direction provided for model with nondeterminism"); |
|||
// Initialize value vectors, multipliers, and solvers if this has not been done yet
|
|||
if (!_TsMultiplier) { |
|||
prepareSolversAndMultipliers(env, dir); |
|||
} |
|||
|
|||
// Compute new x values for the timed states
|
|||
// Flip what is new and what is old
|
|||
_Tsx1IsCurrent = !_Tsx1IsCurrent; |
|||
// At this point, xOld() points to what has been computed in the most recent call of performIterationStep (initially, this is the 0-vector).
|
|||
// The result of this ongoing computation will be stored in xNew()
|
|||
|
|||
// Compute the values obtained by a single uniformization step between timed states only
|
|||
if (nondetTs()) { |
|||
if (choices == nullptr) { |
|||
_TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew()); |
|||
} else { |
|||
// Also keep track of the choices made.
|
|||
std::vector<uint64_t> tsChoices(_TsTransitions.getRowGroupCount()); |
|||
_TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), &tsChoices); |
|||
// Note that nondeterminism within the timed states means that there cannot be instant states (we either have MDPs or MAs)
|
|||
// Hence, in this branch we don't have to care for choices at instant states.
|
|||
STORM_LOG_ASSERT(!_hasInstantStates, "Nondeterministic timed states are only supported if there are no instant states."); |
|||
setInputModelChoices(*choices, tsChoices); |
|||
} |
|||
} else { |
|||
_TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, xNew()); |
|||
} |
|||
if (_hasInstantStates) { |
|||
// Add the values obtained by taking a single uniformization step that leads to an instant state followed by arbitrarily many instant steps.
|
|||
// First compute the total values when taking arbitrarily many instant transitions (in no time)
|
|||
if (_NondetIsSolver) { |
|||
// We might need to track the optimal choices.
|
|||
if (choices == nullptr) { |
|||
_NondetIsSolver->solveEquations(*_IsSolverEnv, *dir, _Isx, _Isb); |
|||
} else { |
|||
_NondetIsSolver->setTrackScheduler(); |
|||
_NondetIsSolver->solveEquations(*_IsSolverEnv, *dir, _Isx, _Isb); |
|||
setInputModelChoices(*choices, _NondetIsSolver->getSchedulerChoices(), true); |
|||
} |
|||
} else if (_DetIsSolver) { |
|||
_DetIsSolver->solveEquations(*_IsSolverEnv, _Isx, _Isb); |
|||
} else { |
|||
STORM_LOG_ASSERT(_IsTransitions.getNonzeroEntryCount() == 0, "If no solver was initialized, an empty matrix would have been expected."); |
|||
if (nondetIs()) { |
|||
if (choices == nullptr) { |
|||
storm::utility::vector::reduceVectorMinOrMax(*dir, _Isb, _Isx, _IsTransitions.getRowGroupIndices()); |
|||
} else { |
|||
std::vector<uint64_t> psChoices(_IsTransitions.getRowGroupCount()); |
|||
storm::utility::vector::reduceVectorMinOrMax(*dir, _Isb, _Isx, _IsTransitions.getRowGroupIndices(), &psChoices); |
|||
setInputModelChoices(*choices, psChoices, true); |
|||
} |
|||
} else { |
|||
// For deterministic instant states, there is nothing to reduce, i.e., we could just set _Isx = _Isb.
|
|||
// For efficiency reasons, we do a swap instead:
|
|||
_Isx.swap(_Isb); |
|||
// Note that at this point we have changed the contents of _Isb, but they will be overwritten anyway.
|
|||
if (choices) { |
|||
// Set choice 0 for all states.
|
|||
setInputModelChoices(*choices, {}, true, true); |
|||
} |
|||
} |
|||
} |
|||
// Now add the (weighted) values of the instant states to the values of the timed states.
|
|||
_TsToIsMultiplier->multiply(env, _Isx, &xNew(), xNew()); |
|||
} |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
typename LraViHelper<ValueType, ComponentType, TransitionsType>::ConvergenceCheckResult LraViHelper<ValueType, ComponentType, TransitionsType>::checkConvergence(bool relative, ValueType precision) const { |
|||
STORM_LOG_ASSERT(_TsMultiplier, "tried to check for convergence without doing an iteration first."); |
|||
// All values are scaled according to the uniformizationRate.
|
|||
// We need to 'revert' this scaling when computing the absolute precision.
|
|||
// However, for relative precision, the scaling cancels out.
|
|||
ValueType threshold = relative ? precision : ValueType(precision / _uniformizationRate); |
|||
|
|||
ConvergenceCheckResult res = { true, storm::utility::one<ValueType>() }; |
|||
// Now check whether the currently produced results are precise enough
|
|||
STORM_LOG_ASSERT(threshold > storm::utility::zero<ValueType>(), "Did not expect a non-positive threshold."); |
|||
auto x1It = xOld().begin(); |
|||
auto x1Ite = xOld().end(); |
|||
auto x2It = xNew().begin(); |
|||
ValueType maxDiff = (*x2It - *x1It); |
|||
ValueType minDiff = maxDiff; |
|||
// The difference between maxDiff and minDiff is zero at this point. Thus, it doesn't make sense to check the threshold now.
|
|||
for (++x1It, ++x2It; x1It != x1Ite; ++x1It, ++x2It) { |
|||
ValueType diff = (*x2It - *x1It); |
|||
// Potentially update maxDiff or minDiff
|
|||
bool skipCheck = false; |
|||
if (maxDiff < diff) { |
|||
maxDiff = diff; |
|||
} else if (minDiff > diff) { |
|||
minDiff = diff; |
|||
} else { |
|||
skipCheck = true; |
|||
} |
|||
// Check convergence
|
|||
if (!skipCheck && (maxDiff - minDiff) > (relative ? (threshold * minDiff) : threshold)) { |
|||
res.isPrecisionAchieved = false; |
|||
break; |
|||
} |
|||
} |
|||
|
|||
// Compute the average of the maximal and the minimal difference.
|
|||
ValueType avgDiff = (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0)); |
|||
|
|||
// "Undo" the scaling of the values
|
|||
res.currentValue = avgDiff * _uniformizationRate; |
|||
return res; |
|||
} |
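// Worked example (illustrative): suppose an iteration yields maxDiff = 0.1003 and minDiff = 0.1001
// while _uniformizationRate = 10 and the (absolute) precision is 1e-3. The threshold then is
// 1e-3 / 10 = 1e-4, and the span maxDiff - minDiff = 2e-4 exceeds it, so another iteration is needed.
// Once the span is small enough, the reported value is ((maxDiff + minDiff) / 2) * 10, i.e. the
// average difference rescaled by the uniformization rate.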
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
void LraViHelper<ValueType, ComponentType, TransitionsType>::prepareNextIteration(Environment const& env) { |
|||
// To avoid large (and numerically unstable) x-values, we subtract a reference value.
|
|||
ValueType referenceValue = xNew().front(); |
|||
storm::utility::vector::applyPointwise<ValueType, ValueType>(xNew(), xNew(), [&referenceValue] (ValueType const& x_i) -> ValueType { return x_i - referenceValue; }); |
|||
if (_hasInstantStates) { |
|||
// Update the RHS of the equation system for the instant states by taking the new values of timed states into account.
|
|||
STORM_LOG_ASSERT(!nondetTs(), "Nondeterministic timed states not expected when there are also instant states."); |
|||
_IsToTsMultiplier->multiply(env, xNew(), &_IsChoiceValues, _Isb); |
|||
} |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
bool LraViHelper<ValueType, ComponentType, TransitionsType>::isTimedState(uint64_t const& inputModelStateIndex) const { |
|||
STORM_LOG_ASSERT(!_hasInstantStates || _timedStates != nullptr, "Model has instant states but no partition into timed and instant states is given."); |
|||
STORM_LOG_ASSERT(!_hasInstantStates || inputModelStateIndex < _timedStates->size(), "Unable to determine whether state " << inputModelStateIndex << " is timed."); |
|||
return !_hasInstantStates || _timedStates->get(inputModelStateIndex); |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
std::vector<ValueType>& LraViHelper<ValueType, ComponentType, TransitionsType>::xNew() { |
|||
return _Tsx1IsCurrent ? _Tsx1 : _Tsx2; |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
std::vector<ValueType> const& LraViHelper<ValueType, ComponentType, TransitionsType>::xNew() const { |
|||
return _Tsx1IsCurrent ? _Tsx1 : _Tsx2; |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
std::vector<ValueType>& LraViHelper<ValueType, ComponentType, TransitionsType>::xOld() { |
|||
return _Tsx1IsCurrent ? _Tsx2 : _Tsx1; |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
std::vector<ValueType> const& LraViHelper<ValueType, ComponentType, TransitionsType>::xOld() const { |
|||
return _Tsx1IsCurrent ? _Tsx2 : _Tsx1; |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
bool LraViHelper<ValueType, ComponentType, TransitionsType>::nondetTs() const { |
|||
return TransitionsType == LraViTransitionsType::NondetTsNoIs; |
|||
} |
|||
|
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
bool LraViHelper<ValueType, ComponentType, TransitionsType>::nondetIs() const { |
|||
return TransitionsType == LraViTransitionsType::DetTsNondetIs; |
|||
} |
|||
|
|||
template class LraViHelper<double, storm::storage::MaximalEndComponent, LraViTransitionsType::NondetTsNoIs>; |
|||
template class LraViHelper<storm::RationalNumber, storm::storage::MaximalEndComponent, LraViTransitionsType::NondetTsNoIs>; |
|||
template class LraViHelper<double, storm::storage::MaximalEndComponent, LraViTransitionsType::DetTsNondetIs>; |
|||
template class LraViHelper<storm::RationalNumber, storm::storage::MaximalEndComponent, LraViTransitionsType::DetTsNondetIs>; |
|||
|
|||
template class LraViHelper<double, storm::storage::StronglyConnectedComponent, LraViTransitionsType::DetTsNoIs>; |
|||
template class LraViHelper<storm::RationalNumber, storm::storage::StronglyConnectedComponent, LraViTransitionsType::DetTsNoIs>; |
|||
|
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,149 @@ |
|||
#pragma once |
|||
|
|||
|
|||
#include "storm/storage/SparseMatrix.h" |
|||
#include "storm/solver/LinearEquationSolver.h" |
|||
#include "storm/solver/MinMaxLinearEquationSolver.h" |
|||
#include "storm/solver/Multiplier.h" |
|||
|
|||
namespace storm { |
|||
class Environment; |
|||
|
|||
|
|||
namespace modelchecker { |
|||
namespace helper { |
|||
namespace internal { |
|||
|
|||
/*! |
|||
* Specifies the different kinds of transition types with which this helper can be used |
|||
* Ts means timed states (cf. Markovian states in a Markov Automaton) and Is means instant states (cf. probabilistic states in a Markov automaton). |
|||
* The way to think about this is that time can only pass in a timed state, whereas transitions emerging from an instant state fire immediately |
|||
* In an MDP, all states are seen as timed. |
|||
* In this enum, we also specify whether there can be a nondeterministic choice at the corresponding states or not. |
|||
*/ |
|||
enum class LraViTransitionsType { |
|||
DetTsNoIs, /// deterministic choice at timed states, no instant states (as in DTMCs and CTMCs) |
|||
DetTsNondetIs, /// deterministic choice at timed states, nondeterministic choice at instant states (as in Markov Automata) |
|||
DetTsDetIs, /// deterministic choice at timed states, deterministic choice at instant states (as in Markov Automata without any nondeterminism) |
|||
NondetTsNoIs /// nondeterministic choice at timed states, no instant states (as in MDPs) |
|||
}; |
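// For orientation: DTMC/CTMC bottom components use DetTsNoIs, MDP end components use NondetTsNoIs,
// and Markov automaton end components use DetTsNondetIs (cf. the explicit template instantiations
// at the end of LraViHelper.cpp).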
|||
|
|||
/*! |
|||
* Helper class that performs iterations of the value iteration method. |
|||
* The template parameters ComponentType and TransitionsType are used to make this work for various model types. |
|||
* |
|||
* @see Ashok et al.: Value Iteration for Long-Run Average Reward in Markov Decision Processes (CAV'17), https://doi.org/10.1007/978-3-319-63387-9_10 |
|||
* @see Butkova, Wimmer, Hermanns: Long-Run Rewards for Markov Automata (TACAS'17), https://doi.org/10.1007/978-3-662-54580-5_11 |
|||
* |
|||
* @tparam ValueType The type of a value |
|||
* @tparam ComponentType The type of a 'bottom component' of the model (e.g. a BSCC (for purely deterministic models) or a MEC (for models with potential nondeterminism)). |
|||
* @tparam TransitionsType The kind of transitions that occur. |
|||
*/ |
|||
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType> |
|||
class LraViHelper { |
|||
public: |
|||
/// Function mapping from indices to values |
|||
typedef std::function<ValueType(uint64_t)> ValueGetter; |
|||
|
|||
/*! |
|||
* Initializes a new VI helper for the provided MEC or BSCC |
|||
* @param component the MEC or BSCC |
|||
* @param transitionMatrix The transition matrix of the input model |
|||
* @param aperiodicFactor a non-zero factor that is used for making the MEC aperiodic (by adding selfloops to each state) |
|||
* @param timedStates States in which time can pass (Markovian states in a Markov automaton). If nullptr, it is assumed that all states are timed states |
|||
* @param exitRates The exit rates of the timed states (relevant for continuous time models). If nullptr, all rates are assumed to be 1 (which corresponds to a discrete time model) |
|||
* @note All indices and vectors must be w.r.t. the states as described by the provided transition matrix |
|||
*/ |
|||
LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates = nullptr, std::vector<ValueType> const* exitRates = nullptr); |
|||
|
|||
/*! |
|||
* Performs value iteration with the given state- and action values. |
|||
* @param env The environment, containing information on the precision of this computation. |
|||
* @param stateValueGetter function that returns for each state index (w.r.t. the input transition matrix) the reward for staying in state. Will only be called for timed states. |
|||
* @param actionValueGetter function that returns for each global choice index (w.r.t. the input transition matrix) the reward for taking that choice |
|||
* @param exitRates (as in the constructor) |
|||
* @param dir Optimization direction. Must not be nullptr in case of nondeterminism. |
|||
* @param choices if not nullptr, the optimal choices will be inserted in this vector. The vector's size must then be equal to the number of row groups of the input transition matrix. |
|||
* @return The (optimal) long run average value of the specified component. |
|||
* @note it is possible to call this method multiple times with different values. However, other changes to the environment or the optimization direction might not have the expected effect due to caching. |
|||
*/ |
|||
ValueType performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr); |
|||
|
|||
private: |
|||
|
|||
/*! |
|||
* Initializes the value iterations with the provided values. |
|||
* Resets all information from potential previous calls. |
|||
* Must be called before the first call to performIterationStep. |
|||
* @param stateValueGetter Function that returns for each state index (w.r.t. the input transitions) the value (e.g. reward) for that state |
|||
* @param actionValueGetter Function that returns for each global choice index (w.r.t. the input transitions) the value (e.g. reward) for that choice |
|||
*/ |
|||
void initializeNewValues(ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr); |
|||
|
|||
/*! |
|||
* Performs a single iteration step. |
|||
* @param env The environment. |
|||
* @param dir The optimization direction. Has to be given if there is nondeterminism (otherwise it will be ignored) |
|||
* @param choices If given, the optimal choices will be inserted at the appropriate states. |
|||
* Note that these choices will be inserted w.r.t. the original model states/choices, i.e. the size of the vector should match the state-count of the input model |
|||
* @pre when calling this the first time, initializeNewValues must have been called before. Moreover, prepareNextIteration must be called between two calls of this. |
|||
*/ |
|||
void performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr); |
|||
|
|||
struct ConvergenceCheckResult { |
|||
bool isPrecisionAchieved; |
|||
ValueType currentValue; |
|||
}; |
|||
|
|||
/*! |
|||
* Checks whether the currently computed value achieves the desired precision |
|||
*/ |
|||
ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) const; |
|||
|
|||
/*! |
|||
* Must be called between two calls of performIterationStep. |
|||
*/ |
|||
void prepareNextIteration(Environment const& env); |
|||
|
|||
/// Prepares the necessary solvers and multipliers for doing the iterations. |
|||
void prepareSolversAndMultipliers(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr); |
|||
|
|||
void setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToTimedStates = false, bool setChoiceZeroToInstantStates = false) const; |
|||
|
|||
/// Returns true iff the given state is a timed state |
|||
bool isTimedState(uint64_t const& inputModelStateIndex) const; |
|||
|
|||
/// The result for timed states of the most recent iteration |
|||
std::vector<ValueType>& xNew(); |
|||
std::vector<ValueType> const& xNew() const; |
|||
|
|||
/// The result for timed states of the previous iteration |
|||
std::vector<ValueType>& xOld(); |
|||
std::vector<ValueType> const& xOld() const; |
|||
|
|||
/// @return true iff there potentially is a nondeterministic choice at timed states |
|||
bool nondetTs() const; |
|||
|
|||
/// @return true iff there potentially is a nondeterministic choice at instant states. Returns false if there are no instant states. |
|||
bool nondetIs() const; |
|||
|
|||
|
|||
|
|||
ComponentType const& _component; |
|||
storm::storage::SparseMatrix<ValueType> const& _transitionMatrix; |
|||
storm::storage::BitVector const* _timedStates; // e.g. Markovian states of a Markov automaton. |
|||
bool _hasInstantStates; |
|||
ValueType _uniformizationRate; |
|||
storm::storage::SparseMatrix<ValueType> _TsTransitions, _TsToIsTransitions, _IsTransitions, _IsToTsTransitions; |
|||
std::vector<ValueType> _Tsx1, _Tsx2, _TsChoiceValues; |
|||
bool _Tsx1IsCurrent; |
|||
std::vector<ValueType> _Isx, _Isb, _IsChoiceValues; |
|||
std::unique_ptr<storm::solver::Multiplier<ValueType>> _TsMultiplier, _TsToIsMultiplier, _IsToTsMultiplier; |
|||
std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> _NondetIsSolver; |
|||
std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> _DetIsSolver; |
|||
std::unique_ptr<storm::Environment> _IsSolverEnv; |
|||
}; |
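// Illustrative sketch (not part of Storm's sources): how a caller might drive this helper for a
// maximal end component of an MDP. The aperiodicity factor, the zero state values, and the way the
// action rewards are obtained are placeholder assumptions; error handling is omitted.
template <typename ValueType, typename ComponentType>
ValueType exampleLraViUsage(storm::Environment const& env, ComponentType const& component,
                            storm::storage::SparseMatrix<ValueType> const& transitionMatrix,
                            std::vector<ValueType> const& actionRewards,
                            storm::solver::OptimizationDirection dir) {
    // All states of an MDP are timed, so no timed-state bit vector and no exit rates are passed.
    LraViHelper<ValueType, ComponentType, LraViTransitionsType::NondetTsNoIs> viHelper(
        component, transitionMatrix, ValueType(1) / ValueType(10));
    // State values are zero here; action values are looked up in a (hypothetical) reward vector.
    auto stateValues = [](uint64_t) { return ValueType(0); };
    auto actionValues = [&actionRewards](uint64_t globalChoiceIndex) { return actionRewards[globalChoiceIndex]; };
    return viHelper.performValueIteration(env, stateValues, actionValues, nullptr, &dir);
}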
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,46 @@ |
|||
#pragma once |
|||
|
|||
#include "storm/modelchecker/CheckTask.h" |
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace helper { |
|||
|
|||
/*! |
|||
* Forwards relevant information stored in the given CheckTask to the given helper |
|||
*/ |
|||
template <typename HelperType, typename FormulaType, typename ModelType> |
|||
void setInformationFromCheckTaskNondeterministic(HelperType& helper, storm::modelchecker::CheckTask<FormulaType, typename ModelType::ValueType> const& checkTask, ModelType const& model) { |
|||
// Relevancy of initial states. |
|||
if (checkTask.isOnlyInitialStatesRelevantSet()) { |
|||
helper.setRelevantStates(model.getInitialStates()); |
|||
} |
|||
// Value threshold to which the result will be compared |
|||
if (checkTask.isBoundSet()) { |
|||
helper.setValueThreshold(checkTask.getBoundComparisonType(), checkTask.getBoundThreshold()); |
|||
} |
|||
// Optimization direction |
|||
if (checkTask.isOptimizationDirectionSet()) { |
|||
helper.setOptimizationDirection(checkTask.getOptimizationDirection()); |
|||
} |
|||
// Scheduler Production |
|||
helper.setProduceScheduler(checkTask.isProduceSchedulersSet()); |
|||
} |
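// Illustrative call-site sketch (hypothetical names 'mdp' and 'checkTask'; not part of Storm's sources):
//
//     SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(mdp.getTransitionMatrix());
//     setInformationFromCheckTaskNondeterministic(helper, checkTask, mdp);
//     // The helper now knows about relevant states, the value threshold, the optimization
//     // direction, and whether a scheduler should be produced.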
|||
|
|||
/*! |
|||
* Forwards relevant information stored in the given CheckTask to the given helper |
|||
*/ |
|||
template <typename HelperType, typename FormulaType, typename ModelType> |
|||
void setInformationFromCheckTaskDeterministic(HelperType& helper, storm::modelchecker::CheckTask<FormulaType, typename ModelType::ValueType> const& checkTask, ModelType const& model) { |
|||
// Relevancy of initial states. |
|||
if (checkTask.isOnlyInitialStatesRelevantSet()) { |
|||
helper.setRelevantStates(model.getInitialStates()); |
|||
} |
|||
// Value threshold to which the result will be compared |
|||
if (checkTask.isBoundSet()) { |
|||
helper.setValueThreshold(checkTask.getBoundComparisonType(), checkTask.getBoundThreshold()); |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,46 @@ |
|||
#pragma once |
|||
|
|||
#include "storm/modelchecker/CheckTask.h" |
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace helper { |
|||
|
|||
/*! |
|||
* Forwards relevant information stored in another helper to the given helper |
|||
*/ |
|||
template <typename TargetHelperType, typename SourceHelperType> |
|||
void setInformationFromOtherHelperNondeterministic(TargetHelperType& targetHelper, SourceHelperType const& sourceHelperType, std::function<typename TargetHelperType::StateSet(typename SourceHelperType::StateSet const&)> const& stateSetTransformer) { |
|||
// Relevancy of initial states. |
|||
if (sourceHelperType.hasRelevantStates()) { |
|||
targetHelper.setRelevantStates(stateSetTransformer(sourceHelperType.getRelevantStates())); |
|||
} |
|||
// Value threshold to which the result will be compared |
|||
if (sourceHelperType.isValueThresholdSet()) { |
|||
targetHelper.setValueThreshold(sourceHelperType.getValueThresholdComparisonType(), storm::utility::convertNumber<typename TargetHelperType::ValueType>(sourceHelperType.getValueThresholdValue())); |
|||
} |
|||
// Optimization direction |
|||
if (sourceHelperType.isOptimizationDirectionSet()) { |
|||
targetHelper.setOptimizationDirection(sourceHelperType.getOptimizationDirection()); |
|||
} |
|||
// Scheduler Production |
|||
targetHelper.setProduceScheduler(sourceHelperType.isProduceSchedulerSet()); |
|||
} |
|||
|
|||
/*! |
|||
* Forwards relevant information stored in another helper to the given helper |
|||
*/ |
|||
template <typename TargetHelperType, typename SourceHelperType> |
|||
void setInformationFromOtherHelperDeterministic(TargetHelperType& targetHelper, SourceHelperType const& sourceHelperType, std::function<typename TargetHelperType::StateSet(typename SourceHelperType::StateSet const&)> const& stateSetTransformer) { |
|||
// Relevancy of initial states. |
|||
if (sourceHelperType.hasRelevantStates()) { |
|||
targetHelper.setRelevantStates(stateSetTransformer(sourceHelperType.getRelevantStates())); |
|||
} |
|||
// Value threshold to which the result will be compared |
|||
if (sourceHelperType.isValueThresholdSet()) { |
|||
targetHelper.setValueThreshold(sourceHelperType.getValueThresholdComparisonType(), storm::utility::convertNumber<typename TargetHelperType::ValueType>(sourceHelperType.getValueThresholdValue())); |
|||
} |
|||
} |
|||
} |
|||
} |
|||
} |