
Towards using the infinite horizon helpers also for deterministic models.

tempestpy_adaptions
TimQu 4 years ago
parent commit 485d75f466
  1. 282 src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
  2. 75 src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h

282 src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp

@@ -1,7 +1,14 @@
#include "SparseNondeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/storage/StronglyConnectedComponentDecomposition.h"
#include "storm/solver/MinMaxLinearEquationSolver.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
@@ -20,38 +27,38 @@ namespace storm {
namespace modelchecker {
namespace helper {
template <typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _mecDecomposition(nullptr), _markovianStates(nullptr), _exitRates(nullptr) {
template <typename ValueType, bool Nondeterministic>
SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr), _markovianStates(nullptr), _exitRates(nullptr) {
// Intentionally left empty.
}
template <typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _mecDecomposition(nullptr), _markovianStates(&markovianStates), _exitRates(&exitRates) {
template <typename ValueType, bool Nondeterministic>
SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr), _markovianStates(&markovianStates), _exitRates(&exitRates) {
// Intentionally left empty.
}
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or set before.");
template <typename ValueType, bool Nondeterministic>
void SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or provided before.");
_backwardTransitions = &backwardTransitions;
}
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::provideMaximalEndComponentDecomposition(storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition) {
STORM_LOG_WARN_COND(_mecDecomposition == nullptr, "Backwards transitions were provided but they were already computed or set before.");
_mecDecomposition = &mecDecomposition;
template <typename ValueType, bool Nondeterministic>
void SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition) {
STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr, "Long Run Component Decomposition was provided but it was already computed or provided before.");
_longRunComponentDecomposition = &decomposition;
}
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
return computeLongRunAverageValues(env,
[&psiStates] (uint64_t stateIndex) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); },
[] (uint64_t) { return storm::utility::zero<ValueType>(); }
);
}
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
std::function<ValueType(uint64_t stateIndex)> stateRewardsGetter;
if (rewardModel.hasStateRewards()) {
stateRewardsGetter = [&rewardModel] (uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
@@ -72,8 +79,8 @@ namespace storm {
return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
}
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
std::function<ValueType(uint64_t stateIndex)> stateValuesGetter;
if (stateValues) {
stateValuesGetter = [&stateValues] (uint64_t stateIndex) { return (*stateValues)[stateIndex]; };
@@ -91,8 +98,8 @@ namespace storm {
}
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter) {
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter) {
// We will compute the long run average value for each MEC individually and then set up a MinMax equation system to compute the value also at non-MEC states.
// For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
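For reference (not part of the diff), the quantity computed here is the long-run average reward. In standard notation, for the nondeterministic case,

\mathrm{LRA}^{\mathrm{opt}}(s) \;=\; \operatorname{opt}_{\sigma}\, \limsup_{T \to \infty} \tfrac{1}{T}\, \mathbb{E}^{\sigma}_{s}\Big[\textstyle\sum_{t=0}^{T-1} r(X_t, A_t)\Big], \qquad \operatorname{opt} \in \{\max, \min\},

where r combines the state- and action-based values supplied by the two getters. Within a MEC the optimal value is the same for every state, which is what justifies collapsing each MEC into a single auxiliary SSP state below.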
@@ -108,51 +115,58 @@ namespace storm {
}
// If requested, allocate memory for the choices made
if (this->isProduceSchedulerSet()) {
if (Nondeterministic && this->isProduceSchedulerSet()) {
if (!_producedOptimalChoices.is_initialized()) {
_producedOptimalChoices.emplace();
}
_producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
}
STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
// Start by decomposing the Model into its MECs.
if (_mecDecomposition == nullptr) {
if (_longRunComponentDecomposition == nullptr) {
// The decomposition has not been provided or computed, yet.
if (_backwardTransitions == nullptr) {
_computedBackwardTransitions = _transitionMatrix.transpose(true);
_backwardTransitions = &_computedBackwardTransitions;
if (Nondeterministic) {
if (_backwardTransitions == nullptr) {
_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(_transitionMatrix.transpose(true));
_backwardTransitions = _computedBackwardTransitions.get();
}
_computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(_transitionMatrix, *_backwardTransitions);
} else {
_computedLongRunComponentDecomposition = std::make_unique<storm::storage::StronglyConnectedComponentDecomposition<ValueType>>(_transitionMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
}
_computedMecDecomposition = storm::storage::MaximalEndComponentDecomposition<ValueType>(_transitionMatrix, *_backwardTransitions);
_mecDecomposition = &_computedMecDecomposition;
_longRunComponentDecomposition = _computedLongRunComponentDecomposition.get();
}
// Compute the long-run average for all end components in isolation.
std::vector<ValueType> mecLraValues;
mecLraValues.reserve(_mecDecomposition->size());
for (auto const& mec : *_mecDecomposition) {
mecLraValues.push_back(computeLraForMec(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, mec));
// Compute the long-run average for all components in isolation.
std::vector<ValueType> componentLraValues;
componentLraValues.reserve(_longRunComponentDecomposition->size());
for (auto const& c : *_longRunComponentDecomposition) {
componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c));
}
// Solve the resulting SSP where end components are collapsed into single auxiliary states
return buildAndSolveSsp(underlyingSolverEnvironment, mecLraValues);
return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues);
}
template <typename ValueType>
std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
template <typename ValueType, bool Nondeterministic>
std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::getProducedOptimalChoices() const {
STORM_LOG_WARN_COND(Nondeterministic, "Getting optimal choices for a deterministic model.");
STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
return _producedOptimalChoices.get();
}
template <typename ValueType>
std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
template <typename ValueType, bool Nondeterministic>
std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::getProducedOptimalChoices() {
STORM_LOG_WARN_COND(Nondeterministic, "Getting optimal choices for a deterministic model.");
STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
return _producedOptimalChoices.get();
}
template <typename ValueType>
storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::extractScheduler() const {
template <typename ValueType, bool Nondeterministic>
storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::extractScheduler() const {
auto const& optimalChoices = getProducedOptimalChoices();
storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size());
for (uint64_t state = 0; state < optimalChoices.size(); ++state) {
@@ -161,43 +175,46 @@ namespace storm {
return scheduler;
}
template <typename ValueType>
bool SparseNondeterministicInfiniteHorizonHelper<ValueType>::isContinuousTime() const {
STORM_LOG_ASSERT((_markovianStates == nullptr) == (_exitRates == nullptr), "Inconsistent information given: Have Markovian states but no exit rates (or vice versa)." );
return _markovianStates != nullptr;
template <typename ValueType, bool Nondeterministic>
bool SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const {
STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates." );
return _exitRates != nullptr;
}
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
template <typename ValueType, bool Nondeterministic>
template <bool N, typename std::enable_if<!N, int>::type>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
// For deterministic models, we compute the LRA for a BSCC
// If the mec only consists of a single state, we compute the LRA value directly
if (mec.size() == 1) {
uint64_t state = mec.begin()->first;
auto choiceIt = mec.begin()->second.begin();
if (isContinuousTime()) {
// Singleton MECs have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
STORM_LOG_THROW(_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has Nondeterministic behavior.");
if (this->isProduceSchedulerSet()) {
_producedOptimalChoices.get()[state] = 0;
}
return stateRewardsGetter(state) + (*_exitRates)[state] * actionRewardsGetter(*choiceIt);
} else {
// Find the choice with the highest/lowest reward
ValueType bestValue = actionRewardsGetter(*choiceIt);
uint64_t bestChoice = *choiceIt;
for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) {
ValueType currentValue = actionRewardsGetter(*choiceIt);
if ((this->minimize() && currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
bestValue = std::move(currentValue);
bestChoice = *choiceIt;
}
}
if (this->isProduceSchedulerSet()) {
_producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
}
return bestValue + stateRewardsGetter(state);
STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
auto trivialResult = computeLraForTrivialComponent(env, stateRewardsGetter, actionRewardsGetter, component);
if (trivialResult.first) {
return trivialResult.second;
}
// Solve nontrivial BSCC with the method specified in the settings
// TODO
}
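One way the TODO above could later be filled in (a self-contained sketch under stated assumptions, not this commit's code): compute the stationary distribution of the BSCC and weight the state-wise values with it. The helper below is hypothetical, works on a dense matrix, and assumes an aperiodic chain (the aperiodicFactor used elsewhere in this file addresses the same issue); for continuous-time models the exit rates would additionally enter the computation, as in the singleton case of computeLraForTrivialComponent.

#include <algorithm>
#include <cstdint>
#include <vector>

// Gain (LRA value) of an aperiodic BSCC: approximate the stationary distribution
// pi (pi * P = pi, sum(pi) = 1) by power iteration, then average the rewards.
double bsccGain(std::vector<std::vector<double>> const& P, std::vector<double> const& reward, uint64_t iterations = 100000) {
    uint64_t n = P.size();
    std::vector<double> pi(n, 1.0 / n), next(n);
    for (uint64_t it = 0; it < iterations; ++it) {
        std::fill(next.begin(), next.end(), 0.0);
        for (uint64_t i = 0; i < n; ++i) {
            for (uint64_t j = 0; j < n; ++j) {
                next[j] += pi[i] * P[i][j]; // distribute mass along row i
            }
        }
        pi.swap(next);
    }
    double gain = 0.0;
    for (uint64_t s = 0; s < n; ++s) {
        gain += pi[s] * reward[s]; // pi-weighted average reward
    }
    return gain;
}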
template <typename ValueType, bool Nondeterministic>
template <bool N, typename std::enable_if<N, int>::type>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
// For models with potential nondeterminism, we compute the LRA for a maximal end component (MEC)
// Allocate memory for the nondeterministic choices.
if (this->isProduceSchedulerSet()) {
if (!_producedOptimalChoices.is_initialized()) {
_producedOptimalChoices.emplace();
}
_producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
}
auto trivialResult = computeLraForTrivialComponent(env, stateRewardsGetter, actionRewardsGetter, component);
if (trivialResult.first) {
return trivialResult.second;
}
// Solve nontrivial MEC with the method specified in the settings
@@ -219,8 +236,64 @@ namespace storm {
}
}
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
template <typename ValueType, bool Nondeterministic>
std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForTrivialComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
// If the component only consists of a single state, we compute the LRA value directly
if (component.size() == 1) {
auto const& element = *component.begin();
uint64_t state = internal::getComponentElementState(element);
auto choiceIt = internal::getComponentChoicesBegin(element);
if (Nondeterministic && !isContinuousTime()) {
// This is an MDP.
// Find the choice with the highest/lowest reward
ValueType bestValue = actionRewardsGetter(*choiceIt);
uint64_t bestChoice = *choiceIt;
for (++choiceIt; choiceIt != internal::getComponentChoicesEnd(element); ++choiceIt) {
ValueType currentValue = actionRewardsGetter(*choiceIt);
if ((this->minimize() && currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
bestValue = std::move(currentValue);
bestChoice = *choiceIt;
}
}
if (this->isProduceSchedulerSet()) {
_producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
}
bestValue += stateRewardsGetter(state);
return {true, bestValue};
} else {
// In a Markov Automaton, singleton components have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
STORM_LOG_THROW(!Nondeterministic || (_markovianStates != nullptr && _markovianStates->get(state)), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
STORM_LOG_ASSERT(internal::getComponentElementChoiceCount(element) == 1, "Markovian state has Nondeterministic behavior.");
if (Nondeterministic && this->isProduceSchedulerSet()) {
_producedOptimalChoices.get()[state] = 0;
}
ValueType result = stateRewardsGetter(state) + (isContinuousTime() ? (*_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt));
return {true, result};
}
} else if (!Nondeterministic) {
// For deterministic models, we can also easily catch the case where all values are the same
bool first = true;
ValueType val = storm::utility::zero<ValueType>();
for (auto const& element : component) {
auto state = internal::getComponentElementState(element);
STORM_LOG_ASSERT(state == *internal::getComponentChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
ValueType curr = stateRewardsGetter(state) + (isContinuousTime() ? (*_exitRates)[state] * actionRewardsGetter(state) : actionRewardsGetter(state));
if (first) {
val = curr;
first = false;
} else if (val != curr) {
return {false, storm::utility::zero<ValueType>()};
}
}
// All values are the same
return {true, val};
} else {
return {false, storm::utility::zero<ValueType>()};
}
}
template <typename ValueType, bool Nondeterministic>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
// Collect some parameters of the computation
ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
@@ -241,8 +314,8 @@ namespace storm {
}
}
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
template <typename ValueType, bool Nondeterministic>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
// Create an LP solver
auto solver = storm::utility::solver::LpSolverFactory<ValueType>().create("LRA for MEC");
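For reference, the LP built here is the classic gain LP for a MEC (cf. the Guck et al. citation above; shown in the maximizing, discrete-time variant, not part of the diff):

\begin{aligned}
\text{minimize}\quad & g \\
\text{subject to}\quad & g + h_s \;\ge\; r(s,a) + \textstyle\sum_{s'} P(s' \mid s,a)\, h_{s'} \qquad \text{for all states } s \text{ of the MEC and all } a \in \mathit{Act}(s).
\end{aligned}

The optimal g is the LRA value of the MEC; for minimization the objective and inequalities flip, and for Markov automata the constraints are additionally scaled with the exit rates.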
@@ -311,11 +384,11 @@ namespace storm {
/*!
* Auxiliary function that adds the entries of the Ssp Matrix for a single choice (i.e., row)
* Transitions that lead to a MEC state will be redirected to a new auxiliary state (there is one aux. state for each MEC).
* Transitions that don't lead to a MEC state are copied (taking a state index mapping into account).
* Transitions that lead to a component state will be redirected to a new auxiliary state (there is one aux. state for each component).
* Transitions that don't lead to a component state are copied (taking a state index mapping into account).
*/
template <typename ValueType>
void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfStatesNotInMecs, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfStatesNotInComponents, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
// As there could be multiple transitions to the same MEC, we accumulate them in this map before adding them to the matrix builder.
std::map<uint64_t, ValueType> auxiliaryStateToProbabilityMap;
@@ -323,17 +396,17 @@ namespace storm {
for (auto const& transition : inputTransitionMatrix.getRow(inputMatrixChoice)) {
if (!storm::utility::isZero(transition.getValue())) {
auto const& sspTransitionTarget = inputToSspStateMap[transition.getColumn()];
// Since the auxiliary MEC states are appended at the end of the matrix, we can use this check to
// decide whether the transition leads to a MEC state or not
// Since the auxiliary component states are appended at the end of the matrix, we can use this check to
// decide whether the transition leads to a component state or not
if (sspTransitionTarget < numberOfStatesNotInComponents) {
// If the target state is not contained in a MEC, we can copy over the entry.
// If the target state is not contained in a component, we can copy over the entry.
sspMatrixBuilder.addNextValue(currentSspChoice, sspTransitionTarget, transition.getValue());
} else {
// If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
// so that we are able to write the cumulative probability to the MEC into the matrix.
// If the target state is contained in component i, we need to add the probability to the corresponding field in the vector
// so that we are able to write the cumulative probability to the component into the matrix later.
auto insertionRes = auxiliaryStateToProbabilityMap.emplace(sspTransitionTarget, transition.getValue());
if (!insertionRes.second) {
// sspTransitionTarget already existed in the map, i.e., there already was a transition to that MEC.
// sspTransitionTarget already existed in the map, i.e., there already was a transition to that component.
// Hence, we add up the probabilities.
insertionRes.first->second += transition.getValue();
}
@@ -341,30 +414,28 @@ namespace storm {
}
}
// Now insert all (cumulative) probability values that target a MEC.
for (auto const& mecToProbEntry : auxiliaryStateToProbabilityMap) {
sspMatrixBuilder.addNextValue(currentSspChoice, mecToProbEntry.first, mecToProbEntry.second);
// Now insert all (cumulative) probability values that target a component.
for (auto const& componentToProbEntry : auxiliaryStateToProbabilityMap) {
sspMatrixBuilder.addNextValue(currentSspChoice, componentToProbEntry.first, componentToProbEntry.second);
}
}
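To illustrate the index convention that addSspMatrixChoice relies on, a toy instance (hypothetical numbers):

// 5 input states; states {2,3} form component 0 and state {4} forms component 1,
// so numberOfStatesNotInComponents == 2 and the SSP has 2 + 2 = 4 states:
//
//   input state:        0  1  2  3  4
//   inputToSspStateMap: 0  1  2  2  3   // component states map behind the non-component states
//
// A choice with transitions 0 -> 2 (prob. 0.3) and 0 -> 3 (prob. 0.2) then yields a
// single accumulated SSP entry in column 2 with value 0.5.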
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) {
STORM_LOG_ASSERT(_mecDecomposition != nullptr, "Decomposition not computed, yet.");
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) {
STORM_LOG_ASSERT(_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");
// Let's improve readability a bit
uint64_t numberOfStates = _transitionMatrix.getRowGroupCount();
auto const& nondeterministicChoiceIndices = _transitionMatrix.getRowGroupIndices();
// For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
// which redirects all transitions leading to a former MEC state to a new auxiliary state.
// There will be one auxiliary state for each MEC. These states will be appended to the end of the matrix.
// which redirects all transitions leading to a former component state to a new auxiliary state.
// There will be one auxiliary state for each component. These states will be appended to the end of the matrix.
// First gather the states that are part of a MEC
// and create a mapping from states that lie in a MEC to the corresponding MEC index.
// First gather the states that are part of a component
// and create a mapping from states that lie in a component to the corresponding component index.
storm::storage::BitVector statesInMecs(numberOfStates);
std::vector<uint64_t> inputToSspStateMap(numberOfStates, std::numeric_limits<uint64_t>::max());
for (uint64_t currentMecIndex = 0; currentMecIndex < _mecDecomposition->size(); ++currentMecIndex) {
for (auto const& stateChoicesPair : (*_mecDecomposition)[currentMecIndex]) {
for (uint64_t currentMecIndex = 0; currentMecIndex < _longRunComponentDecomposition->size(); ++currentMecIndex) {
for (auto const& stateChoicesPair : (*_longRunComponentDecomposition)[currentMecIndex]) {
statesInMecs.set(stateChoicesPair.first);
inputToSspStateMap[stateChoicesPair.first] = currentMecIndex;
}
@@ -389,7 +460,7 @@ namespace storm {
// The next step is to create the SSP matrix and the right-hand side of the SSP.
std::vector<ValueType> rhs;
uint64_t numberOfSspStates = numberOfStatesNotInMecs + _mecDecomposition->size();
uint64_t numberOfSspStates = numberOfStatesNotInMecs + _longRunComponentDecomposition->size();
typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates , 0, false, true, numberOfSspStates);
// If the source state of a transition is not contained in any MEC, we copy its choices (and perform the necessary modifications).
uint64_t currentSspChoice = 0;
@@ -402,8 +473,8 @@ namespace storm {
}
}
// Now we construct the choices for the auxiliary states which reflect former MEC states.
for (uint64_t mecIndex = 0; mecIndex < _mecDecomposition->size(); ++mecIndex) {
storm::storage::MaximalEndComponent const& mec = (*_mecDecomposition)[mecIndex];
for (uint64_t mecIndex = 0; mecIndex < _longRunComponentDecomposition->size(); ++mecIndex) {
storm::storage::MaximalEndComponent const& mec = (*_longRunComponentDecomposition)[mecIndex];
sspMatrixBuilder.newRowGroup(currentSspChoice);
for (auto const& stateChoicesPair : mec) {
uint64_t const& mecState = stateChoicesPair.first;
@@ -461,7 +532,7 @@ namespace storm {
// a) we take an exit (non-MEC) choice at the given state
// b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfStatesNotInMecs];
for (auto const& mec : *_mecDecomposition) {
for (auto const& mec : *_longRunComponentDecomposition) {
// Get the sspState of this MEC (using one representative mec state)
auto const& sspState = inputToSspStateMap[mec.begin()->first];
uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
@@ -533,8 +604,11 @@ namespace storm {
return result;
}
template class SparseNondeterministicInfiniteHorizonHelper<double>;
template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber>;
template class SparseNondeterministicInfiniteHorizonHelper<double, false>;
template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber, false>;
//template class SparseNondeterministicInfiniteHorizonHelper<double, true>;
//template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber, true>;
}
}
}

75 src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h

@@ -1,23 +1,45 @@
#pragma once
#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/models/sparse/StandardRewardModel.h"
namespace storm {
class Environment;
namespace models {
namespace sparse {
template <typename VT> class StandardRewardModel;
}
}
namespace storage {
template <typename C> class Decomposition;
class MaximalEndComponent;
template <typename VT> class SparseMatrix;
class StronglyConnectedComponent;
}
namespace modelchecker {
namespace helper {
/*!
* Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
* @tparam ValueType the type a value can have
* @tparam Nondeterministic true if there is nondeterminism in the model (MDP or MA)
*/
template <typename ValueType>
template <typename ValueType, bool Nondeterministic>
class SparseNondeterministicInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType> {
public:
/*!
* The type of a component in which the system resides in the long run (BSCC for deterministic models, MEC for nondeterministic models)
*/
using LongRunComponentType = typename std::conditional<Nondeterministic, storm::storage::MaximalEndComponent, storm::storage::StronglyConnectedComponent>::type;
/*!
* Function mapping from indices to values
*/
typedef std::function<ValueType(uint64_t)> ValueGetter;
/*!
* Initializes the helper for a discrete time (i.e. MDP)
*/
@@ -36,11 +58,11 @@ namespace storm {
void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardsTransitions);
/*!
* Provides the maximal end component decomposition that can be used during the computation.
* Providing the decomposition is optional. If they are not provided, they will be computed internally
* Provides the decomposition into long run components (BSCCs/MECs) that can be used during the computation.
* Providing the decomposition is optional. If it is not provided, it will be computed internally.
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid.
*/
void provideMaximalEndComponentDecomposition(storm::storage::MaximalEndComponentDecomposition<ValueType> const& decomposition);
void provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition);
/*!
* Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
@@ -55,16 +77,20 @@ namespace storm {
std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel);
/*!
* Computes the long run average value given the provided action-based rewards
* Computes the long run average value given the provided state and action-based rewards.
* @param stateValues a vector containing a value for every state
* @param actionValues a vector containing a value for every choice
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues = nullptr, std::vector<ValueType> const* actionValues = nullptr);
/*!
* Computes the long run average value given the provided state-action-based rewards
* Computes the long run average value given the provided state- and action-based rewards.
* @param stateValuesGetter a function returning a value for a given state index
* @param actionValuesGetter a function returning a value for a given (global) choice index
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter);
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter);
/*!
* @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
@@ -84,6 +110,17 @@ namespace storm {
*/
storm::storage::Scheduler<ValueType> extractScheduler() const;
/*!
* @param stateValuesGetter a function returning a value for a given state index
* @param actionValuesGetter a function returning a value for a given (global) choice index
* @return the (unique) optimal LRA value for the given component.
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
*/
template <bool N = Nondeterministic, typename std::enable_if<N, int>::type = 0>
ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
template <bool N = Nondeterministic, typename std::enable_if<!N, int>::type = 0>
ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
protected:
/*!
@@ -92,21 +129,21 @@ namespace storm {
bool isContinuousTime() const;
/*!
* @pre if scheduler production is enabled, the _producedOptimalChoices vector should be initialized and sufficiently large
* @return the (unique) optimal LRA value for the given mec.
* @post _producedOptimalChoices contains choices for the states of the given MEC which yield the returned LRA value.
* Checks if the component can trivially be solved without much overhead.
* @return either true and the (unique) optimal LRA value for the given component or false and an arbitrary value
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
*/
ValueType computeLraForMec(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
std::pair<bool, ValueType> computeLraForTrivialComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& component);
/*!
* As computeLraForMec but uses value iteration as a solution method (independent of what is set in env)
*/
ValueType computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
ValueType computeLraForMecVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& mec);
/*!
* As computeLraForMec but uses linear programming as a solution method (independent of what is set in env)
* @see Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
*/
ValueType computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
ValueType computeLraForMecLp(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& mec);
/*!
* @return Lra values for each state
@@ -116,9 +153,9 @@ namespace storm {
private:
storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
storm::storage::SparseMatrix<ValueType> const* _backwardTransitions;
storm::storage::SparseMatrix<ValueType> _computedBackwardTransitions;
storm::storage::MaximalEndComponentDecomposition<ValueType> const* _mecDecomposition;
storm::storage::MaximalEndComponentDecomposition<ValueType> _computedMecDecomposition;
std::unique_ptr<storm::storage::SparseMatrix<ValueType>> _computedBackwardTransitions;
storm::storage::Decomposition<LongRunComponentType> const* _longRunComponentDecomposition;
std::unique_ptr<storm::storage::Decomposition<LongRunComponentType>> _computedLongRunComponentDecomposition;
storm::storage::BitVector const* _markovianStates;
std::vector<ValueType> const* _exitRates;
boost::optional<std::vector<uint64_t>> _producedOptimalChoices;
