
First version of sparse infinite horizon helpers for deterministic and nondeterministic models.

tempestpy_adaptions
Tim Quatmann, 5 years ago
commit 0cc2b1c749
  1. 209  src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
  2. 65   src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
  3. 161  src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
  4. 140  src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h
  5. 482  src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
  6. 118  src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h

209  src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp

@@ -0,0 +1,209 @@
#include "SparseDeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/storage/StronglyConnectedComponentDecomposition.h"
#include "storm/storage/Scheduler.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
#include "storm/solver/LpSolver.h"
#include "storm/utility/SignalHandler.h"
#include "storm/utility/solver.h"
#include "storm/utility/vector.h"
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
#include "storm/exceptions/UnmetRequirementException.h"
namespace storm {
namespace modelchecker {
namespace helper {
template <typename ValueType>
SparseDeterministicInfiniteHorizonHelper<ValueType>::SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : SparseInfiniteHorizonHelper<ValueType, false>(transitionMatrix) {
// Intentionally left empty.
}
template <typename ValueType>
SparseDeterministicInfiniteHorizonHelper<ValueType>::SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates) : SparseInfiniteHorizonHelper<ValueType, false>(transitionMatrix, exitRates) {
// For the CTMC case we assert that the caller actually provided the probabilistic transitions
STORM_LOG_ASSERT(this->_transitionMatrix.isProbabilistic(), "Non-probabilistic transitions");
}
template <typename ValueType>
void SparseDeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() {
if (this->_longRunComponentDecomposition == nullptr) {
// The decomposition has not been provided or computed, yet.
this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::StronglyConnectedComponentDecomposition<ValueType>>(this->_transitionMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
}
}
template <typename ValueType>
ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& component) {
// For deterministic models, we compute the LRA for a BSCC
STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
auto trivialResult = computeLraForTrivialBscc(env, stateRewardsGetter, actionRewardsGetter, component);
if (trivialResult.first) {
return trivialResult.second;
}
// Solve nontrivial BSCC with the method specified in the settings
storm::solver::LraMethod method = env.solver().lra().getDetLraMethod();
if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isDetLraMethodSetFromDefault() && method == storm::solver::LraMethod::ValueIteration) {
method = storm::solver::LraMethod::GainBiasEquations;
STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
} else if (env.solver().isForceSoundness() && env.solver().lra().isDetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
method = storm::solver::LraMethod::ValueIteration;
STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
}
STORM_LOG_TRACE("Computing LRA for BSCC of size " << component.size() << " using '" << storm::solver::toString(method) << "'.");
if (method == storm::solver::LraMethod::ValueIteration) {
return computeLraForBsccVi(env, stateRewardsGetter, actionRewardsGetter, component);
}/* else if (method == storm::solver::LraMethod::LraDistributionEquations) {
// We only need the first element of the pair as the lra distribution is not relevant at this point.
return computeLongRunAveragesForBsccLraDistr<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
}
STORM_LOG_WARN_COND(method == storm::solver::LraMethod::GainBiasEquations, "Unsupported lra method selected. Defaulting to " << storm::solver::toString(storm::solver::LraMethod::GainBiasEquations) << ".");
// We don't need the bias values
return computeLongRunAveragesForBsccGainBias<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;*/
}
template <typename ValueType>
std::pair<bool, ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& component) {
// For deterministic models, we can catch the case where all values are the same. This includes the special case where the BSCC consists of just one state.
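// For intuition: if every state of the BSCC yields the same value, say 2, then the long-run average is 2 regardless of the stationary distribution, so no equation system has to be solved.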
bool first = true;
ValueType val = storm::utility::zero<ValueType>();
for (auto const& element : component) {
auto state = internal::getComponentElementState(element);
STORM_LOG_ASSERT(state == *internal::getComponentElementChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
ValueType curr = stateRewardsGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionRewardsGetter(state) : actionRewardsGetter(state));
if (first) {
first = false;
} else if (val != curr) {
return {false, storm::utility::zero<ValueType>()};
}
}
// All values are the same
return {true, val};
}
template <typename ValueType>
ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::StronglyConnectedComponent const& bscc) {
// Collect parameters of the computation
ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
// Now create a helper and perform the algorithm
if (this->isContinuousTime()) {
// We assume a CTMC (with deterministic timed states and no instant states)
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs> viHelper(bscc, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, this->_exitRates);
} else {
// We assume a DTMC (with deterministic timed states and no instant states)
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs> viHelper(bscc, this->_transitionMatrix, aperiodicFactor);
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter);
}
}
template <typename ValueType>
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem) {
// Create SSP Matrix.
// In contrast to the version for nondeterministic models, we eliminate the auxiliary states representing each BSCC on the fly
// Probability mass that would lead into a BSCC is accounted for in the right-hand side of the equation system
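// For intuition: if a non-component state s moves into some BSCC with LRA value v with probability 0.3 and stays among the non-component states with probability 0.7, only the 0.7 entry is kept in the matrix while 0.3 * v is added to the right-hand side below.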
auto sspMatrix = this->_transitionMatrix.getSubmatrix(false, statesNotInComponent, statesNotInComponent, asEquationSystem);
if (asEquationSystem) {
sspMatrix.convertToEquationSystem();
}
// Create the SSP right-hand-side
std::vector<ValueType> rhs;
rhs.reserve(sspMatrix.getRowCount());
for (auto const& state : statesNotInComponent) {
ValueType stateValue = storm::utility::zero<ValueType>();
for (auto const& transition : this->_transitionMatrix.getRow(state)) {
if (!statesNotInComponent.get(transition.getColumn())) {
// This transition leads to a BSCC!
stateValue += transition.getValue() * bsccLraValues[inputStateToBsccIndexMap[transition.getColumn()]];
}
}
rhs.push_back(std::move(stateValue));
}
return std::make_pair(std::move(sspMatrix), std::move(rhs));
}
template <typename ValueType>
std::vector<ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");
// For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
// which redirects all transitions leading to a former BSCC state to a new (imaginary) auxiliary state.
// Each auxiliary state gets assigned the value of that BSCC and we compute expected rewards (aka stochastic shortest path, SSP) on that new system.
// For efficiency reasons, we actually build the system where the auxiliary states are already eliminated.
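// For instance, with six states where {4, 5} form the only BSCC, the SSP system is built over states 0..3 only; the value of the BSCC enters exclusively through the right-hand side assembled in buildSspMatrixVector.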
// First gather the states that are part of a component
// and create a mapping from states that lie in a component to the corresponding component index.
storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount());
std::vector<uint64_t> stateIndexMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max());
for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) {
for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) {
uint64_t state = internal::getComponentElementState(element);
statesInComponents.set(state);
stateIndexMap[state] = currentComponentIndex;
}
}
// Map the non-component states to their index in the SSP. Note that the order of these states will be preserved.
uint64_t numberOfNonComponentStates = 0;
storm::storage::BitVector statesNotInComponent = ~statesInComponents;
for (auto const& nonComponentState : statesNotInComponent) {
stateIndexMap[nonComponentState] = numberOfNonComponentStates;
++numberOfNonComponentStates;
}
// The next step is to create the equation system solving the SSP (unless the whole system consists of BSCCs)
std::vector<ValueType> sspValues;
if (numberOfNonComponentStates > 0) {
storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
bool isEqSysFormat = linearEquationSolverFactory.getEquationProblemFormat(env) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
auto sspMatrixVector = buildSspMatrixVector(componentLraValues, stateIndexMap, statesNotInComponent, isEqSysFormat);
std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> solver = linearEquationSolverFactory.create(env, sspMatrixVector.first);
auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end());
solver->setBounds(*lowerUpperBounds.first, *lowerUpperBounds.second);
// Check solver requirements
auto requirements = solver->getRequirements(env);
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
sspValues.assign(sspMatrixVector.first.getRowCount(), (*lowerUpperBounds.first + *lowerUpperBounds.second) / storm::utility::convertNumber<ValueType,uint64_t>(2));
solver->solveEquations(env, sspValues, sspMatrixVector.second);
}
// Prepare result vector.
std::vector<ValueType> result(this->_transitionMatrix.getRowGroupCount());
for (uint64_t state = 0; state < stateIndexMap.size(); ++state) {
if (statesNotInComponent.get(state)) {
result[state] = sspValues[stateIndexMap[state]];
} else {
result[state] = componentLraValues[stateIndexMap[state]];
}
}
return result;
}
template class SparseDeterministicInfiniteHorizonHelper<double>;
template class SparseDeterministicInfiniteHorizonHelper<storm::RationalNumber>;
}
}
}

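A minimal usage sketch for the deterministic helper above (not part of the commit; the wrapper name computeLraProbabilities and the inputs matrix and psiStates are illustrative assumptions):

#include <vector>
#include "storm/environment/Environment.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
#include "storm/storage/BitVector.h"
#include "storm/storage/SparseMatrix.h"

// Computes, for every state of a DTMC, the long-run fraction of time spent in psiStates.
std::vector<double> computeLraProbabilities(storm::storage::SparseMatrix<double> const& matrix,
                                            storm::storage::BitVector const& psiStates) {
    storm::Environment env;  // default solver settings
    storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<double> helper(matrix);
    return helper.computeLongRunAverageProbabilities(env, psiStates);
}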
65   src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h

@@ -0,0 +1,65 @@
#pragma once
#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
namespace storm {
namespace modelchecker {
namespace helper {
/*!
* Helper class for model checking queries that depend on the long run behavior of the (deterministic) system.
* @tparam ValueType the type a value can have
*/
template <typename ValueType>
class SparseDeterministicInfiniteHorizonHelper : public SparseInfiniteHorizonHelper<ValueType, false> {
public:
/*!
* Function mapping from indices to values
*/
typedef typename SparseInfiniteHorizonHelper<ValueType, false>::ValueGetter ValueGetter;
/*!
* Initializes the helper for a discrete time model (i.e. DTMC)
*/
SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
/*!
* Initializes the helper for a continuous time model (i.e. CTMC)
* @note The transition matrix shall be probabilistic (i.e. the rows sum up to one)
*/
SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates);
/*!
* @param stateValuesGetter a function returning a value for a given state index
* @param actionValuesGetter a function returning a value for a given (global) choice index
* @return the (unique) LRA value for the given component (a BSCC).
* @note Scheduler production is not supported for deterministic models; the implementation asserts that it is disabled.
*/
virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& component) override;
protected:
virtual void createDecomposition() override;
std::pair<bool, ValueType> computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
/*!
* As computeLraForComponent, but always uses value iteration as the solution method (independent of what is set in env)
*/
ValueType computeLraForBsccVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem);
/*!
* @return Lra values for each state
*/
virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) override;
};
}
}
}

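A sketch of the ValueGetter interface declared above (illustrative only; helper and env as in the previous example): state and action values are supplied as plain index-to-value functions, so reward structures never have to be materialized as vectors.

// Assign value 1 to state 0 and value 0 to everything else; actions contribute nothing.
auto stateValues  = [](uint64_t state) { return state == 0 ? 1.0 : 0.0; };
auto actionValues = [](uint64_t) { return 0.0; };
std::vector<double> lra = helper.computeLongRunAverageValues(env, stateValues, actionValues);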
161  src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp

@@ -0,0 +1,161 @@
#include "SparseInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/storage/StronglyConnectedComponentDecomposition.h"
#include "storm/solver/MinMaxLinearEquationSolver.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
#include "storm/solver/LpSolver.h"
#include "storm/utility/SignalHandler.h"
#include "storm/utility/solver.h"
#include "storm/utility/vector.h"
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
#include "storm/exceptions/UnmetRequirementException.h"
namespace storm {
namespace modelchecker {
namespace helper {
template <typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
// Intentionally left empty.
}
template <typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRates), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
// Intentionally left empty.
}
template <typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(&exitRates), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
// Intentionally left empty.
}
template <typename ValueType, bool Nondeterministic>
void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or provided before.");
_backwardTransitions = &backwardTransitions;
}
template <typename ValueType, bool Nondeterministic>
void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition) {
STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr, "Long Run Component Decomposition was provided but it was already computed or provided before.");
_longRunComponentDecomposition = &decomposition;
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
return computeLongRunAverageValues(env,
[&psiStates] (uint64_t stateIndex) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); },
[] (uint64_t) { return storm::utility::zero<ValueType>(); }
);
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
ValueGetter stateRewardsGetter;
if (rewardModel.hasStateRewards()) {
stateRewardsGetter = [&rewardModel] (uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
} else {
stateRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
}
ValueGetter actionRewardsGetter;
if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
if (rewardModel.hasTransitionRewards()) {
actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, this->_transitionMatrix); };
} else {
actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionReward(globalChoiceIndex); };
}
} else {
actionRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
}
return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
ValueGetter stateValuesGetter;
if (stateValues) {
stateValuesGetter = [&stateValues] (uint64_t stateIndex) { return (*stateValues)[stateIndex]; };
} else {
stateValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
}
ValueGetter actionValuesGetter;
if (actionValues) {
actionValuesGetter = [&actionValues] (uint64_t globalChoiceIndex) { return (*actionValues)[globalChoiceIndex]; };
} else {
actionValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
}
return computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter);
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter) {
// We will compute the long run average value for each component (MEC/BSCC) individually and then set up an equation system to compute the values at the remaining states.
// For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
// Prepare an environment for the underlying solvers
auto underlyingSolverEnvironment = env;
if (env.solver().isForceSoundness()) {
// For sound computations, the error in the components plus the error in the remaining system should not exceed the user-defined precision.
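// E.g., an overall LRA precision of 1e-6 is split into 5e-7 for the component computations and 5e-7 for the remaining equation system.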
storm::RationalNumber newPrecision = env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2);
underlyingSolverEnvironment.solver().minMax().setPrecision(newPrecision);
underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(newPrecision, env.solver().lra().getRelativeTerminationCriterion());
underlyingSolverEnvironment.solver().lra().setPrecision(newPrecision);
}
// If requested, allocate memory for the choices made
if (Nondeterministic && this->isProduceSchedulerSet()) {
if (!_producedOptimalChoices.is_initialized()) {
_producedOptimalChoices.emplace();
}
_producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
}
STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
// Decompose the model into its long-run components (MECs or BSCCs)
createDecomposition();
// Compute the long-run average for all components in isolation.
std::vector<ValueType> componentLraValues;
componentLraValues.reserve(_longRunComponentDecomposition->size());
for (auto const& c : *_longRunComponentDecomposition) {
componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c));
}
// Solve the resulting SSP where end components are collapsed into single auxiliary states
return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues);
}
template <typename ValueType, bool Nondeterministic>
bool SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const {
STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates." );
return _exitRates != nullptr;
}
template class SparseInfiniteHorizonHelper<double, true>;
template class SparseInfiniteHorizonHelper<storm::RationalNumber, true>;
template class SparseInfiniteHorizonHelper<storm::RationalFunction, true>;
template class SparseInfiniteHorizonHelper<double, false>;
template class SparseInfiniteHorizonHelper<storm::RationalNumber, false>;
}
}
}

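A hedged sketch of the optional provide* hooks implemented above (not from the commit; reusePrecomputedData and its parameters are illustrative): precomputed backward transitions and a MEC decomposition can be handed in and are then not recomputed, but the caller must keep them alive.

#include "storm/environment/Environment.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"

std::vector<double> reusePrecomputedData(storm::Environment const& env,
                                         storm::storage::SparseMatrix<double> const& matrix,
                                         storm::storage::BitVector const& psiStates) {
    auto backwards = matrix.transpose(true);
    storm::storage::MaximalEndComponentDecomposition<double> mecs(matrix, backwards);
    storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<double> helper(matrix);
    helper.provideBackwardTransitions(backwards);       // the helper only keeps a reference
    helper.provideLongRunComponentDecomposition(mecs);  // likewise: no ownership is taken
    return helper.computeLongRunAverageProbabilities(env, psiStates);
}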
140  src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h

@@ -0,0 +1,140 @@
#pragma once
#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
#include "storm/storage/MaximalEndComponent.h"
#include "storm/storage/StronglyConnectedComponent.h"
#include "storm/storage/Decomposition.h"
#include "storm/storage/SparseMatrix.h"
namespace storm {
class Environment;
namespace models {
namespace sparse {
template <typename VT> class StandardRewardModel;
}
}
namespace modelchecker {
namespace helper {
/*!
* Helper class for model checking queries that depend on the long run behavior of the (possibly nondeterministic) system.
* @tparam ValueType the type a value can have
* @tparam Nondeterministic true if there is nondeterminism in the model (MDP or MA)
*/
template <typename ValueType, bool Nondeterministic>
class SparseInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType> {
public:
/*!
* The type of a component in which the system resides in the long run (BSCC for deterministic models, MEC for nondeterministic models)
*/
using LongRunComponentType = typename std::conditional<Nondeterministic, storm::storage::MaximalEndComponent, storm::storage::StronglyConnectedComponent>::type;
/*!
* Function mapping from indices to values
*/
typedef std::function<ValueType(uint64_t)> ValueGetter;
/*!
* Initializes the helper for a discrete time model (i.e. DTMC or MDP)
*/
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
/*!
* Initializes the helper for a continuous time model (i.e. MA)
*/
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
/*!
* Initializes the helper for a continuous time model (i.e. CTMC)
*/
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates);
/*!
* Provides backward transitions that can be used during the computation.
* Providing them is optional. If they are not provided, they will be computed internally.
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the backward transitions remains valid.
*/
void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardsTransitions);
/*!
* Provides the decomposition into long run components (BSCCs/MECs) that can be used during the computation.
* Providing the decomposition is optional. If it is not provided, it will be computed internally.
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid.
*/
void provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition);
/*!
* Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates);
/*!
* Computes the long run average rewards, i.e., the average reward collected per time unit
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel);
/*!
* Computes the long run average value given the provided state and action-based rewards.
* @param stateValues a vector containing a value for every state
* @param actionValues a vector containing a value for every choice
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues = nullptr, std::vector<ValueType> const* actionValues = nullptr);
/*!
* Computes the long run average value given the provided state and action based rewards
* @param stateValuesGetter a function returning a value for a given state index
* @param actionValuesGetter a function returning a value for a given (global) choice index
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter);
/*!
* @param stateValuesGetter a function returning a value for a given state index
* @param actionValuesGetter a function returning a value for a given (global) choice index
* @return the (unique) optimal LRA value for the given component.
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
*/
virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& component) = 0;
protected:
/*!
* @return true iff this is a computation on a continuous time model (i.e. CTMC, MA)
*/
bool isContinuousTime() const;
/*!
* @post _longRunComponentDecomposition points to a decomposition of the long run components (MECs, BSCCs)
*/
virtual void createDecomposition() = 0;
/*!
* @pre if scheduler production is enabled and Nondeterministic is true, a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
* @return Lra values for each state
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
*/
virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) = 0;
storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
storm::storage::BitVector const* _markovianStates;
std::vector<ValueType> const* _exitRates;
storm::storage::SparseMatrix<ValueType> const* _backwardTransitions;
storm::storage::Decomposition<LongRunComponentType> const* _longRunComponentDecomposition;
std::unique_ptr<storm::storage::SparseMatrix<ValueType>> _computedBackwardTransitions;
std::unique_ptr<storm::storage::Decomposition<LongRunComponentType>> _computedLongRunComponentDecomposition;
boost::optional<std::vector<uint64_t>> _producedOptimalChoices;
};
}
}
}

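To illustrate the extension points of the base class above, a minimal hypothetical subclass (names and bodies are placeholders, not part of the commit) overrides the three virtual hooks; the real implementations are the deterministic and nondeterministic helpers of this commit:

#include "storm/environment/Environment.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
#include "storm/utility/constants.h"

template<typename ValueType>
class MySparseLraHelper : public storm::modelchecker::helper::SparseInfiniteHorizonHelper<ValueType, false> {
    using Base = storm::modelchecker::helper::SparseInfiniteHorizonHelper<ValueType, false>;
public:
    using Base::Base;  // inherit the constructors
    virtual ValueType computeLraForComponent(storm::Environment const& env,
            typename Base::ValueGetter const& stateValuesGetter,
            typename Base::ValueGetter const& actionValuesGetter,
            storm::storage::StronglyConnectedComponent const& component) override {
        return storm::utility::zero<ValueType>();  // placeholder: compute the LRA value of this BSCC
    }
protected:
    virtual void createDecomposition() override {
        // placeholder: compute the BSCCs and set this->_longRunComponentDecomposition
    }
    virtual std::vector<ValueType> buildAndSolveSsp(storm::Environment const& env,
            std::vector<ValueType> const& componentLraValues) override {
        return componentLraValues;  // placeholder: lift the component values to all model states
    }
};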
482  src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp

@@ -3,18 +3,14 @@
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/storage/StronglyConnectedComponentDecomposition.h"
#include "storm/storage/Scheduler.h"
#include "storm/solver/MinMaxLinearEquationSolver.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
#include "storm/solver/LpSolver.h"
#include "storm/utility/SignalHandler.h"
#include "storm/utility/solver.h"
#include "storm/utility/vector.h"
@@ -27,146 +23,32 @@ namespace storm {
namespace modelchecker {
namespace helper {
template <typename ValueType, bool Nondeterministic>
SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr), _markovianStates(nullptr), _exitRates(nullptr) {
template <typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix) {
// Intentionally left empty.
}
template <typename ValueType, bool Nondeterministic>
SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr), _markovianStates(&markovianStates), _exitRates(&exitRates) {
template <typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix, markovianStates, exitRates) {
// Intentionally left empty.
}
template <typename ValueType, bool Nondeterministic>
void SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or provided before.");
_backwardTransitions = &backwardTransitions;
}
template <typename ValueType, bool Nondeterministic>
void SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponent> const& decomposition) {
STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr, "Long Run Component Decomposition was provided but it was already computed or provided before.");
_longRunComponentDecomposition = &decomposition;
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
return computeLongRunAverageValues(env,
[&psiStates] (uint64_t stateIndex) { return psiStates.get(stateIndex) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>(); },
[] (uint64_t) { return storm::utility::zero<ValueType>(); }
);
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
std::function<ValueType(uint64_t stateIndex)> stateRewardsGetter;
if (rewardModel.hasStateRewards()) {
stateRewardsGetter = [&rewardModel] (uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
} else {
stateRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
}
std::function<ValueType(uint64_t globalChoiceIndex)> actionRewardsGetter;
if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
if (rewardModel.hasTransitionRewards()) {
actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, this->_transitionMatrix); };
} else {
actionRewardsGetter = [&] (uint64_t globalChoiceIndex) { return rewardModel.getStateActionReward(globalChoiceIndex); };
}
} else {
actionRewardsGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
}
return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
std::function<ValueType(uint64_t stateIndex)> stateValuesGetter;
if (stateValues) {
stateValuesGetter = [&stateValues] (uint64_t stateIndex) { return (*stateValues)[stateIndex]; };
} else {
stateValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
}
std::function<ValueType(uint64_t actionIndex)> actionValuesGetter;
if (actionValues) {
actionValuesGetter = [&actionValues] (uint64_t globalChoiceIndex) { return (*actionValues)[globalChoiceIndex]; };
} else {
actionValuesGetter = [] (uint64_t) { return storm::utility::zero<ValueType>(); };
}
return computeLongRunAverageValues(env, stateValuesGetter, actionValuesGetter);
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter) {
// We will compute the long run average value for each MEC individually and then set up a MinMax equation system to compute the value also at non-MEC states.
// For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
// Prepare an environment for the underlying solvers
auto underlyingSolverEnvironment = env;
if (env.solver().isForceSoundness()) {
// For sound computations, the error in the MECs plus the error in the remaining system should not exceed the user-defined precision.
storm::RationalNumber newPrecision = env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2);
underlyingSolverEnvironment.solver().minMax().setPrecision(newPrecision);
underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(newPrecision, env.solver().lra().getRelativeTerminationCriterion());
underlyingSolverEnvironment.solver().lra().setPrecision(newPrecision);
}
// If requested, allocate memory for the choices made
if (Nondeterministic && this->isProduceSchedulerSet()) {
if (!_producedOptimalChoices.is_initialized()) {
_producedOptimalChoices.emplace();
}
_producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
}
STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
// Start by decomposing the Model into its MECs.
if (_longRunComponentDecomposition == nullptr) {
// The decomposition has not been provided or computed, yet.
if (Nondeterministic) {
if (_backwardTransitions == nullptr) {
_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(_transitionMatrix.transpose(true));
_backwardTransitions = _computedBackwardTransitions.get();
}
_computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(_transitionMatrix, *_backwardTransitions);
} else {
_computedLongRunComponentDecomposition = std::make_unique<storm::storage::StronglyConnectedComponentDecomposition<ValueType>>(_transitionMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
}
_longRunComponentDecomposition = _computedLongRunComponentDecomposition.get();
}
// Compute the long-run average for all components in isolation.
std::vector<ValueType> componentLraValues;
componentLraValues.reserve(_longRunComponentDecomposition->size());
for (auto const& c : *_longRunComponentDecomposition) {
componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c));
}
// Solve the resulting SSP where end components are collapsed into single auxiliary states
return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues);
}
template <typename ValueType, bool Nondeterministic>
std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::getProducedOptimalChoices() const {
STORM_LOG_WARN_COND(Nondeterministic, "Getting optimal choices for deterministic model.");
template <typename ValueType>
std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
return _producedOptimalChoices.get();
STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
return this->_producedOptimalChoices.get();
}
template <typename ValueType, bool Nondeterministic>
std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::getProducedOptimalChoices() {
STORM_LOG_WARN_COND(Nondeterministic, "Getting optimal choices for deterministic model.");
template <typename ValueType>
std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
return _producedOptimalChoices.get();
STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
return this->_producedOptimalChoices.get();
}
template <typename ValueType, bool Nondeterministic>
storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::extractScheduler() const {
template <typename ValueType>
storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::extractScheduler() const {
auto const& optimalChoices = getProducedOptimalChoices();
storm::storage::Scheduler<ValueType> scheduler(optimalChoices.size());
for (uint64_t state = 0; state < optimalChoices.size(); ++state) {
@@ -175,44 +57,32 @@ namespace storm {
return scheduler;
}
template <typename ValueType, bool Nondeterministic>
bool SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const {
STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates." );
return _exitRates != nullptr;
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() {
if (this->_longRunComponentDecomposition == nullptr) {
// The decomposition has not been provided or computed, yet.
if (this->_backwardTransitions == nullptr) {
this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true));
this->_backwardTransitions = this->_computedBackwardTransitions.get();
}
template <typename ValueType, bool Nondeterministic>
template < typename = typename std::enable_if< !Nondeterministic >::type >
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
// For deterministic models, we compute the LRA for a BSCC
STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
auto trivialResult = computeLraForTrivialComponent(env, stateRewardsGetter, actionRewardsGetter, component);
if (trivialResult.first) {
return trivialResult.second;
this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(this->_transitionMatrix, *this->_backwardTransitions);
this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
}
// Solve nontrivial BSCC with the method specified in the settings
// TODO
}
template <typename ValueType, bool Nondeterministic>
template < typename = typename std::enable_if< Nondeterministic >::type >
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
// For models with potential nondeterminism, we compute the LRA for a maximal end component (MEC)
// Allocate memory for the nondeterministic choices.
if (this->isProduceSchedulerSet()) {
if (!_producedOptimalChoices.is_initialized()) {
_producedOptimalChoices.emplace();
if (!this->_producedOptimalChoices.is_initialized()) {
this->_producedOptimalChoices.emplace();
}
_producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
this->_producedOptimalChoices->resize(this->_transitionMatrix.getRowGroupCount());
}
auto trivialResult = computeLraForTrivialComponent(env, stateRewardsGetter, actionRewardsGetter, component);
auto trivialResult = this->computeLraForTrivialMec(env, stateRewardsGetter, actionRewardsGetter, component);
if (trivialResult.first) {
return trivialResult.second;
}
@@ -228,28 +98,28 @@ namespace storm {
}
STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
if (method == storm::solver::LraMethod::LinearProgramming) {
return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, mec);
return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, component);
} else if (method == storm::solver::LraMethod::ValueIteration) {
return computeLraForMecVi(env, stateRewardsGetter, actionRewardsGetter, mec);
return computeLraForMecVi(env, stateRewardsGetter, actionRewardsGetter, component);
} else {
STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
}
}
template <typename ValueType, bool Nondeterministic>
std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForTrivialComponent(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, LongRunComponentType const& component) {
template <typename ValueType>
std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialMec(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
// If the component only consists of a single state, we compute the LRA value directly
if (component.size() == 1) {
auto element const& = *component.begin();
auto const& element = *component.begin();
uint64_t state = internal::getComponentElementState(element);
auto choiceIt = internal::getComponentChoicesBegin(element);
if (Nondeterministic && !isContinuousTime()) {
auto choiceIt = internal::getComponentElementChoicesBegin(element);
if (!this->isContinuousTime()) {
// This is an MDP.
// Find the choice with the highest/lowest reward
ValueType bestValue = actionRewardsGetter(*choiceIt);
uint64_t bestChoice = *choiceIt;
for (++choiceIt; choiceIt != getComponentChoicesEnd(element); ++choiceIt) {
for (++choiceIt; choiceIt != internal::getComponentElementChoicesEnd(element); ++choiceIt) {
ValueType currentValue = actionRewardsGetter(*choiceIt);
if ((this->minimize() && currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
bestValue = std::move(currentValue);
@@ -257,65 +127,50 @@ namespace storm {
}
}
if (this->isProduceSchedulerSet()) {
_producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
this->_producedOptimalChoices.get()[state] = bestChoice - this->_transitionMatrix.getRowGroupIndices()[state];
}
bestValue += stateRewardsGetter(state);
return {true, bestValue};
} else {
// In a Markov Automaton, singleton components have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
STORM_LOG_THROW(!Nondeterministic || (_markovianStates != nullptr && _markovianStates->get(state)), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
STORM_LOG_ASSERT(this->_markovianStates != nullptr, "Nondeterministic continuous time model without Markovian states... Is this a not a Markov Automaton?");
STORM_LOG_THROW(this->_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
STORM_LOG_ASSERT(internal::getComponentElementChoiceCount(element) == 1, "Markovian state has Nondeterministic behavior.");
if (Nondeterministic && this->isProduceSchedulerSet()) {
_producedOptimalChoices.get()[state] = 0;
if (this->isProduceSchedulerSet()) {
this->_producedOptimalChoices.get()[state] = 0;
}
ValueType result = stateRewardsGetter(state) + (isContinuousTime() ? (*_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt));
ValueType result = stateRewardsGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt));
return {true, result};
}
} else if (!Nondeterministic) {
// For deterministic models, we can also easily catch the case where all values are the same
bool first = true;
ValueType val = storm::utility::zero<ValueType>();
for (auto const& element : component) {
auto state = getComponentElementState(element);
STORM_LOG_ASSERT(state == *getComponentChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
ValueType curr = stateRewardsGetter(state) + (isContinuousTime() ? (*_exitRates)[state] * actionRewardsGetter(state) : actionRewardsGetter(state));
if (first) {
first = false;
} else if (val != curr) {
return {false, storm::utility::zero<ValueType>()};
}
}
// All values are the same
return {true, val};
} else {
return {false, storm::utility::zero<ValueType>()};
}
}
template <typename ValueType, bool Nondeterministic>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForMecVi(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
// Collect some parameters of the computation
ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
std::vector<uint64_t>* optimalChoices = nullptr;
if (this->isProduceSchedulerSet()) {
optimalChoices = &_producedOptimalChoices.get();
optimalChoices = &this->_producedOptimalChoices.get();
}
// Now create a helper and perform the algorithm
if (isContinuousTime()) {
if (this->isContinuousTime()) {
// We assume a Markov Automaton (with deterministic timed states and nondeterministic instant states)
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNondetIs> viHelper(mec, _transitionMatrix, aperiodicFactor, _markovianStates, _exitRates);
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, _exitRates, &this->getOptimizationDirection(), optimalChoices);
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNondetIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, this->_exitRates, &this->getOptimizationDirection(), optimalChoices);
} else {
// We assume an MDP (with nondeterministic timed states and no instant states)
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, _transitionMatrix, aperiodicFactor);
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor);
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
}
}
template <typename ValueType, bool Nondeterministic>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLraForMecLp(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateRewardsGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
// Create an LP solver
auto solver = storm::utility::solver::LpSolverFactory<ValueType>().create("LRA for MEC");
@@ -336,18 +191,18 @@ namespace storm {
// Add constraints.
for (auto const& stateChoicesPair : mec) {
uint_fast64_t state = stateChoicesPair.first;
bool stateIsMarkovian = _markovianStates && _markovianStates->get(state);
bool stateIsMarkovian = this->_markovianStates && this->_markovianStates->get(state);
// Now create a suitable constraint for each choice
// x_s {≤, ≥} -k/rate(s) + sum_s' P(s,act,s') * x_s' + (value(s)/rate(s) + value(s,act))
for (auto choice : stateChoicesPair.second) {
std::vector<storm::expressions::Expression> summands;
auto matrixRow = _transitionMatrix.getRow(choice);
auto matrixRow = this->_transitionMatrix.getRow(choice);
summands.reserve(matrixRow.getNumberOfEntries() + 2);
// add -k/rate(s) (only if s is either a Markovian state or we have an MDP)
if (stateIsMarkovian) {
summands.push_back(-(k / solver->getManager().rational((*_exitRates)[state])));
} else if (!isContinuousTime()) {
summands.push_back(-(k / solver->getManager().rational((*this->_exitRates)[state])));
} else if (!this->isContinuousTime()) {
summands.push_back(-k);
}
// add sum_s' P(s,act,s') * x_s'
@@ -358,8 +213,8 @@ namespace storm {
ValueType value;
if (stateIsMarkovian) {
// divide state reward with exit rate
value = stateRewardsGetter(state) / (*_exitRates)[state] + actionRewardsGetter(choice);
} else if (!isContinuousTime()) {
value = stateRewardsGetter(state) / (*this->_exitRates)[state] + actionRewardsGetter(choice);
} else if (!this->isContinuousTime()) {
// in discrete time models no scaling is needed
value = stateRewardsGetter(state) + actionRewardsGetter(choice);
} else {
@ -388,7 +243,7 @@ namespace storm {
* Transitions that don't lead to a Component state are copied (taking a state index mapping into account).
*/
template <typename ValueType>
void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfNonComponentStates, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
// As there could be multiple transitions to the same MEC, we accumulate them in this map before adding them to the matrix builder.
std::map<uint64_t, ValueType> auxiliaryStateToProbabilityMap;
@ -398,7 +253,7 @@ namespace storm {
auto const& sspTransitionTarget = inputToSspStateMap[transition.getColumn()];
// Since the auxiliary Component states are appended at the end of the matrix, we can use this check to
// decide whether the transition leads to a component state or not
if (sspTransitionTarget < numberOfNonComponentStates) {
// If the target state is not contained in a component, we can copy over the entry.
sspMatrixBuilder.addNextValue(currentSspChoice, sspTransitionTarget, transition.getValue());
} else {
@ -420,124 +275,71 @@ namespace storm {
}
}
template <typename ValueType>
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap) {
auto const& choiceIndices = this->_transitionMatrix.getRowGroupIndices();
std::vector<ValueType> rhs;
uint64_t numberOfSspStates = numberOfNonComponentStates + this->_longRunComponentDecomposition->size();
storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, true, true, numberOfSspStates);
// If the source state of a transition is not contained in any component, we copy its choices (and perform the necessary modifications).
uint64_t currentSspChoice = 0;
for (auto const& nonComponentState : statesNotInComponent) {
sspMatrixBuilder.newRowGroup(currentSspChoice);
for (uint64_t choice = choiceIndices[nonComponentState]; choice < choiceIndices[nonComponentState + 1]; ++choice, ++currentSspChoice) {
rhs.push_back(storm::utility::zero<ValueType>());
addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder);
}
}
// Now we construct the choices for the auxiliary states which reflect former Component states.
for (uint64_t componentIndex = 0; componentIndex < this->_longRunComponentDecomposition->size(); ++componentIndex) {
auto const& component = (*this->_longRunComponentDecomposition)[componentIndex];
sspMatrixBuilder.newRowGroup(currentSspChoice);
// For nondeterministic models it might still be that we leave the component again. This needs to be reflected in the SSP
// by adding the "exiting" choices of the MEC to the auxiliary states
for (auto const& element : component) {
uint64_t componentState = internal::getComponentElementState(element);
for (uint64_t choice = choiceIndices[componentState]; choice < choiceIndices[componentState + 1]; ++choice) {
// If the choice is not contained in the component itself, we have to add a similar distribution to the auxiliary state.
if (!internal::componentElementChoicesContains(element, choice)) {
rhs.push_back(storm::utility::zero<ValueType>());
addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder);
if (sspComponentExitChoicesToOriginalMap) {
// Later we need to be able to map this choice back to the original input model
sspComponentExitChoicesToOriginalMap->emplace_back(componentState, choice - choiceIndices[componentState]);
}
++currentSspChoice;
}
}
}
// For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the component.
rhs.push_back(mecLraValues[componentIndex]);
if (sspComponentExitChoicesToOriginalMap) {
// Insert some invalid values so we can later detect that this choice is not an exit choice
sspComponentExitChoicesToOriginalMap->emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
}
++currentSspChoice;
}
return std::make_pair(sspMatrixBuilder.build(currentSspChoice, numberOfSspStates, numberOfSspStates), std::move(rhs));
}
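/*!
 * Translates the scheduler obtained for the SSP back to a scheduler for the original model.
 * States outside of components simply take over their SSP choice; for component states, the
 * three cases distinguished below apply.
 */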
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::constructOptimalChoices(std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap) {
// We first take care of non-mec states
storm::utility::vector::setVectorValues(this->_producedOptimalChoices.get(), statesNotInComponent, sspChoices);
// Secondly, we consider MEC states. There are 3 cases for each MEC state:
// 1. The SSP choices encode that we want to stay in the MEC
// 2. The SSP choices encode that we want to leave the MEC and
// a) we take an exit (non-MEC) choice at the given state
// b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfNonComponentStates];
for (auto const& mec : *this->_longRunComponentDecomposition) {
// Get the sspState of this MEC (using one representative mec state)
auto const& sspState = inputToSspStateMap[mec.begin()->first];
uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
// Obtain the state and choice of the original model to which the selected choice corresponds.
auto const& originalStateChoice = sspComponentExitChoicesToOriginalMap[sspChoiceIndex - exitChoiceOffset];
// Check if we are in Case 1 or 2
if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
// The optimal choice is to stay in this mec (Case 1)
@ -546,37 +348,37 @@ namespace storm {
} else {
// The best choice is to leave this MEC via the selected state and choice. (Case 2)
// Set the exit choice (Case 2.a)
this->_producedOptimalChoices.get()[originalStateChoice.first] = originalStateChoice.second;
// The remaining states in this MEC need to reach the state with the exit choice with probability 1. (Case 2.b)
// Perform a backwards search from the exit state, only using MEC choices
// We start by setting an invalid choice to all remaining mec states (so that we can easily detect them as unprocessed)
for (auto const& stateActions : mec) {
if (stateActions.first != originalStateChoice.first) {
this->_producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max();
}
}
// Ensure that backwards transitions are available
if (this->_backwardTransitions == nullptr) {
this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true));
this->_backwardTransitions = this->_computedBackwardTransitions.get();
}
// Now start a backwards DFS
std::vector<uint64_t> stack = {originalStateChoice.first};
while (!stack.empty()) {
uint64_t currentState = stack.back();
stack.pop_back();
for (auto const& backwardsTransition : this->_backwardTransitions->getRowGroup(currentState)) {
uint64_t predecessorState = backwardsTransition.getColumn();
if (mec.containsState(predecessorState)) {
auto& selectedPredChoice = _producedOptimalChoices.get()[predecessorState];
auto& selectedPredChoice = this->_producedOptimalChoices.get()[predecessorState];
if (selectedPredChoice == std::numeric_limits<uint64_t>::max()) {
// We don't already have a choice for this predecessor.
// We now need to check whether there is a *MEC* choice leading to currentState
for (auto const& predChoice : mec.getChoicesForState(predecessorState)) {
for (auto const& forwardTransition : this->_transitionMatrix.getRow(predChoice)) {
if (forwardTransition.getColumn() == currentState && !storm::utility::isZero(forwardTransition.getValue())) {
// Playing this choice (infinitely often) will lead to current state (infinitely often)!
selectedPredChoice = predChoice - this->_transitionMatrix.getRowGroupIndices()[predecessorState];
stack.push_back(predecessorState);
break;
}
@ -591,24 +393,86 @@ namespace storm {
}
}
}
}
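// Overall procedure: (1) map every input state either to its (order-preserving) position among
// the non-component states or to the auxiliary state of its component, (2) build and solve the
// SSP, (3) translate the resulting scheduler (if requested), and (4) map the SSP values back to
// the input states.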
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");
// For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
// which redirects all transitions leading to a former component state to a new auxiliary state.
// There will be one auxiliary state for each component. These states will be appended to the end of the matrix.
// First gather the states that are part of a component
// and create a mapping from states that lie in a component to the corresponding component index.
storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount());
std::vector<uint64_t> inputToSspStateMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max());
for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) {
for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) {
uint64_t state = internal::getComponentElementState(element);
statesInComponents.set(state);
inputToSspStateMap[state] = currentComponentIndex;
}
}
// Now take care of the non-component states. Note that the order of these states will be preserved.
uint64_t numberOfNonComponentStates = 0;
storm::storage::BitVector statesNotInComponent = ~statesInComponents;
for (auto const& nonComponentState : statesNotInComponent) {
inputToSspStateMap[nonComponentState] = numberOfNonComponentStates;
++numberOfNonComponentStates;
}
// Finalize the mapping for the component states which now still assigns component states to their component index.
// To make sure that they point to the auxiliary states (located at the end of the SspMatrix), we need to shift them by the
// number of states that are not in a component.
for (auto const& mecState : statesInComponents) {
inputToSspStateMap[mecState] += numberOfNonComponentStates;
}
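// Small (hypothetical) example: with 5 states where states {1, 3} form the single component
// (index 0), the non-component states 0, 2, 4 are mapped to 0, 1, 2 and both component states
// are mapped to the auxiliary state 3, i.e., inputToSspStateMap = [0, 3, 1, 3, 2].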
// For scheduler extraction, we will need to create a mapping between choices at the auxiliary states and the
// corresponding choices in the original model.
std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspComponentExitChoicesToOriginalMap;
// The next step is to create the SSP matrix and the right-hand side of the SSP.
auto sspMatrixVector = buildSspMatrixVector(componentLraValues, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates, this->isProduceSchedulerSet() ? &sspComponentExitChoicesToOriginalMap : nullptr);
// Set-up a solver
storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(env, true, true, this->getOptimizationDirection(), false, this->isProduceSchedulerSet());
requirements.clearBounds();
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrixVector.first);
solver->setHasUniqueSolution();
solver->setHasNoEndComponents();
solver->setTrackScheduler(this->isProduceSchedulerSet());
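// Each SSP value is a weighted average of the LRA values of the reachable components,
// so the smallest and largest component value bound the solution from below and above.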
auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end());
solver->setLowerBound(*lowerUpperBounds.first);
solver->setUpperBound(*lowerUpperBounds.second);
solver->setRequirementsChecked();
// Solve the equation system
std::vector<ValueType> x(sspMatrixVector.first.getRowGroupCount());
solver->solveEquations(env, this->getOptimizationDirection(), x, sspMatrixVector.second);
// Prepare scheduler (if requested)
if (this->isProduceSchedulerSet() && solver->hasScheduler()) {
// Translate result for ssp matrix to original model
constructOptimalChoices(solver->getSchedulerChoices(), sspMatrixVector.first, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates, sspComponentExitChoicesToOriginalMap);
} else {
STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
}
// Prepare result vector.
// For efficiency reasons, we re-use the memory of our rhs for this!
std::vector<ValueType> result = std::move(sspMatrixVector.second);
result.resize(this->_transitionMatrix.getRowGroupCount());
result.shrink_to_fit();
storm::utility::vector::selectVectorValues(result, inputToSspStateMap, x);
return result;
}
template class SparseNondeterministicInfiniteHorizonHelper<double>;
template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber>;
}
}
}

118
src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h

@ -1,20 +1,11 @@
#pragma once
#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
namespace storm {
class Environment;
namespace modelchecker {
@ -23,75 +14,26 @@ namespace storm {
/*!
* Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
* @tparam ValueType the type a value can have
*/
template <typename ValueType>
class SparseNondeterministicInfiniteHorizonHelper : public SparseInfiniteHorizonHelper<ValueType, true> {
public:
/*!
* Function mapping from indices to values
*/
typedef typename SparseInfiniteHorizonHelper<ValueType, true>::ValueGetter ValueGetter;
/*!
* Initializes the helper for a discrete time model (i.e. MDP)
*/
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
/*!
* Initializes the helper for a continuous time model (i.e. MA)
*/
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
/*!
* @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
* @return the produced scheduler of the most recent call.
@ -116,49 +58,41 @@ namespace storm {
* @return the (unique) optimal LRA value for the given component.
* @post if scheduler production is enabled, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
*/
virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& component) override;
protected:
virtual void createDecomposition() override;
/*!
* Checks if the component can trivially be solved without much overhead.
* @return either true and the (unique) optimal LRA value for the given component or false and an arbitrary value
* @post if scheduler production is enabled, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
*/
std::pair<bool, ValueType> computeLraForTrivialMec(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
/*!
* As computeLraForMec but uses value iteration as a solution method (independent of what is set in env)
*/
ValueType computeLraForMecVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
/*!
* As computeLraForMec but uses linear programming as a solution method (independent of what is set in env)
* @see Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
*/
ValueType computeLraForMecLp(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
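/*!
 * Builds the matrix and the right-hand-side vector of the SSP whose solution yields the LRA value
 * for each state. If a non-null map is given, it is filled with the correspondence between the
 * component-exiting SSP choices and the original state-choice pairs (needed for scheduler
 * extraction).
 */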
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap);
/*!
* @pre a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
* Translates optimal choices for MECs and SSP to the original model.
* @post getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
*/
void constructOptimalChoices(std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap);
/*!
* @pre if scheduler production is enabled a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
* @return Lra values for each state
* @post if scheduler production is enabled getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
*/
virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) override;
};
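// Minimal usage sketch (an illustration, not part of this commit; it assumes the inherited
// interface of SparseInfiniteHorizonHelper still offers computeLongRunAverageRewards and a
// setProduceScheduler toggle matching isProduceSchedulerSet(), as the previous version of this
// header did):
//
//   SparseNondeterministicInfiniteHorizonHelper<double> helper(mdp.getTransitionMatrix());
//   helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
//   helper.setProduceScheduler(true);
//   std::vector<double> lraValues = helper.computeLongRunAverageRewards(env, rewardModel);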
