
InfiniteHorizon: Fixed storing backward transitions properly. Allowed specifying a MEC decomposition. Pushed the _produceScheduler flag to the SingleValueModelCheckerHelper.

Branch: main
Author: Tim Quatmann, 5 years ago
Commit: 626b7a819a
1. src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp (4 changes)
2. src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp (15 changes)
3. src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h (14 changes)
4. src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp (91 changes)
5. src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h (36 changes)
6. src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp (4 changes)
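
To make the interface change concrete before the diffs: below is a minimal usage sketch (not part of this commit) of the reworked SparseNondeterministicInfiniteHorizonHelper. It assumes the usual storm::Environment and a sparse nondeterministic model; the enclosing function, its template parameters, and the variable names are illustrative placeholders.

// Illustrative usage sketch (not part of this commit); env, model, and psiStates are placeholders.
#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"

template<typename ValueType, typename SparseModelType>
std::vector<ValueType> computeLraProbabilitiesSketch(storm::Environment const& env, SparseModelType const& model, storm::storage::BitVector const& psiStates) {
    // Discrete-time (MDP) construction now only needs the transition matrix.
    storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(model.getTransitionMatrix());
    // Optionally reuse backward transitions the caller already has. The helper only stores a pointer,
    // so keep the matrix alive in a local variable instead of passing a temporary; holding on to a
    // temporary is exactly the storage problem this commit fixes in the old constructor interface.
    auto backwardTransitions = model.getBackwardTransitions();
    helper.provideBackwardTransitions(backwardTransitions);
    // The optimization direction and the produce-scheduler flag now come from the
    // SingleValueModelCheckerHelper base class (normally set via setInformationFromCheckTaskNondeterministic).
    helper.setOptimizationDirection(storm::solver::OptimizationDirection::Maximize);
    helper.setProduceScheduler(true);
    return helper.computeLongRunAverageProbabilities(env, psiStates);
}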

src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp (4 changes)

@@ -142,7 +142,7 @@ namespace storm {
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
@@ -159,7 +159,7 @@ namespace storm {
STORM_LOG_THROW(this->getModel().isClosed(), storm::exceptions::InvalidPropertyException, "Unable to compute long run average rewards in non-closed Markov automaton.");
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());

src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp (15 changes)

@@ -5,6 +5,11 @@ namespace storm {
namespace modelchecker {
namespace helper {
template <typename ValueType, storm::dd::DdType DdType>
SingleValueModelCheckerHelper<ValueType, DdType>::SingleValueModelCheckerHelper() : _produceScheduler(false) {
// Intentionally left empty
}
template <typename ValueType, storm::dd::DdType DdType>
void SingleValueModelCheckerHelper<ValueType, DdType>::setOptimizationDirection(storm::solver::OptimizationDirection const& direction) {
_optimizationDirection = direction;
@@ -67,6 +72,16 @@ namespace storm {
STORM_LOG_ASSERT(isValueThresholdSet(), "Value Threshold comparison type was requested but not set before.");
return _valueThreshold->second;
}
template <typename ValueType, storm::dd::DdType DdType>
void SingleValueModelCheckerHelper<ValueType, DdType>::setProduceScheduler(bool value) {
_produceScheduler = value;
}
template <typename ValueType, storm::dd::DdType DdType>
bool SingleValueModelCheckerHelper<ValueType, DdType>::isProduceSchedulerSet() const {
return _produceScheduler;
}
template class SingleValueModelCheckerHelper<double, storm::dd::DdType::None>;
template class SingleValueModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::None>;

src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h (14 changes)

@@ -17,7 +17,8 @@ namespace storm {
template <typename ValueType, storm::dd::DdType DdType = storm::dd::DdType::None>
class SingleValueModelCheckerHelper : public ModelCheckerHelper<ValueType, DdType> {
public:
SingleValueModelCheckerHelper() = default;
SingleValueModelCheckerHelper();
~SingleValueModelCheckerHelper() = default;
/*!
@@ -91,9 +92,20 @@ namespace storm {
*/
ValueType const& getValueThresholdValue() const;
/*!
* Sets whether an optimal scheduler shall be constructed during the computation
*/
void setProduceScheduler(bool value);
/*!
* @return whether an optimal scheduler shall be constructed during the computation
*/
bool isProduceSchedulerSet() const;
private:
boost::optional<storm::solver::OptimizationDirection> _optimizationDirection;
boost::optional<std::pair<storm::logic::ComparisonType, ValueType>> _valueThreshold;
bool _produceScheduler;
};
}
}
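
A side effect of moving the flag into this class template: in the SparseNondeterministicInfiniteHorizonHelper sources below, every call is rewritten from isProduceSchedulerSet() to this->isProduceSchedulerSet(). Members of a dependent base class are not visible to unqualified name lookup inside a derived class template, so the this-> qualification is required. A minimal stand-alone illustration (placeholder names, not Storm code):

template <typename T>
struct BaseHelper { bool isProduceSchedulerSet() const { return flag; } bool flag = false; };

template <typename T>
struct DerivedHelper : BaseHelper<T> {
    bool query() const {
        // return isProduceSchedulerSet();    // ill-formed: BaseHelper<T> is a dependent base, unqualified lookup does not find it
        return this->isProduceSchedulerSet(); // OK: name lookup is deferred to template instantiation
    }
};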

src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp (91 changes)

@@ -21,15 +21,27 @@ namespace storm {
namespace helper {
template <typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) : _transitionMatrix(transitionMatrix), _backwardTransitions(backwardTransitions), _markovianStates(nullptr), _exitRates(nullptr), _produceScheduler(false) {
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _mecDecomposition(nullptr), _markovianStates(nullptr), _exitRates(nullptr) {
// Intentionally left empty.
}
template <typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(backwardTransitions), _markovianStates(&markovianStates), _exitRates(&exitRates), _produceScheduler(false) {
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _backwardTransitions(nullptr), _mecDecomposition(nullptr), _markovianStates(&markovianStates), _exitRates(&exitRates) {
// Intentionally left empty.
}
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or set before.");
_backwardTransitions = &backwardTransitions;
}
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::provideMaximalEndComponentDecomposition(storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition) {
STORM_LOG_WARN_COND(_mecDecomposition == nullptr, "A MEC decomposition was provided but one was already computed or set before.");
_mecDecomposition = &mecDecomposition;
}
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
return computeLongRunAverageValues(env,
@@ -96,7 +108,7 @@ namespace storm {
}
// If requested, allocate memory for the choices made
if (isProduceSchedulerSet()) {
if (this->isProduceSchedulerSet()) {
if (!_producedOptimalChoices.is_initialized()) {
_producedOptimalChoices.emplace();
}
@@ -104,40 +116,37 @@ namespace storm {
}
// Start by decomposing the Model into its MECs.
storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(_transitionMatrix, _backwardTransitions);
if (_mecDecomposition == nullptr) {
// The decomposition has not been provided or computed yet.
if (_backwardTransitions == nullptr) {
_computedBackwardTransitions = _transitionMatrix.transpose(true);
_backwardTransitions = &_computedBackwardTransitions;
}
_computedMecDecomposition = storm::storage::MaximalEndComponentDecomposition<ValueType>(_transitionMatrix, *_backwardTransitions);
_mecDecomposition = &_computedMecDecomposition;
}
// Compute the long-run average for all end components in isolation.
std::vector<ValueType> mecLraValues;
mecLraValues.reserve(mecDecomposition.size());
for (auto const& mec : mecDecomposition) {
mecLraValues.reserve(_mecDecomposition->size());
for (auto const& mec : *_mecDecomposition) {
mecLraValues.push_back(computeLraForMec(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, mec));
}
// Solve the resulting SSP where end components are collapsed into single auxiliary states
return buildAndSolveSsp(underlyingSolverEnvironment, mecDecomposition, mecLraValues);
}
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::setProduceScheduler(bool value) {
_produceScheduler = value;
}
template <typename ValueType>
bool SparseNondeterministicInfiniteHorizonHelper<ValueType>::isProduceSchedulerSet() const {
return _produceScheduler;
return buildAndSolveSsp(underlyingSolverEnvironment, mecLraValues);
}
template <typename ValueType>
std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
STORM_LOG_ASSERT(isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
return _producedOptimalChoices.get();
}
template <typename ValueType>
std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
STORM_LOG_ASSERT(isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
STORM_LOG_ASSERT(_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
return _producedOptimalChoices.get();
}
@@ -169,7 +178,7 @@ namespace storm {
// Singleton MECs have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
STORM_LOG_THROW(_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has Nondeterministic behavior.");
if (isProduceSchedulerSet()) {
if (this->isProduceSchedulerSet()) {
_producedOptimalChoices.get()[state] = 0;
}
return stateRewardsGetter(state) + (*_exitRates)[state] * actionRewardsGetter(*choiceIt);
@@ -184,7 +193,7 @@ namespace storm {
bestChoice = *choiceIt;
}
}
if (isProduceSchedulerSet()) {
if (this->isProduceSchedulerSet()) {
_producedOptimalChoices.get()[state] = bestChoice - _transitionMatrix.getRowGroupIndices()[state];
}
return bestValue + stateRewardsGetter(state);
@@ -200,7 +209,7 @@ namespace storm {
STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
method = storm::solver::LraMethod::ValueIteration;
}
STORM_LOG_ERROR_COND(!isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
if (method == storm::solver::LraMethod::LinearProgramming) {
return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, mec);
} else if (method == storm::solver::LraMethod::ValueIteration) {
@@ -216,7 +225,7 @@ namespace storm {
// Collect some parameters of the computation
ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
std::vector<uint64_t>* optimalChoices = nullptr;
if (isProduceSchedulerSet()) {
if (this->isProduceSchedulerSet()) {
optimalChoices = &_producedOptimalChoices.get();
}
@@ -296,7 +305,7 @@ namespace storm {
}
solver->optimize();
STORM_LOG_THROW(!isProduceSchedulerSet(), storm::exceptions::NotImplementedException, "Scheduler extraction is not yet implemented for LP based LRA method.");
STORM_LOG_THROW(!this->isProduceSchedulerSet(), storm::exceptions::NotImplementedException, "Scheduler extraction is not yet implemented for LP based LRA method.");
return solver->getContinuousValue(k);
}
@@ -339,7 +348,8 @@ namespace storm {
}
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition, std::vector<ValueType> const& mecLraValues) {
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) {
STORM_LOG_ASSERT(_mecDecomposition != nullptr, "Decomposition not computed yet.");
// Let's improve readability a bit
uint64_t numberOfStates = _transitionMatrix.getRowGroupCount();
@@ -353,8 +363,8 @@ namespace storm {
// and create a mapping from states that lie in a MEC to the corresponding MEC index.
storm::storage::BitVector statesInMecs(numberOfStates);
std::vector<uint64_t> inputToSspStateMap(numberOfStates, std::numeric_limits<uint64_t>::max());
for (uint64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
for (auto const& stateChoicesPair : mecDecomposition[currentMecIndex]) {
for (uint64_t currentMecIndex = 0; currentMecIndex < _mecDecomposition->size(); ++currentMecIndex) {
for (auto const& stateChoicesPair : (*_mecDecomposition)[currentMecIndex]) {
statesInMecs.set(stateChoicesPair.first);
inputToSspStateMap[stateChoicesPair.first] = currentMecIndex;
}
@@ -379,7 +389,7 @@ namespace storm {
// The next step is to create the SSP matrix and the right-hand side of the SSP.
std::vector<ValueType> rhs;
uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();
uint64_t numberOfSspStates = numberOfStatesNotInMecs + _mecDecomposition->size();
typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates , 0, false, true, numberOfSspStates);
// If the source state of a transition is not contained in any MEC, we copy its choices (and perform the necessary modifications).
uint64_t currentSspChoice = 0;
@@ -392,8 +402,8 @@ namespace storm {
}
}
// Now we construct the choices for the auxiliary states which reflect former MEC states.
for (uint64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
for (uint64_t mecIndex = 0; mecIndex < _mecDecomposition->size(); ++mecIndex) {
storm::storage::MaximalEndComponent const& mec = (*_mecDecomposition)[mecIndex];
sspMatrixBuilder.newRowGroup(currentSspChoice);
for (auto const& stateChoicesPair : mec) {
uint64_t const& mecState = stateChoicesPair.first;
@@ -403,7 +413,7 @@ namespace storm {
if (choicesInMec.find(choice) == choicesInMec.end()) {
rhs.push_back(storm::utility::zero<ValueType>());
addSspMatrixChoice(choice, _transitionMatrix, inputToSspStateMap, numberOfStatesNotInMecs, currentSspChoice, sspMatrixBuilder);
if (isProduceSchedulerSet()) {
if (this->isProduceSchedulerSet()) {
// Later we need to be able to map this choice back to the original input model
sspMecExitChoicesToOriginalMap.emplace_back(mecState, choice - nondeterministicChoiceIndices[mecState]);
}
@@ -413,7 +423,7 @@ namespace storm {
}
// For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
rhs.push_back(mecLraValues[mecIndex]);
if (isProduceSchedulerSet()) {
if (this->isProduceSchedulerSet()) {
// Insert some invalid values so we can later detect that this choice is not an exit choice
sspMecExitChoicesToOriginalMap.emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
}
@@ -429,7 +439,7 @@ namespace storm {
std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrix);
solver->setHasUniqueSolution();
solver->setHasNoEndComponents();
solver->setTrackScheduler(isProduceSchedulerSet());
solver->setTrackScheduler(this->isProduceSchedulerSet());
auto lowerUpperBounds = std::minmax_element(mecLraValues.begin(), mecLraValues.end());
solver->setLowerBound(*lowerUpperBounds.first);
solver->setUpperBound(*lowerUpperBounds.second);
@@ -440,7 +450,7 @@ namespace storm {
solver->solveEquations(env, this->getOptimizationDirection(), x, rhs);
// Prepare scheduler (if requested)
if (isProduceSchedulerSet() && solver->hasScheduler()) {
if (this->isProduceSchedulerSet() && solver->hasScheduler()) {
// Translate result for ssp matrix to original model
auto const& sspChoices = solver->getSchedulerChoices();
// We first take care of non-mec states
@@ -451,7 +461,7 @@ namespace storm {
// a) we take an exit (non-MEC) choice at the given state
// b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfStatesNotInMecs];
for (auto const& mec : mecDecomposition) {
for (auto const& mec : *_mecDecomposition) {
// Get the sspState of this MEC (using one representative mec state)
auto const& sspState = inputToSspStateMap[mec.begin()->first];
uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
@@ -474,12 +484,17 @@ namespace storm {
_producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max();
}
}
// Ensure that backwards transitions are available
if (_backwardTransitions == nullptr) {
_computedBackwardTransitions = _transitionMatrix.transpose(true);
_backwardTransitions = &_computedBackwardTransitions;
}
// Now start a backwards DFS
std::vector<uint64_t> stack = {originalStateChoice.first};
while (!stack.empty()) {
uint64_t currentState = stack.back();
stack.pop_back();
for (auto const& backwardsTransition : _backwardTransitions.getRowGroup(currentState)) {
for (auto const& backwardsTransition : _backwardTransitions->getRowGroup(currentState)) {
uint64_t predecessorState = backwardsTransition.getColumn();
if (mec.containsState(predecessorState)) {
auto& selectedPredChoice = _producedOptimalChoices.get()[predecessorState];
@@ -506,7 +521,7 @@ namespace storm {
}
}
} else {
STORM_LOG_ERROR_COND(!isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
}
// Prepare result vector.

src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h (36 changes)

@@ -21,12 +21,26 @@ namespace storm {
/*!
* Initializes the helper for a discrete time (i.e. MDP)
*/
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions);
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
/*!
* Initializes the helper for a continuous time (i.e. MA)
*/
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
/*!
* Provides backward transitions that can be used during the computation.
* Providing them is optional. If they are not provided, they will be computed internally.
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the backward transitions remains valid.
*/
void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardsTransitions);
/*!
* Provides the maximal end component decomposition that can be used during the computation.
* Providing the decomposition is optional. If it is not provided, it will be computed internally.
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid.
*/
void provideMaximalEndComponentDecomposition(storm::storage::MaximalEndComponentDecomposition<ValueType> const& decomposition);
/*!
* Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
@@ -52,16 +66,6 @@ namespace storm {
*/
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::function<ValueType(uint64_t stateIndex)> const& stateValuesGetter, std::function<ValueType(uint64_t globalChoiceIndex)> const& actionValuesGetter);
/*!
* Sets whether an optimal scheduler shall be constructed during the computation
*/
void setProduceScheduler(bool value);
/*!
* @return whether an optimal scheduler shall be constructed during the computation
*/
bool isProduceSchedulerSet() const;
/*!
* @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
* @return the produced scheduler of the most recent call.
@@ -107,14 +111,16 @@ namespace storm {
/*!
* @return Lra values for each state
*/
std::vector<ValueType> buildAndSolveSsp(Environment const& env, storm::storage::MaximalEndComponentDecomposition<ValueType> const& mecDecomposition, std::vector<ValueType> const& mecLraValues);
std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues);
private:
storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
storm::storage::SparseMatrix<ValueType> const& _backwardTransitions;
storm::storage::SparseMatrix<ValueType> const* _backwardTransitions;
storm::storage::SparseMatrix<ValueType> _computedBackwardTransitions;
storm::storage::MaximalEndComponentDecomposition<ValueType> const* _mecDecomposition;
storm::storage::MaximalEndComponentDecomposition<ValueType> _computedMecDecomposition;
storm::storage::BitVector const* _markovianStates;
std::vector<ValueType> const* _exitRates;
bool _produceScheduler;
boost::optional<std::vector<uint64_t>> _producedOptimalChoices;
};
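
Taken together, a caller that has already computed backward transitions or a MEC decomposition can hand them to the helper so they are not recomputed; otherwise the helper builds and caches them itself in _computedBackwardTransitions and _computedMecDecomposition. A hedged sketch using only types and calls from this diff; transitionMatrix and backwardTransitions are assumed to be long-lived objects owned by the caller:

// Inside some function template over ValueType; the helper stores raw pointers,
// so all provided objects must outlive the helper.
storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);

storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(transitionMatrix);
helper.provideBackwardTransitions(backwardTransitions);
helper.provideMaximalEndComponentDecomposition(mecDecomposition);
// Any subsequent computeLongRunAverage* call reuses both instead of recomputing them.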

src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp (4 changes)

@@ -227,7 +227,7 @@ namespace storm {
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
@@ -242,7 +242,7 @@ namespace storm {
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions());
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
