Browse Source

Merge branch 'modelchecker-helper-refactor'

main
Tim Quatmann 5 years ago
parent
commit
b6fc409dd8
  1. 25
      resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h
  2. 12
      src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp
  3. 25
      src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp
  4. 17
      src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp
  5. 25
      src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp
  6. 55
      src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp
  7. 6
      src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h
  8. 54
      src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp
  9. 7
      src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h
  10. 562
      src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
  11. 21
      src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h
  12. 475
      src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp
  13. 28
      src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h
  14. 47
      src/storm/modelchecker/helper/ModelCheckerHelper.cpp
  15. 74
      src/storm/modelchecker/helper/ModelCheckerHelper.h
  16. 98
      src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp
  17. 111
      src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h
  18. 147
      src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp
  19. 87
      src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h
  20. 431
      src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp
  21. 75
      src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h
  22. 161
      src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp
  23. 140
      src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h
  24. 478
      src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
  25. 101
      src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h
  26. 27
      src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h
  27. 491
      src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
  28. 149
      src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
  29. 46
      src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h
  30. 46
      src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h
  31. 12
      src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp
  32. 21
      src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp
  33. 26
      src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp
  34. 26
      src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp
  35. 4
      src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h
  36. 15
      src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp
  37. 6
      src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h
  38. 467
      src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
  39. 13
      src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h
  40. 12
      src/storm/models/sparse/Ctmc.cpp
  41. 6
      src/storm/models/sparse/Ctmc.h
  42. 33
      src/storm/models/sparse/StandardRewardModel.cpp
  43. 9
      src/storm/models/sparse/StandardRewardModel.h
  44. 4
      src/storm/models/symbolic/Ctmc.cpp
  45. 6
      src/storm/models/symbolic/Ctmc.h
  46. 12
      src/storm/solver/GmmxxMultiplier.cpp
  47. 4
      src/storm/storage/MaximalEndComponent.cpp
  48. 5
      src/storm/storage/MaximalEndComponent.h
  49. 86
      src/storm/storage/SparseMatrix.cpp
  50. 13
      src/storm/storage/SparseMatrix.h
  51. 1
      src/storm/storage/dd/DdType.h
  52. 5
      src/storm/utility/vector.h
  53. 7
      src/test/storm/modelchecker/csl/LraCtmcCslModelCheckerTest.cpp
  54. 2
      src/test/storm/modelchecker/csl/MarkovAutomatonCslModelCheckerTest.cpp
  55. 73
      src/test/storm/storage/SparseMatrixTest.cpp

25
resources/3rdparty/gmm-5.2/include/gmm/gmm_blas.h

@@ -1797,6 +1797,11 @@ namespace gmm {
void mult_add_by_row_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4, abstract_dense) {
tbb::parallel_for(tbb::blocked_range<unsigned long>(0, vect_size(l4), 10), TbbMultAddFunctor<L1, L2, L3, L4>(l1, l2, l3, l4));
}
template <typename L1, typename L2, typename L3>
void mult_add_by_row_parallel(const L1& l1, const L2& l2, L3& l3, abstract_dense) {
  // In-place multiply-accumulate (l3 += l1*l2) for dense vectors: the
  // destination doubles as the addend by passing l3 to the functor twice.
  const unsigned long grain = 10;
  tbb::parallel_for(tbb::blocked_range<unsigned long>(0, vect_size(l3), grain),
                    TbbMultAddFunctor<L1, L2, L3, L3>(l1, l2, l3, l3));
}
#endif
template <typename L1, typename L2, typename L3>
@@ -1949,6 +1954,22 @@ namespace gmm {
}
}
/** Multiply-accumulate. l3 += l1*l2; */
template <typename L1, typename L2, typename L3> inline
void mult_add_parallel(const L1& l1, const L2& l2, L3& l3) {
size_type m = mat_nrows(l1), n = mat_ncols(l1);
if (!m || !n) return;
GMM_ASSERT2(n==vect_size(l2) && m==vect_size(l3), "dimensions mismatch");
if (!same_origin(l2, l3)) {
mult_add_parallel_spec(l1, l2, l3, typename principal_orientation_type<typename linalg_traits<L1>::sub_orientation>::potype());
} else {
GMM_WARNING2("Warning, A temporary is used for mult\n");
typename temporary_vector<L3>::vector_type temp(vect_size(l2));
copy(l2, temp);
mult_add_parallel_spec(l1, temp, l3, typename principal_orientation_type<typename linalg_traits<L1>::sub_orientation>::potype());
}
}
/** Multiply-accumulate. l4 = l1*l2 + l3; */
template <typename L1, typename L2, typename L3, typename L4> inline
void mult_add_parallel(const L1& l1, const L2& l2, const L3& l3, L4& l4) {
@@ -2056,6 +2077,10 @@ namespace gmm {
template <typename L1, typename L2, typename L3, typename L4> inline
void mult_add_parallel_spec(const L1& l1, const L2& l2, const L3& l3, L4& l4, row_major) {
  // Row-major case: forward to the parallel row kernel, dispatching further
  // on the storage type of the destination vector l4.
  mult_add_by_row_parallel(l1, l2, l3, l4, typename linalg_traits<L4>::storage_type());
}
template <typename L1, typename L2, typename L3> inline
void mult_add_parallel_spec(const L1& l1, const L2& l2, L3& l3, row_major) {
  // In-place variant (l3 += l1*l2): dispatch on l3's own storage type.
  mult_add_by_row_parallel(l1, l2, l3, typename linalg_traits<L3>::storage_type());
}
#endif
template <typename L1, typename L2, typename L3> inline

12
src/storm/modelchecker/csl/HybridCtmcCslModelChecker.cpp

@@ -4,6 +4,8 @@
#include "storm/modelchecker/csl/helper/SparseCtmcCslHelper.h"
#include "storm/modelchecker/csl/helper/HybridCtmcCslHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
#include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
@@ -123,13 +125,19 @@ namespace storm {
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
return storm::modelchecker::helper::HybridCtmcCslHelper::computeLongRunAverageProbabilities<DdType, ValueType>(env, this->getModel(), checkTask.isOnlyInitialStatesRelevantSet(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), subResult.getTruthValuesVector());
auto probabilisticTransitions = this->getModel().computeProbabilityMatrix();
storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), probabilisticTransitions, this->getModel().getExitRateVector());
storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
return helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
}
template<typename ModelType>
std::unique_ptr<CheckResult> HybridCtmcCslModelChecker<ModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
return storm::modelchecker::helper::HybridCtmcCslHelper::computeLongRunAverageRewards<DdType, ValueType>(env, this->getModel(), checkTask.isOnlyInitialStatesRelevantSet(), this->getModel().getTransitionMatrix(), this->getModel().getExitRateVector(), rewardModel.get());
auto probabilisticTransitions = this->getModel().computeProbabilityMatrix();
storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), probabilisticTransitions, this->getModel().getExitRateVector());
storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
return helper.computeLongRunAverageRewards(env, rewardModel.get());
}
// Explicitly instantiate the model checker.

25
src/storm/modelchecker/csl/HybridMarkovAutomatonCslModelChecker.cpp

@@ -5,6 +5,8 @@
#include "storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h"
#include "storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h"
#include "storm/modelchecker/prctl/helper/HybridMdpPrctlHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
#include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
@@ -100,20 +102,23 @@ namespace storm {
template<typename ModelType>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslModelChecker<ModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {
storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
return storm::modelchecker::helper::HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector(), subResult.getTruthValuesVector());
storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, true> helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
return helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
}
template<typename ModelType>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslModelChecker<ModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
return storm::modelchecker::helper::HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, checkTask.getOptimizationDirection(), this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector(), rewardModel.get());
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, true> helper(this->getModel(), this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRateVector());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
return helper.computeLongRunAverageRewards(env, rewardModel.get());
}
// Explicitly instantiate the model checker.

17
src/storm/modelchecker/csl/SparseCtmcCslModelChecker.cpp

@@ -2,6 +2,8 @@
#include "storm/modelchecker/csl/helper/SparseCtmcCslHelper.h"
#include "storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
#include "storm/models/sparse/StandardRewardModel.h"
@@ -127,15 +129,22 @@ namespace storm {
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), subResult.getTruthValuesVector(), &this->getModel().getExitRateVector());
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
auto probabilisticTransitions = this->getModel().computeProbabilityMatrix();
storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(probabilisticTransitions, this->getModel().getExitRateVector());
storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
}
template <typename SparseCtmcModelType>
std::unique_ptr<CheckResult> SparseCtmcCslModelChecker<SparseCtmcModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), rewardModel.get(), &this->getModel().getExitRateVector());
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
auto probabilisticTransitions = this->getModel().computeProbabilityMatrix();
storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(probabilisticTransitions, this->getModel().getExitRateVector());
storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
}
template <typename SparseCtmcModelType>

25
src/storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.cpp

@@ -1,6 +1,8 @@
#include "storm/modelchecker/csl/SparseMarkovAutomatonCslModelChecker.h"
#include "storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
#include "storm/modelchecker/multiobjective/multiObjectiveModelChecking.h"
@@ -140,8 +142,15 @@ namespace storm {
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
std::vector<ValueType> result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), subResult.getTruthValuesVector());
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(result)));
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
if (checkTask.isProduceSchedulersSet()) {
result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
}
return result;
}
template<typename SparseMarkovAutomatonModelType>
@@ -149,8 +158,16 @@ namespace storm {
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
STORM_LOG_THROW(this->getModel().isClosed(), storm::exceptions::InvalidPropertyException, "Unable to compute long run average rewards in non-closed Markov automaton.");
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
std::vector<ValueType> result = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards<ValueType, RewardModelType>(env, checkTask.getOptimizationDirection(), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), this->getModel().getExitRates(), this->getModel().getMarkovianStates(), rewardModel.get());
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(result)));
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix(), this->getModel().getMarkovianStates(), this->getModel().getExitRates());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
if (checkTask.isProduceSchedulersSet()) {
result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
}
return result;
}
template<typename SparseMarkovAutomatonModelType>

55
src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.cpp

@@ -411,53 +411,6 @@ namespace storm {
STORM_LOG_THROW(false, storm::exceptions::InvalidOperationException, "Computing cumulative rewards is unsupported for this value type.");
}
template<storm::dd::DdType DdType, class ValueType>
std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates) {
storm::utility::Stopwatch conversionWatch(true);
// Create ODD for the translation.
storm::dd::Odd odd = model.getReachableStates().createOdd();
storm::storage::SparseMatrix<ValueType> explicitRateMatrix = rateMatrix.toMatrix(odd, odd);
std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
storm::solver::SolveGoal<ValueType> goal;
if (onlyInitialStatesRelevant) {
goal.setRelevantValues(model.getInitialStates().toVector(odd));
}
conversionWatch.stop();
STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
std::vector<ValueType> result = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageProbabilities(env, std::move(goal), explicitRateMatrix, psiStates.toVector(odd), &explicitExitRateVector);
return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
}
template<storm::dd::DdType DdType, class ValueType>
std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel) {
STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");
storm::dd::Add<DdType, ValueType> probabilityMatrix = computeProbabilityMatrix(rateMatrix, exitRateVector);
storm::utility::Stopwatch conversionWatch(true);
// Create ODD for the translation.
storm::dd::Odd odd = model.getReachableStates().createOdd();
storm::storage::SparseMatrix<ValueType> explicitRateMatrix = rateMatrix.toMatrix(odd, odd);
std::vector<ValueType> explicitExitRateVector = exitRateVector.toVector(odd);
storm::solver::SolveGoal<ValueType> goal;
if (onlyInitialStatesRelevant) {
goal.setRelevantValues(model.getInitialStates().toVector(odd));
}
conversionWatch.stop();
STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
std::vector<ValueType> result = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards(env, std::move(goal), explicitRateMatrix, rewardModel.getTotalRewardVector(probabilityMatrix, model.getColumnVariables(), exitRateVector, true).toVector(odd), &explicitExitRateVector);
return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
}
template<storm::dd::DdType DdType, class ValueType>
storm::dd::Add<DdType, ValueType> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& maybeStates, ValueType uniformizationRate) {
STORM_LOG_DEBUG("Computing uniformized matrix using uniformization rate " << uniformizationRate << ".");
@@ -489,9 +442,7 @@ namespace storm {
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel, double timeBound);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& phiStates, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::CUDD> const& targetStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& nextStates);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel);
template storm::dd::Add<storm::dd::DdType::CUDD, double> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::CUDD, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector);
template storm::dd::Add<storm::dd::DdType::CUDD, double> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& maybeStates, double uniformizationRate);
@@ -501,9 +452,7 @@ namespace storm {
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel, double timeBound);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel);
template storm::dd::Add<storm::dd::DdType::Sylvan, double> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, double> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector);
template storm::dd::Add<storm::dd::DdType::Sylvan, double> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, double uniformizationRate);
@@ -513,9 +462,7 @@ namespace storm {
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel, double timeBound);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel);
template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector);
template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, storm::RationalNumber uniformizationRate);
@@ -525,9 +472,7 @@ namespace storm {
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel, double timeBound);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeUntilProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nextStates);
template std::unique_ptr<CheckResult> HybridCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::RewardModelType const& rewardModel);
template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> HybridCtmcCslHelper::computeProbabilityMatrix(storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& rateMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector);
template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> HybridCtmcCslHelper::computeUniformizedMatrix(storm::models::symbolic::Ctmc<storm::dd::DdType::Sylvan, storm::RationalFunction> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& transitionMatrix, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& maybeStates, storm::RationalFunction uniformizationRate);

6
src/storm/modelchecker/csl/helper/HybridCtmcCslHelper.h

@@ -44,15 +44,9 @@ namespace storm {
template<storm::dd::DdType DdType, typename ValueType>
static std::unique_ptr<CheckResult> computeReachabilityRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel, storm::dd::Bdd<DdType> const& targetStates, bool qualitative);
template<storm::dd::DdType DdType, typename ValueType>
static std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates);
template<storm::dd::DdType DdType, typename ValueType>
static std::unique_ptr<CheckResult> computeNextProbabilities(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& nextStates);
template<storm::dd::DdType DdType, typename ValueType>
static std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Ctmc<DdType, ValueType> const& model, bool onlyInitialStatesRelevant, storm::dd::Add<DdType, ValueType> const& rateMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel);
/*!
* Converts the given rate-matrix into a time-abstract probability matrix.
*

54
src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.cpp

@@ -77,73 +77,19 @@ namespace storm {
}
template<storm::dd::DdType DdType, class ValueType>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates) {
    // Translate the symbolic model data into a sparse representation and delegate
    // the actual LRA probability computation to the sparse engine.
    storm::utility::Stopwatch translationWatch(true);

    // The ODD fixes the state ordering used for all symbolic-to-explicit translations.
    storm::dd::Odd odd = model.getReachableStates().createOdd();
    storm::storage::SparseMatrix<ValueType> sparseTransitions = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd);
    std::vector<ValueType> sparseExitRates = exitRateVector.toVector(odd);

    translationWatch.stop();
    STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << translationWatch.getTimeInMilliseconds() << "ms.");

    auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(env, dir, sparseTransitions, sparseTransitions.transpose(true), sparseExitRates, markovianStates.toVector(odd), psiStates.toVector(odd));

    // Wrap the explicit result so it can be reported back through the hybrid interface.
    return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(explicitResult)));
}
template<storm::dd::DdType DdType, class ValueType>
std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel) {
    // Translate the symbolic query into a sparse one and delegate to the sparse engine.
    storm::utility::Stopwatch translationWatch(true);

    // The ODD fixes the state ordering for all symbolic-to-explicit translations below.
    storm::dd::Odd odd = model.getReachableStates().createOdd();
    std::vector<ValueType> sparseExitRates = exitRateVector.toVector(odd);
    storm::storage::SparseMatrix<ValueType> sparseMatrix;
    boost::optional<std::vector<ValueType>> stateRewards, stateActionRewards;
    if (rewardModel.hasStateRewards()) {
        stateRewards = rewardModel.getStateRewardVector().toVector(odd);
    }
    if (rewardModel.hasStateActionRewards()) {
        // Translating the matrix together with the action rewards keeps their row ordering consistent.
        auto matrixAndRewards = transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), model.getNondeterminismVariables(), odd, odd);
        sparseMatrix = std::move(matrixAndRewards.first);
        stateActionRewards = std::move(matrixAndRewards.second);
    } else {
        sparseMatrix = transitionMatrix.toMatrix(model.getNondeterminismVariables(), odd, odd);
    }
    STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine.");
    storm::models::sparse::StandardRewardModel<ValueType> sparseRewardModel(stateRewards, stateActionRewards);

    translationWatch.stop();
    STORM_LOG_INFO("Converting symbolic matrix to explicit representation done in " << translationWatch.getTimeInMilliseconds() << "ms.");

    auto explicitResult = storm::modelchecker::helper::SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(env, dir, sparseMatrix, sparseMatrix.transpose(true), sparseExitRates, markovianStates.toVector(odd), sparseRewardModel);

    // Wrap the explicit result so it can be reported back through the hybrid interface.
    return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(explicitResult)));
}
// Explicit instantiations.
// Cudd, double.
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::CUDD> const& markovianStates, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::CUDD> const& targetStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::CUDD> const& markovianStates, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& phiStates, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates, bool qualitative, double lowerBound, double upperBound);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::CUDD> const& markovianStates, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& psiStates);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::CUDD, double> const& model, storm::dd::Add<storm::dd::DdType::CUDD, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::CUDD> const& markovianStates, storm::dd::Add<storm::dd::DdType::CUDD, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>::RewardModelType const& rewardModel);
// Sylvan, double.
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative, double lowerBound, double upperBound);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, double> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, double> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, double>::RewardModelType const& rewardModel);
// Sylvan, rational number.
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& targetStates, bool qualitative);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& phiStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates, bool qualitative, double lowerBound, double upperBound);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& psiStates);
template std::unique_ptr<CheckResult> HybridMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<storm::dd::DdType::Sylvan, storm::RationalNumber> const& model, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& transitionMatrix, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& markovianStates, storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> const& exitRateVector, typename storm::models::symbolic::Model<storm::dd::DdType::Sylvan, storm::RationalNumber>::RewardModelType const& rewardModel);
}
}

7
src/storm/modelchecker/csl/helper/HybridMarkovAutomatonCslHelper.h

@@ -27,13 +27,6 @@ namespace storm {
template<storm::dd::DdType DdType, typename ValueType, typename std::enable_if<!storm::NumberTraits<ValueType>::SupportsExponential, int>::type = 0>
static std::unique_ptr<CheckResult> computeBoundedUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& phiStates, storm::dd::Bdd<DdType> const& psiStates, bool qualitative, double lowerBound, double upperBound);
template<storm::dd::DdType DdType, typename ValueType>
static std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, storm::dd::Bdd<DdType> const& psiStates);
template<storm::dd::DdType DdType, typename ValueType>
static std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::models::symbolic::MarkovAutomaton<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector, typename storm::models::symbolic::Model<DdType, ValueType>::RewardModelType const& rewardModel);
};
}

562
src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp

@@ -482,555 +482,6 @@ namespace storm {
return storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeTotalRewards(env, std::move(goal), probabilityMatrix, backwardTransitions, dtmcRewardModel, qualitative);
}
template <typename ValueType>
std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<ValueType> const* exitRateVector) {
    // Handle the trivial cases first: without goal states the LRA probability is zero
    // everywhere, and if every state is a goal state it is one everywhere.
    uint_fast64_t const stateCount = rateMatrix.getRowCount();
    if (psiStates.empty()) {
        return std::vector<ValueType>(stateCount, storm::utility::zero<ValueType>());
    }
    if (psiStates.full()) {
        return std::vector<ValueType>(stateCount, storm::utility::one<ValueType>());
    }

    // The LRA probability equals the long-run average of the indicator function of psiStates.
    return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix,
        [&psiStates] (storm::storage::sparse::state_type const& state) -> ValueType {
            return psiStates.get(state) ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>();
        },
        exitRateVector);
}
template <typename ValueType, typename RewardModelType>
std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, RewardModelType const& rewardModel, std::vector<ValueType> const* exitRateVector) {
    // Without a reward model the formula cannot be evaluated.
    STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");

    // Collapse state, state-action and transition rewards into a single per-state value.
    auto stateValue = [&] (storm::storage::sparse::state_type const& state) -> ValueType {
        ValueType value = storm::utility::zero<ValueType>();
        if (rewardModel.hasStateRewards()) {
            value += rewardModel.getStateReward(state);
        }
        if (rewardModel.hasStateActionRewards()) {
            // State action rewards are multiplied with the exit rate r(s). Then, multiplying the reward with the expected time we stay at s (i.e. 1/r(s)) yields the original state reward
            value += exitRateVector ? rewardModel.getStateActionReward(state) * (*exitRateVector)[state]
                                    : rewardModel.getStateActionReward(state);
        }
        if (rewardModel.hasTransitionRewards()) {
            // Transition rewards are already multiplied with the rates
            value += rateMatrix.getPointwiseProductRowSum(rewardModel.getTransitionRewardMatrix(), state);
        }
        return value;
    };
    return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix, stateValue, exitRateVector);
}
template <typename ValueType>
std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::vector<ValueType> const& stateRewardVector, std::vector<ValueType> const* exitRateVector) {
    // Rewards are given directly per state; the value getter is a plain lookup.
    auto rewardOf = [&stateRewardVector] (storm::storage::sparse::state_type const& state) -> ValueType {
        return stateRewardVector[state];
    };
    return computeLongRunAverages<ValueType>(env, std::move(goal), rateMatrix, rewardOf, exitRateVector);
}
// Computes the long-run average value for every state. The value of each state inside a
// BSCC is provided by valueGetter; for a CTMC, exitRateVector holds the exit rates and
// for a time-abstract (DTMC-like) computation it is nullptr.
// The computation proceeds in two phases:
//   (1) compute the LRA within each bottom SCC, and
//   (2) for the remaining (transient) states, solve a reachability-reward-style linear
//       equation system weighting each BSCC's LRA with its reachability probability.
// Fix: the original body computed a probabilityMatrix via computeProbabilityMatrix()
// that was never read afterwards — a dead (and potentially expensive) computation; it
// has been removed. All remaining logic is unchanged.
template <typename ValueType>
std::vector<ValueType> SparseCtmcCslHelper::computeLongRunAverages(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector){
    uint_fast64_t numberOfStates = rateMatrix.getRowCount();

    // Start by decomposing the CTMC into its BSCCs.
    storm::storage::StronglyConnectedComponentDecomposition<ValueType> bsccDecomposition(rateMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
    STORM_LOG_DEBUG("Found " << bsccDecomposition.size() << " BSCCs.");

    // Prepare the vector holding the LRA values for each of the BSCCs.
    std::vector<ValueType> bsccLra;
    bsccLra.reserve(bsccDecomposition.size());

    // Tighten the precision of the underlying solvers if sound results are requested.
    auto underlyingSolverEnvironment = env;
    auto precision = env.solver().lra().getPrecision();
    if (env.solver().isForceSoundness()) {
        // For sound computations, the error in the MECS plus the error in the remaining system should be less then the user defined precision.
        precision /= storm::utility::convertNumber<storm::RationalNumber>(2);
        underlyingSolverEnvironment.solver().lra().setPrecision(precision);
    }
    underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(precision, env.solver().lra().getRelativeTerminationCriterion());

    // Keep track of the maximal and minimal value occuring in one of the BSCCs.
    // These are used below as solver bounds and to seed the initial guess.
    ValueType maxValue, minValue;
    storm::storage::BitVector statesInBsccs(numberOfStates);
    for (auto const& bscc : bsccDecomposition) {
        for (auto const& state : bscc) {
            statesInBsccs.set(state);
        }
        bsccLra.push_back(computeLongRunAveragesForBscc<ValueType>(underlyingSolverEnvironment, bscc, rateMatrix, valueGetter, exitRateVector));
        if (bsccLra.size() == 1) {
            maxValue = bsccLra.back();
            minValue = bsccLra.back();
        } else {
            maxValue = std::max(bsccLra.back(), maxValue);
            minValue = std::min(bsccLra.back(), minValue);
        }
    }

    storm::storage::BitVector statesNotInBsccs = ~statesInBsccs;
    STORM_LOG_DEBUG("Found " << statesInBsccs.getNumberOfSetBits() << " states in BSCCs.");

    // Map every BSCC state to the index of its BSCC (non-BSCC states keep the sentinel).
    std::vector<uint64_t> stateToBsccMap(statesInBsccs.size(), -1);
    for (uint64_t bsccIndex = 0; bsccIndex < bsccDecomposition.size(); ++bsccIndex) {
        for (auto const& state : bsccDecomposition[bsccIndex]) {
            stateToBsccMap[state] = bsccIndex;
        }
    }

    std::vector<ValueType> rewardSolution;
    if (!statesNotInBsccs.empty()) {
        // Calculate LRA for states not in bsccs as expected reachability rewards.
        // Target states are states in bsccs, transition reward is the lra of the bscc for each transition into a
        // bscc and 0 otherwise. This corresponds to the sum of LRAs in BSCC weighted by the reachability probability
        // of the BSCC.
        std::vector<ValueType> rewardRightSide;
        rewardRightSide.reserve(statesNotInBsccs.getNumberOfSetBits());

        for (auto state : statesNotInBsccs) {
            ValueType reward = storm::utility::zero<ValueType>();
            for (auto entry : rateMatrix.getRow(state)) {
                if (statesInBsccs.get(entry.getColumn())) {
                    if (exitRateVector) {
                        // Normalize the rate to a probability before weighting the BSCC's LRA.
                        reward += (entry.getValue() / (*exitRateVector)[state]) * bsccLra[stateToBsccMap[entry.getColumn()]];
                    } else {
                        reward += entry.getValue() * bsccLra[stateToBsccMap[entry.getColumn()]];
                    }
                }
            }
            rewardRightSide.push_back(reward);
        }

        // Compute reachability rewards
        storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
        bool isEqSysFormat = linearEquationSolverFactory.getEquationProblemFormat(underlyingSolverEnvironment) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
        storm::storage::SparseMatrix<ValueType> rewardEquationSystemMatrix = rateMatrix.getSubmatrix(false, statesNotInBsccs, statesNotInBsccs, isEqSysFormat);
        if (exitRateVector) {
            // Turn the rate matrix rows into probability rows by dividing by the exit rate.
            uint64_t localRow = 0;
            for (auto const& globalRow : statesNotInBsccs) {
                for (auto& entry : rewardEquationSystemMatrix.getRow(localRow)) {
                    entry.setValue(entry.getValue() / (*exitRateVector)[globalRow]);
                }
                ++localRow;
            }
        }
        if (isEqSysFormat) {
            rewardEquationSystemMatrix.convertToEquationSystem();
        }
        // Initial guess: the mean of the extremal BSCC values.
        rewardSolution = std::vector<ValueType>(rewardEquationSystemMatrix.getColumnCount(), (maxValue + minValue) / storm::utility::convertNumber<ValueType,uint64_t>(2));
        std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> solver = linearEquationSolverFactory.create(underlyingSolverEnvironment, std::move(rewardEquationSystemMatrix));
        solver->setBounds(minValue, maxValue);
        // Check solver requirements
        auto requirements = solver->getRequirements(underlyingSolverEnvironment);
        STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
        solver->solveEquations(underlyingSolverEnvironment, rewardSolution, rewardRightSide);
    }

    // Fill the result vector.
    std::vector<ValueType> result(numberOfStates);
    auto rewardSolutionIter = rewardSolution.begin();

    for (uint_fast64_t bsccIndex = 0; bsccIndex < bsccDecomposition.size(); ++bsccIndex) {
        storm::storage::StronglyConnectedComponent const& bscc = bsccDecomposition[bsccIndex];
        for (auto const& state : bscc) {
            result[state] = bsccLra[bsccIndex];
        }
    }
    for (auto state : statesNotInBsccs) {
        STORM_LOG_ASSERT(rewardSolutionIter != rewardSolution.end(), "Too few elements in solution.");
        // Take the value from the reward computation. Since the n-th state not in any bscc is the n-th
        // entry in rewardSolution we can just take the next value from the iterator.
        result[state] = *rewardSolutionIter;
        ++rewardSolutionIter;
    }
    return result;
}
template <typename ValueType>
ValueType SparseCtmcCslHelper::computeLongRunAveragesForBscc(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
    // If every state of the BSCC yields the same value (this covers the special case of a
    // singleton BSCC), the long-run average is that value and no solving is necessary.
    ValueType const referenceValue = valueGetter(*bscc.begin());
    bool allValuesEqual = true;
    for (auto const& state : bscc) {
        if (valueGetter(state) != referenceValue) {
            allValuesEqual = false;
            break;
        }
    }
    if (allValuesEqual) {
        return referenceValue;
    }

    // Select the solution technique; the configured default may be overridden to
    // guarantee exact respectively sound results.
    storm::solver::LraMethod method = env.solver().lra().getDetLraMethod();
    bool const methodIsDefault = env.solver().lra().isDetLraMethodSetFromDefault();
    if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && methodIsDefault && method == storm::solver::LraMethod::ValueIteration) {
        method = storm::solver::LraMethod::GainBiasEquations;
        STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
    } else if (env.solver().isForceSoundness() && methodIsDefault && method != storm::solver::LraMethod::ValueIteration) {
        method = storm::solver::LraMethod::ValueIteration;
        STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
    }
    STORM_LOG_TRACE("Computing LRA for BSCC of size " << bscc.size() << " using '" << storm::solver::toString(method) << "'.");

    // Dispatch to the selected technique.
    switch (method) {
        case storm::solver::LraMethod::ValueIteration:
            return computeLongRunAveragesForBsccVi<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector);
        case storm::solver::LraMethod::LraDistributionEquations:
            // We only need the first element of the pair as the lra distribution is not relevant at this point.
            return computeLongRunAveragesForBsccLraDistr<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
        default:
            STORM_LOG_WARN_COND(method == storm::solver::LraMethod::GainBiasEquations, "Unsupported lra method selected. Defaulting to " << storm::solver::toString(storm::solver::LraMethod::GainBiasEquations) << ".");
            // We don't need the bias values
            return computeLongRunAveragesForBsccGainBias<ValueType>(env, bscc, rateMatrix, valueGetter, exitRateVector).first;
    }
}
// Explicit specialization for rational functions: value iteration is not available for
// parametric models, so this overload unconditionally throws a NotSupportedException.
// (The generic method dispatcher may still select VI; this specialization turns that into a clear error.)
template <>
storm::RationalFunction SparseCtmcCslHelper::computeLongRunAveragesForBsccVi<storm::RationalFunction>(Environment const&, storm::storage::StronglyConnectedComponent const&, storm::storage::SparseMatrix<storm::RationalFunction> const&, std::function<storm::RationalFunction (storm::storage::sparse::state_type const& state)> const&, std::vector<storm::RationalFunction> const*) {
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The requested Method for LRA computation is not supported for parametric models.");
}
// Computes the long-run average value of a single BSCC via value iteration on the uniformized chain:
// the CTMC restricted to the BSCC is uniformized into a DTMC in which each step takes
// 1/uniformizationRate time units. The per-step gain of that DTMC, scaled by the uniformization
// rate, is the LRA value of the original chain.
// @param valueGetter yields the state value (reward rate or indicator) that is averaged over time
// @param exitRateVector the CTMC exit rates; nullptr signals a DTMC (implicit exit rate 1 everywhere)
template <typename ValueType>
ValueType SparseCtmcCslHelper::computeLongRunAveragesForBsccVi(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
    // Initialize data about the bscc
    storm::storage::BitVector bsccStates(rateMatrix.getRowGroupCount(), false);
    for (auto const& state : bscc) {
        bsccStates.set(state);
    }

    // Get the uniformization rate: it has to dominate all exit rates occurring within the BSCC.
    ValueType uniformizationRate = storm::utility::one<ValueType>();
    if (exitRateVector) {
        uniformizationRate = storm::utility::vector::max_if(*exitRateVector, bsccStates);
    }
    // To ensure that the model is aperiodic, we need to make sure that every Markovian state gets a self loop.
    // Hence, we increase the uniformization rate a little.
    uniformizationRate *= (storm::utility::one<ValueType>() + storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor()));

    // Get the transitions of the submodel
    typename storm::storage::SparseMatrix<ValueType> bsccMatrix = rateMatrix.getSubmatrix(true, bsccStates, bsccStates, true);
    // Uniformize the transitions: the diagonal becomes 1 + (R(s,s) - E(s)) / unifRate,
    // every off-diagonal entry becomes R(s,s') / unifRate.
    uint64_t subState = 0;
    for (auto state : bsccStates) {
        for (auto& entry : bsccMatrix.getRow(subState)) {
            if (entry.getColumn() == subState) {
                if (exitRateVector) {
                    entry.setValue(storm::utility::one<ValueType>() + (entry.getValue() - (*exitRateVector)[state]) / uniformizationRate);
                } else {
                    entry.setValue(storm::utility::one<ValueType>() + (entry.getValue() - storm::utility::one<ValueType>()) / uniformizationRate);
                }
            } else {
                entry.setValue(entry.getValue() / uniformizationRate);
            }
        }
        ++subState;
    }

    // Compute the rewards obtained in a single uniformization step
    std::vector<ValueType> markovianRewards;
    markovianRewards.reserve(bsccMatrix.getRowCount());
    for (auto const& state : bsccStates) {
        markovianRewards.push_back(valueGetter(state) / uniformizationRate);
    }

    // start the iterations
    // NOTE(review): for the absolute criterion the precision ends up divided by the uniformization
    // rate twice (once here, once below), although the final result is only scaled up by the rate
    // once. This looks over-conservative by one factor of the rate -- confirm this is intended.
    ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / uniformizationRate;
    bool relative = env.solver().lra().getRelativeTerminationCriterion();
    if (!relative) {
        precision /= uniformizationRate;
    }
    std::vector<ValueType> x(bsccMatrix.getRowCount(), storm::utility::zero<ValueType>());
    std::vector<ValueType> xPrime(x.size());

    auto multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, bsccMatrix);
    ValueType maxDiff, minDiff;
    bool converged = false;
    uint64_t iter = 0;
    boost::optional<uint64_t> maxIter;
    if (env.solver().lra().isMaximalIterationCountSet()) {
        maxIter = env.solver().lra().getMaximalIterationCount();
    }
    while (!maxIter.is_initialized() || iter < maxIter.get()) {
        ++iter;
        // Compute the values for the markovian states. We also keep track of the maximal and minimal difference between two values (for convergence checking)
        multiplier->multiply(env, x, &markovianRewards, xPrime);
        // update xPrime and check for convergence
        // to avoid large (and numerically unstable) x-values, we subtract a reference value.
        auto xIt = x.begin();
        auto xPrimeIt = xPrime.begin();
        ValueType refVal = *xPrimeIt;
        maxDiff = *xPrimeIt - *xIt;
        minDiff = maxDiff;
        *xPrimeIt -= refVal;
        *xIt = *xPrimeIt;
        for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) {
            ValueType diff = *xPrimeIt - *xIt;
            maxDiff = std::max(maxDiff, diff);
            minDiff = std::min(minDiff, diff);
            *xPrimeIt -= refVal;
            *xIt = *xPrimeIt;
        }

        // Check for convergence. The uniformization rate is already incorporated into the precision parameter
        if ((maxDiff - minDiff) <= (relative ? (precision * minDiff) : precision)) {
            converged = true;
            break;
        }
        if (storm::utility::resources::isTerminate()) {
            break;
        }
    }
    // Report the actual outcome. Previously the outcome was judged by comparing 'iter' with the
    // iteration bound, which (a) falsely warned when convergence happened exactly in the last
    // permitted iteration and (b) falsely reported convergence when aborted via isTerminate().
    if (converged) {
        STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
    } else {
        STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
    }

    // The per-step gain lies within [minDiff, maxDiff]; take the midpoint and rescale from
    // uniformization steps back to model time.
    return (maxDiff + minDiff) * uniformizationRate / (storm::utility::convertNumber<ValueType>(2.0));
}
// Computes the long-run average value of a single BSCC by solving the gain/bias linear equation
// system. Returns the pair (gain, bias vector); the gain is the LRA value of the BSCC, the bias
// vector is indexed in BSCC iteration order (with the bias of the first state fixed to zero).
template <typename ValueType>
std::pair<ValueType, std::vector<ValueType>> SparseCtmcCslHelper::computeLongRunAveragesForBsccGainBias(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
// We build the equation system as in Line 3 of Algorithm 3 from
// Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017)
// The first variable corresponds to the gain of the bscc whereas the subsequent variables yield the bias for each state s_1, s_2, ....
// No bias variable for s_0 is needed since it is always set to zero, yielding an nxn equation system matrix
// To make this work for CTMC, we could uniformize the model. This preserves LRA and ensures that we can compute the
// LRA as for a DTMC (the sojourn time in each state is the same). If we then multiply the equations with the uniformization rate,
// the uniformization rate cancels out. Hence, we obtain the equation system below.
// Get a mapping from global state indices to local ones.
std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
uint64_t localIndex = 0;
for (auto const& globalIndex : bscc) {
toLocalIndexMap[globalIndex] = localIndex;
++localIndex;
}
// Prepare an environment for the underlying equation solver
auto subEnv = env;
if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
// Topological solver does not make any sense since the BSCC is connected.
subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
}
// The underlying solver has to deliver the LRA precision requested by the user.
subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
// Build the equation system matrix and vector.
storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
// Some solvers expect the system as 'A*x = b', others as the fixed point 'x = A*x + b';
// the coefficients built below differ accordingly.
bool isEquationSystemFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
storm::storage::SparseMatrixBuilder<ValueType> builder(bscc.size(), bscc.size());
std::vector<ValueType> eqSysVector;
eqSysVector.reserve(bscc.size());
// The first row asserts that the weighted bias variables and the reward at s_0 sum up to the gain
uint64_t row = 0;
ValueType entryValue;
for (auto const& globalState : bscc) {
// Coefficient for the gain variable (always column 0)
if (isEquationSystemFormat) {
// '1-0' in row 0 and -(-1) in other rows
builder.addNextValue(row, 0, storm::utility::one<ValueType>());
} else if (row > 0) {
// No coefficient in row 0, otherwise subtract the gain
builder.addNextValue(row, 0, -storm::utility::one<ValueType>());
}
// Compute weighted sum over successor state. As this is a BSCC, each successor state will again be in the BSCC.
// The diagonal carries the (negated/shifted) exit rate; row 0 has no diagonal contribution
// since the bias of s_0 is fixed to zero.
auto diagonalValue = storm::utility::zero<ValueType>();
if (row > 0) {
if (isEquationSystemFormat) {
diagonalValue = exitRateVector ? (*exitRateVector)[globalState] : storm::utility::one<ValueType>();
} else {
diagonalValue = storm::utility::one<ValueType>() - (exitRateVector ? (*exitRateVector)[globalState] : storm::utility::one<ValueType>());
}
}
// The builder expects the entries of a row in ascending column order (addNextValue), so the
// diagonal value is woven into the iteration over the (column-sorted) matrix row below.
bool needDiagonalEntry = !storm::utility::isZero(diagonalValue);
for (auto const& entry : rateMatrix.getRow(globalState)) {
uint64_t col = toLocalIndexMap[entry.getColumn()];
if (col == 0) {
//Skip transition to state_0. This corresponds to setting the bias of state_0 to zero
continue;
}
entryValue = entry.getValue();
if (isEquationSystemFormat) {
entryValue = -entryValue;
}
if (needDiagonalEntry && col >= row) {
if (col == row) {
// Merge the diagonal value into the existing entry at (row, row).
entryValue += diagonalValue;
} else { // col > row
// We just passed the diagonal position; emit it before the current entry.
builder.addNextValue(row, row, diagonalValue);
}
needDiagonalEntry = false;
}
builder.addNextValue(row, col, entryValue);
}
// All entries of the row were left of the diagonal; the diagonal entry still has to be added.
if (needDiagonalEntry) {
builder.addNextValue(row, row, diagonalValue);
}
eqSysVector.push_back(valueGetter(globalState));
++row;
}
// Create a linear equation solver
auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
// Check solver requirements.
auto requirements = solver->getRequirements(subEnv);
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
// Todo: Find bounds on the bias variables. Just inserting the maximal value from the vector probably does not work.
std::vector<ValueType> eqSysSol(bscc.size(), storm::utility::zero<ValueType>());
// Take the mean of the rewards as an initial guess for the gain
//eqSysSol.front() = std::accumulate(eqSysVector.begin(), eqSysVector.end(), storm::utility::zero<ValueType>()) / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size());
solver->solveEquations(subEnv, eqSysSol, eqSysVector);
// The first solution variable is the gain; the remaining ones are the biases of s_1, s_2, ...
ValueType gain = eqSysSol.front();
// insert bias value for state 0 (fixed to zero by construction)
eqSysSol.front() = storm::utility::zero<ValueType>();
// Return the gain and the bias values
return std::pair<ValueType, std::vector<ValueType>>(std::move(gain), std::move(eqSysSol));
}
// Computes the long-run average value of a single BSCC by solving for its steady-state
// distribution. Returns the pair (LRA value, steady-state distribution), where the distribution
// is indexed in BSCC iteration order.
template <typename ValueType>
std::pair<ValueType, std::vector<ValueType>> SparseCtmcCslHelper::computeLongRunAveragesForBsccLraDistr(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector) {
// Let A be an auxiliary matrix with A[s,s] = R(s,s) - r(s) and A[s,s'] = R(s,s') for s,s' in the BSCC, s != s'.
// We build and solve the equation system for
//   x * A = 0  and  x_0 + ... + x_n = 1
// which is equivalent to
//   (I + A^t) * x = x  and  x_n = 1 - x_0 - ... - x_{n-1}
// Then, x[i] will be the fraction of the time we are in state i.
// A single-state BSCC is handled separately since its distribution is trivial
// (and the construction below relies on non-zero diagonal entries, see below).
if (bscc.size() == 1) {
return { valueGetter(*bscc.begin()), {storm::utility::one<ValueType>()} };
}
// Prepare an environment for the underlying linear equation solver
auto subEnv = env;
if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
// Topological solver does not make any sense since the BSCC is connected.
subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
}
// The underlying solver has to deliver the LRA precision requested by the user.
subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
// Get a mapping from global state indices to local ones as well as a bitvector containing states within the BSCC.
std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
storm::storage::BitVector bsccStates(rateMatrix.getRowCount(), false);
uint64_t localIndex = 0;
for (auto const& globalIndex : bscc) {
bsccStates.set(globalIndex, true);
toLocalIndexMap[globalIndex] = localIndex;
++localIndex;
}
// Build the auxiliary Matrix A.
auto auxMatrix = rateMatrix.getSubmatrix(false, bsccStates, bsccStates, true); // add diagonal entries!
uint64_t row = 0;
for (auto const& globalIndex : bscc) {
for (auto& entry : auxMatrix.getRow(row)) {
if (entry.getColumn() == row) {
// Subtract the exit rate r(s) on the diagonal (rate 1 for DTMCs).
// This value is non-zero since we have a BSCC with more than one state
if (exitRateVector) {
entry.setValue(entry.getValue() - (*exitRateVector)[globalIndex]);
} else {
entry.setValue(entry.getValue() - storm::utility::one<ValueType>());
}
}
}
++row;
}
assert(row == auxMatrix.getRowCount());
// We need to consider A^t. This will not delete diagonal entries since they are non-zero.
auxMatrix = auxMatrix.transpose();
// Check whether we need the fixpoint characterization (x = A*x + b instead of A*x = b).
storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
bool isFixpointFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem;
if (isFixpointFormat) {
// Add a 1 on the diagonal, i.e. form (I + A^t) for the fixpoint formulation.
for (row = 0; row < auxMatrix.getRowCount(); ++row) {
for (auto& entry : auxMatrix.getRow(row)) {
if (entry.getColumn() == row) {
entry.setValue(storm::utility::one<ValueType>() + entry.getValue());
}
}
}
}
// We now build the equation system matrix.
// We can drop the last row of A and add ones in this row instead to assert that the variables sum up to one
// (in fixpoint format the last row instead encodes x_n = 1 - x_0 - ... - x_{n-1},
// i.e. -1 in the off-diagonal columns and 0 on the diagonal).
// Phase 1: replace the existing entries of the last row in place (columns are reassigned 0,1,2,...)
uint64_t col = 0;
uint64_t lastRow = auxMatrix.getRowCount() - 1;
for (auto& entry : auxMatrix.getRow(lastRow)) {
entry.setColumn(col);
if (isFixpointFormat) {
if (col == lastRow) {
entry.setValue(storm::utility::zero<ValueType>());
} else {
entry.setValue(-storm::utility::one<ValueType>());
}
} else {
entry.setValue(storm::utility::one<ValueType>());
}
++col;
}
// Phase 2: the last row may have had fewer entries than columns; append the remaining ones via the builder.
storm::storage::SparseMatrixBuilder<ValueType> builder(std::move(auxMatrix));
for (; col <= lastRow; ++col) {
if (isFixpointFormat) {
if (col != lastRow) {
builder.addNextValue(lastRow, col, -storm::utility::one<ValueType>());
}
} else {
builder.addNextValue(lastRow, col, storm::utility::one<ValueType>());
}
}
// Right-hand side: all zeros except for the normalization constraint in the last row.
std::vector<ValueType> bsccEquationSystemRightSide(bscc.size(), storm::utility::zero<ValueType>());
bsccEquationSystemRightSide.back() = storm::utility::one<ValueType>();
// Create a linear equation solver
auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
// The solution is a probability distribution, so [0,1] are valid bounds.
solver->setBounds(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>());
// Check solver requirements.
auto requirements = solver->getRequirements(subEnv);
requirements.clearLowerBounds();
requirements.clearUpperBounds();
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
// Use the uniform distribution as the initial guess.
std::vector<ValueType> lraDistr(bscc.size(), storm::utility::one<ValueType>() / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size()));
solver->solveEquations(subEnv, lraDistr, bsccEquationSystemRightSide);
// Calculate final LRA Value as the expectation of the state values under the steady-state distribution.
ValueType result = storm::utility::zero<ValueType>();
auto solIt = lraDistr.begin();
for (auto const& globalState : bscc) {
result += valueGetter(globalState) * (*solIt);
++solIt;
}
assert(solIt == lraDistr.end());
return std::pair<ValueType, std::vector<ValueType>>(std::move(result), std::move(lraDistr));
}
template <typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
std::vector<ValueType> SparseCtmcCslHelper::computeAllTransientProbabilities(Environment const& env, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& initialStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::vector<ValueType> const& exitRates, double timeBound) {
@ -1253,10 +704,6 @@ namespace storm {
template std::vector<double> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool qualitative);
template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<double> const* exitRateVector);
template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, std::vector<double> const* exitRateVector);
template std::vector<double> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, std::vector<double> const& stateRewardVector, std::vector<double> const* exitRateVector);
template std::vector<double> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& rateMatrix, std::vector<double> const& exitRateVector, storm::models::sparse::StandardRewardModel<double> const& rewardModel, double timeBound);
template std::vector<double> SparseCtmcCslHelper::computeAllTransientProbabilities(Environment const& env, storm::storage::SparseMatrix<double> const& rateMatrix, storm::storage::BitVector const& initialStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::vector<double> const& exitRates, double timeBound);
@ -1290,15 +737,6 @@ namespace storm {
template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative);
template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::storage::SparseMatrix<storm::RationalFunction> const& backwardTransitions, std::vector<storm::RationalFunction> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalFunction> const& rewardModel, bool qualitative);
template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<storm::RationalNumber> const* exitRateVector);
template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<storm::RationalFunction> const* exitRateVector);
template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, storm::models::sparse::StandardRewardModel<RationalNumber> const& rewardModel, std::vector<storm::RationalNumber> const* exitRateVector);
template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, storm::models::sparse::StandardRewardModel<RationalFunction> const& rewardModel, std::vector<storm::RationalFunction> const* exitRateVector);
template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, std::vector<storm::RationalNumber> const& stateRewardVector, std::vector<storm::RationalNumber> const* exitRateVector);
template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, std::vector<storm::RationalFunction> const& stateRewardVector, std::vector<storm::RationalFunction> const* exitRateVector);
template std::vector<storm::RationalNumber> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& rateMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, double timeBound);
template std::vector<storm::RationalFunction> SparseCtmcCslHelper::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalFunction>&& goal, storm::storage::SparseMatrix<storm::RationalFunction> const& rateMatrix, std::vector<storm::RationalFunction> const& exitRateVector, storm::models::sparse::StandardRewardModel<storm::RationalFunction> const& rewardModel, double timeBound);

21
src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.h

@ -55,15 +55,6 @@ namespace storm {
template <typename ValueType, typename RewardModelType>
static std::vector<ValueType> computeTotalRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, RewardModelType const& rewardModel, bool qualitative);
template <typename ValueType>
static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::BitVector const& psiStates, std::vector<ValueType> const* exitRateVector);
template <typename ValueType, typename RewardModelType>
static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, RewardModelType const& rewardModel, std::vector<ValueType> const* exitRateVector);
template <typename ValueType>
static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::vector<ValueType> const& stateRewardVector, std::vector<ValueType> const* exitRateVector);
template <typename ValueType>
static std::vector<ValueType> computeReachabilityTimes(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& targetStates, bool qualitative);
@ -131,18 +122,6 @@ namespace storm {
template <typename ValueType>
static bool checkAndUpdateTransientProbabilityEpsilon(storm::Environment const& env, ValueType& epsilon, std::vector<ValueType> const& resultVector, storm::storage::BitVector const& relevantPositions);
private:
template <typename ValueType>
static std::vector<ValueType> computeLongRunAverages(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
template <typename ValueType>
static ValueType computeLongRunAveragesForBscc(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
template <typename ValueType>
static ValueType computeLongRunAveragesForBsccVi(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
template <typename ValueType>
static std::pair<ValueType, std::vector<ValueType>> computeLongRunAveragesForBsccGainBias(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
template <typename ValueType>
static std::pair<ValueType, std::vector<ValueType>> computeLongRunAveragesForBsccLraDistr(Environment const& env, storm::storage::StronglyConnectedComponent const& bscc, storm::storage::SparseMatrix<ValueType> const& rateMatrix, std::function<ValueType (storm::storage::sparse::state_type const& state)> const& valueGetter, std::vector<ValueType> const* exitRateVector);
};
}
}

475
src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.cpp

@ -717,198 +717,6 @@ namespace storm {
return SparseMdpPrctlHelper<ValueType>::computeReachabilityRewards(env, dir, transitionMatrix, backwardTransitions, scaledRewardModel, psiStates, false, produceScheduler);
}
// Computes the long-run average probability of being in a psi state, for every state of the
// Markov automaton, by reducing the problem to long-run average rewards: each Markovian psi
// state is assigned reward one.
template<typename ValueType>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates) {
    uint64_t const stateCount = transitionMatrix.getRowGroupCount();

    // Trivial case: without goal states the long-run probability is zero everywhere.
    if (psiStates.empty()) {
        return std::vector<ValueType>(stateCount, storm::utility::zero<ValueType>());
    }
    // Trivial case: if every state is a goal state, the long-run probability is one everywhere.
    if (psiStates.full()) {
        return std::vector<ValueType>(stateCount, storm::utility::one<ValueType>());
    }

    // Reduce to long-run average rewards: reward one on every Markovian goal state, zero elsewhere.
    std::vector<ValueType> indicatorRewards(stateCount, storm::utility::zero<ValueType>());
    storm::utility::vector::setVectorValues(indicatorRewards, markovianStates & psiStates, storm::utility::one<ValueType>());
    storm::models::sparse::StandardRewardModel<ValueType> rewardModel(std::move(indicatorRewards));
    return computeLongRunAverageRewards(env, dir, transitionMatrix, backwardTransitions, exitRateVector, markovianStates, rewardModel);
}
template<typename ValueType, typename RewardModelType>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel) {
uint64_t numberOfStates = transitionMatrix.getRowGroupCount();
// Start by decomposing the Markov automaton into its MECs.
storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);
// Get some data members for convenience.
std::vector<uint64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
// Now start with compute the long-run average for all end components in isolation.
std::vector<ValueType> lraValuesForEndComponents;
// While doing so, we already gather some information for the following steps.
std::vector<uint64_t> stateToMecIndexMap(numberOfStates);
storm::storage::BitVector statesInMecs(numberOfStates);
auto underlyingSolverEnvironment = env;
if (env.solver().isForceSoundness()) {
// For sound computations, the error in the MECS plus the error in the remaining system should be less then the user defined precsion.
underlyingSolverEnvironment.solver().minMax().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
underlyingSolverEnvironment.solver().lra().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
}
for (uint64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];
// Gather information for later use.
for (auto const& stateChoicesPair : mec) {
uint64_t state = stateChoicesPair.first;
statesInMecs.set(state);
stateToMecIndexMap[state] = currentMecIndex;
}
// Compute the LRA value for the current MEC.
lraValuesForEndComponents.push_back(computeLraForMaximalEndComponent(underlyingSolverEnvironment, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec));
}
// For fast transition rewriting, we build some auxiliary data structures.
storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
uint64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits();
uint64_t lastStateNotInMecs = 0;
uint64_t numberOfStatesNotInMecs = 0;
std::vector<uint64_t> statesNotInMecsBeforeIndex;
statesNotInMecsBeforeIndex.reserve(numberOfStates);
for (auto state : statesNotContainedInAnyMec) {
while (lastStateNotInMecs <= state) {
statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs);
++lastStateNotInMecs;
}
++numberOfStatesNotInMecs;
}
uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();
// Finally, we are ready to create the SSP matrix and right-hand side of the SSP.
std::vector<ValueType> b;
typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates , 0, false, true, numberOfSspStates);
// If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications).
uint64_t currentChoice = 0;
for (auto state : statesNotContainedInAnyMec) {
sspMatrixBuilder.newRowGroup(currentChoice);
for (uint64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) {
std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
b.push_back(storm::utility::zero<ValueType>());
for (auto element : transitionMatrix.getRow(choice)) {
if (statesNotContainedInAnyMec.get(element.getColumn())) {
// If the target state is not contained in an MEC, we can copy over the entry.
sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
} else {
// If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
// so that we are able to write the cumulative probability to the MEC into the matrix.
auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
}
}
// Now insert all (cumulative) probability values that target an MEC.
for (uint64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) {
if (auxiliaryStateToProbabilityMap[mecIndex] != 0) {
sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]);
}
}
}
}
// Now we are ready to construct the choices for the auxiliary states.
for (uint64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
sspMatrixBuilder.newRowGroup(currentChoice);
for (auto const& stateChoicesPair : mec) {
uint64_t state = stateChoicesPair.first;
storm::storage::FlatSet<uint64_t> const& choicesInMec = stateChoicesPair.second;
for (uint64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) {
// If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
if (choicesInMec.find(choice) == choicesInMec.end()) {
std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
b.push_back(storm::utility::zero<ValueType>());
for (auto element : transitionMatrix.getRow(choice)) {
if (statesNotContainedInAnyMec.get(element.getColumn())) {
// If the target state is not contained in an MEC, we can copy over the entry.
sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
} else {
// If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
// so that we are able to write the cumulative probability to the MEC into the matrix.
auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
}
}
// Now insert all (cumulative) probability values that target an MEC.
for (uint64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) {
if (auxiliaryStateToProbabilityMap[targetMecIndex] != 0) {
sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]);
}
}
++currentChoice;
}
}
}
// For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
++currentChoice;
b.push_back(lraValuesForEndComponents[mecIndex]);
}
// Finalize the matrix and solve the corresponding system of equations.
storm::storage::SparseMatrix<ValueType> sspMatrix = sspMatrixBuilder.build(currentChoice, numberOfSspStates, numberOfSspStates);
std::vector<ValueType> x(numberOfSspStates);
// Check for requirements of the solver.
storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, dir);
requirements.clearBounds();
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(underlyingSolverEnvironment, sspMatrix);
solver->setHasUniqueSolution();
solver->setHasNoEndComponents();
solver->setLowerBound(storm::utility::zero<ValueType>());
solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end()));
solver->setRequirementsChecked();
solver->solveEquations(underlyingSolverEnvironment, dir, x, b);
// Prepare result vector.
std::vector<ValueType> result(numberOfStates);
// Set the values for states not contained in MECs.
storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, x);
// Set the values for all states in MECs.
for (auto state : statesInMecs) {
result[state] = x[firstAuxiliaryStateIndex + stateToMecIndexMap[state]];
}
return result;
}
template <typename ValueType>
MDPSparseModelCheckingHelperReturnType<ValueType> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler) {
@ -922,268 +730,6 @@ namespace storm {
return SparseMdpPrctlHelper<ValueType>::computeReachabilityRewards(env, dir, transitionMatrix, backwardTransitions, rewardModel, psiStates, false, produceScheduler);
}
template<typename ValueType, typename RewardModelType>
ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
    // A singleton MEC can be handled directly, without setting up an LP or value iteration:
    // its LRA value is the state reward plus the reward rate earned in the single (Markovian) state.
    auto secondEntryIt = mec.begin();
    ++secondEntryIt;
    if (secondEntryIt == mec.end()) {
        uint64_t const singleState = mec.begin()->first;
        // A probabilistic-only MEC means time never advances (Zeno behavior); LRA is undefined then.
        STORM_LOG_THROW(markovianStates.get(singleState), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
        ValueType value = rewardModel.hasStateRewards() ? rewardModel.getStateReward(singleState) : storm::utility::zero<ValueType>();
        if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
            STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has nondeterministic behavior.");
            uint64_t const singleChoice = *mec.begin()->second.begin();
            // Action rewards are gained at the state's exit rate.
            value += exitRateVector[singleState] * rewardModel.getTotalStateActionReward(singleState, singleChoice, transitionMatrix, storm::utility::zero<ValueType>());
        }
        return value;
    }
    // For proper MECs, pick the solution technique configured in the environment. If the method was
    // only set by default, it may be overridden to guarantee exact or sound results, respectively.
    storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
    bool const methodChosenByDefault = env.solver().lra().isNondetLraMethodSetFromDefault();
    if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && methodChosenByDefault && method != storm::solver::LraMethod::LinearProgramming) {
        STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
        method = storm::solver::LraMethod::LinearProgramming;
    } else if (env.solver().isForceSoundness() && methodChosenByDefault && method != storm::solver::LraMethod::ValueIteration) {
        STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
        method = storm::solver::LraMethod::ValueIteration;
    }
    // Dispatch to the selected implementation.
    switch (method) {
        case storm::solver::LraMethod::LinearProgramming:
            return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec);
        case storm::solver::LraMethod::ValueIteration:
            return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec);
        default:
            STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
    }
}
// Computes the optimal long-run average reward of the given MEC by solving a linear program with
// one unbounded variable x_s per MEC state plus the variable k that is being optimized.
template<typename ValueType, typename RewardModelType>
ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
    std::unique_ptr<storm::utility::solver::LpSolverFactory<ValueType>> lpSolverFactory(new storm::utility::solver::LpSolverFactory<ValueType>());
    std::unique_ptr<storm::solver::LpSolver<ValueType>> solver = lpSolverFactory->create("LRA for MEC");
    // The LP objective is optimized in the direction opposite to the query direction (standard LP
    // encoding of LRA for nondeterministic models; the constraint directions below depend on dir as well).
    solver->setOptimizationDirection(invert(dir));
    // First, we need to create the variables for the problem.
    std::map<uint64_t, storm::expressions::Variable> stateToVariableMap;
    for (auto const& stateChoicesPair : mec) {
        std::string variableName = "x" + std::to_string(stateChoicesPair.first);
        stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
    }
    storm::expressions::Variable k = solver->addUnboundedContinuousVariable("k", storm::utility::one<ValueType>());
    solver->update();
    // Now we encode the problem as constraints.
    std::vector<uint64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
    for (auto const& stateChoicesPair : mec) {
        uint64_t state = stateChoicesPair.first;
        // Now, based on the type of the state, create a suitable constraint.
        if (markovianStates.get(state)) {
            // Markovian states have exactly one choice; their constraint additionally weights k and the
            // reward right-hand side with the expected sojourn time 1/E(s).
            STORM_LOG_ASSERT(stateChoicesPair.second.size() == 1, "Markovian state " << state << " is not deterministic: It has " << stateChoicesPair.second.size() << " choices.");
            uint64_t choice = *stateChoicesPair.second.begin();
            storm::expressions::Expression constraint = stateToVariableMap.at(state);
            // The single choice of a Markovian state is the first (and only) row of its group.
            for (auto element : transitionMatrix.getRow(nondeterministicChoiceIndices[state])) {
                constraint = constraint - stateToVariableMap.at(element.getColumn()) * solver->getManager().rational((element.getValue()));
            }
            constraint = constraint + solver->getManager().rational(storm::utility::one<ValueType>() / exitRateVector[state]) * k;
            storm::expressions::Expression rightHandSide = solver->getManager().rational(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, (ValueType) (storm::utility::one<ValueType>() / exitRateVector[state])));
            if (dir == OptimizationDirection::Minimize) {
                constraint = constraint <= rightHandSide;
            } else {
                constraint = constraint >= rightHandSide;
            }
            solver->addConstraint("state" + std::to_string(state), constraint);
        } else {
            // For probabilistic states, we want to add the constraint x_s <= sum P(s, a, s') * x_s' where a is the current action
            // and the sum ranges over all states s'. No time passes in probabilistic states, so k does not occur here.
            for (auto choice : stateChoicesPair.second) {
                storm::expressions::Expression constraint = stateToVariableMap.at(state);
                for (auto element : transitionMatrix.getRow(choice)) {
                    constraint = constraint - stateToVariableMap.at(element.getColumn()) * solver->getManager().rational(element.getValue());
                }
                storm::expressions::Expression rightHandSide = solver->getManager().rational(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero<ValueType>()));
                if (dir == OptimizationDirection::Minimize) {
                    constraint = constraint <= rightHandSide;
                } else {
                    constraint = constraint >= rightHandSide;
                }
                solver->addConstraint("state" + std::to_string(state), constraint);
            }
        }
    }
    solver->optimize();
    // The optimal value of k is the long-run average value of the MEC.
    return solver->getContinuousValue(k);
}
// Computes the optimal long-run average reward of the given MEC via (relative) value iteration:
// the Markovian part of the MEC is uniformized, and in each iteration the probabilistic states are
// collapsed by solving an inner (MinMax) equation system. The iteration converges when the span of
// the per-step value differences falls below the requested precision.
template<typename ValueType, typename RewardModelType>
ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
    // Initialize data about the mec: bit vectors of its states and choices.
    storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false);
    storm::storage::BitVector mecChoices(transitionMatrix.getRowCount(), false);
    for (auto const& stateChoicesPair : mec) {
        mecStates.set(stateChoicesPair.first);
        for (auto const& choice : stateChoicesPair.second) {
            mecChoices.set(choice);
        }
    }
    storm::storage::BitVector markovianMecStates = mecStates & markovianStates;
    storm::storage::BitVector probabilisticMecStates = mecStates & ~markovianStates;
    storm::storage::BitVector probabilisticMecChoices = transitionMatrix.getRowFilter(probabilisticMecStates) & mecChoices;
    // A MEC without Markovian states lets no time pass (Zeno behavior), so the LRA is undefined.
    STORM_LOG_THROW(!markovianMecStates.empty(), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
    bool hasProbabilisticStates = !probabilisticMecStates.empty();
    // Get the uniformization rate
    ValueType uniformizationRate = storm::utility::vector::max_if(exitRateVector, markovianMecStates);
    // To ensure that the model is aperiodic, we need to make sure that every Markovian state gets a self loop.
    // Hence, we increase the uniformization rate a little.
    uniformizationRate *= (storm::utility::one<ValueType>() + storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor()));
    // Get the transitions of the submodel, that is
    // * a matrix aMarkovian with all (uniformized) transitions from Markovian mec states to all Markovian mec states.
    // * a matrix aMarkovianToProbabilistic with all (uniformized) transitions from Markovian mec states to all probabilistic mec states.
    // * a matrix aProbabilistic with all transitions from probabilistic mec states to other probabilistic mec states.
    // * a matrix aProbabilisticToMarkovian with all transitions from probabilistic mec states to all Markovian mec states.
    typename storm::storage::SparseMatrix<ValueType> aMarkovian = transitionMatrix.getSubmatrix(true, markovianMecStates, markovianMecStates, true);
    typename storm::storage::SparseMatrix<ValueType> aMarkovianToProbabilistic, aProbabilistic, aProbabilisticToMarkovian;
    if (hasProbabilisticStates) {
        aMarkovianToProbabilistic = transitionMatrix.getSubmatrix(true, markovianMecStates, probabilisticMecStates);
        aProbabilistic = transitionMatrix.getSubmatrix(false, probabilisticMecChoices, probabilisticMecStates);
        aProbabilisticToMarkovian = transitionMatrix.getSubmatrix(false, probabilisticMecChoices, markovianMecStates);
    }
    // The matrices with transitions from Markovian states need to be uniformized.
    uint64_t subState = 0;
    for (auto state : markovianMecStates) {
        ValueType uniformizationFactor = exitRateVector[state] / uniformizationRate;
        if (hasProbabilisticStates) {
            for (auto& entry : aMarkovianToProbabilistic.getRow(subState)) {
                entry.setValue(entry.getValue() * uniformizationFactor);
            }
        }
        for (auto& entry : aMarkovian.getRow(subState)) {
            if (entry.getColumn() == subState) {
                // The diagonal entry receives the complementary self-loop probability of the uniformized chain.
                entry.setValue(storm::utility::one<ValueType>() - uniformizationFactor * (storm::utility::one<ValueType>() - entry.getValue()));
            } else {
                entry.setValue(entry.getValue() * uniformizationFactor);
            }
        }
        ++subState;
    }
    // Compute the rewards obtained in a single uniformization step
    std::vector<ValueType> markovianChoiceRewards;
    markovianChoiceRewards.reserve(aMarkovian.getRowCount());
    for (auto const& state : markovianMecStates) {
        // State rewards are scaled by the expected step duration, action rewards by the exit rate
        // relative to the uniformization rate.
        ValueType stateRewardScalingFactor = storm::utility::one<ValueType>() / uniformizationRate;
        ValueType actionRewardScalingFactor = exitRateVector[state] / uniformizationRate;
        assert(transitionMatrix.getRowGroupSize(state) == 1);
        uint64_t choice = transitionMatrix.getRowGroupIndices()[state];
        markovianChoiceRewards.push_back(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, stateRewardScalingFactor, actionRewardScalingFactor));
    }
    std::vector<ValueType> probabilisticChoiceRewards;
    if (hasProbabilisticStates) {
        // Probabilistic choices take no time, so their rewards are not scaled.
        probabilisticChoiceRewards.reserve(aProbabilistic.getRowCount());
        for (auto const& state : probabilisticMecStates) {
            uint64_t groupStart = transitionMatrix.getRowGroupIndices()[state];
            uint64_t groupEnd = transitionMatrix.getRowGroupIndices()[state + 1];
            for (uint64_t choice = probabilisticMecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = probabilisticMecChoices.getNextSetIndex(choice + 1)) {
                probabilisticChoiceRewards.push_back(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero<ValueType>()));
            }
        }
    }
    // start the iterations
    // The requested precision refers to time-rates, so it is rescaled to the per-step values iterated here.
    ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / uniformizationRate;
    bool relative = env.solver().lra().getRelativeTerminationCriterion();
    // v holds the current values of the Markovian states, w the values shifted by v.front()
    // (relative value iteration keeps the iterates bounded).
    std::vector<ValueType> v(aMarkovian.getRowCount(), storm::utility::zero<ValueType>());
    std::vector<ValueType> w = v;
    std::vector<ValueType> x, b;
    auto solverEnv = env;
    std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver;
    if (hasProbabilisticStates) {
        if (env.solver().isForceSoundness()) {
            // To get correct results, the inner equation systems are solved exactly.
            // TODO investigate how an error would propagate
            solverEnv.solver().setForceExact(true);
        }
        x.resize(aProbabilistic.getRowGroupCount(), storm::utility::zero<ValueType>());
        b = probabilisticChoiceRewards;
        solver = setUpProbabilisticStatesSolver(solverEnv, dir, aProbabilistic);
    }
    uint64_t iter = 0;
    boost::optional<uint64_t> maxIter;
    if (env.solver().lra().isMaximalIterationCountSet()) {
        maxIter = env.solver().lra().getMaximalIterationCount();
    }
    while (!maxIter.is_initialized() || iter < maxIter.get()) {
        ++iter;
        // Compute the expected total rewards for the probabilistic states
        if (hasProbabilisticStates) {
            if (solver) {
                solver->solveEquations(solverEnv, dir, x, b);
            } else {
                // Without a solver the probabilistic part has no transitions among its own states;
                // the optimal value is just the best immediate reward.
                storm::utility::vector::reduceVectorMinOrMax(dir, b, x, aProbabilistic.getRowGroupIndices());
            }
        }
        // now compute the values for the markovian states. We also keep track of the maximal and minimal difference between two values (for convergence checking)
        // The first row is handled separately to initialize maxDiff/minDiff.
        auto vIt = v.begin();
        uint64_t row = 0;
        ValueType newValue = markovianChoiceRewards[row] + aMarkovian.multiplyRowWithVector(row, w);
        if (hasProbabilisticStates) {
            newValue += aMarkovianToProbabilistic.multiplyRowWithVector(row, x);
        }
        ValueType maxDiff = newValue - *vIt;
        ValueType minDiff = maxDiff;
        *vIt = newValue;
        for (++vIt, ++row; row < aMarkovian.getRowCount(); ++vIt, ++row) {
            newValue = markovianChoiceRewards[row] + aMarkovian.multiplyRowWithVector(row, w);
            if (hasProbabilisticStates) {
                newValue += aMarkovianToProbabilistic.multiplyRowWithVector(row, x);
            }
            ValueType diff = newValue - *vIt;
            maxDiff = std::max(maxDiff, diff);
            minDiff = std::min(minDiff, diff);
            *vIt = newValue;
        }
        // Check for convergence: the span of the differences has to be small enough.
        if ((maxDiff - minDiff) <= (relative ? (precision * (v.front() + minDiff)) : precision)) {
            break;
        }
        // Abort if a resource limit (timeout/signal) was hit.
        if (storm::utility::resources::isTerminate()) {
            break;
        }
        // update the rhs of the MinMax equation system
        ValueType referenceValue = v.front();
        storm::utility::vector::applyPointwise<ValueType, ValueType>(v, w, [&referenceValue] (ValueType const& v_i) -> ValueType { return v_i - referenceValue; });
        if (hasProbabilisticStates) {
            aProbabilisticToMarkovian.multiplyWithVector(w, b);
            storm::utility::vector::addVectors(b, probabilisticChoiceRewards, b);
        }
    }
    if (maxIter.is_initialized() && iter == maxIter.get()) {
        STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
    } else {
        STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
    }
    // Rescale the converged per-step average back to a per-time-unit rate.
    return v.front() * uniformizationRate;
}
// Explicit template instantiations for ValueType = double.
template std::vector<double> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair);
template MDPSparseModelCheckingHelperReturnType<double> SparseMarkovAutomatonCslHelper::computeUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, bool produceScheduler);
@ -1192,18 +738,8 @@ namespace storm {
// Explicit template instantiations for ValueType = double (continued).
template MDPSparseModelCheckingHelperReturnType<double> SparseMarkovAutomatonCslHelper::computeTotalRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool produceScheduler);
template std::vector<double> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates);
template std::vector<double> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel);
template MDPSparseModelCheckingHelperReturnType<double> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler);
template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
// Explicit template instantiations for ValueType = storm::RationalNumber (exact arithmetic).
template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair);
template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeUntilProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, bool produceScheduler);
@ -1212,18 +748,7 @@ namespace storm {
// Explicit template instantiations for ValueType = storm::RationalNumber (continued).
template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeTotalRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool produceScheduler);
template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates);
template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel);
template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler);
template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
}
}
}

28
src/storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h

@ -34,37 +34,9 @@ namespace storm {
template <typename ValueType, typename RewardModelType>
static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::BitVector const& psiStates, bool produceScheduler);

template <typename ValueType>
static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates);

template <typename ValueType, typename RewardModelType>
static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel);

template <typename ValueType>
static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityTimes(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, bool produceScheduler);

private:
/*!
 * Computes the long-run average value for the given maximal end component of a Markov automaton.
 *
 * Implementations are based on Linear Programming (LP) and Value Iteration (VI); the variant
 * without suffix selects one of them based on the settings in the given environment.
 *
 * @param env The environment carrying the solver settings to use.
 * @param dir Sets whether the long-run average is to be minimized or maximized.
 * @param transitionMatrix The transition matrix of the underlying Markov automaton.
 * @param exitRateVector A vector with exit rates for all states. Exit rates of probabilistic states are
 * assumed to be zero.
 * @param markovianStates A bit vector storing all markovian states.
 * @param rewardModel The considered reward model.
 * @param mec The maximal end component to consider for computing the long-run average.
 * @return The long-run average reward value for the given MEC.
 */
template <typename ValueType, typename RewardModelType>
static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);

template <typename ValueType, typename RewardModelType>
static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);

template <typename ValueType, typename RewardModelType>
static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
};

47
src/storm/modelchecker/helper/ModelCheckerHelper.cpp

@ -0,0 +1,47 @@
#include "ModelCheckerHelper.h"
#include "storm/utility/macros.h"
namespace storm {
namespace modelchecker {
namespace helper {

// Stores the given set as the relevant states, replacing any previously set one.
template <typename ValueType, storm::dd::DdType DdType>
void ModelCheckerHelper<ValueType, DdType>::setRelevantStates(StateSet const& relevantStates) {
    _relevantStates = relevantStates;
}

// Resets the helper to the "all states are relevant" default.
template <typename ValueType, storm::dd::DdType DdType>
void ModelCheckerHelper<ValueType, DdType>::clearRelevantStates() {
    _relevantStates.reset();
}

// True iff a set of relevant states is currently stored.
template <typename ValueType, storm::dd::DdType DdType>
bool ModelCheckerHelper<ValueType, DdType>::hasRelevantStates() const {
    return static_cast<bool>(_relevantStates);
}

// Returns the stored relevant states or boost::none if none are set.
template <typename ValueType, storm::dd::DdType DdType>
boost::optional<typename ModelCheckerHelper<ValueType, DdType>::StateSet> const& ModelCheckerHelper<ValueType, DdType>::getOptionalRelevantStates() const {
    return _relevantStates;
}

// Returns the stored relevant states. Must only be called if relevant states have been set.
template <typename ValueType, storm::dd::DdType DdType>
typename ModelCheckerHelper<ValueType, DdType>::StateSet const& ModelCheckerHelper<ValueType, DdType>::getRelevantStates() const {
    STORM_LOG_ASSERT(hasRelevantStates(), "Retrieving relevant states although none have been set.");
    return *_relevantStates;
}

// Explicit template instantiations for the supported value types and Dd libraries
// (the CUDD library is only instantiated for double).
template class ModelCheckerHelper<double, storm::dd::DdType::None>;
template class ModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::None>;
template class ModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::None>;
template class ModelCheckerHelper<double, storm::dd::DdType::Sylvan>;
template class ModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::Sylvan>;
template class ModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::Sylvan>;
template class ModelCheckerHelper<double, storm::dd::DdType::CUDD>;
}
}
}

74
src/storm/modelchecker/helper/ModelCheckerHelper.h

@ -0,0 +1,74 @@
#pragma once
#include <type_traits>
#include <boost/optional.hpp>
#include "storm/storage/dd/DdType.h"
#include "storm/storage/dd/Bdd.h"
#include "storm/storage/BitVector.h"
namespace storm {
namespace modelchecker {
namespace helper {
/*!
* Helper class for solving a model checking query.
* @tparam VT The value type of a single value.
* @tparam DdType The used library for Dds (or None in case of a sparse representation).
*/
template <typename VT, storm::dd::DdType DdType = storm::dd::DdType::None>
class ModelCheckerHelper {
public:
// Re-export the value type template argument under the conventional name.
typedef VT ValueType;
ModelCheckerHelper() = default;
virtual ~ModelCheckerHelper() = default;
/*!
* Identifies a subset of the model states
*/
using StateSet = typename std::conditional<DdType == storm::dd::DdType::None, storm::storage::BitVector, storm::dd::Bdd<DdType>>::type;
/*!
* Sets relevant states.
* If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
* In this case, an arbitrary result can be set to non-relevant states.
*/
void setRelevantStates(StateSet const& relevantStates);
/*!
* Clears the relevant states.
* If no relevant states are set, it is assumed that a result is required for all (initial- and non-initial) states.
*/
void clearRelevantStates();
/*!
* @return true if there are relevant states set.
* If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
* In this case, an arbitrary result can be set to non-relevant states.
*/
bool hasRelevantStates() const;
/*!
* @return relevant states (if there are any) or boost::none (otherwise).
* If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
* In this case, an arbitrary result can be set to non-relevant states.
*/
boost::optional<StateSet> const& getOptionalRelevantStates() const;
/*!
* @pre Relevant states have to be set before calling this.
* @return the relevant states. Should only be called if there are any.
* If relevant states are set, it is assumed that the model checking result is only relevant for the given states.
* In this case, an arbitrary result can be set to non-relevant states.
*
*/
StateSet const& getRelevantStates() const;
private:
// If set, only the contained states require a correct result; if unset, all states are relevant.
boost::optional<StateSet> _relevantStates;
};
}
}
}

98
src/storm/modelchecker/helper/SingleValueModelCheckerHelper.cpp

@ -0,0 +1,98 @@
#include "SingleValueModelCheckerHelper.h"
namespace storm {
namespace modelchecker {
namespace helper {

// Constructs the helper. The optimization direction and value threshold start out unset;
// no scheduler is produced by default.
template <typename ValueType, storm::dd::DdType DdType>
SingleValueModelCheckerHelper<ValueType, DdType>::SingleValueModelCheckerHelper() : _produceScheduler(false) {
    // Intentionally left empty
}

template <typename ValueType, storm::dd::DdType DdType>
void SingleValueModelCheckerHelper<ValueType, DdType>::setOptimizationDirection(storm::solver::OptimizationDirection const& direction) {
    _optimizationDirection = direction;
}

template <typename ValueType, storm::dd::DdType DdType>
void SingleValueModelCheckerHelper<ValueType, DdType>::clearOptimizationDirection() {
    _optimizationDirection = boost::none;
}

template <typename ValueType, storm::dd::DdType DdType>
bool SingleValueModelCheckerHelper<ValueType, DdType>::isOptimizationDirectionSet() const {
    return _optimizationDirection.is_initialized();
}

// Must only be called if an optimization direction has been set.
template <typename ValueType, storm::dd::DdType DdType>
storm::solver::OptimizationDirection const& SingleValueModelCheckerHelper<ValueType, DdType>::getOptimizationDirection() const {
    STORM_LOG_ASSERT(isOptimizationDirectionSet(), "Requested optimization direction but none was set.");
    return _optimizationDirection.get();
}

template <typename ValueType, storm::dd::DdType DdType>
bool SingleValueModelCheckerHelper<ValueType, DdType>::minimize() const {
    return storm::solver::minimize(getOptimizationDirection());
}

template <typename ValueType, storm::dd::DdType DdType>
bool SingleValueModelCheckerHelper<ValueType, DdType>::maximize() const {
    return storm::solver::maximize(getOptimizationDirection());
}

template <typename ValueType, storm::dd::DdType DdType>
boost::optional<storm::solver::OptimizationDirection> SingleValueModelCheckerHelper<ValueType, DdType>::getOptionalOptimizationDirection() const {
    return _optimizationDirection;
}

template <typename ValueType, storm::dd::DdType DdType>
void SingleValueModelCheckerHelper<ValueType, DdType>::setValueThreshold(storm::logic::ComparisonType const& comparisonType, ValueType const& threshold) {
    _valueThreshold = std::make_pair(comparisonType, threshold);
}

template <typename ValueType, storm::dd::DdType DdType>
void SingleValueModelCheckerHelper<ValueType, DdType>::clearValueThreshold() {
    _valueThreshold = boost::none;
}

template <typename ValueType, storm::dd::DdType DdType>
bool SingleValueModelCheckerHelper<ValueType, DdType>::isValueThresholdSet() const {
    return _valueThreshold.is_initialized();
}

// Must only be called if a value threshold has been set.
template <typename ValueType, storm::dd::DdType DdType>
storm::logic::ComparisonType const& SingleValueModelCheckerHelper<ValueType, DdType>::getValueThresholdComparisonType() const {
    STORM_LOG_ASSERT(isValueThresholdSet(), "Value Threshold comparison type was requested but not set before.");
    return _valueThreshold->first;
}

// Must only be called if a value threshold has been set.
template <typename ValueType, storm::dd::DdType DdType>
ValueType const& SingleValueModelCheckerHelper<ValueType, DdType>::getValueThresholdValue() const {
    // Fixed copy-paste defect: the assertion message previously referred to the 'comparison type'
    // (copied from the getter above) although this getter returns the threshold value.
    STORM_LOG_ASSERT(isValueThresholdSet(), "Value Threshold value was requested but not set before.");
    return _valueThreshold->second;
}

template <typename ValueType, storm::dd::DdType DdType>
void SingleValueModelCheckerHelper<ValueType, DdType>::setProduceScheduler(bool value) {
    _produceScheduler = value;
}

template <typename ValueType, storm::dd::DdType DdType>
bool SingleValueModelCheckerHelper<ValueType, DdType>::isProduceSchedulerSet() const {
    return _produceScheduler;
}

// Explicit template instantiations for the supported value types and Dd libraries
// (the CUDD library is only instantiated for double).
template class SingleValueModelCheckerHelper<double, storm::dd::DdType::None>;
template class SingleValueModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::None>;
template class SingleValueModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::None>;
template class SingleValueModelCheckerHelper<double, storm::dd::DdType::Sylvan>;
template class SingleValueModelCheckerHelper<storm::RationalNumber, storm::dd::DdType::Sylvan>;
template class SingleValueModelCheckerHelper<storm::RationalFunction, storm::dd::DdType::Sylvan>;
template class SingleValueModelCheckerHelper<double, storm::dd::DdType::CUDD>;
}
}
}

111
src/storm/modelchecker/helper/SingleValueModelCheckerHelper.h

@ -0,0 +1,111 @@
#pragma once
#include "ModelCheckerHelper.h"
#include "storm/solver/OptimizationDirection.h"
#include "storm/logic/ComparisonType.h"
namespace storm {
namespace modelchecker {
namespace helper {
/*!
* Helper for model checking queries where we are interested in (optimizing) a single value per state.
* @tparam ValueType The type of a value
* @tparam DdType The used library for Dds (or None in case of a sparse representation)
*/
template <typename ValueType, storm::dd::DdType DdType = storm::dd::DdType::None>
class SingleValueModelCheckerHelper : public ModelCheckerHelper<ValueType, DdType> {
public:
SingleValueModelCheckerHelper();
/*!
* Sets the optimization direction, i.e., whether we want to minimize or maximize the value for each state
* Has no effect for models without nondeterminism.
* Has to be set if there is nondeterminism in the model.
*/
void setOptimizationDirection(storm::solver::OptimizationDirection const& direction);
/*!
* Clears the optimization direction if it was set before.
*/
void clearOptimizationDirection();
/*!
* @return true if there is an optimization direction set
*/
bool isOptimizationDirectionSet() const;
/*!
* @pre an optimization direction has to be set before calling this.
* @return the optimization direction.
*/
storm::solver::OptimizationDirection const& getOptimizationDirection() const;
/*!
* @pre an optimization direction has to be set before calling this.
* @return true iff the optimization goal is to minimize the value for each state
*/
bool minimize() const;
/*!
* @pre an optimization direction has to be set before calling this.
* @return true iff the optimization goal is to maximize the value for each state
*/
bool maximize() const;
/*!
* @return The optimization direction (if it was set)
*/
boost::optional<storm::solver::OptimizationDirection> getOptionalOptimizationDirection() const;
/*!
* Sets a goal threshold for the value at each state. If such a threshold is set, it is assumed that we are only interested
* in the satisfaction of the threshold. Setting this allows the helper to compute values only up to the precision
* where satisfaction of the threshold can be decided.
* @param comparisonType The relation used when comparing computed values (left hand side) with the given threshold value (right hand side).
* @param thresholdValue The value used on the right hand side of the comparison relation.
*/
void setValueThreshold(storm::logic::ComparisonType const& comparisonType, ValueType const& thresholdValue);
/*!
* Clears the valueThreshold if it was set before.
*/
void clearValueThreshold();
/*!
* @return true, if a value threshold has been set.
*/
bool isValueThresholdSet() const;
/*!
* @pre A value threshold has to be set before calling this.
* @return The relation used when comparing computed values (left hand side) with the specified threshold value (right hand side).
*/
storm::logic::ComparisonType const& getValueThresholdComparisonType() const;
/*!
* @pre A value threshold has to be set before calling this.
* @return The value used on the right hand side of the comparison relation.
*/
ValueType const& getValueThresholdValue() const;
/*!
* Sets whether an optimal scheduler shall be constructed during the computation
*/
void setProduceScheduler(bool value);
/*!
* @return whether an optimal scheduler shall be constructed during the computation
*/
bool isProduceSchedulerSet() const;
private:
// Unset unless setOptimizationDirection was called; required for nondeterministic models.
boost::optional<storm::solver::OptimizationDirection> _optimizationDirection;
// Optional pair of (comparison relation, threshold value) set via setValueThreshold.
boost::optional<std::pair<storm::logic::ComparisonType, ValueType>> _valueThreshold;
// Whether an optimal scheduler shall be constructed; defaults to false.
bool _produceScheduler;
};
}
}
}

147
src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.cpp

@ -0,0 +1,147 @@
#include "HybridInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h"
#include "storm/models/symbolic/NondeterministicModel.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/utility/macros.h"
#include "storm/exceptions/NotSupportedException.h"
namespace storm {
namespace modelchecker {
namespace helper {
// Constructor for discrete-time models: neither Markovian states nor exit rates are stored.
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr) {
STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template Parameter does not match model type.");
}
// Constructor for Markov automata: stores pointers to the caller-owned Markovian states and exit rates.
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRateVector) {
STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template Parameter does not match model type.");
}
// Constructor for CTMCs: stores the exit rates only; no Markovian state set is needed.
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector) : _model(model), _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(&exitRateVector) {
STORM_LOG_ASSERT(model.isNondeterministicModel() == Nondeterministic, "Template Parameter does not match model type.");
}
// Computes long run average probabilities by translating the symbolic model to a sparse (explicit)
// representation and delegating the computation to the matching sparse helper.
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates) {
// Convert this query to an instance for the sparse engine.
// Create ODD for the translation.
storm::dd::Odd odd = _model.getReachableStates().createOdd();
// Translate all required components
storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
if (Nondeterministic) {
// Nondeterministic models additionally need the nondeterminism variables so that choices become matrix rows.
explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
} else {
explicitTransitionMatrix = _transitionMatrix.toMatrix(odd, odd);
}
// Continuous-time models also need exit rates (and, for MAs, the Markovian states).
std::vector<ValueType> explicitExitRateVector;
storm::storage::BitVector explicitMarkovianStates;
if (isContinuousTime()) {
explicitExitRateVector = _exitRates->toVector(odd);
if (_markovianStates) {
explicitMarkovianStates = _markovianStates->toVector(odd);
}
}
auto sparseHelper = createSparseHelper(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector, odd);
auto explicitResult = sparseHelper->computeLongRunAverageProbabilities(env, psiStates.toVector(odd));
// Wrap the explicit result; the symbolic part of the hybrid result is empty (zero Bdd/Add).
return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
}
// Computes long run average rewards by translating the symbolic model (matrix plus state/action rewards)
// to a sparse representation and delegating the computation to the matching sparse helper.
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel) {
// Convert this query to an instance for the sparse engine.
// Create ODD for the translation.
storm::dd::Odd odd = _model.getReachableStates().createOdd();
// Translate all required components
// Transitions and rewards
storm::storage::SparseMatrix<ValueType> explicitTransitionMatrix;
std::vector<ValueType> explicitStateRewards, explicitActionRewards;
if (rewardModel.hasStateRewards()) {
explicitStateRewards = rewardModel.getStateRewardVector().toVector(odd);
}
if (Nondeterministic && rewardModel.hasStateActionRewards()) {
// Matrix and action-based vector have to be produced at the same time to guarantee the correct order
auto matrixRewards = _transitionMatrix.toMatrixVector(rewardModel.getStateActionRewardVector(), dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
explicitTransitionMatrix = std::move(matrixRewards.first);
explicitActionRewards = std::move(matrixRewards.second);
} else {
// Translate matrix only
if (Nondeterministic) {
explicitTransitionMatrix = _transitionMatrix.toMatrix(dynamic_cast<storm::models::symbolic::NondeterministicModel<DdType, ValueType> const&>(_model).getNondeterminismVariables(), odd, odd);
} else {
explicitTransitionMatrix = _transitionMatrix.toMatrix(odd, odd);
}
if (rewardModel.hasStateActionRewards()) {
// For deterministic models we can translate the action rewards easily
explicitActionRewards = rewardModel.getStateActionRewardVector().toVector(odd);
}
}
STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are not supported in this engine.");
// Continuous time information
std::vector<ValueType> explicitExitRateVector;
storm::storage::BitVector explicitMarkovianStates;
if (isContinuousTime()) {
explicitExitRateVector = _exitRates->toVector(odd);
if (_markovianStates) {
explicitMarkovianStates = _markovianStates->toVector(odd);
}
}
auto sparseHelper = createSparseHelper(explicitTransitionMatrix, explicitMarkovianStates, explicitExitRateVector, odd);
// Reward vectors are passed as pointers; nullptr signals that the respective reward kind is absent.
auto explicitResult = sparseHelper->computeLongRunAverageValues(env, rewardModel.hasStateRewards() ? &explicitStateRewards : nullptr, rewardModel.hasStateActionRewards() ? &explicitActionRewards : nullptr);
// Wrap the explicit result; the symbolic part of the hybrid result is empty (zero Bdd/Add).
return std::make_unique<HybridQuantitativeCheckResult<DdType, ValueType>>(_model.getReachableStates(), _model.getManager().getBddZero(), _model.getManager().template getAddZero<ValueType>(), _model.getReachableStates(), std::move(odd), std::move(explicitResult));
}
// A continuous-time model (CTMC or MA) is recognized by the presence of exit rates.
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
bool HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::isContinuousTime() const {
    bool const haveExitRates = (_exitRates != nullptr);
    // Markovian states may only be present together with exit rates.
    STORM_LOG_ASSERT(haveExitRates || (_markovianStates == nullptr), "Inconsistent information given: Have Markovian states but no exit rates." );
    return haveExitRates;
}
// Overload selected for nondeterministic models (enabled iff N == true via SFINAE).
// Builds a sparse nondeterministic helper (MA variant when exit rates are present, MDP variant otherwise)
// and forwards this helper's settings to it.
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
template <bool N, std::enable_if_t<N, int>>
std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const {
std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> result;
if (isContinuousTime()) {
result = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix, markovianStates, exitRates);
} else {
result = std::make_unique<storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix);
}
// Transfer settings (e.g. relevant states); symbolic state sets are translated via the given ODD.
storm::modelchecker::helper::setInformationFromOtherHelperNondeterministic(*result, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
STORM_LOG_WARN_COND(!this->isProduceSchedulerSet(), "Scheduler extraction not supported in Hybrid engine.");
return result;
}
// Overload selected for deterministic models (enabled iff N == false via SFINAE).
// Builds a sparse deterministic helper (CTMC variant when exit rates are present, DTMC variant otherwise)
// and forwards this helper's settings to it. The markovianStates argument is unused here.
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
template <bool N, std::enable_if_t<!N, int>>
std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> HybridInfiniteHorizonHelper<ValueType, DdType, Nondeterministic>::createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const {
std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> result;
if (isContinuousTime()) {
result = std::make_unique<storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix, exitRates);
} else {
result = std::make_unique<storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType>>(transitionMatrix);
}
// Transfer settings (e.g. relevant states); symbolic state sets are translated via the given ODD.
storm::modelchecker::helper::setInformationFromOtherHelperDeterministic(*result, *this, [&odd](storm::dd::Bdd<DdType> const& s){ return s.toVector(odd); });
return result;
}
// Explicit template instantiations. Note that the CUDD library is only instantiated for double,
// and no nondeterministic variant exists for RationalFunction.
template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::CUDD, false>;
template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::CUDD, true>;
template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::Sylvan, false>;
template class HybridInfiniteHorizonHelper<double, storm::dd::DdType::Sylvan, true>;
template class HybridInfiniteHorizonHelper<storm::RationalNumber, storm::dd::DdType::Sylvan, false>;
template class HybridInfiniteHorizonHelper<storm::RationalNumber, storm::dd::DdType::Sylvan, true>;
template class HybridInfiniteHorizonHelper<storm::RationalFunction, storm::dd::DdType::Sylvan, false>;
}
}
}

87
src/storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h

@ -0,0 +1,87 @@
#pragma once
#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
#include "storm/modelchecker/results/HybridQuantitativeCheckResult.h"
#include "storm/models/symbolic/Model.h"
#include "storm/models/symbolic/StandardRewardModel.h"
#include "storm/storage/dd/DdManager.h"
#include "storm/storage/dd/Add.h"
#include "storm/storage/dd/Bdd.h"
namespace storm {
class Environment;
namespace storage {
template <typename ValueType> class SparseMatrix;
class BitVector;
}
namespace modelchecker {
namespace helper {
template <typename ValueType, bool Nondeterministic> class SparseInfiniteHorizonHelper;
/*!
 * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
 * @tparam ValueType the type of a single value
 * @tparam DdType the used library for Dds
 * @tparam Nondeterministic true iff the considered model contains nondeterministic choices
 */
template <typename ValueType, storm::dd::DdType DdType, bool Nondeterministic>
class HybridInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType, DdType> {
public:
    /*!
     * Initializes the helper for a discrete time model (MDP or DTMC)
     */
    HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix);
    /*!
     * Initializes the helper for a Markov Automaton
     */
    // Fixed: parameter renamed from '_exitRates' to 'exitRateVector' -- the leading-underscore style is
    // reserved for members in this class and the definition uses 'exitRateVector'.
    HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& markovianStates, storm::dd::Add<DdType, ValueType> const& exitRateVector);
    /*!
     * Initializes the helper for a CTMC
     * @note The transition matrix must be probabilistic
     */
    HybridInfiniteHorizonHelper(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Add<DdType, ValueType> const& exitRateVector);
    /*!
     * Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
     * @return a value for each state
     */
    std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageProbabilities(Environment const& env, storm::dd::Bdd<DdType> const& psiStates);
    /*!
     * Computes the long run average rewards, i.e., the average reward collected per time unit
     * @return a value for each state
     */
    std::unique_ptr<HybridQuantitativeCheckResult<DdType, ValueType>> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel);
protected:
    /*!
     * @return true iff this is a computation on a continuous time model (i.e. MA)
     */
    bool isContinuousTime() const;
    /*!
     * @return a sparse infinite horizon helper with the provided explicit model information.
     * @param exitRates exit rates (ignored for discrete time models)
     * @param markovianStates Markovian states (ignored for models that are not Markov automata)
     */
    template <bool N = Nondeterministic, std::enable_if_t<N, int> = 0>
    std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const;
    template <bool N = Nondeterministic, std::enable_if_t<!N, int> = 0>
    std::unique_ptr<SparseInfiniteHorizonHelper<ValueType, Nondeterministic>> createSparseHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates, storm::dd::Odd const& odd) const;
private:
    // The symbolic model this helper operates on.
    storm::models::symbolic::Model<DdType, ValueType> const& _model;
    // The transition matrix given at construction time.
    storm::dd::Add<DdType, ValueType> const& _transitionMatrix;
    // Markovian states; nullptr unless the helper was constructed for a Markov automaton.
    storm::dd::Bdd<DdType> const* _markovianStates;
    // Exit rates; nullptr for discrete-time models.
    storm::dd::Add<DdType, ValueType> const* _exitRates;
};
}
}
}

431
src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.cpp

@ -0,0 +1,431 @@
#include "SparseDeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/storage/StronglyConnectedComponentDecomposition.h"
#include "storm/storage/Scheduler.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
#include "storm/solver/LpSolver.h"
#include "storm/utility/SignalHandler.h"
#include "storm/utility/solver.h"
#include "storm/utility/vector.h"
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/environment/solver/TopologicalSolverEnvironment.h"
#include "storm/exceptions/UnmetRequirementException.h"
#include "storm/exceptions/NotSupportedException.h"
namespace storm {
namespace modelchecker {
namespace helper {
// Constructs the helper for a discrete-time model (no exit rates).
template <typename ValueType>
SparseDeterministicInfiniteHorizonHelper<ValueType>::SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : SparseInfiniteHorizonHelper<ValueType, false>(transitionMatrix) {
// Intentionally left empty.
}
// Constructs the helper for a CTMC, given the embedded (probabilistic) transition matrix and the exit rates.
template <typename ValueType>
SparseDeterministicInfiniteHorizonHelper<ValueType>::SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates) : SparseInfiniteHorizonHelper<ValueType, false>(transitionMatrix, exitRates) {
// For the CTMC case we assert that the caller actually provided the probabilistic transitions
STORM_LOG_ASSERT(this->_transitionMatrix.isProbabilistic(), "Non-probabilistic transitions");
}
// Lazily computes the long-run component decomposition (the bottom SCCs of the transition matrix).
// Does nothing if a decomposition was already provided or computed earlier.
template <typename ValueType>
void SparseDeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() {
if (this->_longRunComponentDecomposition == nullptr) {
// The decomposition has not been provided or computed, yet.
this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::StronglyConnectedComponentDecomposition<ValueType>>(this->_transitionMatrix, storm::storage::StronglyConnectedComponentDecompositionOptions().onlyBottomSccs());
this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
}
}
// Computes the long-run average value for a single BSCC. First tries the cheap 'trivial BSCC' shortcut;
// otherwise dispatches to the LRA method selected via the environment settings.
template <typename ValueType>
ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& component) {
// For deterministic models, we compute the LRA for a BSCC
STORM_LOG_ASSERT(!this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
auto trivialResult = computeLraForTrivialBscc(env, stateValueGetter, actionValueGetter, component);
if (trivialResult.first) {
return trivialResult.second;
}
// Solve nontrivial BSCC with the method specified in the settings
storm::solver::LraMethod method = env.solver().lra().getDetLraMethod();
// If the method is only set by default, override it to match exactness/soundness requirements.
if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isDetLraMethodSetFromDefault() && method == storm::solver::LraMethod::ValueIteration) {
method = storm::solver::LraMethod::GainBiasEquations;
STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
} else if (env.solver().isForceSoundness() && env.solver().lra().isDetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
method = storm::solver::LraMethod::ValueIteration;
STORM_LOG_INFO("Selecting " << storm::solver::toString(method) << " as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
}
STORM_LOG_TRACE("Computing LRA for BSCC of size " << component.size() << " using '" << storm::solver::toString(method) << "'.");
if (method == storm::solver::LraMethod::ValueIteration) {
return computeLraForBsccVi(env, stateValueGetter, actionValueGetter, component);
} else if (method == storm::solver::LraMethod::LraDistributionEquations) {
// We only need the first element of the pair as the lra distribution is not relevant at this point.
return computeLraForBsccLraDistr(env, stateValueGetter, actionValueGetter, component).first;
}
STORM_LOG_WARN_COND(method == storm::solver::LraMethod::GainBiasEquations, "Unsupported lra method selected. Defaulting to " << storm::solver::toString(storm::solver::LraMethod::GainBiasEquations) << ".");
// We don't need the bias values
return computeLraForBsccGainBias(env, stateValueGetter, actionValueGetter, component).first;
}
// Shortcut for "trivial" BSCCs: if every state of the BSCC yields the same value per time unit
// (which in particular covers single-state BSCCs), the long-run average is exactly that common
// value and no equation system needs to be solved.
// Returns {true, value} on success and {false, 0} if the BSCC is not trivial.
template <typename ValueType>
std::pair<bool, ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& component) {
    bool valueInitialized = false;
    ValueType commonValue = storm::utility::zero<ValueType>();
    for (auto const& element : component) {
        auto const state = internal::getComponentElementState(element);
        STORM_LOG_ASSERT(state == *internal::getComponentElementChoicesBegin(element), "Unexpected choice index at state " << state << " of deterministic model.");
        // Value gathered at this state per time unit; in the continuous-time case the action value is weighted by the exit rate.
        ValueType stateValue = stateValueGetter(state);
        if (this->isContinuousTime()) {
            stateValue += (*this->_exitRates)[state] * actionValueGetter(state);
        } else {
            stateValue += actionValueGetter(state);
        }
        if (!valueInitialized) {
            commonValue = stateValue;
            valueInitialized = true;
        } else if (commonValue != stateValue) {
            // Two distinct values found: the BSCC is not trivial.
            return {false, storm::utility::zero<ValueType>()};
        }
    }
    // All state values coincide.
    return {true, commonValue};
}
// Specialization for parametric models: the value-iteration based approach relies on numeric
// convergence checks, which is why it is rejected for rational functions (see thrown message below).
template <>
storm::RationalFunction SparseDeterministicInfiniteHorizonHelper<storm::RationalFunction>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& bscc) {
    STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The requested Method for LRA computation is not supported for parametric models.");
}
template <typename ValueType>
ValueType SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccVi(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, storm::storage::StronglyConnectedComponent const& bscc) {
    // Computes the LRA value of the given BSCC via value iteration.
    // The aperiodicity factor is a parameter of the value iteration approach taken from the environment.
    auto const aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
    // In both the discrete- and the continuous-time case the BSCC is treated as a model with
    // deterministic timed states and no instant states.
    using ViHelperType = storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::StronglyConnectedComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNoIs>;
    if (!this->isContinuousTime()) {
        // DTMC case.
        ViHelperType viHelper(bscc, this->_transitionMatrix, aperiodicFactor);
        return viHelper.performValueIteration(env, stateValueGetter, actionValueGetter);
    }
    // CTMC case: Markovian state information and exit rates are passed along.
    ViHelperType viHelper(bscc, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
    return viHelper.performValueIteration(env, stateValueGetter, actionValueGetter, this->_exitRates);
}
template <typename ValueType>
std::pair<ValueType, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccGainBias(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc) {
    // Computes the LRA value ('gain') of the given BSCC together with a bias value for each BSCC state
    // by solving a linear equation system.
    // We build the equation system as in Line 3 of Algorithm 3 from
    // Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017)
    // https://doi.org/10.1007/978-3-319-68167-2_25
    // The first variable corresponds to the gain of the bscc whereas the subsequent variables yield the bias for each state s_1, s_2, ....
    // No bias variable for s_0 is needed since it is always set to zero, yielding an nxn equation system matrix
    // To make this work for CTMC, we could uniformize the model. This preserves LRA and ensures that we can compute the
    // LRA as for a DTMC (the sojourn time in each state is the same). If we then multiply the equations with the uniformization rate,
    // the uniformization rate cancels out. Hence, we obtain the equation system below.
    // Get a mapping from global state indices to local ones.
    std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
    uint64_t localIndex = 0;
    for (auto const& globalIndex : bscc) {
        toLocalIndexMap[globalIndex] = localIndex;
        ++localIndex;
    }
    // Prepare an environment for the underlying equation solver
    auto subEnv = env;
    if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
        // Topological solver does not make any sense since the BSCC is connected.
        subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
    }
    // The precision of the LRA query carries over to the underlying linear equation solver.
    subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
    // Build the equation system matrix and vector.
    storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
    // The entries differ depending on whether the solver expects the system in A*x=b form or in fixpoint (x=A*x+b) form.
    bool isEquationSystemFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
    storm::storage::SparseMatrixBuilder<ValueType> builder(bscc.size(), bscc.size());
    std::vector<ValueType> eqSysVector;
    eqSysVector.reserve(bscc.size());
    // The first row asserts that the weighted bias variables and the reward at s_0 sum up to the gain
    uint64_t row = 0;
    ValueType entryValue;
    for (auto const& globalState : bscc) {
        // For discrete-time models, no exit rates are present and the rate defaults to one.
        ValueType rateAtState = this->_exitRates ? (*this->_exitRates)[globalState] : storm::utility::one<ValueType>();
        // Coefficient for the gain variable
        if (isEquationSystemFormat) {
            // '1-0' in row 0 and -(-1) in other rows
            builder.addNextValue(row, 0, storm::utility::one<ValueType>());
        } else if (row > 0) {
            // No coefficient in row 0, otherwise subtract the gain
            builder.addNextValue(row, 0, -storm::utility::one<ValueType>());
        }
        // Compute weighted sum over successor state. As this is a BSCC, each successor state will again be in the BSCC.
        if (row > 0) {
            if (isEquationSystemFormat) {
                builder.addDiagonalEntry(row, rateAtState);
            } else if (!storm::utility::isOne(rateAtState)) {
                builder.addDiagonalEntry(row, storm::utility::one<ValueType>() - rateAtState);
            }
        }
        for (auto const& entry : this->_transitionMatrix.getRow(globalState)) {
            uint64_t col = toLocalIndexMap[entry.getColumn()];
            if (col == 0) {
                // Skip transition to state_0. This corresponds to setting the bias of state_0 to zero
                continue;
            }
            entryValue = entry.getValue() * rateAtState;
            if (isEquationSystemFormat) {
                entryValue = -entryValue;
            }
            builder.addNextValue(row, col, entryValue);
        }
        // Right-hand side: the reward collected at this state (action rewards weighted with the rate).
        eqSysVector.push_back(stateValuesGetter(globalState) + rateAtState * actionValuesGetter(globalState));
        ++row;
    }
    // Create a linear equation solver
    auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
    // Check solver requirements.
    auto requirements = solver->getRequirements(subEnv);
    STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
    // Todo: Find bounds on the bias variables. Just inserting the maximal value from the vector probably does not work.
    std::vector<ValueType> eqSysSol(bscc.size(), storm::utility::zero<ValueType>());
    // Take the mean of the rewards as an initial guess for the gain
    //eqSysSol.front() = std::accumulate(eqSysVector.begin(), eqSysVector.end(), storm::utility::zero<ValueType>()) / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size());
    solver->solveEquations(subEnv, eqSysSol, eqSysVector);
    // The first solution variable is the gain; the remaining ones are the bias values of s_1, s_2, ....
    ValueType gain = eqSysSol.front();
    // insert bias value for state 0 (fixed to zero by construction)
    eqSysSol.front() = storm::utility::zero<ValueType>();
    // Return the gain and the bias values
    return std::pair<ValueType, std::vector<ValueType>>(std::move(gain), std::move(eqSysSol));
}
template <typename ValueType>
std::pair<ValueType, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::computeLraForBsccLraDistr(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc) {
    // Computes the LRA value of the given BSCC together with its long run average (steady state) distribution.
    // Let A be an auxiliary matrix with A[s,s] = R(s,s) - r(s) & A[s,s'] = R(s,s') for s,s' in BSCC and s!=s'.
    // We build and solve the equation system for
    // x*A=0 & x_0+...+x_n=1 <=> A^t*x=0=x-x & x_0+...+x_n=1 <=> (1+A^t)*x = x & 1-x_0-...-x_n-1=x_n
    // Then, x[i] will be the fraction of the time we are in state i.
    // Special case: a BSCC with a single state has the trivial distribution {1} and its LRA value
    // is just the value collected at that state.
    if (bscc.size() == 1) {
        ValueType lraValue = stateValuesGetter(*bscc.begin()) + (this->isContinuousTime() ? (*this->_exitRates)[*bscc.begin()] * actionValuesGetter(*bscc.begin()) : actionValuesGetter(*bscc.begin()));
        return { lraValue, {storm::utility::one<ValueType>()} };
    }
    // From here on, the BSCC consists of more than one state.
    // Prepare an environment for the underlying linear equation solver
    auto subEnv = env;
    if (subEnv.solver().getLinearEquationSolverType() == storm::solver::EquationSolverType::Topological) {
        // Topological solver does not make any sense since the BSCC is connected.
        subEnv.solver().setLinearEquationSolverType(subEnv.solver().topological().getUnderlyingEquationSolverType(), subEnv.solver().topological().isUnderlyingEquationSolverTypeSetFromDefault());
    }
    subEnv.solver().setLinearEquationSolverPrecision(env.solver().lra().getPrecision(), env.solver().lra().getRelativeTerminationCriterion());
    // Get a mapping from global state indices to local ones as well as a bitvector containing states within the BSCC.
    std::unordered_map<uint64_t, uint64_t> toLocalIndexMap;
    storm::storage::BitVector bsccStates(this->_transitionMatrix.getRowCount(), false);
    uint64_t localIndex = 0;
    for (auto const& globalIndex : bscc) {
        bsccStates.set(globalIndex, true);
        toLocalIndexMap[globalIndex] = localIndex;
        ++localIndex;
    }
    // Build the auxiliary Matrix A.
    auto auxMatrix = this->_transitionMatrix.getSubmatrix(false, bsccStates, bsccStates, true); // add diagonal entries!
    uint64_t row = 0;
    for (auto const& globalIndex : bscc) {
        // For discrete-time models, no exit rates are present and the rate defaults to one.
        ValueType rateAtState = this->_exitRates ? (*this->_exitRates)[globalIndex] : storm::utility::one<ValueType>();
        for (auto& entry : auxMatrix.getRow(row)) {
            if (entry.getColumn() == row) {
                // This value is non-zero since we have a BSCC with more than one state
                entry.setValue(rateAtState * (entry.getValue() - storm::utility::one<ValueType>()));
            } else if (this->isContinuousTime()) {
                entry.setValue(entry.getValue() * rateAtState);
            }
        }
        ++row;
    }
    assert(row == auxMatrix.getRowCount());
    // We need to consider A^t. This will not delete diagonal entries since they are non-zero.
    auxMatrix = auxMatrix.transpose();
    // Check whether we need the fixpoint characterization
    storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
    bool isFixpointFormat = linearEquationSolverFactory.getEquationProblemFormat(subEnv) == storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem;
    if (isFixpointFormat) {
        // Add a 1 on the diagonal (yielding the (1+A^t) matrix from the derivation above)
        for (row = 0; row < auxMatrix.getRowCount(); ++row) {
            for (auto& entry : auxMatrix.getRow(row)) {
                if (entry.getColumn() == row) {
                    entry.setValue(storm::utility::one<ValueType>() + entry.getValue());
                }
            }
        }
    }
    // We now build the equation system matrix.
    // We can drop the last row of A and add ones in this row instead to assert that the variables sum up to one
    // Phase 1: replace the existing entries of the last row with ones
    uint64_t col = 0;
    uint64_t lastRow = auxMatrix.getRowCount() - 1;
    for (auto& entry : auxMatrix.getRow(lastRow)) {
        entry.setColumn(col);
        if (isFixpointFormat) {
            if (col == lastRow) {
                entry.setValue(storm::utility::zero<ValueType>());
            } else {
                entry.setValue(-storm::utility::one<ValueType>());
            }
        } else {
            entry.setValue(storm::utility::one<ValueType>());
        }
        ++col;
    }
    // Phase 2: append further entries to the last row for the columns that had no entry so far.
    storm::storage::SparseMatrixBuilder<ValueType> builder(std::move(auxMatrix));
    for (; col <= lastRow; ++col) {
        if (isFixpointFormat) {
            if (col != lastRow) {
                builder.addNextValue(lastRow, col, -storm::utility::one<ValueType>());
            }
        } else {
            builder.addNextValue(lastRow, col, storm::utility::one<ValueType>());
        }
    }
    // Right-hand side: all zero except for the last row, which encodes that the distribution sums up to one.
    std::vector<ValueType> bsccEquationSystemRightSide(bscc.size(), storm::utility::zero<ValueType>());
    bsccEquationSystemRightSide.back() = storm::utility::one<ValueType>();
    // Create a linear equation solver
    auto solver = linearEquationSolverFactory.create(subEnv, builder.build());
    // The solution is a probability distribution, so all variables lie in [0,1].
    solver->setBounds(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>());
    // Check solver requirements.
    auto requirements = solver->getRequirements(subEnv);
    requirements.clearLowerBounds();
    requirements.clearUpperBounds();
    STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
    // Use the uniform distribution as the initial guess.
    std::vector<ValueType> lraDistr(bscc.size(), storm::utility::one<ValueType>() / storm::utility::convertNumber<ValueType, uint64_t>(bscc.size()));
    solver->solveEquations(subEnv, lraDistr, bsccEquationSystemRightSide);
    // Calculate final LRA Value as the distribution-weighted sum of the state values.
    ValueType result = storm::utility::zero<ValueType>();
    auto solIt = lraDistr.begin();
    for (auto const& globalState : bscc) {
        if (this->isContinuousTime()) {
            result += (*solIt) * (stateValuesGetter(globalState) + (*this->_exitRates)[globalState] * actionValuesGetter(globalState));
        } else {
            result += (*solIt) * (stateValuesGetter(globalState) + actionValuesGetter(globalState));
        }
        ++solIt;
    }
    assert(solIt == lraDistr.end());
    return std::pair<ValueType, std::vector<ValueType>>(std::move(result), std::move(lraDistr));
}
template <typename ValueType>
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem) {
    // Assemble the matrix and right-hand side of the SSP (stochastic shortest path) problem.
    // In contrast to the version for nondeterministic models, the auxiliary states that would
    // represent each BSCC are eliminated on the fly: any probability mass entering a BSCC is
    // accounted for on the right-hand side of the equation system instead.
    auto sspMatrix = this->_transitionMatrix.getSubmatrix(false, statesNotInComponent, statesNotInComponent, asEquationSystem);
    if (asEquationSystem) {
        sspMatrix.convertToEquationSystem();
    }
    // Right-hand side: for each non-component state, sum up the probability mass that moves into
    // some BSCC, weighted with the LRA value of that BSCC.
    std::vector<ValueType> sspVector;
    sspVector.reserve(sspMatrix.getRowCount());
    for (auto const& nonComponentState : statesNotInComponent) {
        ValueType massIntoBsccs = storm::utility::zero<ValueType>();
        for (auto const& transition : this->_transitionMatrix.getRow(nonComponentState)) {
            auto const successor = transition.getColumn();
            if (!statesNotInComponent.get(successor)) {
                // This transition leads into a BSCC.
                massIntoBsccs += transition.getValue() * bsccLraValues[inputStateToBsccIndexMap[successor]];
            }
        }
        sspVector.push_back(std::move(massIntoBsccs));
    }
    return std::make_pair(std::move(sspMatrix), std::move(sspVector));
}
template <typename ValueType>
std::vector<ValueType> SparseDeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
    // Combines the per-BSCC LRA values into one LRA value per state of the input model
    // by solving a stochastic shortest path (SSP) problem for the non-BSCC states.
    STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");
    // For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
    // which redirects all transitions leading to a former BSCC state to a new (imaginary) auxiliary state.
    // Each auxiliary state gets assigned the value of that BSCC and we compute expected rewards (aka stochastic shortest path, SSP) on that new system.
    // For efficiency reasons, we actually build the system where the auxiliary states are already eliminated.
    // First gather the states that are part of a component
    // and create a mapping from states that lie in a component to the corresponding component index.
    storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount());
    std::vector<uint64_t> stateIndexMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max());
    for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) {
        for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) {
            uint64_t state = internal::getComponentElementState(element);
            statesInComponents.set(state);
            stateIndexMap[state] = currentComponentIndex;
        }
    }
    // Map the non-component states to their index in the SSP. Note that the order of these states will be preserved.
    // Note: stateIndexMap thus holds a component index for component states and an SSP matrix index for all other states.
    uint64_t numberOfNonComponentStates = 0;
    storm::storage::BitVector statesNotInComponent = ~statesInComponents;
    for (auto const& nonComponentState : statesNotInComponent) {
        stateIndexMap[nonComponentState] = numberOfNonComponentStates;
        ++numberOfNonComponentStates;
    }
    // The next step is to create the equation system solving the SSP (unless the whole system consists of BSCCs)
    std::vector<ValueType> sspValues;
    if (numberOfNonComponentStates > 0) {
        storm::solver::GeneralLinearEquationSolverFactory<ValueType> linearEquationSolverFactory;
        bool isEqSysFormat = linearEquationSolverFactory.getEquationProblemFormat(env) == storm::solver::LinearEquationSolverProblemFormat::EquationSystem;
        auto sspMatrixVector = buildSspMatrixVector(componentLraValues, stateIndexMap, statesNotInComponent, isEqSysFormat);
        std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> solver = linearEquationSolverFactory.create(env, sspMatrixVector.first);
        // The solution values are convex combinations of the component values, so these bound the solution.
        auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end());
        solver->setBounds(*lowerUpperBounds.first, *lowerUpperBounds.second);
        // Check solver requirements
        auto requirements = solver->getRequirements(env);
        STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
        // Use the center of the known value range as the initial guess.
        sspValues.assign(sspMatrixVector.first.getRowCount(), (*lowerUpperBounds.first + *lowerUpperBounds.second) / storm::utility::convertNumber<ValueType,uint64_t>(2));
        solver->solveEquations(env, sspValues, sspMatrixVector.second);
    }
    // Prepare result vector: component states take the LRA value of their component,
    // all other states take the corresponding SSP solution value.
    std::vector<ValueType> result(this->_transitionMatrix.getRowGroupCount());
    for (uint64_t state = 0; state < stateIndexMap.size(); ++state) {
        if (statesNotInComponent.get(state)) {
            result[state] = sspValues[stateIndexMap[state]];
        } else {
            result[state] = componentLraValues[stateIndexMap[state]];
        }
    }
    return result;
}
// Explicit template instantiations for all supported value types.
template class SparseDeterministicInfiniteHorizonHelper<double>;
template class SparseDeterministicInfiniteHorizonHelper<storm::RationalNumber>;
template class SparseDeterministicInfiniteHorizonHelper<storm::RationalFunction>;
}
}
}

75
src/storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h

@ -0,0 +1,75 @@
#pragma once
#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
namespace storm {
namespace modelchecker {
namespace helper {
/*!
 * Helper class for model checking queries that depend on the long run behavior of the (deterministic) system.
 * @tparam ValueType the type a value can have
 */
template <typename ValueType>
class SparseDeterministicInfiniteHorizonHelper : public SparseInfiniteHorizonHelper<ValueType, false> {
public:
    /*!
     * Function mapping from indices to values
     */
    // Note: taken from the deterministic (Nondeterministic = false) base class to stay consistent
    // with the class we actually derive from. (The previous reference to the 'true' instantiation
    // denoted the same type but needlessly instantiated the nondeterministic variant.)
    typedef typename SparseInfiniteHorizonHelper<ValueType, false>::ValueGetter ValueGetter;
    /*!
     * Initializes the helper for a discrete time model (i.e. DTMC)
     */
    SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
    /*!
     * Initializes the helper for a continuous time model (i.e. CTMC)
     * @note The transition matrix shall be probabilistic (i.e. the rows sum up to one)
     */
    SparseDeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates);
    /*!
     * @param stateValuesGetter a function returning a value for a given state index
     * @param actionValuesGetter a function returning a value for a given (global) choice index
     * @return the (unique) LRA value for the given component (BSCC).
     */
    virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& component) override;
protected:
    /*!
     * Computes the BSCC decomposition of the model (unless it was already provided).
     */
    virtual void createDecomposition() override;
    /*!
     * Catches the case where all states of the BSCC have the same value (in particular single-state BSCCs).
     * @return true (and the common value) iff all states of the component yield the same value.
     */
    std::pair<bool, ValueType> computeLraForTrivialBscc(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
    /*!
     * As computeLraForComponent but uses value iteration as a solution method (independent of what is set in env)
     */
    ValueType computeLraForBsccVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
    /*!
     * As computeLraForComponent but solves a linear equation system encoding gain and bias (independent of what is set in env)
     * @see Kretinsky, Meggendorfer: Efficient Strategy Iteration for Mean Payoff in Markov Decision Processes (ATVA 2017), https://doi.org/10.1007/978-3-319-68167-2_25
     * @return the gain of the BSCC and a bias value for each of its states
     */
    std::pair<ValueType, std::vector<ValueType>> computeLraForBsccGainBias(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
    /*!
     * As computeLraForComponent but solves a linear equation system encoding the long run average (steady state) distribution (independent of what is set in env)
     * @return the LRA value of the BSCC and its steady state distribution
     */
    std::pair<ValueType, std::vector<ValueType>> computeLraForBsccLraDistr(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::StronglyConnectedComponent const& bscc);
    /*!
     * Builds the matrix and right-hand side of the SSP problem for the states that do not lie in any BSCC.
     */
    std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& bsccLraValues, std::vector<uint64_t> const& inputStateToBsccIndexMap, storm::storage::BitVector const& statesNotInComponent, bool asEquationSystem);
    /*!
     * @param bsccLraValues the previously computed LRA value for each BSCC
     * @return Lra values for each state
     */
    virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& bsccLraValues) override;
};
}
}
}

161
src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.cpp

@ -0,0 +1,161 @@
#include "SparseInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/storage/StronglyConnectedComponentDecomposition.h"
#include "storm/solver/MinMaxLinearEquationSolver.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
#include "storm/solver/LpSolver.h"
#include "storm/utility/SignalHandler.h"
#include "storm/utility/solver.h"
#include "storm/utility/vector.h"
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
#include "storm/exceptions/UnmetRequirementException.h"
namespace storm {
namespace modelchecker {
namespace helper {
// Constructor for discrete-time models (DTMC/MDP): neither Markovian states nor exit rates are present.
template <typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(nullptr), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
    // Intentionally left empty.
}
// Constructor for Markov automata: Markovian states and their exit rates are stored (non-owning).
template <typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _markovianStates(&markovianStates), _exitRates(&exitRates), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
    // Intentionally left empty.
}
// Constructor for CTMCs: exit rates are stored (non-owning); all states are implicitly Markovian.
template <typename ValueType, bool Nondeterministic>
SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates) : _transitionMatrix(transitionMatrix), _markovianStates(nullptr), _exitRates(&exitRates), _backwardTransitions(nullptr), _longRunComponentDecomposition(nullptr) {
    // Intentionally left empty.
}
// Stores a non-owning pointer to externally computed backward transitions so they need not be recomputed.
// The caller must keep the referenced matrix alive for the lifetime of this helper.
template <typename ValueType, bool Nondeterministic>
void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
    STORM_LOG_WARN_COND(_backwardTransitions == nullptr, "Backwards transitions were provided but they were already computed or provided before.");
    _backwardTransitions = &backwardTransitions;
}
// Stores a non-owning pointer to an externally computed component decomposition (BSCCs/MECs).
// The caller must keep the referenced decomposition alive for the lifetime of this helper.
template <typename ValueType, bool Nondeterministic>
void SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition) {
    STORM_LOG_WARN_COND(_longRunComponentDecomposition == nullptr, "Long Run Component Decomposition was provided but it was already computed or provided before.");
    _longRunComponentDecomposition = &decomposition;
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates) {
    // LRA probabilities are LRA values where each psi state has state value one and
    // all other states as well as all actions have value zero.
    auto psiIndicator = [&psiStates](uint64_t stateIndex) {
        if (psiStates.get(stateIndex)) {
            return storm::utility::one<ValueType>();
        }
        return storm::utility::zero<ValueType>();
    };
    auto zeroActionValue = [](uint64_t) { return storm::utility::zero<ValueType>(); };
    return computeLongRunAverageValues(env, psiIndicator, zeroActionValue);
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel) {
    // Wrap the components of the given reward model into getter functions.
    // Reward components that are not present contribute zero.
    ValueGetter stateRewardsGetter = [](uint64_t) { return storm::utility::zero<ValueType>(); };
    if (rewardModel.hasStateRewards()) {
        stateRewardsGetter = [&rewardModel](uint64_t stateIndex) { return rewardModel.getStateReward(stateIndex); };
    }
    ValueGetter actionRewardsGetter = [](uint64_t) { return storm::utility::zero<ValueType>(); };
    if (rewardModel.hasTransitionRewards()) {
        // Transition rewards need the transition matrix to obtain the expected reward of a choice.
        actionRewardsGetter = [&](uint64_t globalChoiceIndex) { return rewardModel.getStateActionAndTransitionReward(globalChoiceIndex, this->_transitionMatrix); };
    } else if (rewardModel.hasStateActionRewards()) {
        actionRewardsGetter = [&](uint64_t globalChoiceIndex) { return rewardModel.getStateActionReward(globalChoiceIndex); };
    }
    return computeLongRunAverageValues(env, stateRewardsGetter, actionRewardsGetter);
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues, std::vector<ValueType> const* actionValues) {
    // Translate the (optional) value vectors into getter functions.
    // A missing vector (nullptr) corresponds to all-zero values.
    auto makeGetter = [](std::vector<ValueType> const* values) -> ValueGetter {
        if (values == nullptr) {
            return [](uint64_t) { return storm::utility::zero<ValueType>(); };
        }
        return [values](uint64_t index) { return (*values)[index]; };
    };
    return computeLongRunAverageValues(env, makeGetter(stateValues), makeGetter(actionValues));
}
template <typename ValueType, bool Nondeterministic>
std::vector<ValueType> SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::computeLongRunAverageValues(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter) {
    // We will compute the long run average value for each MEC individually and then set-up an Equation system to compute the value also at non-mec states.
    // For a description of this approach see, e.g., Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
    // Prepare an environment for the underlying solvers.
    auto underlyingSolverEnvironment = env;
    if (env.solver().isForceSoundness()) {
        // For sound computations, the error in the MECS plus the error in the remaining system should not exceed the user defined precision.
        // Hence, both phases only get half of the allowed error.
        storm::RationalNumber newPrecision = env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2);
        underlyingSolverEnvironment.solver().minMax().setPrecision(newPrecision);
        underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
        underlyingSolverEnvironment.solver().setLinearEquationSolverPrecision(newPrecision, env.solver().lra().getRelativeTerminationCriterion());
        underlyingSolverEnvironment.solver().lra().setPrecision(newPrecision);
    }
    // If requested, allocate memory for the choices made
    if (Nondeterministic && this->isProduceSchedulerSet()) {
        if (!_producedOptimalChoices.is_initialized()) {
            _producedOptimalChoices.emplace();
        }
        _producedOptimalChoices->resize(_transitionMatrix.getRowGroupCount());
    }
    // Scheduler production is only meaningful if there is nondeterminism to resolve.
    STORM_LOG_ASSERT(Nondeterministic || !this->isProduceSchedulerSet(), "Scheduler production enabled for deterministic model.");
    // Decompose the model to their bottom components (MECS or BSCCS)
    createDecomposition();
    // Compute the long-run average for all components in isolation.
    std::vector<ValueType> componentLraValues;
    componentLraValues.reserve(_longRunComponentDecomposition->size());
    for (auto const& c : *_longRunComponentDecomposition) {
        componentLraValues.push_back(computeLraForComponent(underlyingSolverEnvironment, stateRewardsGetter, actionRewardsGetter, c));
    }
    // Solve the resulting SSP where end components are collapsed into single auxiliary states
    return buildAndSolveSsp(underlyingSolverEnvironment, componentLraValues);
}
template <typename ValueType, bool Nondeterministic>
bool SparseInfiniteHorizonHelper<ValueType, Nondeterministic>::isContinuousTime() const {
    // Exit rates are set by the CTMC/MA constructors only, so their presence indicates continuous time.
    bool const hasExitRates = (_exitRates != nullptr);
    STORM_LOG_ASSERT((_markovianStates == nullptr) || (_exitRates != nullptr), "Inconsistent information given: Have Markovian states but no exit rates." );
    return hasExitRates;
}
// Explicit template instantiations for all supported value types.
// Note: the nondeterministic (true) variant is not instantiated for rational functions.
template class SparseInfiniteHorizonHelper<double, true>;
template class SparseInfiniteHorizonHelper<storm::RationalNumber, true>;
template class SparseInfiniteHorizonHelper<double, false>;
template class SparseInfiniteHorizonHelper<storm::RationalNumber, false>;
template class SparseInfiniteHorizonHelper<storm::RationalFunction, false>;
}
}
}

140
src/storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h

@ -0,0 +1,140 @@
#pragma once
#include "storm/modelchecker/helper/SingleValueModelCheckerHelper.h"
#include "storm/storage/MaximalEndComponent.h"
#include "storm/storage/StronglyConnectedComponent.h"
#include "storm/storage/Decomposition.h"
#include "storm/storage/SparseMatrix.h"
namespace storm {
class Environment;
namespace models {
namespace sparse {
template <typename VT> class StandardRewardModel;
}
}
namespace modelchecker {
namespace helper {
/*!
* Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
* @tparam ValueType the type a value can have
* @tparam Nondeterministic true if there is nondeterminism in the Model (MDP or MA)
*/
template <typename ValueType, bool Nondeterministic>
class SparseInfiniteHorizonHelper : public SingleValueModelCheckerHelper<ValueType> {
public:
/*!
* The type of a component in which the system resides in the long run (BSCC for deterministic models, MEC for nondeterministic models)
*/
using LongRunComponentType = typename std::conditional<Nondeterministic, storm::storage::MaximalEndComponent, storm::storage::StronglyConnectedComponent>::type;
/*!
* Function mapping from indices to values
*/
typedef std::function<ValueType(uint64_t)> ValueGetter;
/*!
* Initializes the helper for a discrete time (i.e. DTMC, MDP)
*/
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
/*!
* Initializes the helper for continuous time (i.e. MA)
*/
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
/*!
* Initializes the helper for continuous time (i.e. CTMC)
*/
SparseInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates);
/*!
* Provides backward transitions that can be used during the computation.
* Providing them is optional. If they are not provided, they will be computed internally
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the backwardstransitions remains valid.
*/
void provideBackwardTransitions(storm::storage::SparseMatrix<ValueType> const& backwardsTransitions);
/*!
* Provides the decomposition into long run components (BSCCs/MECs) that can be used during the computation.
* Providing the decomposition is optional. If it is not provided, they will be computed internally.
* Be aware that this class does not take ownership, i.e. the caller has to make sure that the reference to the decomposition remains valid.
*/
void provideLongRunComponentDecomposition(storm::storage::Decomposition<LongRunComponentType> const& decomposition);
/*!
* Computes the long run average probabilities, i.e., the fraction of the time we are in a psiState
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::storage::BitVector const& psiStates);
/*!
* Computes the long run average rewards, i.e., the average reward collected per time unit
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::models::sparse::StandardRewardModel<ValueType> const& rewardModel);
/*!
* Computes the long run average value given the provided state and action-based rewards.
* @param stateValues a vector containing a value for every state
* @param actionValues a vector containing a value for every choice
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, std::vector<ValueType> const* stateValues = nullptr, std::vector<ValueType> const* actionValues = nullptr);
/*!
* Computes the long run average value given the provided state and action based rewards
* @param stateValuesGetter a function returning a value for a given state index
* @param actionValuesGetter a function returning a value for a given (global) choice index
* @return a value for each state
*/
std::vector<ValueType> computeLongRunAverageValues(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter);
/*!
* @param stateValuesGetter a function returning a value for a given state index
* @param actionValuesGetter a function returning a value for a given (global) choice index
* @return the (unique) optimal LRA value for the given component.
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
*/
virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, LongRunComponentType const& component) = 0;
protected:
/*!
* @return true iff this is a computation on a continuous time model (i.e. CTMC, MA)
*/
bool isContinuousTime() const;
/*!
* @post _longRunComponentDecomposition points to a decomposition of the long run components (MECs, BSCCs)
*/
virtual void createDecomposition() = 0;
/*!
* @pre if scheduler production is enabled and Nondeterministic is true, a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
* @return Lra values for each state
* @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
*/
virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) = 0;
// The transition matrix of the input model. Not owned; the caller must keep it alive.
storm::storage::SparseMatrix<ValueType> const& _transitionMatrix;
// Markovian states of a Markov automaton; nullptr for models without Markovian states (set by the MA constructor).
storm::storage::BitVector const* _markovianStates;
// Exit rates for continuous time models (CTMC, MA); nullptr for discrete time models (set by the respective constructors).
std::vector<ValueType> const* _exitRates;
// Externally provided data (see provideBackwardTransitions / provideLongRunComponentDecomposition); nullptr until provided or computed.
storm::storage::SparseMatrix<ValueType> const* _backwardTransitions;
storm::storage::Decomposition<LongRunComponentType> const* _longRunComponentDecomposition;
// Internally computed fallbacks; the pointer members above are redirected to these when nothing was provided externally.
std::unique_ptr<storm::storage::SparseMatrix<ValueType>> _computedBackwardTransitions;
std::unique_ptr<storm::storage::Decomposition<LongRunComponentType>> _computedLongRunComponentDecomposition;
// If scheduler production is enabled, holds one (local) choice index per state after a computation call.
boost::optional<std::vector<uint64_t>> _producedOptimalChoices;
};
}
}
}

478
src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp

@ -0,0 +1,478 @@
#include "SparseNondeterministicInfiniteHorizonHelper.h"

#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h"

#include "storm/storage/SparseMatrix.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/storage/Scheduler.h"

#include "storm/solver/MinMaxLinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
#include "storm/solver/LpSolver.h"

#include "storm/utility/solver.h"
#include "storm/utility/vector.h"
#include "storm/utility/NumberTraits.h"

#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/environment/solver/MinMaxSolverEnvironment.h"

#include "storm/exceptions/UnmetRequirementException.h"
#include "storm/exceptions/InvalidSettingsException.h"
#include "storm/exceptions/InvalidOperationException.h"
#include "storm/exceptions/NotImplementedException.h"
namespace storm {
namespace modelchecker {
namespace helper {
// Constructs the helper for a discrete time model (MDP); all initialization is delegated to the base class.
template <typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix) : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix) {
// Intentionally left empty.
}
// Constructs the helper for a continuous time model (MA); all initialization is delegated to the base class.
template <typename ValueType>
SparseNondeterministicInfiniteHorizonHelper<ValueType>::SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates) : SparseInfiniteHorizonHelper<ValueType, true>(transitionMatrix, markovianStates, exitRates) {
// Intentionally left empty.
}
// Read-only access to the optimal choices produced by the most recent computation call.
// Only valid if scheduler production was enabled and a computation has been performed.
template <typename ValueType>
std::vector<uint64_t> const& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() const {
    STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
    STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
    return *(this->_producedOptimalChoices);
}
// Mutable access to the optimal choices produced by the most recent computation call.
// Only valid if scheduler production was enabled and a computation has been performed.
template <typename ValueType>
std::vector<uint64_t>& SparseNondeterministicInfiniteHorizonHelper<ValueType>::getProducedOptimalChoices() {
    STORM_LOG_ASSERT(this->isProduceSchedulerSet(), "Trying to get the produced optimal choices although no scheduler was requested.");
    STORM_LOG_ASSERT(this->_producedOptimalChoices.is_initialized(), "Trying to get the produced optimal choices but none were available. Was there a computation call before?");
    return *(this->_producedOptimalChoices);
}
// Builds a Scheduler object from the optimal choices of the most recent computation call.
// Requires that scheduler production was enabled and a computation has been performed.
template <typename ValueType>
storm::storage::Scheduler<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::extractScheduler() const {
    auto const& choices = getProducedOptimalChoices();
    uint64_t const numberOfStates = choices.size();
    storm::storage::Scheduler<ValueType> result(numberOfStates);
    for (uint64_t state = 0; state < numberOfStates; ++state) {
        result.setChoice(choices[state], state);
    }
    return result;
}
// Ensures that _longRunComponentDecomposition points to a MEC decomposition of the model,
// computing it (and, if necessary, the backward transitions) on demand.
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::createDecomposition() {
    if (this->_longRunComponentDecomposition != nullptr) {
        // A decomposition was already provided or computed earlier; nothing to do.
        return;
    }
    // The MEC decomposition needs backward transitions; compute them if none were provided.
    if (this->_backwardTransitions == nullptr) {
        this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true));
        this->_backwardTransitions = this->_computedBackwardTransitions.get();
    }
    // Compute the decomposition and redirect the (non-owning) pointer member to it.
    this->_computedLongRunComponentDecomposition = std::make_unique<storm::storage::MaximalEndComponentDecomposition<ValueType>>(this->_transitionMatrix, *this->_backwardTransitions);
    this->_longRunComponentDecomposition = this->_computedLongRunComponentDecomposition.get();
}
// Computes the (unique) optimal LRA value for the given MEC; see the base class header for the full contract.
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForComponent(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
// For models with potential nondeterminisim, we compute the LRA for a maximal end component (MEC)
// Allocate memory for the nondeterministic choices.
if (this->isProduceSchedulerSet()) {
if (!this->_producedOptimalChoices.is_initialized()) {
this->_producedOptimalChoices.emplace();
}
this->_producedOptimalChoices->resize(this->_transitionMatrix.getRowGroupCount());
}
// Singleton MECs can be solved directly without invoking a solver.
auto trivialResult = this->computeLraForTrivialMec(env, stateRewardsGetter, actionRewardsGetter, component);
if (trivialResult.first) {
return trivialResult.second;
}
// Solve nontrivial MEC with the method specified in the settings
storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
// If the method is still the default, override it to match the requested guarantees:
// LP gives exact results; VI gives sound (error-bounded) results.
if ((storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::LinearProgramming) {
STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
method = storm::solver::LraMethod::LinearProgramming;
} else if (env.solver().isForceSoundness() && env.solver().lra().isNondetLraMethodSetFromDefault() && method != storm::solver::LraMethod::ValueIteration) {
STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
method = storm::solver::LraMethod::ValueIteration;
}
// Scheduler extraction is only implemented for the VI-based method (the LP method throws later otherwise).
STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet() || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
if (method == storm::solver::LraMethod::LinearProgramming) {
return computeLraForMecLp(env, stateRewardsGetter, actionRewardsGetter, component);
} else if (method == storm::solver::LraMethod::ValueIteration) {
return computeLraForMecVi(env, stateRewardsGetter, actionRewardsGetter, component);
} else {
STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
}
}
// Attempts to solve the given MEC directly, which is possible iff it consists of a single state.
// Returns {true, lraValue} on success and {false, 0} if the MEC is not trivial.
template <typename ValueType>
std::pair<bool, ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForTrivialMec(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& component) {
// If the component only consists of a single state, we compute the LRA value directly
if (component.size() == 1) {
auto const& element = *component.begin();
uint64_t state = internal::getComponentElementState(element);
auto choiceIt = internal::getComponentElementChoicesBegin(element);
if (!this->isContinuousTime()) {
// This is an MDP.
// Find the choice with the highest/lowest reward
ValueType bestValue = actionRewardsGetter(*choiceIt);
uint64_t bestChoice = *choiceIt;
for (++choiceIt; choiceIt != internal::getComponentElementChoicesEnd(element); ++choiceIt) {
ValueType currentValue = actionRewardsGetter(*choiceIt);
if ((this->minimize() && currentValue < bestValue) || (this->maximize() && currentValue > bestValue)) {
bestValue = std::move(currentValue);
bestChoice = *choiceIt;
}
}
if (this->isProduceSchedulerSet()) {
// Store the choice as a local (within-row-group) index.
this->_producedOptimalChoices.get()[state] = bestChoice - this->_transitionMatrix.getRowGroupIndices()[state];
}
// The LRA value is the (optimal) action reward plus the state reward collected in every step.
bestValue += stateRewardsGetter(state);
return {true, bestValue};
} else {
// In a Markov Automaton, singleton components have to consist of a Markovian state because of the non-Zenoness assumption. Then, there is just one possible choice.
STORM_LOG_ASSERT(this->_markovianStates != nullptr, "Nondeterministic continuous time model without Markovian states... Is this a not a Markov Automaton?");
STORM_LOG_THROW(this->_markovianStates->get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
STORM_LOG_ASSERT(internal::getComponentElementChoiceCount(element) == 1, "Markovian state has Nondeterministic behavior.");
if (this->isProduceSchedulerSet()) {
// There is only one choice, so the local choice index is 0.
this->_producedOptimalChoices.get()[state] = 0;
}
// Action rewards are scaled with the exit rate since they are collected 'rate(state)' times per time unit.
ValueType result = stateRewardsGetter(state) + (this->isContinuousTime() ? (*this->_exitRates)[state] * actionRewardsGetter(*choiceIt) : actionRewardsGetter(*choiceIt));
return {true, result};
}
}
// The component has more than one state; a solver-based method has to be used.
return {false, storm::utility::zero<ValueType>()};
}
// Computes the LRA value of the given (nontrivial) MEC using value iteration, delegating to LraViHelper.
// If scheduler production is enabled, the optimal choices for the MEC states are written to _producedOptimalChoices.
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecVi(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
// Collect some parameters of the computation
// The aperiodic factor is a configuration value read from the environment and forwarded to the VI helper.
ValueType aperiodicFactor = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
std::vector<uint64_t>* optimalChoices = nullptr;
if (this->isProduceSchedulerSet()) {
optimalChoices = &this->_producedOptimalChoices.get();
}
// Now create a helper and perform the algorithm
if (this->isContinuousTime()) {
// We assume a Markov Automaton (with deterministic timed states and nondeterministic instant states)
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::DetTsNondetIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor, this->_markovianStates, this->_exitRates);
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, this->_exitRates, &this->getOptimizationDirection(), optimalChoices);
} else {
// We assume an MDP (with nondeterministic timed states and no instant states)
storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor);
return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
}
}
// Computes the LRA value of the given (nontrivial) MEC by solving a linear program.
// Note: scheduler extraction is not implemented for this method (throws NotImplementedException after solving).
template <typename ValueType>
ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
// Create an LP solver
auto solver = storm::utility::solver::LpSolverFactory<ValueType>().create("LRA for MEC");
// Now build the LP formulation as described in:
// Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
// The LP's objective direction is the inverse of the queried optimization direction.
solver->setOptimizationDirection(invert(this->getOptimizationDirection()));
// Create variables
// TODO: Investigate whether we can easily make the variables bounded
std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap;
for (auto const& stateChoicesPair : mec) {
std::string variableName = "x" + std::to_string(stateChoicesPair.first);
stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
}
// k is the variable whose optimal value will be the LRA value (objective coefficient 1).
storm::expressions::Variable k = solver->addUnboundedContinuousVariable("k", storm::utility::one<ValueType>());
solver->update();
// Add constraints.
for (auto const& stateChoicesPair : mec) {
uint_fast64_t state = stateChoicesPair.first;
bool stateIsMarkovian = this->_markovianStates && this->_markovianStates->get(state);
// Now create a suitable constraint for each choice
// x_s {≤, ≥} -k/rate(s) + sum_s' P(s,act,s') * x_s' + (value(s)/rate(s) + value(s,act))
for (auto choice : stateChoicesPair.second) {
std::vector<storm::expressions::Expression> summands;
auto matrixRow = this->_transitionMatrix.getRow(choice);
summands.reserve(matrixRow.getNumberOfEntries() + 2);
// add -k/rate(s) (only if s is either a Markovian state or we have an MDP)
if (stateIsMarkovian) {
summands.push_back(-(k / solver->getManager().rational((*this->_exitRates)[state])));
} else if (!this->isContinuousTime()) {
summands.push_back(-k);
}
// add sum_s' P(s,act,s') * x_s'
for (auto const& element : matrixRow) {
summands.push_back(stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue()));
}
// add value for state and choice
ValueType value;
if (stateIsMarkovian) {
// divide state reward with exit rate
value = stateRewardsGetter(state) / (*this->_exitRates)[state] + actionRewardsGetter(choice);
} else if (!this->isContinuousTime()) {
// in discrete time models no scaling is needed
value = stateRewardsGetter(state) + actionRewardsGetter(choice);
} else {
// state is a probabilistic state of a Markov automaton. The state reward is not collected
value = actionRewardsGetter(choice);
}
summands.push_back(solver->getConstant(value));
// The constraint direction depends on the queried optimization direction.
storm::expressions::Expression constraint;
if (this->minimize()) {
constraint = stateToVariableMap.at(state) <= storm::expressions::sum(summands);
} else {
constraint = stateToVariableMap.at(state) >= storm::expressions::sum(summands);
}
solver->addConstraint("s" + std::to_string(state) + "," + std::to_string(choice), constraint);
}
}
solver->optimize();
// NOTE(review): this check could happen before building/solving the LP to avoid wasted work — confirm before moving.
STORM_LOG_THROW(!this->isProduceSchedulerSet(), storm::exceptions::NotImplementedException, "Scheduler extraction is not yet implemented for LP based LRA method.");
return solver->getContinuousValue(k);
}
/*!
 * Auxiliary function that writes the entries of one choice (row) of the SSP matrix.
 * Transitions into a component state are redirected to that component's auxiliary state
 * (one auxiliary state per component, appended after the non-component states);
 * all other transitions are copied, translating state indices via the given mapping.
 */
template <typename ValueType>
void addSspMatrixChoice(uint64_t const& inputMatrixChoice, storm::storage::SparseMatrix<ValueType> const& inputTransitionMatrix, std::vector<uint64_t> const& inputToSspStateMap, uint64_t const& numberOfNonComponentStates, uint64_t const& currentSspChoice, storm::storage::SparseMatrixBuilder<ValueType>& sspMatrixBuilder) {
    // Several transitions of this choice may enter the same component; accumulate their
    // probabilities here before emitting a single matrix entry per auxiliary state.
    std::map<uint64_t, ValueType> accumulatedComponentProbabilities;
    for (auto const& transition : inputTransitionMatrix.getRow(inputMatrixChoice)) {
        if (storm::utility::isZero(transition.getValue())) {
            continue;
        }
        uint64_t const sspTarget = inputToSspStateMap[transition.getColumn()];
        // Auxiliary component states are located at the end of the matrix, so an index below
        // numberOfNonComponentStates identifies a regular (non-component) target state.
        if (sspTarget < numberOfNonComponentStates) {
            // Regular target: the entry can be copied over directly.
            sspMatrixBuilder.addNextValue(currentSspChoice, sspTarget, transition.getValue());
        } else {
            // Component target: add the probability to the accumulator for that auxiliary state.
            auto insertionResult = accumulatedComponentProbabilities.emplace(sspTarget, transition.getValue());
            if (!insertionResult.second) {
                // The auxiliary state was already present, i.e., another transition of this
                // choice enters the same component; sum up the probabilities.
                insertionResult.first->second += transition.getValue();
            }
        }
    }
    // Emit the accumulated probabilities. std::map iterates in ascending key order, which
    // matches the column order the matrix builder expects.
    for (auto const& componentProbability : accumulatedComponentProbabilities) {
        sspMatrixBuilder.addNextValue(currentSspChoice, componentProbability.first, componentProbability.second);
    }
}
// Builds the transition matrix and right-hand-side vector of the stochastic shortest path (SSP) problem.
// The SSP states are the non-component states (order preserved) followed by one auxiliary state per component.
// If sspComponentExitChoicesToOriginalMap is non-null, it records for each auxiliary-state choice the
// originating (state, local choice) pair, with an invalid sentinel for the "stay in component" choice.
template <typename ValueType>
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildSspMatrixVector(std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap) {
auto const& choiceIndices = this->_transitionMatrix.getRowGroupIndices();
std::vector<ValueType> rhs;
uint64_t numberOfSspStates = numberOfNonComponentStates + this->_longRunComponentDecomposition->size();
storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates , 0, true, true, numberOfSspStates);
// If the source state of a transition is not contained in any component, we copy its choices (and perform the necessary modifications).
uint64_t currentSspChoice = 0;
for (auto const& nonComponentState : statesNotInComponent) {
sspMatrixBuilder.newRowGroup(currentSspChoice);
for (uint64_t choice = choiceIndices[nonComponentState]; choice < choiceIndices[nonComponentState + 1]; ++choice, ++currentSspChoice) {
// Non-component choices contribute no reward in the SSP (rhs entry 0).
rhs.push_back(storm::utility::zero<ValueType>());
addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder);
}
}
// Now we construct the choices for the auxiliary states which reflect former Component states.
for (uint64_t componentIndex = 0; componentIndex < this->_longRunComponentDecomposition->size(); ++componentIndex) {
auto const& component = (*this->_longRunComponentDecomposition)[componentIndex];
sspMatrixBuilder.newRowGroup(currentSspChoice);
// For nondeterministic models it might still be that we leave the component again. This needs to be reflected in the SSP
// by adding the "exiting" choices of the MEC to the axiliary states
for (auto const& element : component) {
uint64_t componentState = internal::getComponentElementState(element);
for (uint64_t choice = choiceIndices[componentState]; choice < choiceIndices[componentState + 1]; ++choice) {
// If the choice is not contained in the component itself, we have to add a similar distribution to the auxiliary state.
if (!internal::componentElementChoicesContains(element, choice)) {
rhs.push_back(storm::utility::zero<ValueType>());
addSspMatrixChoice(choice, this->_transitionMatrix, inputToSspStateMap, numberOfNonComponentStates, currentSspChoice, sspMatrixBuilder);
if (sspComponentExitChoicesToOriginalMap) {
// Later we need to be able to map this choice back to the original input model
sspComponentExitChoicesToOriginalMap->emplace_back(componentState, choice - choiceIndices[componentState]);
}
++currentSspChoice;
}
}
}
// For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the component.
// This "stay" choice has no transitions; its value enters via the rhs entry.
rhs.push_back(mecLraValues[componentIndex]);
if (sspComponentExitChoicesToOriginalMap) {
// Insert some invalid values so we can later detect that this choice is not an exit choice
sspComponentExitChoicesToOriginalMap->emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
}
++currentSspChoice;
}
return std::make_pair(sspMatrixBuilder.build(currentSspChoice, numberOfSspStates, numberOfSspStates), std::move(rhs));
}
// Translates the scheduler choices obtained for the SSP back into choices for the original model,
// writing them into _producedOptimalChoices.
template <typename ValueType>
void SparseNondeterministicInfiniteHorizonHelper<ValueType>::constructOptimalChoices(std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap) {
// We first take care of non-mec states
storm::utility::vector::setVectorValues(this->_producedOptimalChoices.get(), statesNotInComponent, sspChoices);
// Secondly, we consider MEC states. There are 3 cases for each MEC state:
// 1. The SSP choices encode that we want to stay in the MEC
// 2. The SSP choices encode that we want to leave the MEC and
// a) we take an exit (non-MEC) choice at the given state
// b) we have to take a MEC choice at the given state in a way that eventually an exit state of the MEC is reached
uint64_t exitChoiceOffset = sspMatrix.getRowGroupIndices()[numberOfNonComponentStates];
for (auto const& mec : *this->_longRunComponentDecomposition) {
// Get the sspState of this MEC (using one representative mec state)
auto const& sspState = inputToSspStateMap[mec.begin()->first];
uint64_t sspChoiceIndex = sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState];
// Obtain the state and choice of the original model to which the selected choice corresponds.
auto const& originalStateChoice = sspComponentExitChoicesToOriginalMap[sspChoiceIndex - exitChoiceOffset];
// Check if we are in Case 1 or 2
// The "stay" choice was recorded with an invalid (max) sentinel in the map (see buildSspMatrixVector).
if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
// The optimal choice is to stay in this mec (Case 1)
// In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMec.
STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
} else {
// The best choice is to leave this MEC via the selected state and choice. (Case 2)
// Set the exit choice (Case 2.a)
this->_producedOptimalChoices.get()[originalStateChoice.first] = originalStateChoice.second;
// The remaining states in this MEC need to reach the state with the exit choice with probability 1. (Case 2.b)
// Perform a backwards search from the exit state, only using MEC choices
// We start by setting an invalid choice to all remaining mec states (so that we can easily detect them as unprocessed)
for (auto const& stateActions : mec) {
if (stateActions.first != originalStateChoice.first) {
this->_producedOptimalChoices.get()[stateActions.first] = std::numeric_limits<uint64_t>::max();
}
}
// Ensure that backwards transitions are available
if (this->_backwardTransitions == nullptr) {
this->_computedBackwardTransitions = std::make_unique<storm::storage::SparseMatrix<ValueType>>(this->_transitionMatrix.transpose(true));
this->_backwardTransitions = this->_computedBackwardTransitions.get();
}
// Now start a backwards DFS
std::vector<uint64_t> stack = {originalStateChoice.first};
while (!stack.empty()) {
uint64_t currentState = stack.back();
stack.pop_back();
for (auto const& backwardsTransition : this->_backwardTransitions->getRowGroup(currentState)) {
uint64_t predecessorState = backwardsTransition.getColumn();
if (mec.containsState(predecessorState)) {
auto& selectedPredChoice = this->_producedOptimalChoices.get()[predecessorState];
if (selectedPredChoice == std::numeric_limits<uint64_t>::max()) {
// We don't already have a choice for this predecessor.
// We now need to check whether there is a *MEC* choice leading to currentState
for (auto const& predChoice : mec.getChoicesForState(predecessorState)) {
for (auto const& forwardTransition : this->_transitionMatrix.getRow(predChoice)) {
if (forwardTransition.getColumn() == currentState && !storm::utility::isZero(forwardTransition.getValue())) {
// Playing this choice (infinitely often) will lead to current state (infinitely often)!
// Store the local (within-row-group) choice index and continue the DFS from this predecessor.
selectedPredChoice = predChoice - this->_transitionMatrix.getRowGroupIndices()[predecessorState];
stack.push_back(predecessorState);
break;
}
}
if (selectedPredChoice != std::numeric_limits<uint64_t>::max()) {
break;
}
}
}
}
}
}
}
}
}
// Builds and solves the stochastic shortest path (SSP) problem that lifts the per-component LRA values
// to LRA values for all states of the input model. See the base class header for pre-/post-conditions.
template <typename ValueType>
std::vector<ValueType> SparseNondeterministicInfiniteHorizonHelper<ValueType>::buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& componentLraValues) {
STORM_LOG_ASSERT(this->_longRunComponentDecomposition != nullptr, "Decomposition not computed, yet.");
// For fast transition rewriting, we build a mapping from the input state indices to the state indices of a new transition matrix
// which redirects all transitions leading to a former component state to a new auxiliary state.
// There will be one auxiliary state for each component. These states will be appended to the end of the matrix.
// First gather the states that are part of a component
// and create a mapping from states that lie in a component to the corresponding component index.
storm::storage::BitVector statesInComponents(this->_transitionMatrix.getRowGroupCount());
std::vector<uint64_t> inputToSspStateMap(this->_transitionMatrix.getRowGroupCount(), std::numeric_limits<uint64_t>::max());
for (uint64_t currentComponentIndex = 0; currentComponentIndex < this->_longRunComponentDecomposition->size(); ++currentComponentIndex) {
for (auto const& element : (*this->_longRunComponentDecomposition)[currentComponentIndex]) {
uint64_t state = internal::getComponentElementState(element);
statesInComponents.set(state);
inputToSspStateMap[state] = currentComponentIndex;
}
}
// Now take care of the non-component states. Note that the order of these states will be preserved.
uint64_t numberOfNonComponentStates = 0;
storm::storage::BitVector statesNotInComponent = ~statesInComponents;
for (auto const& nonComponentState : statesNotInComponent) {
inputToSspStateMap[nonComponentState] = numberOfNonComponentStates;
++numberOfNonComponentStates;
}
// Finalize the mapping for the component states which now still assigns component states to to their component index.
// To make sure that they point to the auxiliary states (located at the end of the SspMatrix), we need to shift them by the
// number of states that are not in a component.
for (auto const& mecState : statesInComponents) {
inputToSspStateMap[mecState] += numberOfNonComponentStates;
}
// For scheduler extraction, we will need to create a mapping between choices at the auxiliary states and the
// corresponding choices in the original model.
std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspComponentExitChoicesToOriginalMap;
// The next step is to create the SSP matrix and the right-hand side of the SSP.
auto sspMatrixVector = buildSspMatrixVector(componentLraValues, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates, this->isProduceSchedulerSet() ? &sspComponentExitChoicesToOriginalMap : nullptr);
// Set-up a solver
storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(env, true, true, this->getOptimizationDirection(), false, this->isProduceSchedulerSet());
// Value bounds are provided below, so the bounds requirement is dischargeable here.
requirements.clearBounds();
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(env, sspMatrixVector.first);
solver->setHasUniqueSolution();
solver->setHasNoEndComponents();
solver->setTrackScheduler(this->isProduceSchedulerSet());
// The SSP solution is bounded by the smallest and largest component LRA value.
auto lowerUpperBounds = std::minmax_element(componentLraValues.begin(), componentLraValues.end());
solver->setLowerBound(*lowerUpperBounds.first);
solver->setUpperBound(*lowerUpperBounds.second);
solver->setRequirementsChecked();
// Solve the equation system
std::vector<ValueType> x(sspMatrixVector.first.getRowGroupCount());
solver->solveEquations(env, this->getOptimizationDirection(), x, sspMatrixVector.second);
// Prepare scheduler (if requested)
if (this->isProduceSchedulerSet() && solver->hasScheduler()) {
// Translate result for ssp matrix to original model
constructOptimalChoices(solver->getSchedulerChoices(), sspMatrixVector.first, inputToSspStateMap, statesNotInComponent, numberOfNonComponentStates, sspComponentExitChoicesToOriginalMap);
} else {
STORM_LOG_ERROR_COND(!this->isProduceSchedulerSet(), "Requested to produce a scheduler, but no scheduler was generated.");
}
// Prepare result vector.
// For efficiency reasons, we re-use the memory of our rhs for this!
std::vector<ValueType> result = std::move(sspMatrixVector.second);
result.resize(this->_transitionMatrix.getRowGroupCount());
result.shrink_to_fit();
// Map each input state to the solution value of its SSP state.
storm::utility::vector::selectVectorValues(result, inputToSspStateMap, x);
return result;
}
// Explicit template instantiations for the supported value types.
template class SparseNondeterministicInfiniteHorizonHelper<double>;
template class SparseNondeterministicInfiniteHorizonHelper<storm::RationalNumber>;
}
}
}

101
src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h

@@ -0,0 +1,101 @@
#pragma once
#include "storm/modelchecker/helper/infinitehorizon/SparseInfiniteHorizonHelper.h"
namespace storm {
namespace storage {
// Forward declaration; the full Scheduler definition is only needed in the implementation file.
template <typename VT> class Scheduler;
}
namespace modelchecker {
namespace helper {
/*!
 * Helper class for model checking queries that depend on the long run behavior of the (nondeterministic) system.
 * @tparam ValueType the type a value can have
 */
template <typename ValueType>
class SparseNondeterministicInfiniteHorizonHelper : public SparseInfiniteHorizonHelper<ValueType, true> {
public:
/*!
 * Function mapping from indices to values
 */
typedef typename SparseInfiniteHorizonHelper<ValueType, true>::ValueGetter ValueGetter;
/*!
 * Initializes the helper for a discrete time model (i.e. MDP)
 */
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix);
/*!
 * Initializes the helper for a continuous time model (i.e. MA)
 */
SparseNondeterministicInfiniteHorizonHelper(storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& markovianStates, std::vector<ValueType> const& exitRates);
/*!
 * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
 * @return the produced scheduler of the most recent call.
 */
std::vector<uint64_t> const& getProducedOptimalChoices() const;
/*!
 * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
 * @return the produced scheduler of the most recent call.
 */
std::vector<uint64_t>& getProducedOptimalChoices();
/*!
 * @pre before calling this, a computation call should have been performed during which scheduler production was enabled.
 * @return a new scheduler containing optimal choices for each state that yield the long run average values of the most recent call.
 */
storm::storage::Scheduler<ValueType> extractScheduler() const;
/*!
 * @param stateValuesGetter a function returning a value for a given state index
 * @param actionValuesGetter a function returning a value for a given (global) choice index
 * @return the (unique) optimal LRA value for the given component.
 * @post if scheduler production is enabled and Nondeterministic is true, getProducedOptimalChoices() contains choices for the states of the given component which yield the returned LRA value. Choices for states outside of the component are not affected.
 */
virtual ValueType computeLraForComponent(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& component) override;
protected:
/*!
 * Creates the decomposition of the model into its components (MECs for this nondeterministic helper).
 */
virtual void createDecomposition() override;
/*!
 * Handles the degenerate case of a trivial MEC.
 * NOTE(review): the exact semantics of the returned bool are not visible here — presumably it signals
 * whether the trivial case applied (first) together with the computed value (second); confirm in the .cpp.
 */
std::pair<bool, ValueType> computeLraForTrivialMec(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
/*!
 * As computeLraForMec but uses value iteration as a solution method (independent of what is set in env)
 */
ValueType computeLraForMecVi(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
/*!
 * As computeLraForMec but uses linear programming as a solution method (independent of what is set in env)
 * @see Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
 */
ValueType computeLraForMecLp(Environment const& env, ValueGetter const& stateValuesGetter, ValueGetter const& actionValuesGetter, storm::storage::MaximalEndComponent const& mec);
/*!
 * Builds the matrix and right-hand-side vector of the stochastic shortest path (SSP) problem that
 * combines the per-component LRA values into values for the whole model.
 * @param sspComponentExitChoicesToOriginalMap if not null, is filled so that component-exiting SSP choices can be mapped back to (state, choice) pairs of the original model (used for scheduler extraction).
 */
std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> buildSspMatrixVector(std::vector<ValueType> const& mecLraValues, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>>* sspComponentExitChoicesToOriginalMap);
/*!
 * @pre a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
 * Translates optimal choices for MECS and SSP to the original model.
 * @post getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
 */
void constructOptimalChoices(std::vector<uint64_t> const& sspChoices, storm::storage::SparseMatrix<ValueType> const& sspMatrix, std::vector<uint64_t> const& inputToSspStateMap, storm::storage::BitVector const& statesNotInComponent, uint64_t numberOfNonComponentStates, std::vector<std::pair<uint64_t, uint64_t>> const& sspComponentExitChoicesToOriginalMap);
/*!
 * @pre if scheduler production is enabled a choice for each state within a component must be set such that the choices yield optimal values w.r.t. the individual components.
 * @return Lra values for each state
 * @post if scheduler production is enabled getProducedOptimalChoices() contains choices for all input model states which yield the returned LRA values.
 */
virtual std::vector<ValueType> buildAndSolveSsp(Environment const& env, std::vector<ValueType> const& mecLraValues) override;
};
} // namespace helper
} // namespace modelchecker
} // namespace storm

27
src/storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h

@@ -0,0 +1,27 @@
#pragma once
#include "storm/storage/StronglyConnectedComponent.h"
#include "storm/storage/MaximalEndComponent.h"
namespace storm {
namespace modelchecker {
namespace helper {
namespace internal {
/// Auxiliary functions that deal with the different kinds of components (MECs on potentially nondeterministic models and BSCCs on deterministic models)
// BSCCS:
inline uint64_t getComponentElementState(typename storm::storage::StronglyConnectedComponent::value_type const& element) {
    // A BSCC element is the state index itself.
    return element;
}
inline uint64_t getComponentElementChoiceCount(typename storm::storage::StronglyConnectedComponent::value_type const& element) {
    // Assumes deterministic model!
    return 1;
}
inline uint64_t const* getComponentElementChoicesBegin(typename storm::storage::StronglyConnectedComponent::value_type const& element) {
    // In the deterministic case the single choice of a state is identified with the state index.
    return &element;
}
inline uint64_t const* getComponentElementChoicesEnd(typename storm::storage::StronglyConnectedComponent::value_type const& element) {
    return &element + 1;
}
inline bool componentElementChoicesContains(typename storm::storage::StronglyConnectedComponent::value_type const& element, uint64_t choice) {
    return element == choice;
}
// MECS:
inline uint64_t getComponentElementState(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) {
    // A MEC element maps a state index (first) to its set of enabled choices (second).
    return element.first;
}
inline uint64_t getComponentElementChoiceCount(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) {
    return element.second.size();
}
inline typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentElementChoicesBegin(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) {
    return element.second.begin();
}
inline typename storm::storage::MaximalEndComponent::set_type::const_iterator getComponentElementChoicesEnd(typename storm::storage::MaximalEndComponent::map_type::value_type const& element) {
    return element.second.end();
}
inline bool componentElementChoicesContains(storm::storage::MaximalEndComponent::map_type::value_type const& element, uint64_t choice) {
    return element.second.contains(choice);
}
}
}
}
}

491
src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp

@@ -0,0 +1,491 @@
#include "LraViHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/internal/ComponentUtility.h"
#include "storm/storage/MaximalEndComponent.h"
#include "storm/storage/StronglyConnectedComponent.h"
#include "storm/utility/graph.h"
#include "storm/utility/vector.h"
#include "storm/utility/macros.h"
#include "storm/utility/SignalHandler.h"
#include "storm/environment/solver/SolverEnvironment.h"
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
#include "storm/exceptions/UnmetRequirementException.h"
namespace storm {
namespace modelchecker {
namespace helper {
namespace internal {
/*!
 * Constructs the helper for a single component (a MEC of a potentially nondeterministic model
 * or a BSCC of a deterministic model).
 * In a first pass over the component, states are partitioned into timed and instant states,
 * submodel indices are assigned, and the uniformization rate is determined.
 * In a second pass, up to four submatrices are built: timed->timed (uniformized with a selfloop),
 * timed->instant, instant->instant, and instant->timed transitions.
 * @param aperiodicFactor how much the uniformization rate exceeds the maximal exit rate (ensures aperiodicity)
 * @param timedStates partition into timed/instant states (only required if the model has instant states)
 * @param exitRates exit rates of the continuous-time model (nullptr for discrete-time models)
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
LraViHelper<ValueType, ComponentType, TransitionsType>::LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates, std::vector<ValueType> const* exitRates) : _component(component), _transitionMatrix(transitionMatrix), _timedStates(timedStates), _hasInstantStates(TransitionsType == LraViTransitionsType::DetTsNondetIs || TransitionsType == LraViTransitionsType::DetTsDetIs), _Tsx1IsCurrent(false) {
// Run through the component and collect some data:
// We create two submodels, one consisting of the timed states of the component and one consisting of the instant states of the component.
// For this, we create a state index map that point from state indices of the input model to indices of the corresponding submodel of that state.
std::map<uint64_t, uint64_t> toSubModelStateMapping;
// We also obtain state and choices counts of the two submodels
uint64_t numTsSubModelStates(0), numTsSubModelChoices(0);
uint64_t numIsSubModelStates(0), numIsSubModelChoices(0);
// We will need to uniformize the timed MEC states by introducing a selfloop.
// For this, we need to find a uniformization rate which will be a little higher (given by aperiodicFactor) than the maximum rate occurring in the component.
// For discrete-time models (no exit rates) the rate is fixed to one.
_uniformizationRate = exitRates == nullptr ? storm::utility::one<ValueType>() : storm::utility::zero<ValueType>();
// Now run over the MEC and collect the required data.
for (auto const& element : _component) {
uint64_t componentState = getComponentElementState(element);
if (isTimedState(componentState)) {
toSubModelStateMapping.emplace(componentState, numTsSubModelStates);
++numTsSubModelStates;
numTsSubModelChoices += getComponentElementChoiceCount(element);
STORM_LOG_ASSERT(nondetTs() || getComponentElementChoiceCount(element) == 1, "Timed state has multiple choices but only a single choice was expected.");
if (exitRates) {
_uniformizationRate = std::max(_uniformizationRate, (*exitRates)[componentState]);
}
} else {
toSubModelStateMapping.emplace(componentState, numIsSubModelStates);
++numIsSubModelStates;
numIsSubModelChoices += getComponentElementChoiceCount(element);
STORM_LOG_ASSERT(nondetIs() || getComponentElementChoiceCount(element) == 1, "Instant state has multiple choices but only a single choice was expected.");
}
}
// Sanity checks on the collected counts.
assert(numIsSubModelStates + numTsSubModelStates == _component.size());
assert(_hasInstantStates || numIsSubModelStates == 0);
STORM_LOG_ASSERT(nondetTs() || numTsSubModelStates == numTsSubModelChoices, "Unexpected choice count of deterministic timed submodel.");
STORM_LOG_ASSERT(nondetIs() || numIsSubModelStates == numIsSubModelChoices, "Unexpected choice count of deterministic instant submodel.");
// If this particular component contains no instant state, treat it as if there were none at all.
_hasInstantStates = _hasInstantStates && numIsSubModelStates > 0;
STORM_LOG_THROW(numTsSubModelStates > 0, storm::exceptions::InvalidOperationException, "Bottom Component has no timed states. Computation of Long Run Average values not supported. Is this a Markov Automaton with Zeno behavior?");
// We make sure that every timed state gets a selfloop to make the model aperiodic
_uniformizationRate *= storm::utility::one<ValueType>() + aperiodicFactor;
// Now build the timed and the instant submodels.
// In addition, we also need the transitions between the two.
storm::storage::SparseMatrixBuilder<ValueType> tsTransitionsBuilder(numTsSubModelChoices, numTsSubModelStates, 0, true, nondetTs(), nondetTs() ? numTsSubModelStates : 0);
storm::storage::SparseMatrixBuilder<ValueType> tsToIsTransitionsBuilder, isTransitionsBuilder, isToTsTransitionsBuilder;
if (_hasInstantStates) {
tsToIsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numTsSubModelChoices, numIsSubModelStates, 0, true, nondetTs(), nondetTs() ? numTsSubModelStates : 0);
isTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numIsSubModelChoices, numIsSubModelStates, 0, true, nondetIs(), nondetIs() ? numIsSubModelStates : 0);
isToTsTransitionsBuilder = storm::storage::SparseMatrixBuilder<ValueType>(numIsSubModelChoices, numTsSubModelStates, 0, true, nondetIs(), nondetIs() ? numIsSubModelStates : 0);
_IsChoiceValues.reserve(numIsSubModelChoices);
}
ValueType uniformizationFactor = storm::utility::one<ValueType>() / _uniformizationRate;
uint64_t currTsRow = 0;
uint64_t currIsRow = 0;
// Second pass: fill the matrix builders row by row (same iteration order as the first pass,
// so the submodel indices assigned above match).
for (auto const& element : _component) {
uint64_t componentState = getComponentElementState(element);
if (isTimedState(componentState)) {
// The currently processed state is timed.
if (nondetTs()) {
tsTransitionsBuilder.newRowGroup(currTsRow);
if (_hasInstantStates) {
tsToIsTransitionsBuilder.newRowGroup(currTsRow);
}
}
// If there are exit rates, the uniformization factor needs to be updated.
if (exitRates) {
uniformizationFactor = (*exitRates)[componentState] / _uniformizationRate;
}
// We need to uniformize which means that a diagonal entry for the selfloop will be inserted.
ValueType selfLoopProb = storm::utility::one<ValueType>() - uniformizationFactor;
for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
tsTransitionsBuilder.addDiagonalEntry(currTsRow, selfLoopProb);
for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) {
uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
if (isTimedState(entry.getColumn())) {
// We have a transition from a timed state to a timed state
STORM_LOG_ASSERT(subModelColumn < numTsSubModelStates, "Invalid state for timed submodel");
tsTransitionsBuilder.addNextValue(currTsRow, subModelColumn, uniformizationFactor * entry.getValue());
} else {
// We have a transition from a timed to a instant state
STORM_LOG_ASSERT(subModelColumn < numIsSubModelStates, "Invalid state for instant submodel");
tsToIsTransitionsBuilder.addNextValue(currTsRow, subModelColumn, uniformizationFactor * entry.getValue());
}
}
++currTsRow;
}
} else {
// The currently processed state is instant
if (nondetIs()) {
isTransitionsBuilder.newRowGroup(currIsRow);
isToTsTransitionsBuilder.newRowGroup(currIsRow);
}
// Instant transitions happen in zero time, hence no uniformization here.
for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
for (auto const& entry : this->_transitionMatrix.getRow(*componentChoiceIt)) {
uint64_t subModelColumn = toSubModelStateMapping[entry.getColumn()];
if (isTimedState(entry.getColumn())) {
// We have a transition from an instant state to a timed state
STORM_LOG_ASSERT(subModelColumn < numTsSubModelStates, "Invalid state for timed submodel");
isToTsTransitionsBuilder.addNextValue(currIsRow, subModelColumn, entry.getValue());
} else {
// We have a transition from an instant to an instant state
STORM_LOG_ASSERT(subModelColumn < numIsSubModelStates, "Invalid state for instant submodel");
isTransitionsBuilder.addNextValue(currIsRow, subModelColumn, entry.getValue());
}
}
++currIsRow;
}
}
}
_TsTransitions = tsTransitionsBuilder.build();
if (_hasInstantStates) {
_TsToIsTransitions = tsToIsTransitionsBuilder.build();
_IsTransitions = isTransitionsBuilder.build();
_IsToTsTransitions = isToTsTransitionsBuilder.build();
}
}
/*!
 * Computes the LRA value of the component via value iteration on the uniformized submodels.
 * @param stateValueGetter value obtained per time unit spent in a state
 * @param actionValueGetter value obtained when taking a (global) choice
 * @param exitRates exit rates of the continuous-time model (nullptr for discrete-time models)
 * @param dir optimization direction (required iff the component contains nondeterminism)
 * @param choices if not null, the optimal choices are inserted for the states of this component
 * @return the (approximated) LRA value of the component
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
ValueType LraViHelper<ValueType, ComponentType, TransitionsType>::performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
initializeNewValues(stateValueGetter, actionValueGetter, exitRates);
ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision());
bool relative = env.solver().lra().getRelativeTerminationCriterion();
boost::optional<uint64_t> maxIter;
if (env.solver().lra().isMaximalIterationCountSet()) {
maxIter = env.solver().lra().getMaximalIterationCount();
}
// start the iterations
ValueType result = storm::utility::zero<ValueType>();
uint64_t iter = 0;
// Track whether the termination criterion was actually met. Previously, the code decided the
// "did not converge" warning solely via iter == maxIter, which wrongly warned when convergence
// was achieved in exactly the last permitted iteration.
bool converged = false;
while (!maxIter.is_initialized() || iter < maxIter.get()) {
++iter;
performIterationStep(env, dir);
// Check if we are done
auto convergenceCheckResult = checkConvergence(relative, precision);
result = convergenceCheckResult.currentValue;
if (convergenceCheckResult.isPrecisionAchieved) {
converged = true;
break;
}
if (storm::utility::resources::isTerminate()) {
break;
}
// If there will be a next iteration, we have to prepare it.
prepareNextIteration(env);
}
if (converged) {
STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
} else if (storm::utility::resources::isTerminate()) {
STORM_LOG_WARN("LRA computation aborted after " << iter << " iterations.");
} else {
// The loop can only exit unconverged and unaborted by exhausting the iteration budget.
STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
}
if (choices) {
// We will be doing one more iteration step and track scheduler choices this time.
prepareNextIteration(env);
performIterationStep(env, dir, choices);
}
return result;
}
/*!
 * (Re-)initializes the per-choice value vectors and the iteration vectors.
 * Must iterate the component in the same order as the constructor so that the k-th pushed value
 * corresponds to the k-th row of the respective submodel matrix.
 * Choice values of timed states are scaled by 1/_uniformizationRate (values per uniformization step);
 * instant states contribute only action values since no time passes in them.
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
void LraViHelper<ValueType, ComponentType, TransitionsType>::initializeNewValues(ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates) {
// clear potential old values and reserve enough space for new values
_TsChoiceValues.clear();
_TsChoiceValues.reserve(_TsTransitions.getRowCount());
if (_hasInstantStates) {
_IsChoiceValues.clear();
_IsChoiceValues.reserve(_IsTransitions.getRowCount());
}
// Set the new choice-based values
ValueType actionRewardScalingFactor = storm::utility::one<ValueType>() / _uniformizationRate;
for (auto const& element : _component) {
uint64_t componentState = getComponentElementState(element);
if (isTimedState(componentState)) {
// For continuous-time models, action rewards are scaled by the state's exit rate.
if (exitRates) {
actionRewardScalingFactor = (*exitRates)[componentState] / _uniformizationRate;
}
for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
// Compute the values obtained for this choice.
_TsChoiceValues.push_back(stateValueGetter(componentState) / _uniformizationRate + actionValueGetter(*componentChoiceIt) * actionRewardScalingFactor);
}
} else {
for (auto componentChoiceIt = getComponentElementChoicesBegin(element); componentChoiceIt != getComponentElementChoicesEnd(element); ++componentChoiceIt) {
// Compute the values obtained for this choice.
// State values do not count here since no time passes in instant states.
_IsChoiceValues.push_back(actionValueGetter(*componentChoiceIt));
}
}
}
// Set-up new iteration vectors for timed states
_Tsx1.assign(_TsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
_Tsx2 = _Tsx1;
if (_hasInstantStates) {
// Set-up vectors for storing intermediate results for instant states.
_Isx.resize(_IsTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
_Isb = _IsChoiceValues;
}
}
/*!
 * Lazily creates the multipliers for the four submodel matrices and — if there are transitions
 * among instant states — an equation solver (MinMax for nondeterministic, linear otherwise)
 * that aggregates arbitrarily many instant steps into a single value.
 * @param dir optimization direction; only dereferenced in the nondeterministic instant-state case
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
void LraViHelper<ValueType, ComponentType, TransitionsType>::prepareSolversAndMultipliers(const Environment& env, storm::solver::OptimizationDirection const* dir) {
_TsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _TsTransitions);
if (_hasInstantStates) {
if (_IsTransitions.getNonzeroEntryCount() > 0) {
// Set-up a solver for transitions within instant states
_IsSolverEnv = std::make_unique<storm::Environment>(env);
if (env.solver().isForceSoundness()) {
// To get correct results, the inner equation systems are solved exactly.
// TODO investigate how an error would propagate
_IsSolverEnv->solver().setForceExact(true);
}
// If the instant transitions are acyclic, dedicated (faster) solution methods can be used.
bool isAcyclic = !storm::utility::graph::hasCycle(_IsTransitions);
if (isAcyclic) {
STORM_LOG_INFO("Instant transitions are acyclic.");
if (_IsSolverEnv->solver().minMax().isMethodSetFromDefault()) {
_IsSolverEnv->solver().minMax().setMethod(storm::solver::MinMaxMethod::Acyclic);
}
if (_IsSolverEnv->solver().isLinearEquationSolverTypeSetFromDefaultValue()) {
_IsSolverEnv->solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Acyclic);
}
}
if (nondetIs()) {
storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> factory;
_NondetIsSolver = factory.create(*_IsSolverEnv, _IsTransitions);
_NondetIsSolver->setHasUniqueSolution(true); // Assume non-zeno MA
_NondetIsSolver->setHasNoEndComponents(true); // assume non-zeno MA
_NondetIsSolver->setCachingEnabled(true);
auto req = _NondetIsSolver->getRequirements(*_IsSolverEnv, *dir);
req.clearUniqueSolution();
if (isAcyclic) {
req.clearAcyclic();
}
// Computing a priori lower/upper bounds is not particularly easy, as there might be selfloops with high probabilities
// Which accumulate a lot of reward. Moreover, the right-hand-side of the equation system changes dynamically.
STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "The solver requirement " << req.getEnabledRequirementsAsString() << " has not been cleared.");
_NondetIsSolver->setRequirementsChecked(true);
} else {
storm::solver::GeneralLinearEquationSolverFactory<ValueType> factory;
if (factory.getEquationProblemFormat(*_IsSolverEnv) != storm::solver::LinearEquationSolverProblemFormat::FixedPointSystem) {
// We need to convert the transition matrix connecting instant states
// TODO: This could have been done already during construction of the matrix.
// Insert diagonal entries.
storm::storage::SparseMatrix<ValueType> converted(_IsTransitions, true);
// Compute A' = 1-A
converted.convertToEquationSystem();
STORM_LOG_WARN("The selected equation solver requires to create a temporary " << converted.getDimensionsAsString());
// Note that the solver has ownership of the converted matrix.
_DetIsSolver = factory.create(*_IsSolverEnv, std::move(converted));
} else {
_DetIsSolver = factory.create(*_IsSolverEnv, _IsTransitions);
}
_DetIsSolver->setCachingEnabled(true);
auto req = _DetIsSolver->getRequirements(*_IsSolverEnv);
if (isAcyclic) {
req.clearAcyclic();
}
// A priori lower/upper bounds are hard (see MinMax version of this above)
STORM_LOG_THROW(!req.hasEnabledCriticalRequirement(), storm::exceptions::UnmetRequirementException, "The solver requirement " << req.getEnabledRequirementsAsString() << " has not been cleared.");
}
}
// Set up multipliers for transitions connecting timed and instant states
_TsToIsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _TsToIsTransitions);
_IsToTsMultiplier = storm::solver::MultiplierFactory<ValueType>().create(env, _IsToTsTransitions);
}
}
/*!
 * Translates component-local choice indices into state-local choice indices of the input model.
 * States excluded via the two flags are not represented in localMecChoices and get choice 0.
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
void LraViHelper<ValueType, ComponentType, TransitionsType>::setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToTimedStates, bool setChoiceZeroToInstantStates) const {
    uint64_t nextLocalChoice = 0;
    for (auto const& element : _component) {
        uint64_t const modelState = getComponentElementState(element);
        bool const forceChoiceZero = isTimedState(modelState) ? setChoiceZeroToTimedStates : setChoiceZeroToInstantStates;
        if (forceChoiceZero) {
            // This kind of state is not covered by localMecChoices; default it to the first choice.
            choices[modelState] = 0;
            continue;
        }
        uint64_t const localChoice = localMecChoices[nextLocalChoice];
        ++nextLocalChoice;
        STORM_LOG_ASSERT(localChoice < getComponentElementChoiceCount(element), "The selected choice does not seem to exist.");
        uint64_t const globalChoice = *(getComponentElementChoicesBegin(element) + localChoice);
        // Convert the global row index into an offset within the state's row group.
        choices[modelState] = globalChoice - _transitionMatrix.getRowGroupIndices()[modelState];
    }
    STORM_LOG_ASSERT(nextLocalChoice == localMecChoices.size(), "Did not traverse all component states.");
}
/*!
 * Performs a single uniformization/value-iteration step: first a multiplication step among the
 * timed states, then (if present) the aggregated effect of arbitrarily many instant steps is added.
 * @param dir optimization direction; required iff the component has nondeterminism
 * @param choices if not null, the choices taken in this step are recorded in terms of the input model
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
void LraViHelper<ValueType, ComponentType, TransitionsType>::performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
STORM_LOG_ASSERT(!((nondetTs() || nondetIs()) && dir == nullptr), "No optimization direction provided for model with nondeterminism");
// Initialize value vectors, multiplers, and solver if this has not been done, yet
if (!_TsMultiplier) {
prepareSolversAndMultipliers(env, dir);
}
// Compute new x values for the timed states
// Flip what is new and what is old
_Tsx1IsCurrent = !_Tsx1IsCurrent;
// At this point, xOld() points to what has been computed in the most recent call of performIterationStep (initially, this is the 0-vector).
// The result of this ongoing computation will be stored in xNew()
// Compute the values obtained by a single uniformization step between timed states only
if (nondetTs()) {
if (choices == nullptr) {
_TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew());
} else {
// Also keep track of the choices made.
std::vector<uint64_t> tsChoices(_TsTransitions.getRowGroupCount());
_TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), &tsChoices);
// Note that nondeterminism within the timed states means that there can not be instant states (We either have MDPs or MAs)
// Hence, in this branch we don't have to care for choices at instant states.
STORM_LOG_ASSERT(!_hasInstantStates, "Nondeterministic timed states are only supported if there are no instant states.");
setInputModelChoices(*choices, tsChoices);
}
} else {
_TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, xNew());
}
if (_hasInstantStates) {
// Add the values obtained by taking a single uniformization step that leads to an instant state followed by arbitrarily many instant steps.
// First compute the total values when taking arbitrarily many instant transitions (in no time)
if (_NondetIsSolver) {
// We might need to track the optimal choices.
if (choices == nullptr) {
_NondetIsSolver->solveEquations(*_IsSolverEnv, *dir, _Isx, _Isb);
} else {
_NondetIsSolver->setTrackScheduler();
_NondetIsSolver->solveEquations(*_IsSolverEnv, *dir, _Isx, _Isb);
setInputModelChoices(*choices, _NondetIsSolver->getSchedulerChoices(), true);
}
} else if (_DetIsSolver) {
_DetIsSolver->solveEquations(*_IsSolverEnv, _Isx, _Isb);
} else {
STORM_LOG_ASSERT(_IsTransitions.getNonzeroEntryCount() == 0, "If no solver was initialized, an empty matrix would have been expected.");
if (nondetIs()) {
// No transitions among instant states: just pick the best choice value per state.
if (choices == nullptr) {
storm::utility::vector::reduceVectorMinOrMax(*dir, _Isb, _Isx, _IsTransitions.getRowGroupIndices());
} else {
std::vector<uint64_t> psChoices(_IsTransitions.getRowGroupCount());
storm::utility::vector::reduceVectorMinOrMax(*dir, _Isb, _Isx, _IsTransitions.getRowGroupIndices(), &psChoices);
setInputModelChoices(*choices, psChoices, true);
}
} else {
// For deterministic instant states, there is nothing to reduce, i.e., we could just set _Isx = _Isb.
// For efficiency reasons, we do a swap instead:
_Isx.swap(_Isb);
// Note that at this point we have changed the contents of _Isb, but they will be overwritten anyway.
if (choices) {
// Set choice 0 to all states.
setInputModelChoices(*choices, {}, true, true);
}
}
}
// Now add the (weighted) values of the instant states to the values of the timed states.
_TsToIsMultiplier->multiply(env, _Isx, &xNew(), xNew());
}
}
/*!
 * Checks whether the iteration has converged using the span seminorm of the difference between
 * the current and the previous iterate (max difference minus min difference), and computes the
 * current LRA estimate as the average of the two extreme differences (rescaled by the uniformization rate).
 * @param relative whether the termination criterion is relative to the current minimal difference
 * @param precision the required precision
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
typename LraViHelper<ValueType, ComponentType, TransitionsType>::ConvergenceCheckResult LraViHelper<ValueType, ComponentType, TransitionsType>::checkConvergence(bool relative, ValueType precision) const {
STORM_LOG_ASSERT(_TsMultiplier, "tried to check for convergence without doing an iteration first.");
// All values are scaled according to the uniformizationRate.
// We need to 'revert' this scaling when computing the absolute precision.
// However, for relative precision, the scaling cancels out.
ValueType threshold = relative ? precision : ValueType(precision / _uniformizationRate);
ConvergenceCheckResult res = { true, storm::utility::one<ValueType>() };
// Now check whether the currently produced results are precise enough
STORM_LOG_ASSERT(threshold > storm::utility::zero<ValueType>(), "Did not expect a non-positive threshold.");
auto x1It = xOld().begin();
auto x1Ite = xOld().end();
auto x2It = xNew().begin();
// Initialize both extremes with the difference at the first entry.
ValueType maxDiff = (*x2It - *x1It);
ValueType minDiff = maxDiff;
// The difference between maxDiff and minDiff is zero at this point. Thus, it doesn't make sense to check the threshold now.
for (++x1It, ++x2It; x1It != x1Ite; ++x1It, ++x2It) {
ValueType diff = (*x2It - *x1It);
// Potentially update maxDiff or minDiff
// If neither extreme changed, the span is unchanged and re-checking the threshold is pointless.
bool skipCheck = false;
if (maxDiff < diff) {
maxDiff = diff;
} else if (minDiff > diff) {
minDiff = diff;
} else {
skipCheck = true;
}
// Check convergence
if (!skipCheck && (maxDiff - minDiff) > (relative ? (threshold * minDiff) : threshold)) {
res.isPrecisionAchieved = false;
break;
}
}
// Compute the average of the maximal and the minimal difference.
ValueType avgDiff = (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0));
// "Undo" the scaling of the values
res.currentValue = avgDiff * _uniformizationRate;
return res;
}
/*!
 * Prepares the next iteration: shifts the current iterate by a reference value to keep the
 * entries small (numerical stability) and refreshes the instant-state right-hand side.
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
void LraViHelper<ValueType, ComponentType, TransitionsType>::prepareNextIteration(Environment const& env) {
    // To avoid large (and numerically unstable) x-values, shift all entries by the first one.
    auto& currentValues = xNew();
    ValueType const offset = currentValues.front();
    for (auto& value : currentValues) {
        value -= offset;
    }
    if (_hasInstantStates) {
        // Update the RHS of the equation system for the instant states by taking the new values of timed states into account.
        STORM_LOG_ASSERT(!nondetTs(), "Nondeterministic timed states not expected when there are also instant states.");
        _IsToTsMultiplier->multiply(env, xNew(), &_IsChoiceValues, _Isb);
    }
}
/*!
 * @return true iff the given input-model state is a timed state.
 * If the model has no instant states at all, every state counts as timed.
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
bool LraViHelper<ValueType, ComponentType, TransitionsType>::isTimedState(uint64_t const& inputModelStateIndex) const {
    STORM_LOG_ASSERT(!_hasInstantStates || _timedStates != nullptr, "Model has instant states but no partition into timed and instant states is given.");
    STORM_LOG_ASSERT(!_hasInstantStates || inputModelStateIndex < _timedStates->size(), "Unable to determine whether state " << inputModelStateIndex << " is timed.");
    if (_hasInstantStates) {
        return _timedStates->get(inputModelStateIndex);
    }
    return true;
}
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
std::vector<ValueType>& LraViHelper<ValueType, ComponentType, TransitionsType>::xNew() {
return _Tsx1IsCurrent ? _Tsx1 : _Tsx2;
}
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
std::vector<ValueType> const& LraViHelper<ValueType, ComponentType, TransitionsType>::xNew() const {
return _Tsx1IsCurrent ? _Tsx1 : _Tsx2;
}
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
std::vector<ValueType>& LraViHelper<ValueType, ComponentType, TransitionsType>::xOld() {
    // Yields the buffer that is NOT current, i.e. the previous iteration's values.
    if (_Tsx1IsCurrent) {
        return _Tsx2;
    }
    return _Tsx1;
}
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
std::vector<ValueType> const& LraViHelper<ValueType, ComponentType, TransitionsType>::xOld() const {
    // Const counterpart of xOld(): the buffer holding the previous iteration's values.
    if (_Tsx1IsCurrent) {
        return _Tsx2;
    }
    return _Tsx1;
}
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
bool LraViHelper<ValueType, ComponentType, TransitionsType>::nondetTs() const {
    // Timed states carry nondeterminism exactly for the MDP-like transition type.
    return LraViTransitionsType::NondetTsNoIs == TransitionsType;
}
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
bool LraViHelper<ValueType, ComponentType, TransitionsType>::nondetIs() const {
    // Only the Markov-automaton-like transition type has nondeterministic instant states.
    return LraViTransitionsType::DetTsNondetIs == TransitionsType;
}
// Explicit template instantiations: MECs are used as the component type for models with potential
// nondeterminism (MDP- and MA-like transition types), BSCCs for purely deterministic models.
template class LraViHelper<double, storm::storage::MaximalEndComponent, LraViTransitionsType::NondetTsNoIs>;
template class LraViHelper<storm::RationalNumber, storm::storage::MaximalEndComponent, LraViTransitionsType::NondetTsNoIs>;
template class LraViHelper<double, storm::storage::MaximalEndComponent, LraViTransitionsType::DetTsNondetIs>;
template class LraViHelper<storm::RationalNumber, storm::storage::MaximalEndComponent, LraViTransitionsType::DetTsNondetIs>;
template class LraViHelper<double, storm::storage::StronglyConnectedComponent, LraViTransitionsType::DetTsNoIs>;
template class LraViHelper<storm::RationalNumber, storm::storage::StronglyConnectedComponent, LraViTransitionsType::DetTsNoIs>;
}
}
}
}

149
src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h

@ -0,0 +1,149 @@
#pragma once
#include "storm/storage/SparseMatrix.h"
#include "storm/solver/LinearEquationSolver.h"
#include "storm/solver/MinMaxLinearEquationSolver.h"
#include "storm/solver/Multiplier.h"
namespace storm {
class Environment;
namespace modelchecker {
namespace helper {
namespace internal {
/*!
 * Specifies different kinds of transition types with which this helper can be used.
 * Ts means timed states (cf. Markovian states in a Markov Automaton) and Is means instant states (cf. probabilistic states in a Markov automaton).
 * The way to think about this is that time can only pass in a timed state, whereas transitions emerging from an instant state fire immediately.
 * In an MDP, all states are seen as timed.
 * In this enum, we also specify whether there can be a nondeterministic choice at the corresponding states or not.
 */
enum class LraViTransitionsType {
DetTsNoIs, ///< deterministic choice at timed states, no instant states (as in DTMCs and CTMCs)
DetTsNondetIs, ///< deterministic choice at timed states, nondeterministic choice at instant states (as in Markov Automata)
DetTsDetIs, ///< deterministic choice at timed states, deterministic choice at instant states (as in Markov Automata without any nondeterminism)
NondetTsNoIs ///< nondeterministic choice at timed states, no instant states (as in MDPs)
};
/*!
 * Helper class that performs iterations of the value iteration method.
 * The template parameters ComponentType and TransitionsType make this work for various model types.
 *
 * @see Ashok et al.: Value Iteration for Long-Run Average Reward in Markov Decision Processes (CAV'17), https://doi.org/10.1007/978-3-319-63387-9_10
 * @see Butkova, Wimmer, Hermanns: Long-Run Rewards for Markov Automata (TACAS'17), https://doi.org/10.1007/978-3-662-54580-5_11
 *
 * @tparam ValueType The type of a value
 * @tparam ComponentType The type of a 'bottom component' of the model (e.g. a BSCC (for purely deterministic models) or a MEC (for models with potential nondeterminism)).
 * @tparam TransitionsType The kind of transitions that occur.
 */
template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
class LraViHelper {
public:
/// Function mapping from indices to values
typedef std::function<ValueType(uint64_t)> ValueGetter;
/*!
 * Initializes a new VI helper for the provided MEC or BSCC
 * @param component the MEC or BSCC
 * @param transitionMatrix The transition matrix of the input model
 * @param aperiodicFactor a non-zero factor that is used for making the MEC aperiodic (by adding selfloops to each state)
 * @param timedStates States in which time can pass (Markovian states in a Markov automaton). If nullptr, it is assumed that all states are timed states
 * @param exitRates The exit rates of the timed states (relevant for continuous time models). If nullptr, all rates are assumed to be 1 (which corresponds to a discrete time model)
 * @note All indices and vectors must be w.r.t. the states as described by the provided transition matrix
 */
LraViHelper(ComponentType const& component, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, ValueType const& aperiodicFactor, storm::storage::BitVector const* timedStates = nullptr, std::vector<ValueType> const* exitRates = nullptr);
/*!
 * Performs value iteration with the given state- and action values.
 * @param env The environment, containing information on the precision of this computation.
 * @param stateValueGetter function that returns for each state index (w.r.t. the input transition matrix) the reward for staying in state. Will only be called for timed states.
 * @param actionValueGetter function that returns for each global choice index (w.r.t. the input transition matrix) the reward for taking that choice
 * @param exitRates (as in the constructor)
 * @param dir Optimization direction. Must be not nullptr in case of nondeterminism
 * @param choices if not nullptr, the optimal choices will be inserted in this vector. The vector's size must then be equal to the number of row groups of the input transition matrix.
 * @return The (optimal) long run average value of the specified component.
 * @note it is possible to call this method multiple times with different values. However, other changes to the environment or the optimization direction might not have the expected effect due to caching.
 */
ValueType performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr);
private:
/*!
 * Initializes the value iterations with the provided values.
 * Resets all information from potential previous calls.
 * Must be called before the first call to performIterationStep.
 * @param stateValueGetter Function that returns for each state index (w.r.t. the input transitions) the value (e.g. reward) for that state
 * @param actionValueGetter Function that returns for each global choice index (w.r.t. the input transitions) the value (e.g. reward) for that choice
 */
void initializeNewValues(ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr);
/*!
 * Performs a single iteration step.
 * @param env The environment.
 * @param dir The optimization direction. Has to be given if there is nondeterminism (otherwise it will be ignored)
 * @param choices If given, the optimal choices will be inserted at the appropriate states.
 * Note that these choices will be inserted w.r.t. the original model states/choices, i.e. the size of the vector should match the state-count of the input model
 * @pre when calling this the first time, initializeNewValues must have been called before. Moreover, prepareNextIteration must be called between two calls of this.
 */
void performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr);
/// Auxiliary result of checkConvergence.
struct ConvergenceCheckResult {
bool isPrecisionAchieved; ///< True iff the desired precision has been reached.
ValueType currentValue; ///< The currently computed approximation of the long run average value.
};
/*!
 * Checks whether the currently computed value achieves the desired precision.
 */
ConvergenceCheckResult checkConvergence(bool relative, ValueType precision) const;
/*!
 * Must be called between two calls of performIterationStep.
 */
void prepareNextIteration(Environment const& env);
/// Prepares the necessary solvers and multipliers for doing the iterations.
void prepareSolversAndMultipliers(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr);
/// Translates the locally computed (MEC) choices back to choices w.r.t. the input model.
void setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToMarkovianStates = false, bool setChoiceZeroToProbabilisticStates = false) const;
/// Returns true iff the given state is a timed state
bool isTimedState(uint64_t const& inputModelStateIndex) const;
/// The result for timed states of the most recent iteration
std::vector<ValueType>& xNew();
std::vector<ValueType> const& xNew() const;
/// The result for timed states of the previous iteration
std::vector<ValueType>& xOld();
std::vector<ValueType> const& xOld() const;
/// @return true iff there potentially is a nondeterministic choice at timed states
bool nondetTs() const;
/// @return true iff there potentially is a nondeterministic choice at instant states. Returns false if there are no instant states.
bool nondetIs() const;
ComponentType const& _component; ///< The MEC or BSCC this helper operates on.
storm::storage::SparseMatrix<ValueType> const& _transitionMatrix; ///< Transition matrix of the input model.
storm::storage::BitVector const* _timedStates; // e.g. Markovian states of a Markov automaton.
bool _hasInstantStates;
ValueType _uniformizationRate; ///< Rate used to scale values during the iterations (see checkConvergence).
storm::storage::SparseMatrix<ValueType> _TsTransitions, _TsToIsTransitions, _IsTransitions, _IsToTsTransitions;
std::vector<ValueType> _Tsx1, _Tsx2, _TsChoiceValues; ///< _Tsx1/_Tsx2 are the two iteration buffers for timed states (see xNew/xOld).
bool _Tsx1IsCurrent; ///< True iff _Tsx1 holds the most recent iteration's values (buffers are swapped between iterations).
std::vector<ValueType> _Isx, _Isb, _IsChoiceValues;
std::unique_ptr<storm::solver::Multiplier<ValueType>> _TsMultiplier, _TsToIsMultiplier, _IsToTsMultiplier;
std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> _NondetIsSolver;
std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> _DetIsSolver;
std::unique_ptr<storm::Environment> _IsSolverEnv;
};
}
}
}
}

46
src/storm/modelchecker/helper/utility/SetInformationFromCheckTask.h

@ -0,0 +1,46 @@
#pragma once
#include "storm/modelchecker/CheckTask.h"
namespace storm {
namespace modelchecker {
namespace helper {
/*!
 * Forwards relevant information stored in the given CheckTask to the given helper.
 * For nondeterministic models this covers the relevant states, the value threshold,
 * the optimization direction, and whether a scheduler shall be produced.
 */
template <typename HelperType, typename FormulaType, typename ModelType>
void setInformationFromCheckTaskNondeterministic(HelperType& helper, storm::modelchecker::CheckTask<FormulaType, typename ModelType::ValueType> const& checkTask, ModelType const& model) {
    // If only the initial states are relevant, restrict the computation to those.
    if (checkTask.isOnlyInitialStatesRelevantSet()) {
        helper.setRelevantStates(model.getInitialStates());
    }
    // A threshold against which the computed result will be compared.
    if (checkTask.isBoundSet()) {
        helper.setValueThreshold(checkTask.getBoundComparisonType(), checkTask.getBoundThreshold());
    }
    // Whether to minimize or maximize.
    if (checkTask.isOptimizationDirectionSet()) {
        helper.setOptimizationDirection(checkTask.getOptimizationDirection());
    }
    // Whether a scheduler shall be produced.
    helper.setProduceScheduler(checkTask.isProduceSchedulersSet());
}
/*!
 * Forwards relevant information stored in the given CheckTask to the given helper.
 * For deterministic models this covers the relevant states and the value threshold.
 */
template <typename HelperType, typename FormulaType, typename ModelType>
void setInformationFromCheckTaskDeterministic(HelperType& helper, storm::modelchecker::CheckTask<FormulaType, typename ModelType::ValueType> const& checkTask, ModelType const& model) {
    // If only the initial states are relevant, restrict the computation to those.
    if (checkTask.isOnlyInitialStatesRelevantSet()) {
        helper.setRelevantStates(model.getInitialStates());
    }
    // A threshold against which the computed result will be compared.
    if (checkTask.isBoundSet()) {
        helper.setValueThreshold(checkTask.getBoundComparisonType(), checkTask.getBoundThreshold());
    }
}
}
}
}

46
src/storm/modelchecker/helper/utility/SetInformationFromOtherHelper.h

@ -0,0 +1,46 @@
#pragma once
#include "storm/modelchecker/CheckTask.h"
namespace storm {
namespace modelchecker {
namespace helper {
/*!
 * Forwards relevant information stored in another helper to the given helper.
 * For nondeterministic models this covers the relevant states, the value threshold,
 * the optimization direction, and whether a scheduler shall be produced.
 * NOTE(review): uses std::function — presumably <functional> is pulled in transitively via CheckTask.h; confirm.
 */
template <typename TargetHelperType, typename SourceHelperType>
void setInformationFromOtherHelperNondeterministic(TargetHelperType& targetHelper, SourceHelperType const& sourceHelperType, std::function<typename TargetHelperType::StateSet(typename SourceHelperType::StateSet const&)> const& stateSetTransformer) {
    // Translate the source helper's relevant states into the target helper's state space.
    if (sourceHelperType.hasRelevantStates()) {
        targetHelper.setRelevantStates(stateSetTransformer(sourceHelperType.getRelevantStates()));
    }
    // Forward the threshold, converting the value to the target helper's value type.
    if (sourceHelperType.isValueThresholdSet()) {
        targetHelper.setValueThreshold(sourceHelperType.getValueThresholdComparisonType(), storm::utility::convertNumber<typename TargetHelperType::ValueType>(sourceHelperType.getValueThresholdValue()));
    }
    // Whether to minimize or maximize.
    if (sourceHelperType.isOptimizationDirectionSet()) {
        targetHelper.setOptimizationDirection(sourceHelperType.getOptimizationDirection());
    }
    // Whether a scheduler shall be produced.
    targetHelper.setProduceScheduler(sourceHelperType.isProduceSchedulerSet());
}
/*!
 * Forwards relevant information stored in another helper to the given helper.
 * For deterministic models this covers the relevant states and the value threshold.
 */
template <typename TargetHelperType, typename SourceHelperType>
void setInformationFromOtherHelperDeterministic(TargetHelperType& targetHelper, SourceHelperType const& sourceHelperType, std::function<typename TargetHelperType::StateSet(typename SourceHelperType::StateSet const&)> const& stateSetTransformer) {
    // Translate the source helper's relevant states into the target helper's state space.
    if (sourceHelperType.hasRelevantStates()) {
        targetHelper.setRelevantStates(stateSetTransformer(sourceHelperType.getRelevantStates()));
    }
    // Forward the threshold, converting the value to the target helper's value type.
    if (sourceHelperType.isValueThresholdSet()) {
        targetHelper.setValueThreshold(sourceHelperType.getValueThresholdComparisonType(), storm::utility::convertNumber<typename TargetHelperType::ValueType>(sourceHelperType.getValueThresholdValue()));
    }
}
}
}
}

12
src/storm/modelchecker/prctl/HybridDtmcPrctlModelChecker.cpp

@ -2,6 +2,8 @@
#include "storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h"
#include "storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/HybridInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
#include "storm/storage/dd/Odd.h"
#include "storm/storage/dd/DdManager.h"
@ -113,20 +115,24 @@ namespace storm {
return storm::modelchecker::helper::HybridDtmcPrctlHelper<DdType, ValueType>::computeReachabilityTimes(env, this->getModel(), this->getModel().getTransitionMatrix(), subResult.getTruthValuesVector(), checkTask.isQualitativeSet());
}
template<typename ModelType>
std::unique_ptr<CheckResult> HybridDtmcPrctlModelChecker<ModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {
storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
SymbolicQualitativeCheckResult<DdType> const& subResult = subResultPointer->asSymbolicQualitativeCheckResult<DdType>();
return storm::modelchecker::helper::HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageProbabilities(env, this->getModel(), this->getModel().getTransitionMatrix(), subResult.getTruthValuesVector());
storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), this->getModel().getTransitionMatrix());
storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
return helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
}
template<typename ModelType>
std::unique_ptr<CheckResult> HybridDtmcPrctlModelChecker<ModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
return storm::modelchecker::helper::HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageRewards(env, this->getModel(), this->getModel().getTransitionMatrix(), rewardModel.get());
storm::modelchecker::helper::HybridInfiniteHorizonHelper<ValueType, DdType, false> helper(this->getModel(), this->getModel().getTransitionMatrix());
storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
return helper.computeLongRunAverageRewards(env, rewardModel.get());
}
template class HybridDtmcPrctlModelChecker<storm::models::symbolic::Dtmc<storm::dd::DdType::CUDD, double>>;

21
src/storm/modelchecker/prctl/SparseDtmcPrctlModelChecker.cpp

@ -13,6 +13,8 @@
#include "storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h"
#include "storm/modelchecker/csl/helper/SparseCtmcCslHelper.h"
#include "storm/modelchecker/prctl/helper/rewardbounded/QuantileHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseDeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
#include "storm/logic/FragmentSpecification.h"
@ -166,18 +168,25 @@ namespace storm {
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {
storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageProbabilities<ValueType>(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), subResult.getTruthValuesVector(), nullptr);
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
}
template<typename SparseDtmcModelType>
std::unique_ptr<CheckResult> SparseDtmcPrctlModelChecker<SparseDtmcModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseCtmcCslHelper::computeLongRunAverageRewards<ValueType>(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), rewardModel.get(), nullptr);
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
storm::modelchecker::helper::SparseDeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
storm::modelchecker::helper::setInformationFromCheckTaskDeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
}
template<typename SparseDtmcModelType>

26
src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp

@ -15,6 +15,8 @@
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h"
#include "storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.h"
#include "storm/modelchecker/helper/utility/SetInformationFromCheckTask.h"
#include "storm/modelchecker/prctl/helper/rewardbounded/QuantileHelper.h"
#include "storm/modelchecker/multiobjective/multiObjectiveModelChecking.h"
@ -220,14 +222,18 @@ namespace storm {
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageProbabilities(Environment const& env, CheckTask<storm::logic::StateFormula, ValueType> const& checkTask) {
storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
storm::logic::StateFormula const& stateFormula = checkTask.getFormula();
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), subResult.getTruthValuesVector(), checkTask.isProduceSchedulersSet());
std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageProbabilities(env, subResult.getTruthValuesVector());
std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
if (checkTask.isProduceSchedulersSet()) {
result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
}
return result;
}
@ -236,10 +242,12 @@ namespace storm {
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isProduceSchedulersSet());
std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
storm::modelchecker::helper::SparseNondeterministicInfiniteHorizonHelper<ValueType> helper(this->getModel().getTransitionMatrix());
storm::modelchecker::helper::setInformationFromCheckTaskNondeterministic(helper, checkTask, this->getModel());
auto values = helper.computeLongRunAverageRewards(env, rewardModel.get());
std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(values)));
if (checkTask.isProduceSchedulersSet()) {
result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::make_unique<storm::storage::Scheduler<ValueType>>(helper.extractScheduler()));
}
return result;
}

26
src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.cpp

@ -344,32 +344,6 @@ namespace storm {
return computeReachabilityRewards(env, model, transitionMatrix, rewardModel, targetStates, qualitative);
}
template<storm::dd::DdType DdType, typename ValueType>
std::unique_ptr<CheckResult> HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates) {
// Create ODD for the translation.
storm::utility::Stopwatch conversionWatch(true);
storm::dd::Odd odd = model.getReachableStates().createOdd();
storm::storage::SparseMatrix<ValueType> explicitProbabilityMatrix = model.getTransitionMatrix().toMatrix(odd, odd);
conversionWatch.stop();
STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
// Compute the LRA probabilities on the explicit representation via the sparse DTMC helper.
std::vector<ValueType> result = storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(), explicitProbabilityMatrix, targetStates.toVector(odd));
// Wrap the explicit result values back into a hybrid check result (all states are explicit here).
return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
}
template<storm::dd::DdType DdType, typename ValueType>
std::unique_ptr<CheckResult> HybridDtmcPrctlHelper<DdType, ValueType>::computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, RewardModelType const& rewardModel) {
// Create ODD for the translation.
storm::utility::Stopwatch conversionWatch(true);
storm::dd::Odd odd = model.getReachableStates().createOdd();
storm::storage::SparseMatrix<ValueType> explicitProbabilityMatrix = model.getTransitionMatrix().toMatrix(odd, odd);
conversionWatch.stop();
STORM_LOG_INFO("Converting symbolic matrix/vector to explicit representation done in " << conversionWatch.getTimeInMilliseconds() << "ms.");
// Compute the LRA rewards on the explicit representation, translating the symbolic total reward vector as well.
std::vector<ValueType> result = storm::modelchecker::helper::SparseDtmcPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(), explicitProbabilityMatrix, rewardModel.getTotalRewardVector(model.getTransitionMatrix(), model.getColumnVariables()).toVector(odd));
// Wrap the explicit result values back into a hybrid check result (all states are explicit here).
return std::unique_ptr<CheckResult>(new HybridQuantitativeCheckResult<DdType, ValueType>(model.getReachableStates(), model.getManager().getBddZero(), model.getManager().template getAddZero<ValueType>(), model.getReachableStates(), std::move(odd), std::move(result)));
}
template class HybridDtmcPrctlHelper<storm::dd::DdType::CUDD, double>;
template class HybridDtmcPrctlHelper<storm::dd::DdType::Sylvan, double>;

4
src/storm/modelchecker/prctl/helper/HybridDtmcPrctlHelper.h

@ -39,10 +39,6 @@ namespace storm {
static std::unique_ptr<CheckResult> computeReachabilityTimes(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates, bool qualitative);
static std::unique_ptr<CheckResult> computeLongRunAverageProbabilities(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, storm::dd::Bdd<DdType> const& targetStates);
static std::unique_ptr<CheckResult> computeLongRunAverageRewards(Environment const& env, storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Add<DdType, ValueType> const& transitionMatrix, RewardModelType const& rewardModel);
};
}

15
src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.cpp

@ -531,21 +531,6 @@ namespace storm {
return result;
}
template<typename ValueType, typename RewardModelType>
std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& psiStates) {
// Delegate to the CTMC helper; passing nullptr for the exit rates treats the model as discrete-time.
return SparseCtmcCslHelper::computeLongRunAverageProbabilities<ValueType>(env, std::move(goal), transitionMatrix, psiStates, nullptr);
}
template<typename ValueType, typename RewardModelType>
std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel) {
// Delegate to the CTMC helper; passing nullptr for the exit rates treats the model as discrete-time.
return SparseCtmcCslHelper::computeLongRunAverageRewards<ValueType, RewardModelType>(env, std::move(goal), transitionMatrix, rewardModel, nullptr);
}
template<typename ValueType, typename RewardModelType>
std::vector<ValueType> SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& stateRewards) {
// Overload taking a plain state-reward vector; delegates to the CTMC helper (nullptr exit rates = discrete time).
return SparseCtmcCslHelper::computeLongRunAverageRewards<ValueType>(env, std::move(goal), transitionMatrix, stateRewards, nullptr);
}
template<typename ValueType, typename RewardModelType>
typename SparseDtmcPrctlHelper<ValueType, RewardModelType>::BaierTransformedModel SparseDtmcPrctlHelper<ValueType, RewardModelType>::computeBaierTransformation(Environment const& env, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, boost::optional<std::vector<ValueType>> const& stateRewards) {

6
src/storm/modelchecker/prctl/helper/SparseDtmcPrctlHelper.h

@ -52,12 +52,6 @@ namespace storm {
static std::vector<ValueType> computeReachabilityTimes(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, bool qualitative, ModelCheckerHint const& hint = ModelCheckerHint());
static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::BitVector const& psiStates);
static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel);
static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& stateRewards);
static std::vector<ValueType> computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, bool qualitative);
static std::vector<ValueType> computeConditionalRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates, bool qualitative);

467
src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp

@ -39,7 +39,6 @@
#include "storm/transformer/EndComponentEliminator.h"
#include "storm/environment/solver/MinMaxSolverEnvironment.h"
#include "storm/environment/solver/LongRunAverageSolverEnvironment.h"
#include "storm/exceptions/InvalidStateException.h"
#include "storm/exceptions/InvalidPropertyException.h"
@ -1208,464 +1207,6 @@ namespace storm {
return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
}
template<typename ValueType>
MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler) {
    // Computes the long-run average probability of being in a psi state, by reducing the
    // problem to a long-run average reward computation (reward 1 in psi states, 0 elsewhere).
    //
    // Trivial cases: with no psi states the LRA probability is 0 in every state; with only
    // psi states it is 1 in every state. In both cases every scheduler is optimal, so when a
    // scheduler was requested we return a (default-initialized) scheduler rather than a null
    // pointer, which callers asking for produceScheduler would not expect.
    if (psiStates.empty() || psiStates.full()) {
        ValueType value = psiStates.empty() ? storm::utility::zero<ValueType>() : storm::utility::one<ValueType>();
        std::vector<ValueType> result(transitionMatrix.getRowGroupCount(), value);
        std::unique_ptr<storm::storage::Scheduler<ValueType>> scheduler;
        if (produceScheduler) {
            // An arbitrary (default) choice in every state is optimal here.
            scheduler = std::make_unique<storm::storage::Scheduler<ValueType>>(transitionMatrix.getRowGroupCount());
        }
        return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
    }
    // Reduce long run average probabilities to long run average rewards by
    // building a reward model assigning one reward to every psi state.
    std::vector<ValueType> stateRewards(psiStates.size(), storm::utility::zero<ValueType>());
    storm::utility::vector::setVectorValues(stateRewards, psiStates, storm::utility::one<ValueType>());
    storm::models::sparse::StandardRewardModel<ValueType> rewardModel(std::move(stateRewards));
    return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel, produceScheduler);
}
template<typename ValueType>
template<typename RewardModelType>
// Computes the (min/max) long-run average reward for every state of the MDP.
// Approach: (1) decompose the MDP into maximal end components (MECs) and compute the LRA
// value of each MEC in isolation, then (2) build a stochastic shortest path (SSP) problem
// over the non-MEC states plus one auxiliary state per MEC, where each auxiliary state
// offers the MEC's LRA value as a one-shot reward, and solve it with a min-max solver.
// Optionally extracts an optimizing (memoryless) scheduler for the original model.
MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler) {
    uint64_t numberOfStates = transitionMatrix.getRowGroupCount();
    std::unique_ptr<storm::storage::Scheduler<ValueType>> scheduler;
    if (produceScheduler) {
        scheduler = std::make_unique<storm::storage::Scheduler<ValueType>>(numberOfStates);
    }
    // Start by decomposing the MDP into its MECs.
    storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);
    // Get some data members for convenience.
    std::vector<uint_fast64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
    ValueType zero = storm::utility::zero<ValueType>();
    // First calculate LRA for the Maximal End Components.
    storm::storage::BitVector statesInMecs(numberOfStates);
    std::vector<uint_fast64_t> stateToMecIndexMap(transitionMatrix.getColumnCount());
    std::vector<ValueType> lraValuesForEndComponents(mecDecomposition.size(), zero);
    auto underlyingSolverEnvironment = env;
    if (env.solver().isForceSoundness()) {
        // For sound computations, the error in the MECs plus the error in the remaining system should be less than the user defined precision.
        underlyingSolverEnvironment.solver().lra().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
        underlyingSolverEnvironment.solver().minMax().setPrecision(env.solver().lra().getPrecision() / storm::utility::convertNumber<storm::RationalNumber>(2));
        underlyingSolverEnvironment.solver().minMax().setRelativeTerminationCriterion(env.solver().lra().getRelativeTerminationCriterion());
    }
    for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
        storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];
        // Solving a MEC also fixes the in-MEC scheduler choices (if a scheduler is produced).
        lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec, scheduler);
        // Gather information for later use.
        for (auto const& stateChoicesPair : mec) {
            statesInMecs.set(stateChoicesPair.first);
            stateToMecIndexMap[stateChoicesPair.first] = currentMecIndex;
        }
    }
    // For fast transition rewriting, we build some auxiliary data structures.
    storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
    // In the SSP, the non-MEC states come first; the auxiliary MEC states follow.
    uint_fast64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits();
    uint_fast64_t lastStateNotInMecs = 0;
    uint_fast64_t numberOfStatesNotInMecs = 0;
    // statesNotInMecsBeforeIndex[s] = number of non-MEC states with index < s,
    // i.e. the SSP index of non-MEC state s.
    std::vector<uint_fast64_t> statesNotInMecsBeforeIndex;
    statesNotInMecsBeforeIndex.reserve(numberOfStates);
    for (auto state : statesNotContainedInAnyMec) {
        while (lastStateNotInMecs <= state) {
            statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs);
            ++lastStateNotInMecs;
        }
        ++numberOfStatesNotInMecs;
    }
    // Finally, we are ready to create the SSP matrix and right-hand side of the SSP.
    std::vector<ValueType> b;
    uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();
    typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, false, true, numberOfSspStates);
    // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications).
    uint_fast64_t currentChoice = 0;
    for (auto state : statesNotContainedInAnyMec) {
        sspMatrixBuilder.newRowGroup(currentChoice);
        for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) {
            std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
            // Choices outside of MECs collect no reward in the SSP.
            b.push_back(storm::utility::zero<ValueType>());
            for (auto element : transitionMatrix.getRow(choice)) {
                if (statesNotContainedInAnyMec.get(element.getColumn())) {
                    // If the target state is not contained in an MEC, we can copy over the entry.
                    sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
                } else {
                    // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
                    // so that we are able to write the cumulative probability to the MEC into the matrix.
                    auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
                }
            }
            // Now insert all (cumulative) probability values that target an MEC.
            for (uint_fast64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) {
                if (!storm::utility::isZero(auxiliaryStateToProbabilityMap[mecIndex])) {
                    sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]);
                }
            }
        }
    }
    std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspMecChoicesToOriginalMap; // for scheduler extraction
    // Now we are ready to construct the choices for the auxiliary states.
    // Each auxiliary state has one row per MEC-leaving choice of any of its states, plus one
    // final "stay" row whose reward is the MEC's LRA value.
    for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
        storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
        sspMatrixBuilder.newRowGroup(currentChoice);
        for (auto const& stateChoicesPair : mec) {
            uint_fast64_t state = stateChoicesPair.first;
            storm::storage::FlatSet<uint_fast64_t> const& choicesInMec = stateChoicesPair.second;
            for (uint_fast64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) {
                // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
                if (choicesInMec.find(choice) == choicesInMec.end()) {
                    std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
                    b.push_back(storm::utility::zero<ValueType>());
                    for (auto element : transitionMatrix.getRow(choice)) {
                        if (statesNotContainedInAnyMec.get(element.getColumn())) {
                            // If the target state is not contained in an MEC, we can copy over the entry.
                            sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
                        } else {
                            // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
                            // so that we are able to write the cumulative probability to the MEC into the matrix.
                            auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
                        }
                    }
                    // Now insert all (cumulative) probability values that target an MEC.
                    for (uint_fast64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) {
                        if (!storm::utility::isZero(auxiliaryStateToProbabilityMap[targetMecIndex])) {
                            sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]);
                        }
                    }
                    if (produceScheduler) {
                        // Remember which original state/choice this SSP row corresponds to
                        // (choice stored as an offset within the state's row group).
                        sspMecChoicesToOriginalMap.emplace_back(state, choice - nondeterministicChoiceIndices[state]);
                    }
                    ++currentChoice;
                }
            }
        }
        // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
        // This row is empty (no successors), so it only accounts for the row count and the rhs entry.
        ++currentChoice;
        b.push_back(lraValuesForEndComponents[mecIndex]);
        if (produceScheduler) {
            // Insert some invalid values to mark the "stay in MEC" row.
            sspMecChoicesToOriginalMap.emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
        }
    }
    // Finalize the matrix and solve the corresponding system of equations.
    storm::storage::SparseMatrix<ValueType> sspMatrix = sspMatrixBuilder.build(currentChoice, numberOfSspStates, numberOfSspStates);
    // Check for requirements of the solver.
    storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
    storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction(), false, produceScheduler);
    // Bounds are provided explicitly below, so the bound requirements are satisfied.
    requirements.clearBounds();
    STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
    std::vector<ValueType> sspResult(numberOfSspStates);
    goal.restrictRelevantValues(statesNotContainedInAnyMec);
    std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = storm::solver::configureMinMaxLinearEquationSolver(underlyingSolverEnvironment, std::move(goal), minMaxLinearEquationSolverFactory, sspMatrix);
    // The SSP values lie between 0 and the largest MEC LRA value.
    solver->setLowerBound(storm::utility::zero<ValueType>());
    solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end()));
    solver->setHasUniqueSolution();
    solver->setHasNoEndComponents();
    solver->setTrackScheduler(produceScheduler);
    solver->setRequirementsChecked();
    solver->solveEquations(underlyingSolverEnvironment, sspResult, b);
    // Prepare result vector.
    std::vector<ValueType> result(numberOfStates, zero);
    // Set the values for states not contained in MECs.
    storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, sspResult);
    // Set the values for all states in MECs.
    for (auto state : statesInMecs) {
        result[state] = sspResult[firstAuxiliaryStateIndex + stateToMecIndexMap[state]];
    }
    if (produceScheduler && solver->hasScheduler()) {
        // Translate result for ssp matrix to original model
        auto const& sspChoices = solver->getSchedulerChoices();
        uint64_t sspState = 0;
        for (auto state : statesNotContainedInAnyMec) {
            scheduler->setChoice(sspChoices[sspState], state);
            ++sspState;
        }
        // The other sspStates correspond to MECS in the original system.
        uint_fast64_t rowOffset = sspMatrix.getRowGroupIndices()[sspState];
        for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
            // Obtain the state and choice of the original model to which the selected choice corresponds.
            // Note: the index arithmetic relies on sspMecChoicesToOriginalMap containing exactly one
            // entry per MEC row (leaving choices plus the final "stay" row), in row order.
            auto const& originalStateChoice = sspMecChoicesToOriginalMap[sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState] - rowOffset];
            // Check if the best choice is to stay in this MEC
            if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
                STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
                // In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMaximalEndComponent.
            } else {
                // The best choice is to leave this MEC via the selected state and choice.
                scheduler->setChoice(originalStateChoice.second, originalStateChoice.first);
                // The remaining states in this MEC need to reach this state with probability 1.
                storm::storage::BitVector exitStateAsBitVector(transitionMatrix.getRowGroupCount(), false);
                exitStateAsBitVector.set(originalStateChoice.first, true);
                storm::storage::BitVector otherStatesAsBitVector(transitionMatrix.getRowGroupCount(), false);
                for (auto const& stateChoices : mecDecomposition[mecIndex]) {
                    if (stateChoices.first != originalStateChoice.first) {
                        otherStatesAsBitVector.set(stateChoices.first, true);
                    }
                }
                // Overwrite the in-MEC choices so that the exit state is reached almost surely.
                storm::utility::graph::computeSchedulerProb1E(otherStatesAsBitVector, transitionMatrix, backwardTransitions, otherStatesAsBitVector, exitStateAsBitVector, *scheduler);
            }
            ++sspState;
        }
        assert(sspState == sspMatrix.getRowGroupCount());
    } else {
        STORM_LOG_ERROR_COND(!produceScheduler, "Requested to produce a scheduler, but no scheduler was generated.");
    }
    return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
}
template<typename ValueType>
template<typename RewardModelType>
// Computes the long-run average reward within a single maximal end component.
// Singleton MECs are handled directly; otherwise the computation is dispatched to the
// LP- or VI-based solver depending on the (possibly adjusted) LRA method setting.
// If a scheduler is given (non-null), the optimal in-MEC choices are written into it.
ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler) {
    // A MEC with a single state needs no equation solving: the LRA value is simply the best
    // immediate reward among the state's choices.
    if (++mec.begin() == mec.end()) {
        uint64_t state = mec.begin()->first;
        auto const& stateChoices = mec.begin()->second;
        auto choiceIt = stateChoices.begin();
        uint_fast64_t bestChoice = *choiceIt;
        ValueType bestValue = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix);
        for (++choiceIt; choiceIt != stateChoices.end(); ++choiceIt) {
            ValueType candidate = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix);
            bool improves = storm::solver::minimize(dir) ? (candidate < bestValue) : (candidate > bestValue);
            if (improves) {
                bestValue = std::move(candidate);
                bestChoice = *choiceIt;
            }
        }
        if (scheduler) {
            // Store the choice as an offset within the state's row group.
            scheduler->setChoice(bestChoice - transitionMatrix.getRowGroupIndices()[state], state);
        }
        return bestValue;
    }
    // Solve the MEC with the method specified in the settings, overriding the default when
    // exactness or soundness is requested.
    storm::solver::LraMethod method = env.solver().lra().getNondetLraMethod();
    bool const methodIsDefault = env.solver().lra().isNondetLraMethodSetFromDefault();
    if (methodIsDefault && (storm::NumberTraits<ValueType>::IsExact || env.solver().isForceExact()) && method != storm::solver::LraMethod::LinearProgramming) {
        STORM_LOG_INFO("Selecting 'LP' as the solution technique for long-run properties to guarantee exact results. If you want to override this, please explicitly specify a different LRA method.");
        method = storm::solver::LraMethod::LinearProgramming;
    } else if (methodIsDefault && env.solver().isForceSoundness() && method != storm::solver::LraMethod::ValueIteration) {
        STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
        method = storm::solver::LraMethod::ValueIteration;
    }
    // Only the VI-based computation can produce a scheduler.
    STORM_LOG_ERROR_COND(scheduler == nullptr || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
    if (method == storm::solver::LraMethod::ValueIteration) {
        return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec, scheduler);
    } else if (method == storm::solver::LraMethod::LinearProgramming) {
        return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, rewardModel, mec);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
    }
}
template<typename ValueType>
template<typename RewardModelType>
// Computes the long-run average reward of a MEC via value iteration.
// The MEC is extracted into its own (probabilistic) sub-matrix; an aperiodicity
// transformation adds a self-loop with probability selfLoopProb to every choice and scales
// the remaining probabilities and the rewards by (1 - selfLoopProb). Relative value
// iteration then runs until the span of the per-step differences is small enough.
// If a scheduler is given (non-null), the optimal in-MEC choices are written into it.
ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler) {
    // Initialize data about the mec: which global states and choices belong to it.
    storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false);
    storm::storage::BitVector mecChoices(transitionMatrix.getRowCount(), false);
    for (auto const& stateChoicesPair : mec) {
        mecStates.set(stateChoicesPair.first);
        for (auto const& choice : stateChoicesPair.second) {
            mecChoices.set(choice);
        }
    }
    // Map global state indices to consecutive sub-model indices (order-preserving).
    boost::container::flat_map<uint64_t, uint64_t> toSubModelStateMapping;
    uint64_t currState = 0;
    toSubModelStateMapping.reserve(mecStates.getNumberOfSetBits());
    for (auto const& mecState : mecStates) {
        toSubModelStateMapping.insert(std::pair<uint64_t, uint64_t>(mecState, currState));
        ++currState;
    }
    // Get a transition matrix that only considers the states and choices within the MEC.
    storm::storage::SparseMatrixBuilder<ValueType> mecTransitionBuilder(mecChoices.getNumberOfSetBits(), mecStates.getNumberOfSetBits(), 0, true, true, mecStates.getNumberOfSetBits());
    std::vector<ValueType> choiceRewards;
    choiceRewards.reserve(mecChoices.getNumberOfSetBits());
    uint64_t currRow = 0;
    // Aperiodicity transformation: self-loop probability and scaling of the rest.
    ValueType selfLoopProb = storm::utility::convertNumber<ValueType>(env.solver().lra().getAperiodicFactor());
    ValueType scalingFactor = storm::utility::one<ValueType>() - selfLoopProb;
    for (auto const& mecState : mecStates) {
        mecTransitionBuilder.newRowGroup(currRow);
        uint64_t groupStart = transitionMatrix.getRowGroupIndices()[mecState];
        uint64_t groupEnd = transitionMatrix.getRowGroupIndices()[mecState + 1];
        for (uint64_t choice = mecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = mecChoices.getNextSetIndex(choice + 1)) {
            // Insert the self-loop entry at the diagonal position, keeping columns sorted.
            // Entries of the original row are assumed to be sorted by column; the diagonal is
            // inserted either merged with an existing entry, before the first larger column,
            // or after all entries.
            bool insertedDiagElement = false;
            for (auto const& entry : transitionMatrix.getRow(choice)) {
                // NOTE(review): operator[] would insert for columns outside the MEC; MEC
                // choices should only lead to MEC states — confirm this invariant holds.
                uint64_t column = toSubModelStateMapping[entry.getColumn()];
                if (!insertedDiagElement && entry.getColumn() > mecState) {
                    mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
                    insertedDiagElement = true;
                }
                if (!insertedDiagElement && entry.getColumn() == mecState) {
                    // Merge the self-loop with the existing diagonal entry.
                    mecTransitionBuilder.addNextValue(currRow, column, selfLoopProb + scalingFactor * entry.getValue());
                    insertedDiagElement = true;
                } else {
                    mecTransitionBuilder.addNextValue(currRow, column, scalingFactor * entry.getValue());
                }
            }
            if (!insertedDiagElement) {
                mecTransitionBuilder.addNextValue(currRow, toSubModelStateMapping[mecState], selfLoopProb);
            }
            // Compute the rewards obtained for this choice (scaled consistently with the transformation).
            choiceRewards.push_back(scalingFactor * rewardModel.getTotalStateActionReward(mecState, choice, transitionMatrix));
            ++currRow;
        }
    }
    auto mecTransitions = mecTransitionBuilder.build();
    STORM_LOG_ASSERT(mecTransitions.isProbabilistic(), "The MEC-Matrix is not probabilistic.");
    // Start the iterations. The precision is divided by the scaling factor because rewards
    // were scaled by it.
    ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision()) / scalingFactor;
    bool relative = env.solver().lra().getRelativeTerminationCriterion();
    std::vector<ValueType> x(mecTransitions.getRowGroupCount(), storm::utility::zero<ValueType>());
    std::vector<ValueType> xPrime = x;
    auto multiplier = storm::solver::MultiplierFactory<ValueType>().create(env, mecTransitions);
    ValueType maxDiff, minDiff;
    uint64_t iter = 0;
    boost::optional<uint64_t> maxIter;
    if (env.solver().lra().isMaximalIterationCountSet()) {
        maxIter = env.solver().lra().getMaximalIterationCount();
    }
    while (!maxIter.is_initialized() || iter < maxIter.get()) {
        ++iter;
        // Compute the obtained rewards for the next step.
        multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x);
        // Update xPrime and check for convergence: the iteration converges when the span
        // (maxDiff - minDiff) of the step differences is below the precision threshold.
        // To avoid large (and numerically unstable) x-values, we subtract a reference value.
        auto xIt = x.begin();
        auto xPrimeIt = xPrime.begin();
        ValueType refVal = *xIt;
        maxDiff = *xIt - *xPrimeIt;
        minDiff = maxDiff;
        *xIt -= refVal;
        *xPrimeIt = *xIt;
        for (++xIt, ++xPrimeIt; xIt != x.end(); ++xIt, ++xPrimeIt) {
            ValueType diff = *xIt - *xPrimeIt;
            maxDiff = std::max(maxDiff, diff);
            minDiff = std::min(minDiff, diff);
            *xIt -= refVal;
            *xPrimeIt = *xIt;
        }
        if ((maxDiff - minDiff) <= (relative ? (precision * minDiff) : precision)) {
            break;
        }
        // Allow the user to abort long-running computations.
        if (storm::utility::resources::isTerminate()) {
            break;
        }
    }
    if (maxIter.is_initialized() && iter == maxIter.get()) {
        STORM_LOG_WARN("LRA computation did not converge within " << iter << " iterations.");
    } else {
        STORM_LOG_TRACE("LRA computation converged after " << iter << " iterations.");
    }
    if (scheduler) {
        // One extra multiplication to obtain the optimizing choices, then translate the
        // sub-model choice indices back to offsets within the global row groups.
        std::vector<uint_fast64_t> localMecChoices(mecTransitions.getRowGroupCount(), 0);
        multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x, &localMecChoices);
        auto localMecChoiceIt = localMecChoices.begin();
        for (auto const& mecState : mecStates) {
            // Get the choice index of the selected mec choice with respect to the global transition matrix.
            uint_fast64_t globalChoice = mecChoices.getNextSetIndex(transitionMatrix.getRowGroupIndices()[mecState]);
            for (uint_fast64_t i = 0; i < *localMecChoiceIt; ++i) {
                globalChoice = mecChoices.getNextSetIndex(globalChoice + 1);
            }
            STORM_LOG_ASSERT(globalChoice < transitionMatrix.getRowGroupIndices()[mecState + 1], "Invalid global choice for mec state.");
            scheduler->setChoice(globalChoice - transitionMatrix.getRowGroupIndices()[mecState], mecState);
            ++localMecChoiceIt;
        }
    }
    // The LRA value is the midpoint of the difference bounds, rescaled to undo the
    // aperiodicity transformation.
    return (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0) * scalingFactor);
}
template<typename ValueType>
template<typename RewardModelType>
// Computes the long-run average reward of a MEC by solving a linear program:
// one unbounded variable h_s per MEC state, one gain variable lambda, and one constraint
// per state/choice pair relating h_s to the choice's reward and successor values.
ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
    std::shared_ptr<storm::solver::LpSolver<ValueType>> solver = storm::utility::solver::getLpSolver<ValueType>("LRA for MEC");
    // The gain is optimized in the direction opposite to the property direction.
    solver->setOptimizationDirection(invert(dir));
    // Create one variable per MEC state plus the gain variable lambda.
    std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap;
    for (auto const& stateChoices : mec) {
        stateToVariableMap[stateChoices.first] = solver->addUnboundedContinuousVariable("h" + std::to_string(stateChoices.first));
    }
    storm::expressions::Variable lambda = solver->addUnboundedContinuousVariable("L", 1);
    solver->update();
    // Encode the problem as constraints: for every choice c of every state s,
    //   h_s <= r(s,c) - lambda + sum_t P(s,c,t) * h_t    (when minimizing)
    //   h_s >= r(s,c) - lambda + sum_t P(s,c,t) * h_t    (when maximizing)
    for (auto const& stateChoices : mec) {
        uint_fast64_t const state = stateChoices.first;
        for (auto choice : stateChoices.second) {
            // Build the right-hand side: -lambda plus the expected successor values.
            storm::expressions::Expression rhs = -lambda;
            for (auto element : transitionMatrix.getRow(choice)) {
                rhs = rhs + stateToVariableMap.at(element.getColumn()) * solver->getConstant(element.getValue());
            }
            typename RewardModelType::ValueType choiceReward = rewardModel.getTotalStateActionReward(state, choice, transitionMatrix);
            rhs = solver->getConstant(choiceReward) + rhs;
            storm::expressions::Expression constraint = (dir == OptimizationDirection::Minimize) ? (stateToVariableMap.at(state) <= rhs) : (stateToVariableMap.at(state) >= rhs);
            solver->addConstraint("state" + std::to_string(state) + "," + std::to_string(choice), constraint);
        }
    }
    solver->optimize();
    // The optimal lambda is the LRA value of the MEC.
    return solver->getContinuousValue(lambda);
}
template<typename ValueType>
std::unique_ptr<CheckResult> SparseMdpPrctlHelper<ValueType>::computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates) {
@ -1818,10 +1359,6 @@ namespace storm {
template std::vector<double> SparseMdpPrctlHelper<double>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, uint_fast64_t stepBound);
template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool produceScheduler);
template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
#ifdef STORM_HAVE_CARL
template class SparseMdpPrctlHelper<storm::RationalNumber>;
@ -1829,10 +1366,6 @@ namespace storm {
template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, uint_fast64_t stepBound);
template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool produceScheduler);
template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
#endif
}
}

13
src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h

@ -67,24 +67,11 @@ namespace storm {
static std::vector<ValueType> computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::Interval> const& intervalRewardModel, bool lowerBoundOfIntervals, storm::storage::BitVector const& targetStates, bool qualitative);
#endif
static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler);
template<typename RewardModelType>
static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler);
static std::unique_ptr<CheckResult> computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates);
private:
static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityRewardsHelper(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::function<std::vector<ValueType>(uint_fast64_t, storm::storage::SparseMatrix<ValueType> const&, storm::storage::BitVector const&)> const& totalStateRewardVectorGetter, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, std::function<storm::storage::BitVector()> const& zeroRewardStatesGetter, std::function<storm::storage::BitVector()> const& zeroRewardChoicesGetter, ModelCheckerHint const& hint = ModelCheckerHint());
template<typename RewardModelType>
static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
template<typename RewardModelType>
static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
template<typename RewardModelType>
static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
};
}

12
src/storm/models/sparse/Ctmc.cpp

@ -79,6 +79,18 @@ namespace storm {
}
}
template<typename ValueType, typename RewardModelType>
storm::storage::SparseMatrix<ValueType> Ctmc<ValueType, RewardModelType>::computeProbabilityMatrix() const {
// Turn the rates into probabilities by scaling each row with the exit rate of the state.
storm::storage::SparseMatrix<ValueType> result(this->getTransitionMatrix());
for (uint_fast64_t row = 0; row < result.getRowCount(); ++row) {
for (auto& entry : result.getRow(row)) {
entry.setValue(entry.getValue() / exitRates[row]);
}
}
return result;
}
template class Ctmc<double>;
#ifdef STORM_HAVE_CARL

6
src/storm/models/sparse/Ctmc.h

@ -64,6 +64,12 @@ namespace storm {
virtual void reduceToStateBasedRewards() override;
/*!
* @return the probabilistic transition matrix P
* @note getTransitionMatrix() retrieves the exit rate matrix R, where R(s,s') = r(s) * P(s,s')
*/
storm::storage::SparseMatrix<ValueType> computeProbabilityMatrix() const;
private:
/*!
* Computes the exit rate vector based on the given rate matrix.

33
src/storm/models/sparse/StandardRewardModel.cpp

@ -155,24 +155,20 @@ namespace storm {
template<typename ValueType>
template<typename MatrixValueType>
ValueType StandardRewardModel<ValueType>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, MatrixValueType const& stateRewardWeight, MatrixValueType const& actionRewardWeight) const {
ValueType result = this->hasStateRewards() ? (this->hasStateActionRewards() ? (ValueType) (this->getStateReward(stateIndex) * stateRewardWeight + this->getStateActionReward(choiceIndex) * actionRewardWeight)
: (ValueType) (this->getStateReward(stateIndex) * stateRewardWeight))
: (this->hasStateActionRewards() ? (ValueType) (this->getStateActionReward(choiceIndex) * actionRewardWeight)
: storm::utility::zero<ValueType>());
ValueType StandardRewardModel<ValueType>::getStateActionAndTransitionReward(uint_fast64_t choiceIndex, storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix) const {
ValueType result = this->hasStateActionRewards() ? this->getStateActionReward(choiceIndex) : storm::utility::zero<ValueType>();
if (this->hasTransitionRewards()) {
auto rewMatrixEntryIt = this->getTransitionRewardMatrix().begin(choiceIndex);
for (auto const& transitionEntry : transitionMatrix.getRow(choiceIndex)) {
assert(rewMatrixEntryIt != this->getTransitionRewardMatrix().end(choiceIndex));
if (transitionEntry.getColumn() < rewMatrixEntryIt->getColumn()) {
continue;
} else {
// We assume that the transition reward matrix is a submatrix of the given transition matrix. Hence, the following must hold
assert(transitionEntry.getColumn() == rewMatrixEntryIt->getColumn());
result += actionRewardWeight * rewMatrixEntryIt->getValue() * storm::utility::convertNumber<ValueType>(transitionEntry.getValue());
++rewMatrixEntryIt;
}
}
result += transitionMatrix.getPointwiseProductRowSum(getTransitionRewardMatrix(), choiceIndex);
}
return result;
}
template<typename ValueType>
template<typename MatrixValueType>
ValueType StandardRewardModel<ValueType>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, MatrixValueType const& stateRewardWeight, MatrixValueType const& actionRewardWeight) const {
ValueType result = actionRewardWeight * getStateActionAndTransitionReward(choiceIndex, transitionMatrix);
if (this->hasStateRewards()) {
result += stateRewardWeight * this->getStateReward(stateIndex);
}
return result;
}
@ -453,6 +449,7 @@ namespace storm {
template storm::storage::BitVector StandardRewardModel<double>::getStatesWithFilter(storm::storage::SparseMatrix<double> const& transitionMatrix, std::function<bool(double const&)> const& filter) const;
template storm::storage::BitVector StandardRewardModel<double>::getChoicesWithZeroReward(storm::storage::SparseMatrix<double> const& transitionMatrix) const;
template storm::storage::BitVector StandardRewardModel<double>::getChoicesWithFilter(storm::storage::SparseMatrix<double> const& transitionMatrix, std::function<bool(double const&)> const& filter) const;
template double StandardRewardModel<double>::getStateActionAndTransitionReward(uint_fast64_t stateIndex, storm::storage::SparseMatrix<double> const& transitionMatrix) const;
template double StandardRewardModel<double>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<double> const& transitionMatrix, double const& stateRewardWeight, double const& actionRewardWeight) const;
template void StandardRewardModel<double>::clearRewardAtState(uint_fast64_t state, storm::storage::SparseMatrix<double> const& transitionMatrix);
template void StandardRewardModel<double>::reduceToStateBasedRewards(storm::storage::SparseMatrix<double> const& transitionMatrix, bool reduceToStateRewards, std::vector<double> const* weights);
@ -480,6 +477,7 @@ namespace storm {
template storm::storage::BitVector StandardRewardModel<storm::RationalNumber>::getStatesWithFilter(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::function<bool(storm::RationalNumber const&)> const& filter) const;
template storm::storage::BitVector StandardRewardModel<storm::RationalNumber>::getChoicesWithZeroReward(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix) const;
template storm::storage::BitVector StandardRewardModel<storm::RationalNumber>::getChoicesWithFilter(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::function<bool(storm::RationalNumber const&)> const& filter) const;
template storm::RationalNumber StandardRewardModel<storm::RationalNumber>::getStateActionAndTransitionReward(uint_fast64_t stateIndex, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix) const;
template storm::RationalNumber StandardRewardModel<storm::RationalNumber>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::RationalNumber const& stateRewardWeight, storm::RationalNumber const& actionRewardWeight) const;
template void StandardRewardModel<storm::RationalNumber>::reduceToStateBasedRewards(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, bool reduceToStateRewards, std::vector<storm::RationalNumber> const* weights);
template void StandardRewardModel<storm::RationalNumber>::clearRewardAtState(uint_fast64_t state, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix);
@ -497,6 +495,7 @@ namespace storm {
template storm::storage::BitVector StandardRewardModel<storm::RationalFunction>::getChoicesWithFilter(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, std::function<bool(storm::RationalFunction const&)> const& filter) const;
template void StandardRewardModel<storm::RationalFunction>::clearRewardAtState(uint_fast64_t state, storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix);
template std::vector<storm::RationalFunction> StandardRewardModel<storm::RationalFunction>::getTotalActionRewardVector(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, std::vector<storm::RationalFunction> const& stateRewardWeights) const;
template storm::RationalFunction StandardRewardModel<storm::RationalFunction>::getStateActionAndTransitionReward(uint_fast64_t stateIndex, storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix) const;
template storm::RationalFunction StandardRewardModel<storm::RationalFunction>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, storm::RationalFunction const& stateRewardWeight, storm::RationalFunction const& actionRewardWeight) const;
template void StandardRewardModel<storm::RationalFunction>::reduceToStateBasedRewards(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, bool reduceToStateRewards, std::vector<storm::RationalFunction> const* weights);
template void StandardRewardModel<storm::RationalFunction>::setStateActionReward(uint_fast64_t choiceIndex, storm::RationalFunction const & newValue);

9
src/storm/models/sparse/StandardRewardModel.h

@ -167,6 +167,15 @@ namespace storm {
*/
boost::optional<storm::storage::SparseMatrix<ValueType>> const& getOptionalTransitionRewardMatrix() const;
/*!
* @param choiceIndex The index of the considered choice
* @param transitionMatrix The matrix that is used to weight the values of the transition reward matrix.
* @return the sum of the action reward and the weighted transition rewards for the given choice, excluding potential state rewards
* @note returns zero if there is neither action nor transition reward.
*/
template<typename MatrixValueType>
ValueType getStateActionAndTransitionReward(uint_fast64_t choiceIndex, storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix) const;
/*!
* Retrieves the total reward for the given state action pair (including (scaled) state rewards, action rewards and transition rewards
*

4
src/storm/models/symbolic/Ctmc.cpp

@ -94,6 +94,10 @@ namespace storm {
}
}
template<storm::dd::DdType Type, typename ValueType>
storm::dd::Add<Type, ValueType> Ctmc<Type, ValueType>::computeProbabilityMatrix() const {
return this->getTransitionMatrix() / this->getExitRateVector();
}
template<storm::dd::DdType Type, typename ValueType>
template<typename NewValueType>

6
src/storm/models/symbolic/Ctmc.h

@ -142,6 +142,12 @@ namespace storm {
storm::dd::Add<Type, ValueType> const& getExitRateVector() const;
virtual void reduceToStateBasedRewards() override;
/*!
* @return the probabilistic transition matrix P
* @note getTransitionMatrix() retrieves the exit rate matrix R, where R(s,s') = r(s) * P(s,s')
*/
storm::dd::Add<Type, ValueType> computeProbabilityMatrix() const;
template<typename NewValueType>
std::shared_ptr<Ctmc<Type, NewValueType>> toValueType() const;

12
src/storm/solver/GmmxxMultiplier.cpp

@ -120,7 +120,11 @@ namespace storm {
template<typename ValueType>
void GmmxxMultiplier<ValueType>::multAdd(std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
if (b) {
gmm::mult_add(gmmMatrix, x, *b, result);
if (b == &result) {
gmm::mult_add(gmmMatrix, x, result);
} else {
gmm::mult_add(gmmMatrix, x, *b, result);
}
} else {
gmm::mult(gmmMatrix, x, result);
}
@ -257,7 +261,11 @@ namespace storm {
void GmmxxMultiplier<ValueType>::multAddParallel(std::vector<ValueType> const& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
#ifdef STORM_HAVE_INTELTBB
if (b) {
gmm::mult_add_parallel(gmmMatrix, x, *b, result);
if (b == &result) {
gmm::mult_add_parallel(gmmMatrix, x, result);
} else {
gmm::mult_add_parallel(gmmMatrix, x, *b, result);
}
} else {
gmm::mult_parallel(gmmMatrix, x, result);
}

4
src/storm/storage/MaximalEndComponent.cpp

@ -36,6 +36,10 @@ namespace storm {
stateToChoicesMapping.emplace(state, std::move(choices));
}
std::size_t MaximalEndComponent::size() const {
return stateToChoicesMapping.size();
}
MaximalEndComponent::set_type const& MaximalEndComponent::getChoicesForState(uint_fast64_t state) const {
auto stateChoicePair = stateToChoicesMapping.find(state);

5
src/storm/storage/MaximalEndComponent.h

@ -68,6 +68,11 @@ namespace storm {
*/
void addState(uint_fast64_t state, set_type&& choices);
/*!
* @return The number of states in this mec.
*/
std::size_t size() const;
/*!
* Retrieves the choices for the given state that are contained in this MEC under the assumption that the
* state is in the MEC.

86
src/storm/storage/SparseMatrix.cpp

@ -123,22 +123,40 @@ namespace storm {
void SparseMatrixBuilder<ValueType>::addNextValue(index_type row, index_type column, ValueType const& value) {
// Check that we did not move backwards wrt. the row.
STORM_LOG_THROW(row >= lastRow, storm::exceptions::InvalidArgumentException, "Adding an element in row " << row << ", but an element in row " << lastRow << " has already been added.");
STORM_LOG_ASSERT(columnsAndValues.size() == currentEntryCount, "Unexpected size of columnsAndValues vector.");
// Check if a diagonal entry shall be inserted before
if (pendingDiagonalEntry) {
index_type diagColumn = hasCustomRowGrouping ? currentRowGroupCount - 1 : lastRow;
if (row > lastRow || column >= diagColumn) {
ValueType diagValue = std::move(pendingDiagonalEntry.get());
pendingDiagonalEntry = boost::none;
// Add the pending diagonal value now
if (row == lastRow && column == diagColumn) {
// The currently added value coincides with the diagonal entry!
// We add up the values and repeat this call.
addNextValue(row, column, diagValue + value);
// We return here because the above call already did all the work.
return;
} else {
addNextValue(lastRow, diagColumn, diagValue);
}
}
}
// If the element is in the same row, but was not inserted in the correct order, we need to fix the row after
// the insertion.
bool fixCurrentRow = row == lastRow && column < lastColumn;
// If the element is in the same row and column as the previous entry, we add them up.
if (row == lastRow && column == lastColumn && !columnsAndValues.empty()) {
// If the element is in the same row and column as the previous entry, we add them up...
// unless there is no entry in this row yet, which might happen either for the very first entry or when only a diagonal value has been added
if (row == lastRow && column == lastColumn && rowIndications.back() < currentEntryCount) {
columnsAndValues.back().setValue(columnsAndValues.back().getValue() + value);
} else {
// If we switched to another row, we have to adjust the missing entries in the row indices vector.
if (row != lastRow) {
// Otherwise, we need to push the correct values to the vectors, which might trigger reallocations.
for (index_type i = lastRow + 1; i <= row; ++i) {
rowIndications.push_back(currentEntryCount);
}
assert(rowIndications.size() == lastRow + 1);
rowIndications.resize(row + 1, currentEntryCount);
lastRow = row;
}
@ -183,15 +201,24 @@ namespace storm {
void SparseMatrixBuilder<ValueType>::newRowGroup(index_type startingRow) {
STORM_LOG_THROW(hasCustomRowGrouping, storm::exceptions::InvalidStateException, "Matrix was not created to have a custom row grouping.");
STORM_LOG_THROW(startingRow >= lastRow, storm::exceptions::InvalidStateException, "Illegal row group with negative size.");
rowGroupIndices.get().push_back(startingRow);
++currentRowGroupCount;
// Close all rows from the most recent one to the starting row.
for (index_type i = lastRow + 1; i < startingRow; ++i) {
rowIndications.push_back(currentEntryCount);
// If there still is a pending diagonal entry, we need to add it now (otherwise, the correct diagonal column will be unclear)
if (pendingDiagonalEntry) {
STORM_LOG_ASSERT(currentRowGroupCount > 0, "Diagonal entry was set before opening the first row group.");
index_type diagColumn = currentRowGroupCount - 1;
ValueType diagValue = std::move(pendingDiagonalEntry.get());
pendingDiagonalEntry = boost::none; // clear now, so addNextValue works properly
addNextValue(lastRow, diagColumn, diagValue);
}
rowGroupIndices.get().push_back(startingRow);
++currentRowGroupCount;
// Handle the case where the previous row group ends with one or more empty rows
if (lastRow + 1 < startingRow) {
// Close all rows from the most recent one to the starting row.
assert(rowIndications.size() == lastRow + 1);
rowIndications.resize(startingRow, currentEntryCount);
// Reset the most recently seen row/column to allow for proper insertion of the following elements.
lastRow = startingRow - 1;
lastColumn = 0;
@ -201,6 +228,14 @@ namespace storm {
template<typename ValueType>
SparseMatrix<ValueType> SparseMatrixBuilder<ValueType>::build(index_type overriddenRowCount, index_type overriddenColumnCount, index_type overriddenRowGroupCount) {
// If there still is a pending diagonal entry, we need to add it now
if (pendingDiagonalEntry) {
index_type diagColumn = hasCustomRowGrouping ? currentRowGroupCount - 1 : lastRow;
ValueType diagValue = std::move(pendingDiagonalEntry.get());
pendingDiagonalEntry = boost::none; // clear now, so addNextValue works properly
addNextValue(lastRow, diagColumn, diagValue);
}
bool hasEntries = currentEntryCount != 0;
uint_fast64_t rowCount = hasEntries ? lastRow + 1 : 0;
@ -332,9 +367,34 @@ namespace storm {
}
highestColumn = maxColumn;
lastColumn = columnsAndValues.empty() ? 0 : columnsAndValues[columnsAndValues.size() - 1].getColumn();
lastColumn = columnsAndValues.empty() ? 0 : columnsAndValues.back().getColumn();
}
template<typename ValueType>
void SparseMatrixBuilder<ValueType>::addDiagonalEntry(index_type row, ValueType const& value) {
STORM_LOG_THROW(row >= lastRow, storm::exceptions::InvalidArgumentException, "Adding a diagonal element in row " << row << ", but an element in row " << lastRow << " has already been added.");
if (pendingDiagonalEntry) {
if (row == lastRow) {
// Add the two diagonal entries, nothing else to be done.
pendingDiagonalEntry.get() += value;
return;
} else {
// add the pending entry
index_type column = hasCustomRowGrouping ? currentRowGroupCount - 1 : lastRow;
ValueType diagValue = std::move(pendingDiagonalEntry.get());
pendingDiagonalEntry = boost::none; // clear now, so addNextValue works properly
addNextValue(lastRow, column, diagValue);
}
}
pendingDiagonalEntry = value;
if (lastRow != row) {
assert(rowIndications.size() == lastRow + 1);
rowIndications.resize(row + 1, currentEntryCount);
lastRow = row;
lastColumn = 0;
}
}
template<typename ValueType>
SparseMatrix<ValueType>::rows::rows(iterator begin, index_type entryCount) : beginIterator(begin), entryCount(entryCount) {
// Intentionally left empty.

13
src/storm/storage/SparseMatrix.h

@ -243,7 +243,16 @@ namespace storm {
* @param offset Offset to add to each id in vector index.
*/
void replaceColumns(std::vector<index_type> const& replacements, index_type offset);
/*!
* Makes sure that a diagonal entry will be inserted at the given row.
* All other entries of this row must be set immediately after calling this (without setting values at other rows in between)
* The provided row must not be smaller than the row of the most recent insertion.
* If there is a row grouping, the column of the diagonal entry will correspond to the current row group.
* If addNextValue is called on the given row and the diagonal column, we take the sum of the two values provided to addDiagonalEntry and addNextValue
*/
void addDiagonalEntry(index_type row, ValueType const& value);
private:
// A flag indicating whether a row count was set upon construction.
bool initialRowCountSet;
@ -305,6 +314,8 @@ namespace storm {
// Stores the currently active row group. This is used for correctly constructing the row grouping of the
// matrix.
index_type currentRowGroupCount;
boost::optional<ValueType> pendingDiagonalEntry;
};
/*!

1
src/storm/storage/dd/DdType.h

@ -4,6 +4,7 @@
namespace storm {
namespace dd {
enum class DdType {
None,
CUDD,
Sylvan
};

5
src/storm/utility/vector.h

@ -63,6 +63,7 @@ namespace storm {
*/
template<class T>
void setVectorValues(std::vector<T>& vector, storm::storage::BitVector const& positions, std::vector<T> const& values) {
STORM_LOG_ASSERT(positions.getNumberOfSetBits() <= values.size(), "The number of selected positions (" << positions.getNumberOfSetBits() << ") exceeds the size of the input vector (" << values.size() << ").");
uint_fast64_t oldPosition = 0;
for (auto position : positions) {
vector[position] = values[oldPosition++];
@ -156,6 +157,8 @@ namespace storm {
*/
template<class T>
void selectVectorValues(std::vector<T>& vector, storm::storage::BitVector const& positions, std::vector<T> const& values) {
STORM_LOG_ASSERT(positions.getNumberOfSetBits() <= vector.size(), "The number of selected positions (" << positions.getNumberOfSetBits() << ") exceeds the size of the target vector (" << vector.size() << ").");
STORM_LOG_ASSERT(positions.size() == values.size(), "Size mismatch of the positions vector (" << positions.size() << ") and the values vector (" << values.size() << ").");
auto targetIt = vector.begin();
for (auto position : positions) {
*targetIt = values[position];
@ -207,6 +210,8 @@ namespace storm {
*/
template<class T>
void selectVectorValues(std::vector<T>& vector, std::vector<uint_fast64_t> const& indexSequence, std::vector<T> const& values) {
STORM_LOG_ASSERT(indexSequence.size() <= vector.size(), "The number of selected positions (" << indexSequence.size() << ") exceeds the size of the target vector (" << vector.size() << ").");
for (uint_fast64_t vectorIndex = 0; vectorIndex < vector.size(); ++vectorIndex){
vector[vectorIndex] = values[indexSequence[vectorIndex]];
}

7
src/test/storm/modelchecker/csl/LraCtmcCslModelCheckerTest.cpp

@ -43,6 +43,7 @@ namespace {
env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
return env;
@ -61,6 +62,7 @@ namespace {
env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
return env;
@ -78,6 +80,8 @@ namespace {
storm::Environment env;
env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
return env;
@ -95,6 +99,8 @@ namespace {
storm::Environment env;
env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
env.solver().lra().setDetLraMethod(storm::solver::LraMethod::GainBiasEquations);
env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
return env;
@ -164,6 +170,7 @@ namespace {
env.solver().setLinearEquationSolverType(storm::solver::EquationSolverType::Gmmxx);
env.solver().gmmxx().setMethod(storm::solver::GmmxxLinearEquationSolverMethod::Gmres);
env.solver().gmmxx().setPreconditioner(storm::solver::GmmxxLinearEquationSolverPreconditioner::Ilu);
env.solver().gmmxx().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
env.solver().lra().setDetLraMethod(storm::solver::LraMethod::LraDistributionEquations);
env.solver().lra().setPrecision(storm::utility::convertNumber<storm::RationalNumber>(1e-8)); // Need to increase precision because eq sys yields incorrect results
return env;

2
src/test/storm/modelchecker/csl/MarkovAutomatonCslModelCheckerTest.cpp

@ -341,7 +341,7 @@ namespace {
EXPECT_NEAR(this->parseNumber("0"), this->getQuantitativeResultAtInitialState(model, result), this->precision());
result = checker->check(this->env(), tasks[8]);
EXPECT_NEAR(this->parseNumber("407"), this->getQuantitativeResultAtInitialState(model, result), this->precision());
EXPECT_NEAR(this->parseNumber("407"), this->getQuantitativeResultAtInitialState(model, result), this->precision() * this->parseNumber("407")); // use relative precision!
result = checker->check(this->env(), tasks[9]);
EXPECT_NEAR(this->parseNumber("27"), this->getQuantitativeResultAtInitialState(model, result), this->precision());

73
src/test/storm/storage/SparseMatrixTest.cpp

@ -148,6 +148,79 @@ TEST(SparseMatrix, Build) {
ASSERT_EQ(5ul, matrix5.getEntryCount());
}
TEST(SparseMatrix, DiagonalEntries) {
{
// No row groupings
storm::storage::SparseMatrixBuilder<double> builder(4, 4, 7);
storm::storage::SparseMatrixBuilder<double> builderCmp(4, 4, 7);
for (uint64_t i = 0; i < 4; ++i) {
ASSERT_NO_THROW(builder.addDiagonalEntry(i, i));
ASSERT_NO_THROW(builder.addNextValue(i, 2, 100.0 + i));
if (i < 2) {
ASSERT_NO_THROW(builderCmp.addNextValue(i, i, i));
ASSERT_NO_THROW(builderCmp.addNextValue(i, 2, 100.0 + i));
} else {
ASSERT_NO_THROW(builderCmp.addNextValue(i, 2, 100.0 + i));
ASSERT_NO_THROW(builderCmp.addNextValue(i, i, i));
}
}
auto matrix = builder.build();
auto matrixCmp = builderCmp.build();
EXPECT_EQ(matrix, matrixCmp);
}
{
// With row groupings (each row group has 3 rows)
storm::storage::SparseMatrixBuilder<double> builder(12, 4, 21, true, true, 4);
storm::storage::SparseMatrixBuilder<double> builderCmp(12, 4, 21, true, true, 4);
for (uint64_t i = 0; i < 4; ++i) {
uint64_t row = 3*i;
builder.newRowGroup(row);
builderCmp.newRowGroup(row);
for (; row < 3*(i+1); ++row) {
ASSERT_NO_THROW(builder.addDiagonalEntry(row, row));
ASSERT_NO_THROW(builder.addNextValue(row, 2, 100 + row));
if (i < 2) {
ASSERT_NO_THROW(builderCmp.addNextValue(row, i, row));
ASSERT_NO_THROW(builderCmp.addNextValue(row, 2, 100.0 + row));
} else {
ASSERT_NO_THROW(builderCmp.addNextValue(row, 2, 100.0 + row));
ASSERT_NO_THROW(builderCmp.addNextValue(row, i, row));
}
}
}
auto matrix = builder.build();
auto matrixCmp = builderCmp.build();
EXPECT_EQ(matrix, matrixCmp);
}
{
// With row groupings (every second row is empty)
storm::storage::SparseMatrixBuilder<double> builder(12, 4, 10, true, true, 4);
storm::storage::SparseMatrixBuilder<double> builderCmp(12, 4, 10, true, true, 4);
for (uint64_t i = 0; i < 4; ++i) {
uint64_t row = 3*i;
builder.newRowGroup(row);
builderCmp.newRowGroup(row);
for (; row < 3*(i+1); ++row) {
if (row % 2 == 1) {
continue;
}
ASSERT_NO_THROW(builder.addDiagonalEntry(row, row));
ASSERT_NO_THROW(builder.addNextValue(row, 2, 100 + row));
if (i < 2) {
ASSERT_NO_THROW(builderCmp.addNextValue(row, i, row));
ASSERT_NO_THROW(builderCmp.addNextValue(row, 2, 100.0 + row));
} else {
ASSERT_NO_THROW(builderCmp.addNextValue(row, i, row));
ASSERT_NO_THROW(builderCmp.addNextValue(row, 2, 100.0 + row));
}
}
}
auto matrix = builder.build();
auto matrixCmp = builderCmp.build();
EXPECT_EQ(matrix, matrixCmp);
}
}
TEST(SparseMatrix, CreationWithMovingContents) {
std::vector<storm::storage::MatrixEntry<uint_fast64_t, double>> columnsAndValues;
columnsAndValues.emplace_back(1, 1.0);

|||||||
100:0
Loading…
Cancel
Save