Merge branch 'lra-strategies'

Branch: tempestpy_adaptions
Author: Tim Quatmann (5 years ago)
Commit: d4ee19c350
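
In short, this merge adds optional scheduler (strategy) extraction for long-run average (LRA) properties on sparse MDPs: a produceScheduler flag is threaded through the PRCTL helpers, and a new exportscheduler I/O option writes the resulting scheduler (including choice labels, when available) to a file.
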
Changed files (8):

1. src/storm-cli-utilities/model-handling.h (17 changed lines)
2. src/storm/api/export.h (10 changed lines)
3. src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp (16 changed lines)
4. src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp (115 changed lines)
5. src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h (8 changed lines)
6. src/storm/settings/modules/IOSettings.cpp (10 changed lines)
7. src/storm/settings/modules/IOSettings.h (11 changed lines)
8. src/storm/storage/Scheduler.cpp (10 changed lines)

src/storm-cli-utilities/model-handling.h (17 changed lines)

@@ -748,10 +748,14 @@ namespace storm {
template <typename ValueType>
void verifyWithSparseEngine(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
auto sparseModel = model->as<storm::models::sparse::Model<ValueType>>();
auto const& ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
verifyProperties<ValueType>(input,
- [&sparseModel] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
+ [&sparseModel,&ioSettings] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
bool filterForInitialStates = states->isInitialFormula();
auto task = storm::api::createTask<ValueType>(formula, filterForInitialStates);
if (ioSettings.isExportSchedulerSet()) {
task.setProduceSchedulers(true);
}
std::unique_ptr<storm::modelchecker::CheckResult> result = storm::api::verifyWithSparseEngine<ValueType>(sparseModel, task);
std::unique_ptr<storm::modelchecker::CheckResult> filter;
@@ -764,6 +768,17 @@ namespace storm {
result->filter(filter->asQualitativeCheckResult());
}
return result;
},
[&sparseModel,&ioSettings] (std::unique_ptr<storm::modelchecker::CheckResult> const& result) {
if (ioSettings.isExportSchedulerSet()) {
if (result->template asExplicitQuantitativeCheckResult<ValueType>().hasScheduler()) {
auto const& scheduler = result->template asExplicitQuantitativeCheckResult<ValueType>().getScheduler();
STORM_PRINT_AND_LOG("Exporting scheduler ... ")
storm::api::exportScheduler(sparseModel, scheduler, ioSettings.getExportSchedulerFilename());
} else {
STORM_LOG_ERROR("Scheduler requested but could not be generated.");
}
}
});
}
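
Aside: a minimal, self-contained sketch of the two-callback pattern used above, with hypothetical stand-in types rather than storm's actual API. verifyProperties receives a verification lambda plus a postprocessing lambda that runs on each result; the scheduler export hooks into the latter.

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// Hypothetical stand-in for storm::modelchecker::CheckResult.
struct CheckResult {
    bool hasScheduler = true;
};

// Sketch of the extended verifyProperties: the second callback postprocesses
// each result (e.g. exporting a scheduler) after verification.
void verifyProperties(std::vector<int> const& properties,
                      std::function<std::unique_ptr<CheckResult>(int)> const& verify,
                      std::function<void(std::unique_ptr<CheckResult> const&)> const& postprocess) {
    for (int property : properties) {
        std::unique_ptr<CheckResult> result = verify(property);
        if (result && postprocess) {
            postprocess(result);
        }
    }
}

int main() {
    verifyProperties(
        {0, 1},
        [](int) { return std::make_unique<CheckResult>(); },
        [](std::unique_ptr<CheckResult> const& result) {
            if (result->hasScheduler) {
                std::cout << "Exporting scheduler ...\n";
            }
        });
    return 0;
}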

src/storm/api/export.h (10 changed lines)

@@ -6,6 +6,7 @@
#include "storm/utility/DDEncodingExporter.h"
#include "storm/utility/file.h"
#include "storm/utility/macros.h"
#include "storm/storage/Scheduler.h"
namespace storm {
@@ -42,5 +43,14 @@ namespace storm {
void exportSymbolicModelAsDot(std::shared_ptr<storm::models::symbolic::Model<Type,ValueType>> const& model, std::string const& filename) {
model->writeDotToFile(filename);
}
template <typename ValueType>
void exportScheduler(std::shared_ptr<storm::models::sparse::Model<ValueType>> const& model, storm::storage::Scheduler<ValueType> const& scheduler, std::string const& filename) {
std::ofstream stream;
storm::utility::openFile(filename, stream);
scheduler.printToStream(stream, model);
storm::utility::closeFile(stream);
}
}
}
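
Aside: the new exportScheduler follows storm's open/print/close file idiom. A rough standalone approximation of that idiom (the real storm::utility::openFile/closeFile raise storm-specific exceptions and handle more cases):

#include <fstream>
#include <stdexcept>
#include <string>

// Approximations of storm::utility::openFile / closeFile: open the stream,
// fail loudly if that did not work, and close it again after writing.
void openFile(std::string const& filename, std::ofstream& stream) {
    stream.open(filename);
    if (!stream.is_open()) {
        throw std::runtime_error("Could not open file " + filename + ".");
    }
}

void closeFile(std::ofstream& stream) {
    stream.close();
}

int main() {
    std::ofstream stream;
    openFile("scheduler.txt", stream);
    stream << "0: choice 1\n"; // printToStream writes one entry per state
    closeFile(stream);
    return 0;
}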

src/storm/modelchecker/prctl/SparseMdpPrctlModelChecker.cpp (16 changed lines)

@@ -215,16 +215,24 @@ namespace storm {
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
std::unique_ptr<CheckResult> subResultPointer = this->check(env, stateFormula);
ExplicitQualitativeCheckResult const& subResult = subResultPointer->asExplicitQualitativeCheckResult();
- std::vector<ValueType> numericResult = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), subResult.getTruthValuesVector());
- return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(numericResult)));
+ auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), subResult.getTruthValuesVector(), checkTask.isProduceSchedulersSet());
+ std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
+ if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
+     result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+ }
+ return result;
}
template<typename SparseMdpModelType>
std::unique_ptr<CheckResult> SparseMdpPrctlModelChecker<SparseMdpModelType>::computeLongRunAverageRewards(Environment const& env, storm::logic::RewardMeasureType rewardMeasureType, CheckTask<storm::logic::LongRunAverageRewardFormula, ValueType> const& checkTask) {
STORM_LOG_THROW(checkTask.isOptimizationDirectionSet(), storm::exceptions::InvalidPropertyException, "Formula needs to specify whether minimal or maximal values are to be computed on nondeterministic model.");
auto rewardModel = storm::utility::createFilteredRewardModel(this->getModel(), checkTask);
- std::vector<ValueType> result = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get());
- return std::unique_ptr<CheckResult>(new ExplicitQuantitativeCheckResult<ValueType>(std::move(result)));
+ auto ret = storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(env, storm::solver::SolveGoal<ValueType>(this->getModel(), checkTask), this->getModel().getTransitionMatrix(), this->getModel().getBackwardTransitions(), rewardModel.get(), checkTask.isProduceSchedulersSet());
+ std::unique_ptr<CheckResult> result(new ExplicitQuantitativeCheckResult<ValueType>(std::move(ret.values)));
+ if (checkTask.isProduceSchedulersSet() && ret.scheduler) {
+     result->asExplicitQuantitativeCheckResult<ValueType>().setScheduler(std::move(ret.scheduler));
+ }
+ return result;
}
template<typename SparseMdpModelType>
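
Aside: both checker methods now unpack a helper return type that bundles per-state values with an optional scheduler. A compilable stand-in (the actual MDPSparseModelCheckingHelperReturnType in storm may differ in detail):

#include <memory>
#include <utility>
#include <vector>

template<typename ValueType>
struct Scheduler {};  // minimal stand-in for storm::storage::Scheduler

// Hypothetical stand-in for MDPSparseModelCheckingHelperReturnType: 'values'
// holds one result per state; 'scheduler' stays null unless scheduler
// production was requested and extraction succeeded.
template<typename ValueType>
struct HelperReturnType {
    std::vector<ValueType> values;
    std::unique_ptr<Scheduler<ValueType>> scheduler;

    HelperReturnType(std::vector<ValueType>&& values, std::unique_ptr<Scheduler<ValueType>>&& scheduler = nullptr)
        : values(std::move(values)), scheduler(std::move(scheduler)) {}
};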

src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp (115 changed lines)

@@ -1206,7 +1206,7 @@ namespace storm {
}
template<typename ValueType>
- std::vector<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates) {
+ MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler) {
// If there are no goal states, we avoid the computation and directly return zero.
if (psiStates.empty()) {
@@ -1223,15 +1223,20 @@ namespace storm {
std::vector<ValueType> stateRewards(psiStates.size(), storm::utility::zero<ValueType>());
storm::utility::vector::setVectorValues(stateRewards, psiStates, storm::utility::one<ValueType>());
storm::models::sparse::StandardRewardModel<ValueType> rewardModel(std::move(stateRewards));
- return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel);
+ return computeLongRunAverageRewards(env, std::move(goal), transitionMatrix, backwardTransitions, rewardModel, produceScheduler);
}
template<typename ValueType>
template<typename RewardModelType>
- std::vector<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel) {
+ MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler) {
uint64_t numberOfStates = transitionMatrix.getRowGroupCount();
std::unique_ptr<storm::storage::Scheduler<ValueType>> scheduler;
if (produceScheduler) {
scheduler = std::make_unique<storm::storage::Scheduler<ValueType>>(numberOfStates);
}
// Start by decomposing the MDP into its MECs.
storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);
@@ -1253,7 +1258,7 @@ namespace storm {
for (uint_fast64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];
- lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec);
+ lraValuesForEndComponents[currentMecIndex] = computeLraForMaximalEndComponent(underlyingSolverEnvironment, goal.direction(), transitionMatrix, rewardModel, mec, scheduler);
// Gather information for later use.
for (auto const& stateChoicesPair : mec) {
@@ -1312,6 +1317,8 @@ namespace storm {
}
}
std::vector<std::pair<uint_fast64_t, uint_fast64_t>> sspMecChoicesToOriginalMap; // for scheduler extraction
// Now we are ready to construct the choices for the auxiliary states.
for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
@@ -1345,6 +1352,9 @@ namespace storm {
}
}
if (produceScheduler) {
sspMecChoicesToOriginalMap.emplace_back(state, choice - nondeterministicChoiceIndices[state]);
}
++currentChoice;
}
}
@@ -1353,6 +1363,10 @@ namespace storm {
// For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
++currentChoice;
b.push_back(lraValuesForEndComponents[mecIndex]);
if (produceScheduler) {
// Insert some invalid values
sspMecChoicesToOriginalMap.emplace_back(std::numeric_limits<uint_fast64_t>::max(), std::numeric_limits<uint_fast64_t>::max());
}
}
// Finalize the matrix and solve the corresponding system of equations.
@@ -1360,7 +1374,7 @@ namespace storm {
// Check for requirements of the solver.
storm::solver::GeneralMinMaxLinearEquationSolverFactory<ValueType> minMaxLinearEquationSolverFactory;
- storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction());
+ storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(underlyingSolverEnvironment, true, true, goal.direction(), false, produceScheduler);
requirements.clearBounds();
STORM_LOG_THROW(!requirements.hasEnabledCriticalRequirement(), storm::exceptions::UncheckedRequirementException, "Solver requirements " + requirements.getEnabledRequirementsAsString() + " not checked.");
@@ -1372,6 +1386,7 @@ namespace storm {
solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end()));
solver->setHasUniqueSolution();
solver->setHasNoEndComponents();
solver->setTrackScheduler(produceScheduler);
solver->setRequirementsChecked();
solver->solveEquations(underlyingSolverEnvironment, sspResult, b);
@@ -1386,25 +1401,74 @@ namespace storm {
result[state] = sspResult[firstAuxiliaryStateIndex + stateToMecIndexMap[state]];
}
- return result;
if (produceScheduler && solver->hasScheduler()) {
// Translate result for ssp matrix to original model
auto const& sspChoices = solver->getSchedulerChoices();
uint64_t sspState = 0;
for (auto state : statesNotContainedInAnyMec) {
scheduler->setChoice(sspChoices[sspState], state);
++sspState;
}
// The other sspStates correspond to MECS in the original system.
uint_fast64_t rowOffset = sspMatrix.getRowGroupIndices()[sspState];
for (uint_fast64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
// Obtain the state and choice of the original model to which the selected choice corresponds.
auto const& originalStateChoice = sspMecChoicesToOriginalMap[sspMatrix.getRowGroupIndices()[sspState] + sspChoices[sspState] - rowOffset];
// Check if the best choice is to stay in this MEC
if (originalStateChoice.first == std::numeric_limits<uint_fast64_t>::max()) {
STORM_LOG_ASSERT(sspMatrix.getRow(sspState, sspChoices[sspState]).getNumberOfEntries() == 0, "Expected empty row at choice that stays in MEC.");
// In this case, no further operations are necessary. The scheduler has already been set to the optimal choices during the call of computeLraForMaximalEndComponent.
} else {
// The best choice is to leave this MEC via the selected state and choice.
scheduler->setChoice(originalStateChoice.second, originalStateChoice.first);
// The remaining states in this MEC need to reach this state with probability 1.
storm::storage::BitVector exitStateAsBitVector(transitionMatrix.getRowGroupCount(), false);
exitStateAsBitVector.set(originalStateChoice.first, true);
storm::storage::BitVector otherStatesAsBitVector(transitionMatrix.getRowGroupCount(), false);
for (auto const& stateChoices : mecDecomposition[mecIndex]) {
if (stateChoices.first != originalStateChoice.first) {
otherStatesAsBitVector.set(stateChoices.first, true);
}
}
storm::utility::graph::computeSchedulerProb1E(otherStatesAsBitVector, transitionMatrix, backwardTransitions, otherStatesAsBitVector, exitStateAsBitVector, *scheduler);
}
++sspState;
}
assert(sspState == sspMatrix.getRowGroupCount());
} else {
STORM_LOG_ERROR_COND(!produceScheduler, "Requested to produce a scheduler, but no scheduler was generated.");
}
return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
}
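
Aside: the prob-1 completion at the end (computeSchedulerProb1E) makes the remaining MEC states reach the chosen exit state almost surely. A simplified standalone version over an explicit successor-list representation (storm's implementation works on the sparse matrix and handles the general prob-1E case):

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <vector>

using Choice = std::vector<std::uint64_t>;  // successors with positive probability

// For every MEC state other than exitState, pick a choice that has a
// successor already known to reach exitState; iterating to a fixpoint mirrors
// a backward search from the exit. Within a MEC this covers all states, and
// the induced Markov chain reaches the exit with probability 1.
std::map<std::uint64_t, std::uint64_t> completeScheduler(std::map<std::uint64_t, std::vector<Choice>> const& choicesOfState, std::uint64_t exitState) {
    std::map<std::uint64_t, std::uint64_t> chosenChoice;
    std::set<std::uint64_t> reachesExit{exitState};
    bool changed = true;
    while (changed) {
        changed = false;
        for (auto const& [state, choices] : choicesOfState) {
            if (state == exitState || chosenChoice.count(state) > 0) {
                continue;
            }
            for (std::uint64_t c = 0; c < choices.size(); ++c) {
                bool hitsReachingState = false;
                for (std::uint64_t successor : choices[c]) {
                    if (reachesExit.count(successor) > 0) {
                        hitsReachingState = true;
                        break;
                    }
                }
                if (hitsReachingState) {
                    chosenChoice[state] = c;
                    reachesExit.insert(state);
                    changed = true;
                    break;
                }
            }
        }
    }
    return chosenChoice;
}

int main() {
    // Tiny MEC over states {0,1,2} with exit state 0; each choice lists its successors.
    std::map<std::uint64_t, std::vector<Choice>> mec{{0, {{1}}}, {1, {{2}, {0}}}, {2, {{1}}}};
    for (auto const& [state, choice] : completeScheduler(mec, 0)) {
        std::cout << "state " << state << " -> choice " << choice << "\n";
    }
    return 0;
}
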
template<typename ValueType>
template<typename RewardModelType>
- ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
+ ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler) {
// If the mec only consists of a single state, we compute the LRA value directly
if (++mec.begin() == mec.end()) {
uint64_t state = mec.begin()->first;
auto choiceIt = mec.begin()->second.begin();
ValueType result = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix);
uint_fast64_t bestChoice = *choiceIt;
for (++choiceIt; choiceIt != mec.begin()->second.end(); ++choiceIt) {
ValueType choiceValue = rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix);
if (storm::solver::minimize(dir)) {
- result = std::min(result, rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix));
+ if (result > choiceValue) {
+     result = std::move(choiceValue);
+     bestChoice = *choiceIt;
+ }
} else {
- result = std::max(result, rewardModel.getTotalStateActionReward(state, *choiceIt, transitionMatrix));
+ if (result < choiceValue) {
+     result = std::move(choiceValue);
+     bestChoice = *choiceIt;
+ }
}
}
if (scheduler) {
scheduler->setChoice(bestChoice - transitionMatrix.getRowGroupIndices()[state], state);
}
return result;
}
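
Aside: the single-state shortcut above is a plain running arg-opt over the enabled choices, tracking the best value and the choice achieving it. The same pattern in isolation:

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Running arg-opt over a non-empty vector of choice values: returns the best
// value together with the index of the choice achieving it.
std::pair<double, std::uint64_t> bestChoice(std::vector<double> const& choiceValues, bool minimize) {
    double result = choiceValues.front();
    std::uint64_t best = 0;
    for (std::uint64_t i = 1; i < choiceValues.size(); ++i) {
        if (minimize ? choiceValues[i] < result : choiceValues[i] > result) {
            result = choiceValues[i];
            best = i;
        }
    }
    return {result, best};
}

int main() {
    std::vector<double> choiceValues{2.0, 0.5, 1.5};
    auto [value, index] = bestChoice(choiceValues, true);
    std::cout << "min value " << value << " at choice " << index << "\n";  // 0.5 at 1
    return 0;
}
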
@@ -1417,10 +1481,11 @@ namespace storm {
STORM_LOG_INFO("Selecting 'VI' as the solution technique for long-run properties to guarantee sound results. If you want to override this, please explicitly specify a different LRA method.");
method = storm::solver::LraMethod::ValueIteration;
}
STORM_LOG_ERROR_COND(scheduler == nullptr || method == storm::solver::LraMethod::ValueIteration, "Scheduler generation not supported for the chosen LRA method. Try value-iteration.");
if (method == storm::solver::LraMethod::LinearProgramming) {
return computeLraForMaximalEndComponentLP(env, dir, transitionMatrix, rewardModel, mec);
} else if (method == storm::solver::LraMethod::ValueIteration) {
- return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec);
+ return computeLraForMaximalEndComponentVI(env, dir, transitionMatrix, rewardModel, mec, scheduler);
} else {
STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
}
@@ -1428,7 +1493,7 @@ namespace storm {
template<typename ValueType>
template<typename RewardModelType>
- ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
+ ValueType SparseMdpPrctlHelper<ValueType>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler) {
// Initialize data about the mec
storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false);
@@ -1520,6 +1585,22 @@ namespace storm {
break;
}
}
if (scheduler) {
std::vector<uint_fast64_t> localMecChoices(mecTransitions.getRowGroupCount(), 0);
multiplier->multiplyAndReduce(env, dir, x, &choiceRewards, x, &localMecChoices);
auto localMecChoiceIt = localMecChoices.begin();
for (auto const& mecState : mecStates) {
// Get the choice index of the selected mec choice with respect to the global transition matrix.
uint_fast64_t globalChoice = mecChoices.getNextSetIndex(transitionMatrix.getRowGroupIndices()[mecState]);
for (uint_fast64_t i = 0; i < *localMecChoiceIt; ++i) {
globalChoice = mecChoices.getNextSetIndex(globalChoice + 1);
}
STORM_LOG_ASSERT(globalChoice < transitionMatrix.getRowGroupIndices()[mecState + 1], "Invalid global choice for mec state.");
scheduler->setChoice(globalChoice - transitionMatrix.getRowGroupIndices()[mecState], mecState);
++localMecChoiceIt;
}
}
return (maxDiff + minDiff) / (storm::utility::convertNumber<ValueType>(2.0) * scalingFactor);
}
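
Aside: translating the solver's local choice index back to a global row uses BitVector::getNextSetIndex repeatedly: start at the state's first row and skip forward to the i-th enabled (set) choice. The same logic over a plain std::vector<bool>, assuming the caller guarantees the requested bit exists:

#include <cstddef>
#include <iostream>
#include <vector>

// Index of the n-th set bit at or after 'start' (0-based n); mirrors the
// repeated getNextSetIndex calls above. The caller must guarantee existence.
std::size_t nthSetIndexFrom(std::vector<bool> const& bits, std::size_t start, std::size_t n) {
    std::size_t index = start;
    while (!bits[index]) { ++index; }      // first set bit at or after start
    for (std::size_t i = 0; i < n; ++i) {
        ++index;
        while (!bits[index]) { ++index; }  // advance to the next set bit
    }
    return index;
}

int main() {
    std::vector<bool> enabled{false, true, false, true, true};
    std::cout << nthSetIndexFrom(enabled, 0, 2) << "\n"; // prints 4
    return 0;
}
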
@@ -1717,9 +1798,9 @@ namespace storm {
template std::vector<double> SparseMdpPrctlHelper<double>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, uint_fast64_t stepBound);
template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
- template std::vector<double> SparseMdpPrctlHelper<double>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel);
- template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
- template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
+ template MDPSparseModelCheckingHelperReturnType<double> SparseMdpPrctlHelper<double>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<double>&& goal, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::models::sparse::StandardRewardModel<double> const& rewardModel, bool produceScheduler);
+ template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
+ template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<double>>& scheduler);
template double SparseMdpPrctlHelper<double>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
#ifdef STORM_HAVE_CARL
@@ -1728,9 +1809,9 @@ namespace storm {
template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeCumulativeRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, uint_fast64_t stepBound);
template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeTotalRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool qualitative, bool produceScheduler, ModelCheckerHint const& hint);
- template std::vector<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel);
- template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
- template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
+ template MDPSparseModelCheckingHelperReturnType<storm::RationalNumber> SparseMdpPrctlHelper<storm::RationalNumber>::computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<storm::RationalNumber>&& goal, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, bool produceScheduler);
+ template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
+ template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<storm::RationalNumber>>& scheduler);
template storm::RationalNumber SparseMdpPrctlHelper<storm::RationalNumber>::computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
#endif
}

src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h (8 changed lines)

@@ -67,11 +67,11 @@ namespace storm {
static std::vector<ValueType> computeReachabilityRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::models::sparse::StandardRewardModel<storm::Interval> const& intervalRewardModel, bool lowerBoundOfIntervals, storm::storage::BitVector const& targetStates, bool qualitative);
#endif
- static std::vector<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates);
+ static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& psiStates, bool produceScheduler);
template<typename RewardModelType>
- static std::vector<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel);
+ static MDPSparseModelCheckingHelperReturnType<ValueType> computeLongRunAverageRewards(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, RewardModelType const& rewardModel, bool produceScheduler);
static std::unique_ptr<CheckResult> computeConditionalProbabilities(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& targetStates, storm::storage::BitVector const& conditionStates);
@@ -79,9 +79,9 @@ namespace storm {
static MDPSparseModelCheckingHelperReturnType<ValueType> computeReachabilityRewardsHelper(Environment const& env, storm::solver::SolveGoal<ValueType>&& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::function<std::vector<ValueType>(uint_fast64_t, storm::storage::SparseMatrix<ValueType> const&, storm::storage::BitVector const&)> const& totalStateRewardVectorGetter, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, std::function<storm::storage::BitVector()> const& zeroRewardStatesGetter, std::function<storm::storage::BitVector()> const& zeroRewardChoicesGetter, ModelCheckerHint const& hint = ModelCheckerHint());
template<typename RewardModelType>
- static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
+ static ValueType computeLraForMaximalEndComponent(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
template<typename RewardModelType>
- static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
+ static ValueType computeLraForMaximalEndComponentVI(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, std::unique_ptr<storm::storage::Scheduler<ValueType>>& scheduler);
template<typename RewardModelType>
static ValueType computeLraForMaximalEndComponentLP(Environment const& env, OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec);
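
Aside on the new parameter: the helpers take the scheduler as std::unique_ptr<storm::storage::Scheduler<ValueType>>&, so a null pointer doubles as "no scheduler requested" while a non-null one is filled in by the callee. A minimal sketch of this nullable-output-parameter pattern (hypothetical stand-in types):

#include <iostream>
#include <memory>

struct Sched {  // stand-in for storm::storage::Scheduler<ValueType>
    void setChoice(int choice, int state) {
        std::cout << "state " << state << " -> choice " << choice << "\n";
    }
};

// Passing the unique_ptr by reference lets the callee both test whether a
// scheduler was requested (non-null) and record choices into it.
void computeSomething(std::unique_ptr<Sched>& scheduler) {
    if (scheduler) {
        scheduler->setChoice(1, 0);
    }
}

int main() {
    std::unique_ptr<Sched> none;                // scheduler not requested
    computeSomething(none);                     // records nothing
    auto requested = std::make_unique<Sched>();
    computeSomething(requested);                // records a choice
    return 0;
}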

src/storm/settings/modules/IOSettings.cpp (10 changed lines)

@@ -23,6 +23,7 @@ namespace storm {
const std::string IOSettings::exportJaniDotOptionName = "exportjanidot";
const std::string IOSettings::exportCdfOptionName = "exportcdf";
const std::string IOSettings::exportCdfOptionShortName = "cdf";
const std::string IOSettings::exportSchedulerOptionName = "exportscheduler";
const std::string IOSettings::explicitOptionName = "explicit";
const std::string IOSettings::explicitOptionShortName = "exp";
const std::string IOSettings::explicitDrnOptionName = "explicit-drn";
@@ -55,6 +56,7 @@ namespace storm {
this->addOption(storm::settings::OptionBuilder(moduleName, exportJaniDotOptionName, "", "If given, the loaded jani model will be written to the specified file in the dot format.").setIsAdvanced()
.addArgument(storm::settings::ArgumentBuilder::createStringArgument("filename", "The name of the file to which the model is to be written.").build()).build());
this->addOption(storm::settings::OptionBuilder(moduleName, exportCdfOptionName, false, "Exports the cumulative density function for reward bounded properties into a .csv file.").setIsAdvanced().setShortName(exportCdfOptionShortName).addArgument(storm::settings::ArgumentBuilder::createStringArgument("directory", "A path to an existing directory where the cdf files will be stored.").build()).build());
this->addOption(storm::settings::OptionBuilder(moduleName, exportSchedulerOptionName, false, "Exports the choices of an optimal scheduler to the given file (if supported by engine).").setIsAdvanced().addArgument(storm::settings::ArgumentBuilder::createStringArgument("filename", "The output file.").build()).build());
this->addOption(storm::settings::OptionBuilder(moduleName, exportExplicitOptionName, "", "If given, the loaded model will be written to the specified file in the drn format.")
.addArgument(storm::settings::ArgumentBuilder::createStringArgument("filename", "the name of the file to which the model is to be written.").build()).build());
this->addOption(storm::settings::OptionBuilder(moduleName, exportDdOptionName, "", "If given, the loaded model will be written to the specified file in the drdd format.")
@@ -148,6 +150,14 @@ namespace storm {
return result;
}
bool IOSettings::isExportSchedulerSet() const {
return this->getOption(exportSchedulerOptionName).getHasOptionBeenSet();
}
std::string IOSettings::getExportSchedulerFilename() const {
return this->getOption(exportSchedulerOptionName).getArgumentByName("filename").getValueAsString();
}
bool IOSettings::isExplicitSet() const {
return this->getOption(explicitOptionName).getHasOptionBeenSet();
}
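
Aside: a minimal stand-in (hypothetical types, not storm's Option/Argument classes) for the query pattern the two new accessors follow: an option remembers whether it was set and carries its filename argument.

#include <iostream>
#include <optional>
#include <string>

// Hypothetical stand-in for an option with one string argument.
struct ExportSchedulerOption {
    std::optional<std::string> filename;
    bool isSet() const { return filename.has_value(); }
    std::string getFilename() const { return filename.value(); }
};

int main() {
    ExportSchedulerOption option{std::string("scheduler.txt")};
    if (option.isSet()) {
        std::cout << "Exporting scheduler to " << option.getFilename() << "\n";
    }
    return 0;
}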

src/storm/settings/modules/IOSettings.h (11 changed lines)

@@ -89,6 +89,16 @@ namespace storm {
*/
std::string getExportCdfDirectory() const;
/*!
* Retrieves whether an optimal scheduler is to be exported
*/
bool isExportSchedulerSet() const;
/*!
* Retrieves a filename to which an optimal scheduler will be exported.
*/
std::string getExportSchedulerFilename() const;
/*!
* Retrieves whether the explicit option was set.
*
@@ -325,6 +335,7 @@ namespace storm {
static const std::string exportDdOptionName;
static const std::string exportCdfOptionName;
static const std::string exportCdfOptionShortName;
static const std::string exportSchedulerOptionName;
static const std::string explicitOptionName;
static const std::string explicitOptionShortName;
static const std::string explicitDrnOptionName;

src/storm/storage/Scheduler.cpp (10 changed lines)

@@ -3,6 +3,7 @@
#include "storm/utility/macros.h"
#include "storm/exceptions/NotImplementedException.h"
#include <boost/algorithm/string/join.hpp>
namespace storm {
namespace storage {
@@ -125,6 +126,7 @@ namespace storm {
STORM_LOG_THROW(model == nullptr || model->getNumberOfStates() == schedulerChoices.front().size(), storm::exceptions::InvalidOperationException, "The given model is not compatible with this scheduler.");
bool const stateValuationsGiven = model != nullptr && model->hasStateValuations();
bool const choiceLabelsGiven = model != nullptr && model->hasChoiceLabeling();
bool const choiceOriginsGiven = model != nullptr && model->hasChoiceOrigins();
uint_fast64_t widthOfStates = std::to_string(schedulerChoices.front().size()).length();
if (stateValuationsGiven) {
@@ -181,6 +183,10 @@ namespace storm {
} else {
out << choice.getDeterministicChoice();
}
if (choiceLabelsGiven) {
auto choiceLabels = model->getChoiceLabeling().getLabelsOfChoice(model->getTransitionMatrix().getRowGroupIndices()[state] + choice.getDeterministicChoice());
out << " {" << boost::join(choiceLabels, ", ") << "}";
}
} else {
bool firstChoice = true;
for (auto const& choiceProbPair : choice.getChoiceAsDistribution()) {
@@ -195,6 +201,10 @@ namespace storm {
} else {
out << choiceProbPair.first;
}
if (choiceLabelsGiven) {
auto choiceLabels = model->getChoiceLabeling().getLabelsOfChoice(model->getTransitionMatrix().getRowGroupIndices()[state] + choiceProbPair.first);
out << " {" << boost::join(choiceLabels, ", ") << "}";
}
out << ")";
}
}
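
Aside: the label printing relies on boost::algorithm::join to render the chosen action's labels. Standalone usage matching the " {label1, label2}" format above:

#include <boost/algorithm/string/join.hpp>
#include <iostream>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> choiceLabels{"north", "fast"};
    // Matches the output format used in printToStream above.
    std::cout << " {" << boost::algorithm::join(choiceLabels, ", ") << "}\n";
    return 0;
}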
