11 changed files with 1063 additions and 958 deletions
- 611  src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.cpp (deleted)
- 105  src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.h (deleted)
- 105  src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorResult.h (deleted)
- 137  src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorTask.h (deleted)
- 103  src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveMemoryIncorporation.cpp (added)
- 33   src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveMemoryIncorporation.h (added)
- 424  src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.cpp (added)
- 105  src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h (added)
- 95   src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h (added)
- 230  src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.cpp (added)
- 73   src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.h (added)
src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.cpp
@@ -1,611 +0,0 @@
#include "storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.h"

#include <algorithm>
#include <set>

#include "storm/models/sparse/Mdp.h"
#include "storm/models/sparse/MarkovAutomaton.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/modelchecker/propositional/SparsePropositionalModelChecker.h"
#include "storm/modelchecker/prctl/helper/BaierUpperRewardBoundsComputer.h"
#include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
#include "storm/storage/MaximalEndComponentDecomposition.h"
#include "storm/storage/memorystructure/MemoryStructureBuilder.h"
#include "storm/storage/memorystructure/SparseModelMemoryProduct.h"
#include "storm/storage/expressions/ExpressionManager.h"
#include "storm/transformer/EndComponentEliminator.h"
#include "storm/utility/macros.h"
#include "storm/utility/vector.h"
#include "storm/utility/graph.h"
#include "storm/settings/SettingsManager.h"
#include "storm/settings/modules/GeneralSettings.h"

#include "storm/exceptions/InvalidPropertyException.h"
#include "storm/exceptions/UnexpectedException.h"
#include "storm/exceptions/NotImplementedException.h"

namespace storm {
namespace modelchecker {
namespace multiobjective {
|
|||
template<typename SparseModelType> |
|||
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType SparseMultiObjectivePreprocessor<SparseModelType>::preprocess(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula) { |
|||
|
|||
PreprocessorData data(originalModel); |
|||
|
|||
//Invoke preprocessing on the individual objectives
|
|||
for (auto const& subFormula : originalFormula.getSubformulas()) { |
|||
STORM_LOG_INFO("Preprocessing objective " << *subFormula<< "."); |
|||
data.objectives.push_back(std::make_shared<Objective<ValueType>>()); |
|||
data.objectives.back()->originalFormula = subFormula; |
|||
data.finiteRewardCheckObjectives.resize(data.objectives.size(), false); |
|||
data.upperResultBoundObjectives.resize(data.objectives.size(), false); |
|||
if (data.objectives.back()->originalFormula->isOperatorFormula()) { |
|||
preprocessOperatorFormula(data.objectives.back()->originalFormula->asOperatorFormula(), data); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the subformula " << *subFormula << " of " << originalFormula << " because it is not supported"); |
|||
} |
|||
} |
|||
|
|||
// Incorporate the required memory into the state space
|
|||
storm::storage::SparseModelMemoryProduct<ValueType> product = data.memory->product(originalModel); |
|||
std::shared_ptr<SparseModelType> preprocessedModel = std::dynamic_pointer_cast<SparseModelType>(product.build()); |
|||
|
|||
auto backwardTransitions = preprocessedModel->getBackwardTransitions(); |
|||
|
|||
// compute the end components of the model (if required)
|
|||
bool endComponentAnalysisRequired = false; |
|||
for (auto& task : data.tasks) { |
|||
endComponentAnalysisRequired = endComponentAnalysisRequired || task->requiresEndComponentAnalysis(); |
|||
} |
|||
if (endComponentAnalysisRequired) { |
|||
// TODO
|
|||
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "End component analysis required but currently not implemented."); |
|||
} |
|||
|
|||
for (auto& task : data.tasks) { |
|||
task->perform(*preprocessedModel); |
|||
} |
|||
|
|||
// Remove reward models that are not needed anymore
|
|||
std::set<std::string> relevantRewardModels; |
|||
for (auto const& obj : data.objectives) { |
|||
obj->formula->gatherReferencedRewardModels(relevantRewardModels); |
|||
} |
|||
preprocessedModel->restrictRewardModels(relevantRewardModels); |
|||
|
|||
// Build the actual result
|
|||
return buildResult(originalModel, originalFormula, data, preprocessedModel, backwardTransitions); |
|||
} |
|||
|
|||
template <typename SparseModelType> |
|||
SparseMultiObjectivePreprocessor<SparseModelType>::PreprocessorData::PreprocessorData(SparseModelType const& model) : originalModel(model) { |
|||
memory = std::make_shared<storm::storage::MemoryStructure>(storm::storage::MemoryStructureBuilder<ValueType, RewardModelType>::buildTrivialMemoryStructure(model)); |
|||
|
|||
// The memoryLabelPrefix should not be a prefix of a state label of the given model to ensure uniqueness of label names
|
|||
memoryLabelPrefix = "mem"; |
|||
while (true) { |
|||
bool prefixIsUnique = true; |
|||
for (auto const& label : originalModel.getStateLabeling().getLabels()) { |
|||
if (memoryLabelPrefix.size() <= label.size()) { |
|||
if (std::mismatch(memoryLabelPrefix.begin(), memoryLabelPrefix.end(), label.begin()).first == memoryLabelPrefix.end()) { |
|||
prefixIsUnique = false; |
|||
memoryLabelPrefix = "_" + memoryLabelPrefix; |
|||
break; |
|||
} |
|||
} |
|||
} |
|||
if (prefixIsUnique) { |
|||
break; |
|||
} |
|||
} |
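// For illustration: if the model already contains a state label such as "memFoo", the candidate prefix "mem" is a
// prefix of that label, so it is replaced by "_mem" (and, if necessary, "__mem", ...) until no such clash remains.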
|||
|
|||
// The rewardModelNamePrefix should not be a prefix of a reward model name of the given model to ensure uniqueness of reward model names
|
|||
rewardModelNamePrefix = "obj";
while (true) {
bool prefixIsUnique = true;
for (auto const& rewardModel : originalModel.getRewardModels()) {
std::string const& rewardModelName = rewardModel.first;
if (rewardModelNamePrefix.size() <= rewardModelName.size()) {
if (std::mismatch(rewardModelNamePrefix.begin(), rewardModelNamePrefix.end(), rewardModelName.begin()).first == rewardModelNamePrefix.end()) {
prefixIsUnique = false;
rewardModelNamePrefix = "_" + rewardModelNamePrefix;
break;
}
}
}
if (prefixIsUnique) {
break;
}
}
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessOperatorFormula(storm::logic::OperatorFormula const& formula, PreprocessorData& data) { |
|||
|
|||
Objective<ValueType>& objective = *data.objectives.back(); |
|||
|
|||
// Check whether the complementary event is considered
|
|||
objective.considersComplementaryEvent = formula.isProbabilityOperatorFormula() && formula.getSubformula().isGloballyFormula(); |
|||
|
|||
storm::logic::OperatorInformation opInfo; |
|||
if (formula.hasBound()) { |
|||
opInfo.bound = formula.getBound(); |
|||
// Invert the bound (if necessary)
|
|||
if (objective.considersComplementaryEvent) { |
|||
opInfo.bound->threshold = opInfo.bound->threshold.getManager().rational(storm::utility::one<storm::RationalNumber>()) - opInfo.bound->threshold; |
|||
switch (opInfo.bound->comparisonType) { |
|||
case storm::logic::ComparisonType::Greater: |
|||
opInfo.bound->comparisonType = storm::logic::ComparisonType::Less; |
|||
break; |
|||
case storm::logic::ComparisonType::GreaterEqual: |
|||
opInfo.bound->comparisonType = storm::logic::ComparisonType::LessEqual; |
|||
break; |
|||
case storm::logic::ComparisonType::Less: |
|||
opInfo.bound->comparisonType = storm::logic::ComparisonType::Greater; |
|||
break; |
|||
case storm::logic::ComparisonType::LessEqual: |
|||
opInfo.bound->comparisonType = storm::logic::ComparisonType::GreaterEqual; |
|||
break; |
|||
default: |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Current objective " << formula << " has unexpected comparison type"); |
|||
} |
|||
} |
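// Worked example: a bound such as P<=0.3 [G phi] yields the threshold 1 - 0.3 = 0.7 with comparison >=,
// since the globally formula is subsequently handled via an until formula over the negated subformula.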
|||
if (storm::logic::isLowerBound(opInfo.bound->comparisonType)) { |
|||
opInfo.optimalityType = storm::solver::OptimizationDirection::Maximize; |
|||
} else { |
|||
opInfo.optimalityType = storm::solver::OptimizationDirection::Minimize; |
|||
} |
|||
STORM_LOG_WARN_COND(!formula.hasOptimalityType(), "Optimization direction of formula " << formula << " ignored as the formula also specifies a threshold."); |
|||
} else if (formula.hasOptimalityType()){ |
|||
opInfo.optimalityType = formula.getOptimalityType(); |
|||
// Invert the optimality type (if necessary)
|
|||
if (objective.considersComplementaryEvent) { |
|||
opInfo.optimalityType = storm::solver::invert(opInfo.optimalityType.get()); |
|||
} |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Objective " << formula << " does not specify whether to minimize or maximize"); |
|||
} |
|||
|
|||
if (formula.isProbabilityOperatorFormula()){ |
|||
preprocessProbabilityOperatorFormula(formula.asProbabilityOperatorFormula(), opInfo, data); |
|||
} else if (formula.isRewardOperatorFormula()){ |
|||
preprocessRewardOperatorFormula(formula.asRewardOperatorFormula(), opInfo, data); |
|||
} else if (formula.isTimeOperatorFormula()){ |
|||
preprocessTimeOperatorFormula(formula.asTimeOperatorFormula(), opInfo, data); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the objective " << formula << " because it is not supported"); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessProbabilityOperatorFormula(storm::logic::ProbabilityOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
|
|||
// Probabilities are between zero and one
|
|||
data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>(); |
|||
data.objectives.back()->upperResultBound = storm::utility::one<ValueType>(); |
|||
|
|||
if (formula.getSubformula().isUntilFormula()){ |
|||
preprocessUntilFormula(formula.getSubformula().asUntilFormula(), opInfo, data); |
|||
} else if (formula.getSubformula().isBoundedUntilFormula()){ |
|||
preprocessBoundedUntilFormula(formula.getSubformula().asBoundedUntilFormula(), opInfo, data); |
|||
} else if (formula.getSubformula().isGloballyFormula()){ |
|||
preprocessGloballyFormula(formula.getSubformula().asGloballyFormula(), opInfo, data); |
|||
} else if (formula.getSubformula().isEventuallyFormula()){ |
|||
preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported."); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessRewardOperatorFormula(storm::logic::RewardOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
|
|||
STORM_LOG_THROW((formula.hasRewardModelName() && data.originalModel.hasRewardModel(formula.getRewardModelName())) |
|||
|| (!formula.hasRewardModelName() && data.originalModel.hasUniqueRewardModel()), storm::exceptions::InvalidPropertyException, "The reward model is not unique or the formula " << formula << " does not specify an existing reward model."); |
|||
|
|||
std::string rewardModelName; |
|||
if (formula.hasRewardModelName()) { |
|||
rewardModelName = formula.getRewardModelName(); |
|||
STORM_LOG_THROW(data.originalModel.hasRewardModel(rewardModelName), storm::exceptions::InvalidPropertyException, "The reward model specified by formula " << formula << " does not exist in the model"); |
|||
} else { |
|||
STORM_LOG_THROW(data.originalModel.hasUniqueRewardModel(), storm::exceptions::InvalidOperationException, "The formula " << formula << " does not specify a reward model name and the reward model is not unique."); |
|||
rewardModelName = data.originalModel.getRewardModels().begin()->first; |
|||
} |
|||
|
|||
data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>(); |
|||
|
|||
if (formula.getSubformula().isEventuallyFormula()){ |
|||
preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data, rewardModelName); |
|||
} else if (formula.getSubformula().isCumulativeRewardFormula()) { |
|||
preprocessCumulativeRewardFormula(formula.getSubformula().asCumulativeRewardFormula(), opInfo, data, rewardModelName); |
|||
} else if (formula.getSubformula().isTotalRewardFormula()) { |
|||
preprocessTotalRewardFormula(opInfo, data, rewardModelName); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported."); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessTimeOperatorFormula(storm::logic::TimeOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
// Time formulas are only supported for Markov automata
|
|||
STORM_LOG_THROW(data.originalModel.isOfType(storm::models::ModelType::MarkovAutomaton), storm::exceptions::InvalidPropertyException, "Time operator formulas are only supported for Markov automata."); |
|||
|
|||
data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>(); |
|||
|
|||
if (formula.getSubformula().isEventuallyFormula()){ |
|||
preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported."); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessUntilFormula(storm::logic::UntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, std::shared_ptr<storm::logic::Formula const> subformula) { |
|||
|
|||
storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(data.originalModel); |
|||
storm::storage::BitVector rightSubformulaResult = mc.check(formula.getRightSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
storm::storage::BitVector leftSubformulaResult = mc.check(formula.getLeftSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
// Check if the formula is already satisfied in the initial state because then the transformation to expected rewards will fail.
|
|||
// TODO: Handle this case more properly
|
|||
STORM_LOG_THROW((data.originalModel.getInitialStates() & rightSubformulaResult).empty(), storm::exceptions::NotImplementedException, "The probability for the objective " << *data.objectives.back()->originalFormula << " is always one as the right-hand side of the until formula is true in an initial state. This (trivial) case is currently not implemented.");
|||
|
|||
// Create a memory structure that stores whether a non-PhiState or a PsiState has already been reached
|
|||
storm::storage::MemoryStructureBuilder<ValueType, RewardModelType> builder(2, data.originalModel); |
|||
std::string relevantStatesLabel = data.memoryLabelPrefix + "_obj" + std::to_string(data.objectives.size()) + "_relevant"; |
|||
builder.setLabel(0, relevantStatesLabel); |
|||
storm::storage::BitVector nonRelevantStates = ~leftSubformulaResult | rightSubformulaResult; |
|||
builder.setTransition(0, 0, ~nonRelevantStates); |
|||
builder.setTransition(0, 1, nonRelevantStates); |
|||
builder.setTransition(1, 1, storm::storage::BitVector(data.originalModel.getNumberOfStates(), true)); |
|||
for (auto const& initState : data.originalModel.getInitialStates()) { |
|||
builder.setInitialMemoryState(initState, nonRelevantStates.get(initState) ? 1 : 0); |
|||
} |
|||
storm::storage::MemoryStructure objectiveMemory = builder.build(); |
|||
data.memory = std::make_shared<storm::storage::MemoryStructure>(data.memory->product(objectiveMemory)); |
|||
|
|||
std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); |
|||
if (subformula == nullptr) { |
|||
subformula = std::make_shared<storm::logic::TotalRewardFormula>(); |
|||
} |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(subformula, rewardModelName, opInfo); |
|||
|
|||
auto relevantStatesFormula = std::make_shared<storm::logic::AtomicLabelFormula>(relevantStatesLabel); |
|||
data.tasks.push_back(std::make_shared<SparseMultiObjectivePreprocessorReachProbToTotalRewTask<SparseModelType>>(data.objectives.back(), relevantStatesFormula, formula.getRightSubformula().asSharedPointer())); |
|||
|
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessBoundedUntilFormula(storm::logic::BoundedUntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
|
|||
// Check how to handle this query
|
|||
if (formula.isMultiDimensional() || formula.getTimeBoundReference().isRewardBound()) { |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(formula.asSharedPointer(), opInfo); |
|||
} else if (!formula.hasLowerBound() || (!formula.isLowerBoundStrict() && storm::utility::isZero(formula.template getLowerBound<storm::RationalNumber>()))) { |
|||
std::shared_ptr<storm::logic::Formula const> subformula; |
|||
if (!formula.hasUpperBound()) { |
|||
// The formula is actually unbounded
|
|||
subformula = std::make_shared<storm::logic::TotalRewardFormula>(); |
|||
} else { |
|||
STORM_LOG_THROW(!data.originalModel.isOfType(storm::models::ModelType::MarkovAutomaton) || formula.getTimeBoundReference().isTimeBound(), storm::exceptions::InvalidPropertyException, "Bounded until formulas for Markov Automata are only allowed when time bounds are considered."); |
|||
storm::logic::TimeBound bound(formula.isUpperBoundStrict(), formula.getUpperBound()); |
|||
subformula = std::make_shared<storm::logic::CumulativeRewardFormula>(bound, formula.getTimeBoundReference()); |
|||
} |
|||
preprocessUntilFormula(storm::logic::UntilFormula(formula.getLeftSubformula().asSharedPointer(), formula.getRightSubformula().asSharedPointer()), opInfo, data, subformula); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Property " << formula << "is not supported"); |
|||
} |
|||
} |
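// Example of the case distinction above: on an MDP, P>=0.5 [ a U<=10 b ] (no lower bound) is rephrased as a reward
// objective with a cumulative reward formula C<=10 over a fresh objective reward model, whereas multi-dimensional or
// reward-bounded until formulas are kept as probability operator formulas.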
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessGloballyFormula(storm::logic::GloballyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
// The formula is transformed to an until formula for the complementary event.
|
|||
auto negatedSubformula = std::make_shared<storm::logic::UnaryBooleanStateFormula>(storm::logic::UnaryBooleanStateFormula::OperatorType::Not, formula.getSubformula().asSharedPointer()); |
|||
|
|||
preprocessUntilFormula(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), negatedSubformula), opInfo, data); |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessEventuallyFormula(storm::logic::EventuallyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName) { |
|||
if (formula.isReachabilityProbabilityFormula()){ |
|||
preprocessUntilFormula(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), formula.getSubformula().asSharedPointer()), opInfo, data); |
|||
return; |
|||
} |
|||
|
|||
// Analyze the subformula
|
|||
storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(data.originalModel); |
|||
storm::storage::BitVector subFormulaResult = mc.check(formula.getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
|
|||
// Create a memory structure that stores whether a target state has already been reached
|
|||
storm::storage::MemoryStructureBuilder<ValueType, RewardModelType> builder(2, data.originalModel); |
|||
// Get a unique label that is not already present in the model
|
|||
std::string relevantStatesLabel = data.memoryLabelPrefix + "_obj" + std::to_string(data.objectives.size()) + "_relevant"; |
|||
builder.setLabel(0, relevantStatesLabel); |
|||
builder.setTransition(0, 0, ~subFormulaResult); |
|||
builder.setTransition(0, 1, subFormulaResult); |
|||
builder.setTransition(1, 1, storm::storage::BitVector(data.originalModel.getNumberOfStates(), true)); |
|||
for (auto const& initState : data.originalModel.getInitialStates()) { |
|||
builder.setInitialMemoryState(initState, subFormulaResult.get(initState) ? 1 : 0); |
|||
} |
|||
storm::storage::MemoryStructure objectiveMemory = builder.build(); |
|||
data.memory = std::make_shared<storm::storage::MemoryStructure>(data.memory->product(objectiveMemory)); |
|||
|
|||
auto relevantStatesFormula = std::make_shared<storm::logic::AtomicLabelFormula>(relevantStatesLabel); |
|||
|
|||
std::string auxRewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); |
|||
auto totalRewardFormula = std::make_shared<storm::logic::TotalRewardFormula>(); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(totalRewardFormula, auxRewardModelName, opInfo); |
|||
data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true); |
|||
data.upperResultBoundObjectives.set(data.objectives.size() - 1, true); |
|||
|
|||
if (formula.isReachabilityRewardFormula()) { |
|||
assert(optionalRewardModelName.is_initialized()); |
|||
data.tasks.push_back(std::make_shared<SparseMultiObjectivePreprocessorReachRewToTotalRewTask<SparseModelType>>(data.objectives.back(), relevantStatesFormula, optionalRewardModelName.get())); |
|||
} else if (formula.isReachabilityTimeFormula() && data.originalModel.isOfType(storm::models::ModelType::MarkovAutomaton)) { |
|||
data.tasks.push_back(std::make_shared<SparseMultiObjectivePreprocessorReachTimeToTotalRewTask<SparseModelType>>(data.objectives.back(), relevantStatesFormula)); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The formula " << formula << " neither considers reachability probabilities nor reachability rewards " << (data.originalModel.isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "") << ". This is not supported."); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName) { |
|||
STORM_LOG_THROW(data.originalModel.isOfType(storm::models::ModelType::Mdp), storm::exceptions::InvalidPropertyException, "Cumulative reward formulas are not supported for the given model type."); |
|||
|
|||
auto cumulativeRewardFormula = std::make_shared<storm::logic::CumulativeRewardFormula>(formula); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(cumulativeRewardFormula, *optionalRewardModelName, opInfo); |
|||
bool onlyRewardBounds = true; |
|||
for (uint64_t i = 0; i < cumulativeRewardFormula->getDimension(); ++i) { |
|||
if (!cumulativeRewardFormula->getTimeBoundReference(i).isRewardBound()) { |
|||
onlyRewardBounds = false; |
|||
break; |
|||
} |
|||
} |
|||
if (onlyRewardBounds) { |
|||
data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true); |
|||
data.upperResultBoundObjectives.set(data.objectives.size() - 1, true); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessTotalRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName) { |
|||
|
|||
auto totalRewardFormula = std::make_shared<storm::logic::TotalRewardFormula>(); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(totalRewardFormula, *optionalRewardModelName, opInfo); |
|||
data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true); |
|||
data.upperResultBoundObjectives.set(data.objectives.size() - 1, true); |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType SparseMultiObjectivePreprocessor<SparseModelType>::buildResult(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, PreprocessorData& data, std::shared_ptr<SparseModelType> const& preprocessedModel, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) { |
|||
|
|||
ReturnType result(originalFormula, originalModel); |
|||
|
|||
result.preprocessedModel = preprocessedModel; |
|||
|
|||
for (auto& obj : data.objectives) { |
|||
result.objectives.push_back(std::move(*obj)); |
|||
} |
|||
|
|||
result.queryType = getQueryType(result.objectives); |
|||
|
|||
setReward0States(result, backwardTransitions); |
|||
checkRewardFiniteness(result, data.finiteRewardCheckObjectives, backwardTransitions); |
|||
|
|||
// We compute upper result bounds if the 'sound' option has been enabled
|
|||
if (storm::settings::getModule<storm::settings::modules::GeneralSettings>().isSoundSet()) { |
|||
for (auto const& objIndex : data.upperResultBoundObjectives) { |
|||
result.objectives[objIndex].upperResultBound = computeUpperResultBound(result, objIndex, backwardTransitions); |
|||
} |
|||
} |
|||
|
|||
return result; |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType::QueryType SparseMultiObjectivePreprocessor<SparseModelType>::getQueryType(std::vector<Objective<ValueType>> const& objectives) { |
|||
uint_fast64_t numOfObjectivesWithThreshold = 0; |
|||
for (auto& obj : objectives) { |
|||
if (obj.formula->hasBound()) { |
|||
++numOfObjectivesWithThreshold; |
|||
} |
|||
} |
|||
if (numOfObjectivesWithThreshold == objectives.size()) { |
|||
return ReturnType::QueryType::Achievability; |
|||
} else if (numOfObjectivesWithThreshold + 1 == objectives.size()) { |
|||
// Note: We do not want to consider a Pareto query when the total number of objectives is one.
|
|||
return ReturnType::QueryType::Quantitative; |
|||
} else if (numOfObjectivesWithThreshold == 0) { |
|||
return ReturnType::QueryType::Pareto; |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Invalid Multi-objective query: The numer of qualitative objectives should be either 0 (Pareto query), 1 (quantitative query), or #objectives (achievability query)."); |
|||
} |
|||
} |
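// Illustration: multi(P>=0.5 [F "a"], P>=0.7 [F "b"]) has a threshold for every objective (achievability query);
// multi(Pmax=? [F "a"], P>=0.7 [F "b"]) leaves exactly one objective quantitative (quantitative query);
// multi(Pmax=? [F "a"], Pmax=? [F "b"]) has no thresholds at all (Pareto query).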
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::setReward0States(ReturnType& result, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) { |
|||
|
|||
uint_fast64_t stateCount = result.preprocessedModel->getNumberOfStates(); |
|||
auto const& transitions = result.preprocessedModel->getTransitionMatrix(); |
|||
std::vector<uint_fast64_t> const& groupIndices = transitions.getRowGroupIndices(); |
|||
storm::storage::BitVector allStates(stateCount, true); |
|||
|
|||
// Get the choices that yield non-zero reward
|
|||
storm::storage::BitVector zeroRewardChoices(result.preprocessedModel->getNumberOfChoices(), true); |
|||
for (auto const& obj : result.objectives) { |
|||
if (obj.formula->isRewardOperatorFormula()) { |
|||
auto const& rewModel = result.preprocessedModel->getRewardModel(obj.formula->asRewardOperatorFormula().getRewardModelName()); |
|||
zeroRewardChoices &= rewModel.getChoicesWithZeroReward(transitions); |
|||
} |
|||
} |
|||
|
|||
// Get the states that have reward for at least one (or for all) choices assigned to it.
|
|||
storm::storage::BitVector statesWithRewardForOneChoice = storm::storage::BitVector(stateCount, false); |
|||
storm::storage::BitVector statesWithRewardForAllChoices = storm::storage::BitVector(stateCount, true); |
|||
for (uint_fast64_t state = 0; state < stateCount; ++state) { |
|||
bool stateHasChoiceWithReward = false; |
|||
bool stateHasChoiceWithoutReward = false; |
|||
uint_fast64_t const& groupEnd = groupIndices[state + 1]; |
|||
for (uint_fast64_t choice = groupIndices[state]; choice < groupEnd; ++choice) { |
|||
if (zeroRewardChoices.get(choice)) { |
|||
stateHasChoiceWithoutReward = true; |
|||
} else { |
|||
stateHasChoiceWithReward = true; |
|||
} |
|||
} |
|||
if (stateHasChoiceWithReward) { |
|||
statesWithRewardForOneChoice.set(state, true); |
|||
} |
|||
if (stateHasChoiceWithoutReward) { |
|||
statesWithRewardForAllChoices.set(state, false); |
|||
} |
|||
} |
|||
|
|||
// get the states for which there is a scheduler yielding reward zero
|
|||
result.reward0EStates = storm::utility::graph::performProbGreater0A(transitions, groupIndices, backwardTransitions, allStates, statesWithRewardForAllChoices, false, 0, zeroRewardChoices); |
|||
result.reward0EStates.complement(); |
|||
result.reward0AStates = storm::utility::graph::performProb0A(backwardTransitions, allStates, statesWithRewardForOneChoice); |
|||
assert(result.reward0AStates.isSubsetOf(result.reward0EStates)); |
|||
} |
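// Illustration: a state whose choices all carry positive reward for some objective belongs to neither set; a state with
// one zero-reward self-loop choice and one rewarded choice belongs to reward0EStates (take the self-loop forever) but
// not to reward0AStates, since some scheduler still collects positive reward.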
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::checkRewardFiniteness(ReturnType& result, storm::storage::BitVector const& finiteRewardCheckObjectives, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) { |
|||
|
|||
result.rewardFinitenessType = ReturnType::RewardFinitenessType::AllFinite; |
|||
|
|||
auto const& transitions = result.preprocessedModel->getTransitionMatrix(); |
|||
std::vector<uint_fast64_t> const& groupIndices = transitions.getRowGroupIndices(); |
|||
|
|||
storm::storage::BitVector maxRewardsToCheck(result.preprocessedModel->getNumberOfChoices(), true); |
|||
storm::storage::BitVector minRewardsToCheck(result.preprocessedModel->getNumberOfChoices(), true); |
|||
for (auto const& objIndex : finiteRewardCheckObjectives) { |
|||
STORM_LOG_ASSERT(result.objectives[objIndex].formula->isRewardOperatorFormula(), "Objective needs to be checked for finite reward but has no reward operator."); |
|||
auto const& rewModel = result.preprocessedModel->getRewardModel(result.objectives[objIndex].formula->asRewardOperatorFormula().getRewardModelName()); |
|||
auto unrelevantChoices = rewModel.getChoicesWithZeroReward(transitions); |
|||
// For (upper) reward bounded cumulative reward formulas, we do not need to consider the choices where boundReward is collected.
|
|||
if (result.objectives[objIndex].formula->getSubformula().isCumulativeRewardFormula()) { |
|||
auto const& timeBoundReference = result.objectives[objIndex].formula->getSubformula().asCumulativeRewardFormula().getTimeBoundReference(); |
|||
// Only reward bounded formulas need a finiteness check
|
|||
assert(timeBoundReference.isRewardBound()); |
|||
auto const& rewModelOfBound = result.preprocessedModel->getRewardModel(timeBoundReference.getRewardName()); |
|||
unrelevantChoices |= ~rewModelOfBound.getChoicesWithZeroReward(transitions); |
|||
} |
|||
if (storm::solver::minimize(result.objectives[objIndex].formula->getOptimalityType())) { |
|||
minRewardsToCheck &= unrelevantChoices; |
|||
} else { |
|||
maxRewardsToCheck &= unrelevantChoices; |
|||
} |
|||
} |
|||
maxRewardsToCheck.complement(); |
|||
minRewardsToCheck.complement(); |
|||
|
|||
// Check reward finiteness under all schedulers
|
|||
storm::storage::BitVector allStates(result.preprocessedModel->getNumberOfStates(), true); |
|||
if (storm::utility::graph::checkIfECWithChoiceExists(transitions, backwardTransitions, allStates, maxRewardsToCheck | minRewardsToCheck)) { |
|||
// Check whether there is a scheduler yielding infinite reward for a maximizing objective
|
|||
if (storm::utility::graph::checkIfECWithChoiceExists(transitions, backwardTransitions, allStates, maxRewardsToCheck)) { |
|||
result.rewardFinitenessType = ReturnType::RewardFinitenessType::Infinite; |
|||
} else { |
|||
// Check whether there is a scheduler under which all rewards are finite.
|
|||
result.rewardLessInfinityEStates = storm::utility::graph::performProb1E(transitions, groupIndices, backwardTransitions, allStates, result.reward0EStates); |
|||
if ((result.rewardLessInfinityEStates.get() & result.preprocessedModel->getInitialStates()).empty()) { |
|||
// There is no scheduler that induces finite reward for the initial state
|
|||
result.rewardFinitenessType = ReturnType::RewardFinitenessType::Infinite; |
|||
} else { |
|||
result.rewardFinitenessType = ReturnType::RewardFinitenessType::ExistsParetoFinite; |
|||
} |
|||
} |
|||
} else { |
|||
result.rewardLessInfinityEStates = allStates; |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
boost::optional<typename SparseModelType::ValueType> SparseMultiObjectivePreprocessor<SparseModelType>::computeUpperResultBound(ReturnType const& result, uint64_t objIndex, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) { |
|||
boost::optional<ValueType> upperBound; |
|||
|
|||
if (!result.originalModel.isOfType(storm::models::ModelType::Mdp)) { |
|||
return upperBound; |
|||
} |
|||
|
|||
auto const& transitions = result.preprocessedModel->getTransitionMatrix(); |
|||
|
|||
if (result.objectives[objIndex].formula->isRewardOperatorFormula()) { |
|||
auto const& rewModel = result.preprocessedModel->getRewardModel(result.objectives[objIndex].formula->asRewardOperatorFormula().getRewardModelName()); |
|||
auto actionRewards = rewModel.getTotalRewardVector(transitions); |
|||
|
|||
if (result.objectives[objIndex].formula->getSubformula().isTotalRewardFormula() || result.objectives[objIndex].formula->getSubformula().isCumulativeRewardFormula()) { |
|||
// We have to eliminate ECs here to treat zero-reward ECs
|
|||
|
|||
storm::storage::BitVector allStates(result.preprocessedModel->getNumberOfStates(), true); |
|||
|
|||
// Get the set of states from which a state with non-zero reward can be reached (under some scheduler)
|
|||
auto nonZeroRewardStates = rewModel.getStatesWithZeroReward(transitions); |
|||
nonZeroRewardStates.complement(); |
|||
auto expRewGreater0EStates = storm::utility::graph::performProbGreater0E(backwardTransitions, allStates, nonZeroRewardStates); |
|||
|
|||
auto zeroRewardChoices = rewModel.getChoicesWithZeroReward(transitions); |
|||
|
|||
auto ecElimRes = storm::transformer::EndComponentEliminator<ValueType>::transform(transitions, expRewGreater0EStates, zeroRewardChoices, ~allStates); |
|||
|
|||
allStates.resize(ecElimRes.matrix.getRowGroupCount()); |
|||
storm::storage::BitVector outStates(allStates.size(), false); |
|||
std::vector<ValueType> rew0StateProbs; |
|||
rew0StateProbs.reserve(ecElimRes.matrix.getRowCount()); |
|||
for (uint64_t state = 0; state < allStates.size(); ++ state) { |
|||
for (uint64_t choice = ecElimRes.matrix.getRowGroupIndices()[state]; choice < ecElimRes.matrix.getRowGroupIndices()[state + 1]; ++choice) { |
|||
// Check whether the choice leads to a state with expRew 0 in the original model
|
|||
bool isOutChoice = false; |
|||
uint64_t originalModelChoice = ecElimRes.newToOldRowMapping[choice]; |
|||
for (auto const& entry : transitions.getRow(originalModelChoice)) { |
|||
if (!expRewGreater0EStates.get(entry.getColumn())) { |
|||
isOutChoice = true; |
|||
outStates.set(state, true); |
|||
rew0StateProbs.push_back(storm::utility::one<ValueType>() - ecElimRes.matrix.getRowSum(choice)); |
|||
assert (!storm::utility::isZero(rew0StateProbs.back())); |
|||
break; |
|||
} |
|||
} |
|||
if (!isOutChoice) { |
|||
rew0StateProbs.push_back(storm::utility::zero<ValueType>()); |
|||
} |
|||
} |
|||
} |
|||
|
|||
// An upper reward bound can only be computed if it is below infinity
|
|||
if (storm::utility::graph::performProb1A(ecElimRes.matrix, ecElimRes.matrix.getRowGroupIndices(), ecElimRes.matrix.transpose(true), allStates, outStates).full()) { |
|||
|
|||
std::vector<ValueType> rewards; |
|||
rewards.reserve(ecElimRes.matrix.getRowCount()); |
|||
for (auto row : ecElimRes.newToOldRowMapping) { |
|||
rewards.push_back(actionRewards[row]); |
|||
} |
|||
|
|||
storm::modelchecker::helper::BaierUpperRewardBoundsComputer<ValueType> baier(ecElimRes.matrix, rewards, rew0StateProbs); |
|||
if (upperBound) { |
|||
upperBound = std::min(upperBound.get(), baier.computeUpperBound()); |
|||
} else { |
|||
upperBound = baier.computeUpperBound(); |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
if (upperBound) { |
|||
STORM_LOG_INFO("Computed upper result bound " << upperBound.get() << " for objective " << *result.objectives[objIndex].formula << "."); |
|||
} else { |
|||
STORM_LOG_WARN("Could not compute upper result bound for objective " << *result.objectives[objIndex].formula); |
|||
} |
|||
return upperBound; |
|||
|
|||
} |

template class SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<double>>;
template class SparseMultiObjectivePreprocessor<storm::models::sparse::MarkovAutomaton<double>>;

template class SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<storm::RationalNumber>>;
template class SparseMultiObjectivePreprocessor<storm::models::sparse::MarkovAutomaton<storm::RationalNumber>>;
}
}
}
src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.h
@@ -1,105 +0,0 @@
#pragma once

#include <memory>
#include <string>

#include "storm/logic/Formulas.h"
#include "storm/storage/BitVector.h"
#include "storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorResult.h"
#include "storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorTask.h"
#include "storm/storage/memorystructure/MemoryStructure.h"

namespace storm {
namespace modelchecker {
namespace multiobjective {
|
|||
/* |
|||
* This class invokes the necessary preprocessing for the constraint based multi-objective model checking algorithm |
|||
*/ |
|||
template <class SparseModelType> |
|||
class SparseMultiObjectivePreprocessor { |
|||
public: |
|||
typedef typename SparseModelType::ValueType ValueType; |
|||
typedef typename SparseModelType::RewardModelType RewardModelType; |
|||
typedef SparseMultiObjectivePreprocessorResult<SparseModelType> ReturnType; |
|||
|
|||
/*! |
|||
* Preprocesses the given model w.r.t. the given formulas |
|||
* @param originalModel The considered model |
|||
* @param originalFormula the considered formula. The subformulas should only contain one OperatorFormula at top level. |
|||
*/ |
|||
static ReturnType preprocess(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula); |
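// Illustrative use (the names 'mdp' and 'formula' are assumed here and are not part of this header):
//   auto result = SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<double>>::preprocess(mdp, formula);
//   // result.preprocessedModel, result.objectives and result.queryType then describe the transformed query.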
|||
|
|||
private: |
|||
|
|||
struct PreprocessorData { |
|||
SparseModelType const& originalModel; |
|||
std::vector<std::shared_ptr<Objective<ValueType>>> objectives; |
|||
std::vector<std::shared_ptr<SparseMultiObjectivePreprocessorTask<SparseModelType>>> tasks; |
|||
std::shared_ptr<storm::storage::MemoryStructure> memory; |
|||
|
|||
// Indices of the objectives that require a check for finite reward |
|||
storm::storage::BitVector finiteRewardCheckObjectives; |
|||
|
|||
// Indices of the objectives for which we need to compute an upper bound for the result |
|||
storm::storage::BitVector upperResultBoundObjectives; |
|||
|
|||
std::string memoryLabelPrefix; |
|||
std::string rewardModelNamePrefix; |
|||
|
|||
PreprocessorData(SparseModelType const& model); |
|||
}; |
|||
|
|||
/*! |
|||
* Apply the necessary preprocessing for the given formula.
|||
* @param formula the current (sub)formula |
|||
* @param opInfo the information of the resulting operator formula |
|||
* @param data the current data. The currently processed objective is located at data.objectives.back() |
|||
* @param optionalRewardModelName the reward model name that is considered for the formula (if available) |
|||
*/ |
|||
static void preprocessOperatorFormula(storm::logic::OperatorFormula const& formula, PreprocessorData& data); |
|||
static void preprocessProbabilityOperatorFormula(storm::logic::ProbabilityOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessRewardOperatorFormula(storm::logic::RewardOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessTimeOperatorFormula(storm::logic::TimeOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessUntilFormula(storm::logic::UntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, std::shared_ptr<storm::logic::Formula const> subformula = nullptr); |
|||
static void preprocessBoundedUntilFormula(storm::logic::BoundedUntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessGloballyFormula(storm::logic::GloballyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessEventuallyFormula(storm::logic::EventuallyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none); |
|||
static void preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none); |
|||
static void preprocessTotalRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none); // The total reward formula itself does not need to be provided as it is unique. |
|||
|
|||
|
|||
/*! |
|||
* Builds the result from preprocessing |
|||
*/ |
|||
static ReturnType buildResult(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, PreprocessorData& data, std::shared_ptr<SparseModelType> const& preprocessedModel, storm::storage::SparseMatrix<ValueType> const& backwardTransitions); |
|||
|
|||
/*! |
|||
* Returns the query type |
|||
*/ |
|||
static typename ReturnType::QueryType getQueryType(std::vector<Objective<ValueType>> const& objectives); |
|||
|
|||
|
|||
/*! |
|||
* Computes the set of states that have zero expected reward w.r.t. all expected reward objectives |
|||
*/ |
|||
static void setReward0States(ReturnType& result, storm::storage::SparseMatrix<ValueType> const& backwardTransitions); |
|||
|
|||
|
|||
/*! |
|||
* Checks whether the occurring expected rewards are finite and sets the RewardFinitenessType accordingly |
|||
* Returns the set of states for which a scheduler exists that induces finite reward for all objectives |
|||
*/ |
|||
static void checkRewardFiniteness(ReturnType& result, storm::storage::BitVector const& finiteRewardCheckObjectives, storm::storage::SparseMatrix<ValueType> const& backwardTransitions); |
|||
|
|||
/*! |
|||
* Finds an upper bound for the expected reward of the objective with the given index (assuming it considers an expected reward objective) |
|||
*/ |
|||
static boost::optional<ValueType> computeUpperResultBound(ReturnType const& result, uint64_t objIndex, storm::storage::SparseMatrix<ValueType> const& backwardTransitions); |
|||
|
|||
|
|||
}; |
|||
} |
|||
} |
|||
} |
|||
|
src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorResult.h
@@ -1,105 +0,0 @@
#pragma once

#include <vector>
#include <memory>
#include <boost/optional.hpp>

#include "storm/logic/Formulas.h"
#include "storm/modelchecker/multiobjective/Objective.h"
#include "storm/storage/BitVector.h"

#include "storm/exceptions/UnexpectedException.h"

namespace storm {
namespace modelchecker {
namespace multiobjective {
|
|||
template <class SparseModelType> |
|||
struct SparseMultiObjectivePreprocessorResult { |
|||
|
|||
enum class QueryType { Achievability, Quantitative, Pareto }; |
|||
enum class RewardFinitenessType { |
|||
AllFinite, // The expected reward is finite for all objectives and all schedulers |
|||
ExistsParetoFinite, // There is a Pareto optimal scheduler yielding finite rewards for all objectives |
|||
Infinite // All Pareto optimal schedulers yield infinite reward for at least one objective |
|||
}; |
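// Illustration (informal): if some end component contains a choice with positive reward for a maximizing
// total-reward objective, the type is Infinite; if only minimizing objectives can accumulate reward inside end
// components, a scheduler avoiding those components may still keep all rewards finite (ExistsParetoFinite).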
|||
|
|||
// Original data |
|||
storm::logic::MultiObjectiveFormula const& originalFormula; |
|||
SparseModelType const& originalModel; |
|||
|
|||
// The preprocessed model and objectives |
|||
std::shared_ptr<SparseModelType> preprocessedModel; |
|||
std::vector<Objective<typename SparseModelType::ValueType>> objectives; |
|||
|
|||
// Data about the query |
|||
QueryType queryType; |
|||
RewardFinitenessType rewardFinitenessType; |
|||
|
|||
// The states of the preprocessed model for which... |
|||
storm::storage::BitVector reward0EStates; // ... there is a scheduler such that all expected reward objectives have value zero |
|||
storm::storage::BitVector reward0AStates; // ... all schedulers induce value 0 for all expected reward objectives |
|||
boost::optional<storm::storage::BitVector> rewardLessInfinityEStates; // ... there is a scheduler yielding finite reward for all expected reward objectives |
|||
// Note that other types of objectives (e.g., reward bounded reachability objectives) are not considered. |
|||
|
|||
// Encodes a superset of the set of choices of preprocessedModel that are part of an end component (if given). |
|||
//boost::optional<storm::storage::BitVector> ecChoicesHint; |
|||
|
|||
SparseMultiObjectivePreprocessorResult(storm::logic::MultiObjectiveFormula const& originalFormula, SparseModelType const& originalModel) : originalFormula(originalFormula), originalModel(originalModel) { |
|||
// Intentionally left empty |
|||
} |
|||
|
|||
bool containsOnlyTrivialObjectives() const { |
|||
// Trivial objectives are either total reward formulas or single-dimensional step or time bounded cumulative reward formulas |
|||
for (auto const& obj : objectives) { |
|||
if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isTotalRewardFormula()) { |
|||
continue; |
|||
} |
|||
if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isCumulativeRewardFormula()) { |
|||
auto const& subf = obj.formula->getSubformula().asCumulativeRewardFormula(); |
|||
if (!subf.isMultiDimensional() && (subf.getTimeBoundReference().isTimeBound() || subf.getTimeBoundReference().isStepBound())) { |
|||
continue; |
|||
} |
|||
} |
|||
// Reaching this point means that the objective is considered as non-trivial |
|||
return false; |
|||
} |
|||
return true; |
|||
} |
|||
|
|||
void printToStream(std::ostream& out) const { |
|||
out << std::endl; |
|||
out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl; |
|||
out << " Multi-objective Query " << std::endl; |
|||
out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl; |
|||
out << std::endl; |
|||
out << "Original Formula: " << std::endl; |
|||
out << "--------------------------------------------------------------" << std::endl; |
|||
out << "\t" << originalFormula << std::endl; |
|||
out << std::endl; |
|||
out << "Objectives:" << std::endl; |
|||
out << "--------------------------------------------------------------" << std::endl; |
|||
for (auto const& obj : objectives) { |
|||
obj.printToStream(out); |
|||
} |
|||
out << "--------------------------------------------------------------" << std::endl; |
|||
out << std::endl; |
|||
out << "Original Model Information:" << std::endl; |
|||
originalModel.printModelInformationToStream(out); |
|||
out << std::endl; |
|||
out << "Preprocessed Model Information:" << std::endl; |
|||
preprocessedModel->printModelInformationToStream(out); |
|||
out << std::endl; |
|||
out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl; |
|||
} |
|||
|
|||
friend std::ostream& operator<<(std::ostream& out, SparseMultiObjectivePreprocessorResult<SparseModelType> const& ret) { |
|||
ret.printToStream(out); |
|||
return out; |
|||
} |
|||
|
|||
}; |
|||
} |
|||
} |
|||
} |
|||
|
src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorTask.h
@@ -1,137 +0,0 @@
#pragma once

#include <boost/optional.hpp>
#include <memory>

#include "storm/logic/Formula.h"
#include "storm/logic/Bound.h"
#include "storm/solver/OptimizationDirection.h"
#include "storm/modelchecker/multiobjective/Objective.h"
#include "storm/modelchecker/propositional/SparsePropositionalModelChecker.h"
#include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
#include "storm/models/sparse/Mdp.h"
#include "storm/models/sparse/MarkovAutomaton.h"
#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/utility/vector.h"

namespace storm {
namespace modelchecker {
namespace multiobjective {
|
|||
template <typename SparseModelType> |
|||
class SparseMultiObjectivePreprocessorTask { |
|||
public: |
|||
SparseMultiObjectivePreprocessorTask(std::shared_ptr<Objective<typename SparseModelType::ValueType>> const& objective) : objective(objective) { |
|||
// intentionally left empty |
|||
} |
|||
|
|||
virtual ~SparseMultiObjectivePreprocessorTask() = default; |
|||
|
|||
virtual void perform(SparseModelType& preprocessedModel) const = 0; |
|||
|
|||
virtual bool requiresEndComponentAnalysis() const { |
|||
return false; |
|||
} |
|||
|
|||
|
|||
protected: |
|||
std::shared_ptr<Objective<typename SparseModelType::ValueType>> objective; |
|||
}; |
|||
|
|||
// Transforms reachability probabilities to total expected rewards by adding a rewardModel |
|||
// such that one reward is given whenever a goal state is reached from a relevant state |
|||
template <typename SparseModelType> |
|||
class SparseMultiObjectivePreprocessorReachProbToTotalRewTask : public SparseMultiObjectivePreprocessorTask<SparseModelType> { |
|||
public: |
|||
SparseMultiObjectivePreprocessorReachProbToTotalRewTask(std::shared_ptr<Objective<typename SparseModelType::ValueType>> const& objective, std::shared_ptr<storm::logic::Formula const> const& relevantStateFormula, std::shared_ptr<storm::logic::Formula const> const& goalStateFormula) : SparseMultiObjectivePreprocessorTask<SparseModelType>(objective), relevantStateFormula(relevantStateFormula), goalStateFormula(goalStateFormula) { |
|||
// Intentionally left empty |
|||
} |
|||
|
|||
virtual void perform(SparseModelType& preprocessedModel) const override { |
|||
|
|||
// build stateAction reward vector that gives (one*transitionProbability) reward whenever a transition leads from a relevantState to a goalState |
|||
storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(preprocessedModel); |
|||
storm::storage::BitVector relevantStates = mc.check(*relevantStateFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
storm::storage::BitVector goalStates = mc.check(*goalStateFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
|
|||
std::vector<typename SparseModelType::ValueType> objectiveRewards(preprocessedModel.getTransitionMatrix().getRowCount(), storm::utility::zero<typename SparseModelType::ValueType>()); |
|||
for (auto const& state : relevantStates) { |
|||
for (uint_fast64_t row = preprocessedModel.getTransitionMatrix().getRowGroupIndices()[state]; row < preprocessedModel.getTransitionMatrix().getRowGroupIndices()[state + 1]; ++row) { |
|||
objectiveRewards[row] = preprocessedModel.getTransitionMatrix().getConstrainedRowSum(row, goalStates); |
|||
} |
|||
} |
|||
STORM_LOG_ASSERT(this->objective->formula->isRewardOperatorFormula(), "No reward operator formula."); |
|||
STORM_LOG_ASSERT(this->objective->formula->asRewardOperatorFormula().hasRewardModelName(), "No reward model name has been specified"); |
|||
preprocessedModel.addRewardModel(this->objective->formula->asRewardOperatorFormula().getRewardModelName(), typename SparseModelType::RewardModelType(boost::none, std::move(objectiveRewards))); |
|||
} |
|||
|
|||
private: |
|||
std::shared_ptr<storm::logic::Formula const> relevantStateFormula; |
|||
std::shared_ptr<storm::logic::Formula const> goalStateFormula; |
|||
}; |
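// Example of the reward construction above: if a relevant state has a choice that moves to goal states with total
// probability 0.4, that choice receives state-action reward 0.4; together with the memory structure built during
// preprocessing, the expected total reward then coincides with the reachability probability of the original objective.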
|||
|
|||
// Transforms expected reachability rewards to total expected rewards by adding a rewardModel |
|||
// such that non-relevant states get reward zero |
|||
template <typename SparseModelType> |
|||
class SparseMultiObjectivePreprocessorReachRewToTotalRewTask : public SparseMultiObjectivePreprocessorTask<SparseModelType> { |
|||
public: |
|||
SparseMultiObjectivePreprocessorReachRewToTotalRewTask(std::shared_ptr<Objective<typename SparseModelType::ValueType>> const& objective, std::shared_ptr<storm::logic::Formula const> const& relevantStateFormula, std::string const& originalRewardModelName) : SparseMultiObjectivePreprocessorTask<SparseModelType>(objective), relevantStateFormula(relevantStateFormula), originalRewardModelName(originalRewardModelName) { |
|||
// Intentionally left empty |
|||
} |
|||
|
|||
virtual void perform(SparseModelType& preprocessedModel) const override { |
|||
|
|||
// build stateAction reward vector that only gives reward for relevant states |
|||
storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(preprocessedModel); |
|||
storm::storage::BitVector nonRelevantStates = ~mc.check(*relevantStateFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
typename SparseModelType::RewardModelType objectiveRewards = preprocessedModel.getRewardModel(originalRewardModelName); |
|||
objectiveRewards.reduceToStateBasedRewards(preprocessedModel.getTransitionMatrix(), false); |
|||
if (objectiveRewards.hasStateRewards()) { |
|||
storm::utility::vector::setVectorValues(objectiveRewards.getStateRewardVector(), nonRelevantStates, storm::utility::zero<typename SparseModelType::ValueType>()); |
|||
} |
|||
if (objectiveRewards.hasStateActionRewards()) { |
|||
for (auto state : nonRelevantStates) { |
|||
std::fill_n(objectiveRewards.getStateActionRewardVector().begin() + preprocessedModel.getTransitionMatrix().getRowGroupIndices()[state], preprocessedModel.getTransitionMatrix().getRowGroupSize(state), storm::utility::zero<typename SparseModelType::ValueType>()); |
|||
} |
|||
} |
|||
STORM_LOG_ASSERT(this->objective->formula->isRewardOperatorFormula(), "No reward operator formula."); |
|||
STORM_LOG_ASSERT(this->objective->formula->asRewardOperatorFormula().hasRewardModelName(), "No reward model name has been specified"); |
|||
preprocessedModel.addRewardModel(this->objective->formula->asRewardOperatorFormula().getRewardModelName(), std::move(objectiveRewards)); |
|||
} |
|||
|
|||
private: |
|||
std::shared_ptr<storm::logic::Formula const> relevantStateFormula; |
|||
std::string originalRewardModelName; |
|||
}; |
|||
|
|||
// Transforms expected reachability time to total expected rewards by adding a rewardModel |
|||
// such that every time step done from a relevant state yields one reward |
|||
template <typename SparseModelType> |
|||
class SparseMultiObjectivePreprocessorReachTimeToTotalRewTask : public SparseMultiObjectivePreprocessorTask<SparseModelType> { |
|||
public: |
|||
SparseMultiObjectivePreprocessorReachTimeToTotalRewTask(std::shared_ptr<Objective<typename SparseModelType::ValueType>> const& objective, std::shared_ptr<storm::logic::Formula const> const& relevantStateFormula) : SparseMultiObjectivePreprocessorTask<SparseModelType>(objective), relevantStateFormula(relevantStateFormula) { |
|||
// Intentionally left empty |
|||
} |
|||
|
|||
virtual void perform(SparseModelType& preprocessedModel) const override { |
|||
|
|||
// Build a state reward vector that only yields reward for relevant (Markovian) states |
|||
storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(preprocessedModel); |
|||
storm::storage::BitVector relevantStates = mc.check(*relevantStateFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
|
|||
std::vector<typename SparseModelType::ValueType> timeRewards(preprocessedModel.getNumberOfStates(), storm::utility::zero<typename SparseModelType::ValueType>()); |
|||
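// Only Markovian states accumulate time in a Markov automaton, so probabilistic states keep reward zero |
|||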
storm::utility::vector::setVectorValues(timeRewards, dynamic_cast<storm::models::sparse::MarkovAutomaton<typename SparseModelType::ValueType> const&>(preprocessedModel).getMarkovianStates() & relevantStates, storm::utility::one<typename SparseModelType::ValueType>()); |
|||
|
|||
STORM_LOG_ASSERT(this->objective->formula->isRewardOperatorFormula(), "No reward operator formula."); |
|||
STORM_LOG_ASSERT(this->objective->formula->asRewardOperatorFormula().hasRewardModelName(), "No reward model name has been specified"); |
|||
preprocessedModel.addRewardModel(this->objective->formula->asRewardOperatorFormula().getRewardModelName(), typename SparseModelType::RewardModelType(std::move(timeRewards))); |
|||
} |
|||
|
|||
private: |
|||
std::shared_ptr<storm::logic::Formula const> relevantStateFormula; |
|||
}; |
|||
|
|||
} |
|||
} |
|||
} |
|||
|
@ -0,0 +1,103 @@ |
|||
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveMemoryIncorporation.h"
|
|||
|
|||
#include "storm/models/sparse/Mdp.h"
|
|||
#include "storm/models/sparse/MarkovAutomaton.h"
|
|||
#include "storm/models/sparse/StandardRewardModel.h"
|
|||
#include "storm/storage/memorystructure/MemoryStructureBuilder.h"
|
|||
#include "storm/storage/memorystructure/SparseModelMemoryProduct.h"
|
|||
#include "storm/logic/Formulas.h"
|
|||
#include "storm/logic/FragmentSpecification.h"
|
|||
#include "storm/modelchecker/propositional/SparsePropositionalModelChecker.h"
|
|||
#include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
|
|||
|
|||
#include "storm/utility/macros.h"
|
|||
|
|||
#include "storm/exceptions/NotImplementedException.h"
|
|||
#include "storm/exceptions/NotSupportedException.h"
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace multiobjective { |
|||
namespace preprocessing { |
|||
|
|||
template <class SparseModelType> |
|||
storm::storage::MemoryStructure getGoalMemory(SparseModelType const& model, storm::logic::Formula const& propositionalGoalStateFormula) { |
|||
STORM_LOG_THROW(propositionalGoalStateFormula.isInFragment(storm::logic::propositional()), storm::exceptions::NotSupportedException, "The subformula " << propositionalGoalStateFormula << " should be propositional."); |
|||
|
|||
storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(model); |
|||
storm::storage::BitVector goalStates = mc.check(propositionalGoalStateFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
|
|||
// Check if the formula is already satisfied for all initial states. In such a case the trivial memory structure suffices.
|
|||
if (model.getInitialStates().isSubsetOf(goalStates)) { |
|||
STORM_LOG_INFO("One objective is already satisfied for all initial states."); |
|||
return storm::storage::MemoryStructureBuilder<typename SparseModelType::ValueType, typename SparseModelType::RewardModelType>::buildTrivialMemoryStructure(model); |
|||
} |
|||
|
|||
|
|||
// Create a memory structure that stores whether a goal state has already been reached
|
|||
storm::storage::MemoryStructureBuilder<typename SparseModelType::ValueType, typename SparseModelType::RewardModelType> builder(2, model); |
|||
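// Memory state 0: no goal state visited yet; memory state 1: a goal state has been visited (absorbing)
|
|||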
builder.setTransition(0, 0, ~goalStates); |
|||
builder.setTransition(0, 1, goalStates); |
|||
builder.setTransition(1, 1, storm::storage::BitVector(model.getNumberOfStates(), true)); |
|||
for (auto const& initState : model.getInitialStates()) { |
|||
builder.setInitialMemoryState(initState, goalStates.get(initState) ? 1 : 0); |
|||
} |
|||
return builder.build(); |
|||
} |
|||
|
|||
template <class SparseModelType> |
|||
storm::storage::MemoryStructure getUntilFormulaMemory(SparseModelType const& model, storm::logic::Formula const& leftSubFormula, storm::logic::Formula const& rightSubFormula) { |
|||
auto notLeftOrRight = std::make_shared<storm::logic::BinaryBooleanStateFormula>(storm::logic::BinaryBooleanStateFormula::OperatorType::Or, |
|||
std::make_shared<storm::logic::UnaryBooleanStateFormula>(storm::logic::UnaryBooleanStateFormula::OperatorType::Not, leftSubFormula.asSharedPointer()), |
|||
rightSubFormula.asSharedPointer()); |
|||
return getGoalMemory<SparseModelType>(model, *notLeftOrRight); |
|||
|
|||
} |
|||
|
|||
template <class SparseModelType> |
|||
std::shared_ptr<SparseModelType> SparseMultiObjectiveMemoryIncorporation<SparseModelType>::incorporateGoalMemory(SparseModelType const& model, storm::logic::MultiObjectiveFormula const& formula) { |
|||
storm::storage::MemoryStructure memory = storm::storage::MemoryStructureBuilder<ValueType, RewardModelType>::buildTrivialMemoryStructure(model); |
|||
|
|||
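// Extend the trivial memory structure with one goal memory per supported objective by building the product
|
|||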
for (auto const& subFormula : formula.getSubformulas()) { |
|||
STORM_LOG_THROW(subFormula->isOperatorFormula(), storm::exceptions::NotSupportedException, "The given Formula " << *subFormula << " is not supported."); |
|||
auto const& subsubFormula = subFormula->asOperatorFormula().getSubformula(); |
|||
if (subsubFormula.isEventuallyFormula()) { |
|||
memory = memory.product(getGoalMemory(model, subsubFormula.asEventuallyFormula().getSubformula())); |
|||
} else if (subsubFormula.isUntilFormula()) { |
|||
memory = memory.product(getUntilFormulaMemory(model, subsubFormula.asUntilFormula().getLeftSubformula(), subsubFormula.asUntilFormula().getRightSubformula())); |
|||
} else if (subsubFormula.isBoundedUntilFormula()) { |
|||
// For bounded formulas it is only reasonable to add the goal memory if it considers a single upper step/time bound.
|
|||
auto const& buf = subsubFormula.asBoundedUntilFormula(); |
|||
if (!buf.isMultiDimensional() && !buf.getTimeBoundReference().isRewardBound() && (!buf.hasLowerBound() || (!buf.isLowerBoundStrict() && storm::utility::isZero(buf.template getLowerBound<storm::RationalNumber>())))) { |
|||
memory = memory.product(getUntilFormulaMemory(model, buf.getLeftSubformula(), buf.getRightSubformula())); |
|||
} |
|||
} else if (subsubFormula.isGloballyFormula()) { |
|||
auto notPhi = std::make_shared<storm::logic::UnaryBooleanStateFormula>(storm::logic::UnaryBooleanStateFormula::OperatorType::Not, subsubFormula.asGloballyFormula().getSubformula().asSharedPointer()); |
|||
memory = memory.product(getGoalMemory(model, *notPhi)); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The given formula " << subsubFormula << " is not supported."); |
|||
} |
|||
} |
|||
|
|||
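// Build the product of the model and the collected memory structure; the result is cast back to the original sparse model type
|
|||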
storm::storage::SparseModelMemoryProduct<ValueType> product = memory.product(model); |
|||
return std::dynamic_pointer_cast<SparseModelType>(product.build()); |
|||
} |
|||
|
|||
template <class SparseModelType> |
|||
std::shared_ptr<SparseModelType> SparseMultiObjectiveMemoryIncorporation<SparseModelType>::incorporateFullMemory(SparseModelType const& model, uint64_t memoryStates) { |
|||
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Incorporation of full memory is not implemented."); |
|||
return nullptr; |
|||
} |
|||
|
|||
|
|||
template class SparseMultiObjectiveMemoryIncorporation<storm::models::sparse::Mdp<double>>; |
|||
template class SparseMultiObjectiveMemoryIncorporation<storm::models::sparse::MarkovAutomaton<double>>; |
|||
|
|||
template class SparseMultiObjectiveMemoryIncorporation<storm::models::sparse::Mdp<storm::RationalNumber>>; |
|||
template class SparseMultiObjectiveMemoryIncorporation<storm::models::sparse::MarkovAutomaton<storm::RationalNumber>>; |
|||
|
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
|
@ -0,0 +1,33 @@ |
|||
#pragma once |
|||
|
|||
#include <memory> |
|||
|
|||
#include "storm/storage/memorystructure/MemoryStructure.h" |
|||
#include "storm/logic/MultiObjectiveFormula.h" |
|||
#include "storm/logic/Formula.h" |
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace multiobjective { |
|||
|
|||
namespace preprocessing { |
|||
|
|||
template <class SparseModelType> |
|||
class SparseMultiObjectiveMemoryIncorporation { |
|||
|
|||
typedef typename SparseModelType::ValueType ValueType; |
|||
typedef typename SparseModelType::RewardModelType RewardModelType; |
|||
|
|||
|
|||
|
|||
public: |
|||
static std::shared_ptr<SparseModelType> incorporateGoalMemory(SparseModelType const& model, storm::logic::MultiObjectiveFormula const& formula); |
|||
static std::shared_ptr<SparseModelType> incorporateFullMemory(SparseModelType const& model, uint64_t memoryStates); |
|||
|
|||
}; |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
|
@ -0,0 +1,424 @@ |
|||
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h"
|
|||
|
|||
#include <algorithm>
|
|||
#include <set>
|
|||
|
|||
#include "storm/models/sparse/Mdp.h"
|
|||
#include "storm/models/sparse/MarkovAutomaton.h"
|
|||
#include "storm/models/sparse/StandardRewardModel.h"
|
|||
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveMemoryIncorporation.h"
|
|||
#include "storm/modelchecker/propositional/SparsePropositionalModelChecker.h"
|
|||
#include "storm/modelchecker/prctl/helper/BaierUpperRewardBoundsComputer.h"
|
|||
#include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
|
|||
#include "storm/storage/MaximalEndComponentDecomposition.h"
|
|||
#include "storm/storage/expressions/ExpressionManager.h"
|
|||
#include "storm/transformer/EndComponentEliminator.h"
|
|||
#include "storm/utility/macros.h"
|
|||
#include "storm/utility/vector.h"
|
|||
#include "storm/utility/graph.h"
|
|||
#include "storm/settings/SettingsManager.h"
|
|||
#include "storm/settings/modules/GeneralSettings.h"
|
|||
|
|||
|
|||
#include "storm/exceptions/InvalidPropertyException.h"
|
|||
#include "storm/exceptions/UnexpectedException.h"
|
|||
#include "storm/exceptions/NotImplementedException.h"
|
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace multiobjective { |
|||
namespace preprocessing { |
|||
|
|||
template<typename SparseModelType> |
|||
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType SparseMultiObjectivePreprocessor<SparseModelType>::preprocess(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula) { |
|||
|
|||
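// Incorporate the memory that tracks, for each objective, whether its goal states have already been visited
|
|||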
auto model = SparseMultiObjectiveMemoryIncorporation<SparseModelType>::incorporateGoalMemory(originalModel, originalFormula); |
|||
PreprocessorData data(model); |
|||
|
|||
// Invoke preprocessing on the individual objectives
|
|||
for (auto const& subFormula : originalFormula.getSubformulas()) { |
|||
STORM_LOG_INFO("Preprocessing objective " << *subFormula<< "."); |
|||
data.objectives.push_back(std::make_shared<Objective<ValueType>>()); |
|||
data.objectives.back()->originalFormula = subFormula; |
|||
data.finiteRewardCheckObjectives.resize(data.objectives.size(), false); |
|||
data.upperResultBoundObjectives.resize(data.objectives.size(), false); |
|||
if (data.objectives.back()->originalFormula->isOperatorFormula()) { |
|||
preprocessOperatorFormula(data.objectives.back()->originalFormula->asOperatorFormula(), data); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the subformula " << *subFormula << " of " << originalFormula << " because it is not supported"); |
|||
} |
|||
} |
|||
|
|||
// Remove reward models that are not needed anymore
|
|||
std::set<std::string> relevantRewardModels; |
|||
for (auto const& obj : data.objectives) { |
|||
obj->formula->gatherReferencedRewardModels(relevantRewardModels); |
|||
} |
|||
data.model->restrictRewardModels(relevantRewardModels); |
|||
|
|||
// Build the actual result
|
|||
return buildResult(originalModel, originalFormula, data); |
|||
} |
|||
|
|||
template <typename SparseModelType> |
|||
SparseMultiObjectivePreprocessor<SparseModelType>::PreprocessorData::PreprocessorData(std::shared_ptr<SparseModelType> model) : model(model) { |
|||
|
|||
// The rewardModelNamePrefix should not be a prefix of a reward model name of the given model to ensure uniqueness of new reward model names
|
|||
rewardModelNamePrefix = "obj"; |
|||
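// Prepend underscores until the prefix is no longer a prefix of any existing reward model name
|
|||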
while (true) { |
|||
bool prefixIsUnique = true; |
|||
for (auto const& rewardModels : model->getRewardModels()) { |
|||
if (rewardModelNamePrefix.size() <= rewardModels.first.size()) { |
|||
if (std::mismatch(rewardModelNamePrefix.begin(), rewardModelNamePrefix.end(), rewardModels.first.begin()).first == rewardModelNamePrefix.end()) { |
|||
prefixIsUnique = false; |
|||
rewardModelNamePrefix = "_" + rewardModelNamePrefix; |
|||
break; |
|||
} |
|||
} |
|||
} |
|||
if (prefixIsUnique) { |
|||
break; |
|||
} |
|||
} |
|||
} |
|||
|
|||
|
|||
storm::logic::OperatorInformation getOperatorInformation(storm::logic::OperatorFormula const& formula, bool considerComplementaryEvent) { |
|||
storm::logic::OperatorInformation opInfo; |
|||
if (formula.hasBound()) { |
|||
opInfo.bound = formula.getBound(); |
|||
// Invert the bound (if necessary)
|
|||
if (considerComplementaryEvent) { |
|||
opInfo.bound->threshold = opInfo.bound->threshold.getManager().rational(storm::utility::one<storm::RationalNumber>()) - opInfo.bound->threshold; |
|||
switch (opInfo.bound->comparisonType) { |
|||
case storm::logic::ComparisonType::Greater: |
|||
opInfo.bound->comparisonType = storm::logic::ComparisonType::Less; |
|||
break; |
|||
case storm::logic::ComparisonType::GreaterEqual: |
|||
opInfo.bound->comparisonType = storm::logic::ComparisonType::LessEqual; |
|||
break; |
|||
case storm::logic::ComparisonType::Less: |
|||
opInfo.bound->comparisonType = storm::logic::ComparisonType::Greater; |
|||
break; |
|||
case storm::logic::ComparisonType::LessEqual: |
|||
opInfo.bound->comparisonType = storm::logic::ComparisonType::GreaterEqual; |
|||
break; |
|||
default: |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Current objective " << formula << " has unexpected comparison type"); |
|||
} |
|||
} |
|||
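// A lower-bounded threshold is checked as a maximization objective, an upper-bounded one as a minimization objective
|
|||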
if (storm::logic::isLowerBound(opInfo.bound->comparisonType)) { |
|||
opInfo.optimalityType = storm::solver::OptimizationDirection::Maximize; |
|||
} else { |
|||
opInfo.optimalityType = storm::solver::OptimizationDirection::Minimize; |
|||
} |
|||
STORM_LOG_WARN_COND(!formula.hasOptimalityType(), "Optimization direction of formula " << formula << " ignored as the formula also specifies a threshold."); |
|||
} else if (formula.hasOptimalityType()){ |
|||
opInfo.optimalityType = formula.getOptimalityType(); |
|||
// Invert the optimality type (if necessary)
|
|||
if (considerComplementaryEvent) { |
|||
opInfo.optimalityType = storm::solver::invert(opInfo.optimalityType.get()); |
|||
} |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Objective " << formula << " does not specify whether to minimize or maximize"); |
|||
} |
|||
return opInfo; |
|||
} |
|||
|
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessOperatorFormula(storm::logic::OperatorFormula const& formula, PreprocessorData& data) { |
|||
|
|||
Objective<ValueType>& objective = *data.objectives.back(); |
|||
|
|||
// Check whether the complementary event is considered
|
|||
objective.considersComplementaryEvent = formula.isProbabilityOperatorFormula() && formula.getSubformula().isGloballyFormula(); |
|||
|
|||
// Extract the operator information from the formula and potentially invert it for the complementary event
|
|||
storm::logic::OperatorInformation opInfo = getOperatorInformation(formula, objective.considersComplementaryEvent); |
|||
|
|||
if (formula.isProbabilityOperatorFormula()){ |
|||
preprocessProbabilityOperatorFormula(formula.asProbabilityOperatorFormula(), opInfo, data); |
|||
} else if (formula.isRewardOperatorFormula()){ |
|||
preprocessRewardOperatorFormula(formula.asRewardOperatorFormula(), opInfo, data); |
|||
} else if (formula.isTimeOperatorFormula()){ |
|||
preprocessTimeOperatorFormula(formula.asTimeOperatorFormula(), opInfo, data); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the objective " << formula << " because it is not supported"); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessProbabilityOperatorFormula(storm::logic::ProbabilityOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
|
|||
// Probabilities are between zero and one
|
|||
data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>(); |
|||
data.objectives.back()->upperResultBound = storm::utility::one<ValueType>(); |
|||
|
|||
if (formula.getSubformula().isUntilFormula()){ |
|||
preprocessUntilFormula(formula.getSubformula().asUntilFormula(), opInfo, data); |
|||
} else if (formula.getSubformula().isBoundedUntilFormula()){ |
|||
preprocessBoundedUntilFormula(formula.getSubformula().asBoundedUntilFormula(), opInfo, data); |
|||
} else if (formula.getSubformula().isGloballyFormula()){ |
|||
preprocessGloballyFormula(formula.getSubformula().asGloballyFormula(), opInfo, data); |
|||
} else if (formula.getSubformula().isEventuallyFormula()){ |
|||
preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported."); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessRewardOperatorFormula(storm::logic::RewardOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
|
|||
STORM_LOG_THROW((formula.hasRewardModelName() && data.model->hasRewardModel(formula.getRewardModelName())) |
|||
|| (!formula.hasRewardModelName() && data.model->hasUniqueRewardModel()), storm::exceptions::InvalidPropertyException, "The reward model is not unique or the formula " << formula << " does not specify an existing reward model."); |
|||
|
|||
std::string rewardModelName; |
|||
if (formula.hasRewardModelName()) { |
|||
rewardModelName = formula.getRewardModelName(); |
|||
STORM_LOG_THROW(data.model->hasRewardModel(rewardModelName), storm::exceptions::InvalidPropertyException, "The reward model specified by formula " << formula << " does not exist in the model"); |
|||
} else { |
|||
STORM_LOG_THROW(data.model->hasUniqueRewardModel(), storm::exceptions::InvalidOperationException, "The formula " << formula << " does not specify a reward model name and the reward model is not unique."); |
|||
rewardModelName = data.model->getRewardModels().begin()->first; |
|||
} |
|||
|
|||
data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>(); |
|||
|
|||
if (formula.getSubformula().isEventuallyFormula()){ |
|||
preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data, rewardModelName); |
|||
} else if (formula.getSubformula().isCumulativeRewardFormula()) { |
|||
preprocessCumulativeRewardFormula(formula.getSubformula().asCumulativeRewardFormula(), opInfo, data, rewardModelName); |
|||
} else if (formula.getSubformula().isTotalRewardFormula()) { |
|||
preprocessTotalRewardFormula(opInfo, data, rewardModelName); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported."); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessTimeOperatorFormula(storm::logic::TimeOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
// Time formulas are only supported for Markov automata
|
|||
STORM_LOG_THROW(data.model->isOfType(storm::models::ModelType::MarkovAutomaton), storm::exceptions::InvalidPropertyException, "Time operator formulas are only supported for Markov automata."); |
|||
|
|||
data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>(); |
|||
|
|||
if (formula.getSubformula().isEventuallyFormula()){ |
|||
preprocessEventuallyFormula(formula.getSubformula().asEventuallyFormula(), opInfo, data); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported."); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessUntilFormula(storm::logic::UntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, std::shared_ptr<storm::logic::Formula const> subformula) { |
|||
|
|||
// Try to transform the formula to expected total (or cumulative) rewards
|
|||
|
|||
storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(*data.model); |
|||
storm::storage::BitVector rightSubformulaResult = mc.check(formula.getRightSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
// Check if the formula is already satisfied in the initial state because then the transformation to expected rewards will fail.
|
|||
// TODO: Handle this case more properly
|
|||
STORM_LOG_THROW((data.model->getInitialStates() & rightSubformulaResult).empty(), storm::exceptions::NotImplementedException, "The probability for the objective " << *data.objectives.back()->originalFormula << " is always one, as the right-hand side of the until formula already holds in the initial state. This (trivial) case is currently not implemented."); |
|||
|
|||
// Whenever a state that violates the left subformula or satisfies the right subformula is reached, the objective is 'decided', i.e., no more reward should be collected from there
|
|||
storm::storage::BitVector notLeftOrRight = mc.check(formula.getLeftSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
notLeftOrRight.complement(); |
|||
notLeftOrRight |= rightSubformulaResult; |
|||
|
|||
// Get the states that are reachable from a notLeftOrRight state
|
|||
storm::storage::BitVector allStates(data.model->getNumberOfStates(), true), noStates(data.model->getNumberOfStates(), false); |
|||
storm::storage::BitVector reachableFromGoal = storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), notLeftOrRight, allStates, noStates); |
|||
// Get the states that are reachable from an initial state, stopping at the states reachable from goal
|
|||
storm::storage::BitVector reachableFromInit = storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), data.model->getInitialStates(), ~notLeftOrRight, reachableFromGoal); |
|||
// Exclude the actual notLeftOrRight states from the states that are reachable from init
|
|||
reachableFromInit &= ~notLeftOrRight; |
|||
// If we can reach a state that is reachable from goal, but which is not a goal state, it means that the transformation to expected rewards is not possible.
|
|||
if ((reachableFromInit & reachableFromGoal).empty()) { |
|||
STORM_LOG_INFO("Objective " << data.objectives.back()->originalFormula << " is transformed to an expected total/cumulative reward property."); |
|||
// Transform to expected total rewards:
|
|||
// build stateAction reward vector that gives (one*transitionProbability) reward whenever a transition leads from a reachableFromInit state to a goalState
|
|||
std::vector<typename SparseModelType::ValueType> objectiveRewards(data.model->getTransitionMatrix().getRowCount(), storm::utility::zero<typename SparseModelType::ValueType>()); |
|||
for (auto const& state : reachableFromInit) { |
|||
for (uint_fast64_t row = data.model->getTransitionMatrix().getRowGroupIndices()[state]; row < data.model->getTransitionMatrix().getRowGroupIndices()[state + 1]; ++row) { |
|||
objectiveRewards[row] = data.model->getTransitionMatrix().getConstrainedRowSum(row, rightSubformulaResult); |
|||
} |
|||
} |
|||
std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); |
|||
data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(boost::none, std::move(objectiveRewards))); |
|||
if (subformula == nullptr) { |
|||
subformula = std::make_shared<storm::logic::TotalRewardFormula>(); |
|||
} |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(subformula, rewardModelName, opInfo); |
|||
} else { |
|||
STORM_LOG_INFO("Objective " << data.objectives.back()->originalFormula << " can not be transformed to an expected total/cumulative reward property."); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(formula.asSharedPointer(), opInfo); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessBoundedUntilFormula(storm::logic::BoundedUntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
|
|||
// Check how to handle this query
|
|||
if (formula.isMultiDimensional() || formula.getTimeBoundReference().isRewardBound()) { |
|||
STORM_LOG_INFO("Objective " << data.objectives.back()->originalFormula << " is not transformed to an expected cumulative reward property."); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::ProbabilityOperatorFormula>(formula.asSharedPointer(), opInfo); |
|||
} else if (!formula.hasLowerBound() || (!formula.isLowerBoundStrict() && storm::utility::isZero(formula.template getLowerBound<storm::RationalNumber>()))) { |
|||
std::shared_ptr<storm::logic::Formula const> subformula; |
|||
if (!formula.hasUpperBound()) { |
|||
// The formula is actually unbounded
|
|||
subformula = std::make_shared<storm::logic::TotalRewardFormula>(); |
|||
} else { |
|||
STORM_LOG_THROW(!data.model->isOfType(storm::models::ModelType::MarkovAutomaton) || formula.getTimeBoundReference().isTimeBound(), storm::exceptions::InvalidPropertyException, "Bounded until formulas for Markov Automata are only allowed when time bounds are considered."); |
|||
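// Carry the upper step/time bound over to a cumulative reward formula with the same bound reference
|
|||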
storm::logic::TimeBound bound(formula.isUpperBoundStrict(), formula.getUpperBound()); |
|||
subformula = std::make_shared<storm::logic::CumulativeRewardFormula>(bound, formula.getTimeBoundReference()); |
|||
} |
|||
preprocessUntilFormula(storm::logic::UntilFormula(formula.getLeftSubformula().asSharedPointer(), formula.getRightSubformula().asSharedPointer()), opInfo, data, subformula); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Property " << formula << "is not supported"); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessGloballyFormula(storm::logic::GloballyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) { |
|||
// The formula is transformed to an until formula for the complementary event.
|
|||
auto negatedSubformula = std::make_shared<storm::logic::UnaryBooleanStateFormula>(storm::logic::UnaryBooleanStateFormula::OperatorType::Not, formula.getSubformula().asSharedPointer()); |
|||
|
|||
preprocessUntilFormula(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), negatedSubformula), opInfo, data); |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessEventuallyFormula(storm::logic::EventuallyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName) { |
|||
if (formula.isReachabilityProbabilityFormula()){ |
|||
preprocessUntilFormula(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), formula.getSubformula().asSharedPointer()), opInfo, data); |
|||
return; |
|||
} |
|||
|
|||
// Analyze the subformula
|
|||
storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(*data.model); |
|||
storm::storage::BitVector subFormulaResult = mc.check(formula.getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector(); |
|||
|
|||
|
|||
// Get the states that are reachable from a goal state
|
|||
storm::storage::BitVector allStates(data.model->getNumberOfStates(), true), noStates(data.model->getNumberOfStates(), false); |
|||
storm::storage::BitVector reachableFromGoal = storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), subFormulaResult, allStates, noStates); |
|||
// Get the states that are reachable from an initial state, stopping at the states reachable from goal
|
|||
storm::storage::BitVector reachableFromInit = storm::utility::graph::getReachableStates(data.model->getTransitionMatrix(), data.model->getInitialStates(), allStates, reachableFromGoal); |
|||
// Exclude the actual goal states from the states that are reachable from an initial state
|
|||
reachableFromInit &= ~subFormulaResult; |
|||
// If we can reach a state that is reachable from goal but which is not a goal state, it means that the transformation to expected total rewards is not possible.
|
|||
if ((reachableFromInit & reachableFromGoal).empty()) { |
|||
STORM_LOG_INFO("Objective " << data.objectives.back()->originalFormula << " is transformed to an expected total reward property."); |
|||
// Transform to expected total rewards:
|
|||
|
|||
std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size()); |
|||
auto totalRewardFormula = std::make_shared<storm::logic::TotalRewardFormula>(); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(totalRewardFormula, rewardModelName, opInfo); |
|||
|
|||
if (formula.isReachabilityRewardFormula()) { |
|||
// build stateAction reward vector that only gives reward for states that are reachable from init
|
|||
assert(optionalRewardModelName.is_initialized()); |
|||
typename SparseModelType::RewardModelType objectiveRewards = data.model->getRewardModel(optionalRewardModelName.get()); |
|||
objectiveRewards.reduceToStateBasedRewards(data.model->getTransitionMatrix(), false); |
|||
if (objectiveRewards.hasStateRewards()) { |
|||
storm::utility::vector::setVectorValues(objectiveRewards.getStateRewardVector(), reachableFromGoal, storm::utility::zero<typename SparseModelType::ValueType>()); |
|||
} |
|||
if (objectiveRewards.hasStateActionRewards()) { |
|||
for (auto state : reachableFromGoal) { |
|||
std::fill_n(objectiveRewards.getStateActionRewardVector().begin() + data.model->getTransitionMatrix().getRowGroupIndices()[state], data.model->getTransitionMatrix().getRowGroupSize(state), storm::utility::zero<typename SparseModelType::ValueType>()); |
|||
} |
|||
} |
|||
data.model->addRewardModel(rewardModelName, std::move(objectiveRewards)); |
|||
} else if (formula.isReachabilityTimeFormula() && data.model->isOfType(storm::models::ModelType::MarkovAutomaton)) { |
|||
|
|||
// build stateAction reward vector that only gives reward for relevant states
|
|||
std::vector<typename SparseModelType::ValueType> timeRewards(data.model->getNumberOfStates(), storm::utility::zero<typename SparseModelType::ValueType>()); |
|||
storm::utility::vector::setVectorValues(timeRewards, dynamic_cast<storm::models::sparse::MarkovAutomaton<typename SparseModelType::ValueType> const&>(*data.model).getMarkovianStates() & reachableFromInit, storm::utility::one<typename SparseModelType::ValueType>()); |
|||
data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(timeRewards))); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The formula " << formula << " neither considers reachability probabilities nor reachability rewards " << (data.model->isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "") << ". This is not supported."); |
|||
} |
|||
} else { |
|||
STORM_LOG_INFO("Objective " << data.objectives.back()->originalFormula << " can not be transformed to an expected total/cumulative reward property."); |
|||
if (formula.isReachabilityRewardFormula()) { |
|||
assert(optionalRewardModelName.is_initialized()); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(formula.asSharedPointer(), optionalRewardModelName.get(), opInfo); |
|||
} else if (formula.isReachabilityTimeFormula() && data.model->isOfType(storm::models::ModelType::MarkovAutomaton)) { |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::TimeOperatorFormula>(formula.asSharedPointer(), opInfo); |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The formula " << formula << " neither considers reachability probabilities nor reachability rewards " << (data.model->isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "") << ". This is not supported."); |
|||
} |
|||
} |
|||
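// The resulting expected reward objective additionally requires a check whether the collected reward is finite
|
|||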
data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true); |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName) { |
|||
STORM_LOG_THROW(data.model->isOfType(storm::models::ModelType::Mdp), storm::exceptions::InvalidPropertyException, "Cumulative reward formulas are not supported for the given model type."); |
|||
|
|||
auto cumulativeRewardFormula = std::make_shared<storm::logic::CumulativeRewardFormula>(formula); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(cumulativeRewardFormula, *optionalRewardModelName, opInfo); |
|||
bool onlyRewardBounds = true; |
|||
for (uint64_t i = 0; i < cumulativeRewardFormula->getDimension(); ++i) { |
|||
if (!cumulativeRewardFormula->getTimeBoundReference(i).isRewardBound()) { |
|||
onlyRewardBounds = false; |
|||
break; |
|||
} |
|||
} |
|||
if (onlyRewardBounds) { |
|||
data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true); |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessTotalRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName) { |
|||
|
|||
auto totalRewardFormula = std::make_shared<storm::logic::TotalRewardFormula>(); |
|||
data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(totalRewardFormula, *optionalRewardModelName, opInfo); |
|||
data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true); |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType SparseMultiObjectivePreprocessor<SparseModelType>::buildResult(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, PreprocessorData& data) { |
|||
ReturnType result(originalFormula, originalModel); |
|||
auto backwardTransitions = data.model->getBackwardTransitions(); |
|||
result.preprocessedModel = data.model; |
|||
|
|||
for (auto& obj : data.objectives) { |
|||
result.objectives.push_back(std::move(*obj)); |
|||
} |
|||
result.queryType = getQueryType(result.objectives); |
|||
result.maybeInfiniteRewardObjectives = std::move(data.finiteRewardCheckObjectives); |
|||
|
|||
return result; |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType::QueryType SparseMultiObjectivePreprocessor<SparseModelType>::getQueryType(std::vector<Objective<ValueType>> const& objectives) { |
|||
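// Count the objectives that impose a threshold; the query type is determined by how many objectives remain unconstrained
|
|||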
uint_fast64_t numOfObjectivesWithThreshold = 0; |
|||
for (auto& obj : objectives) { |
|||
if (obj.formula->hasBound()) { |
|||
++numOfObjectivesWithThreshold; |
|||
} |
|||
} |
|||
if (numOfObjectivesWithThreshold == objectives.size()) { |
|||
return ReturnType::QueryType::Achievability; |
|||
} else if (numOfObjectivesWithThreshold + 1 == objectives.size()) { |
|||
// Note: We do not want to consider a Pareto query when the total number of objectives is one.
|
|||
return ReturnType::QueryType::Quantitative; |
|||
} else if (numOfObjectivesWithThreshold == 0) { |
|||
return ReturnType::QueryType::Pareto; |
|||
} else { |
|||
STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Invalid Multi-objective query: The numer of qualitative objectives should be either 0 (Pareto query), 1 (quantitative query), or #objectives (achievability query)."); |
|||
} |
|||
} |
|||
|
|||
template class SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<double>>; |
|||
template class SparseMultiObjectivePreprocessor<storm::models::sparse::MarkovAutomaton<double>>; |
|||
|
|||
template class SparseMultiObjectivePreprocessor<storm::models::sparse::Mdp<storm::RationalNumber>>; |
|||
template class SparseMultiObjectivePreprocessor<storm::models::sparse::MarkovAutomaton<storm::RationalNumber>>; |
|||
} |
|||
} |
|||
} |
|||
} |
@ -0,0 +1,105 @@ |
|||
#pragma once |
|||
|
|||
#include <memory> |
|||
#include <string> |
|||
|
|||
#include "storm/logic/Formulas.h" |
|||
#include "storm/storage/BitVector.h" |
|||
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h" |
|||
#include "storm/storage/memorystructure/MemoryStructure.h" |
|||
|
|||
namespace storm { |
|||
|
|||
namespace modelchecker { |
|||
namespace multiobjective { |
|||
namespace preprocessing { |
|||
|
|||
/* |
|||
* This class invokes the necessary preprocessing for the constraint based multi-objective model checking algorithm |
|||
*/ |
|||
template <class SparseModelType> |
|||
class SparseMultiObjectivePreprocessor { |
|||
public: |
|||
typedef typename SparseModelType::ValueType ValueType; |
|||
typedef typename SparseModelType::RewardModelType RewardModelType; |
|||
typedef SparseMultiObjectivePreprocessorResult<SparseModelType> ReturnType; |
|||
|
|||
/*! |
|||
* Preprocesses the given model w.r.t. the given formulas |
|||
* @param originalModel The considered model |
|||
* @param originalFormula the considered formula. The subformulas should only contain one OperatorFormula at top level. |
|||
*/ |
|||
static ReturnType preprocess(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula); |
|||
|
|||
private: |
|||
|
|||
struct PreprocessorData { |
|||
std::shared_ptr<SparseModelType> model; |
|||
std::vector<std::shared_ptr<Objective<ValueType>>> objectives; |
|||
|
|||
// Indices of the objectives that require a check for finite reward |
|||
storm::storage::BitVector finiteRewardCheckObjectives; |
|||
|
|||
// Indices of the objectives for which we need to compute an upper bound for the result |
|||
storm::storage::BitVector upperResultBoundObjectives; |
|||
|
|||
std::string rewardModelNamePrefix; |
|||
|
|||
PreprocessorData(std::shared_ptr<SparseModelType> model); |
|||
}; |
|||
|
|||
/*! |
|||
* Applies the necessary preprocessing for the given formula. |
|||
* @param formula the current (sub)formula |
|||
* @param opInfo the information of the resulting operator formula |
|||
* @param data the current data. The currently processed objective is located at data.objectives.back() |
|||
* @param optionalRewardModelName the reward model name that is considered for the formula (if available) |
|||
*/ |
|||
static void preprocessOperatorFormula(storm::logic::OperatorFormula const& formula, PreprocessorData& data); |
|||
static void preprocessProbabilityOperatorFormula(storm::logic::ProbabilityOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessRewardOperatorFormula(storm::logic::RewardOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessTimeOperatorFormula(storm::logic::TimeOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessUntilFormula(storm::logic::UntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, std::shared_ptr<storm::logic::Formula const> subformula = nullptr); |
|||
static void preprocessBoundedUntilFormula(storm::logic::BoundedUntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessGloballyFormula(storm::logic::GloballyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data); |
|||
static void preprocessEventuallyFormula(storm::logic::EventuallyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none); |
|||
static void preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none); |
|||
static void preprocessTotalRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none); // The total reward formula itself does not need to be provided as it is unique. |
|||
|
|||
|
|||
/*! |
|||
* Builds the result from preprocessing |
|||
*/ |
|||
static ReturnType buildResult(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, PreprocessorData& data); |
|||
|
|||
/*! |
|||
* Returns the query type |
|||
*/ |
|||
static typename ReturnType::QueryType getQueryType(std::vector<Objective<ValueType>> const& objectives); |
|||
|
|||
|
|||
/*! |
|||
* Computes the set of states that have zero expected reward w.r.t. all expected reward objectives |
|||
*/ |
|||
static void setReward0States(ReturnType& result, storm::storage::SparseMatrix<ValueType> const& backwardTransitions); |
|||
|
|||
|
|||
/*! |
|||
* Checks whether the occurring expected rewards are finite and sets the RewardFinitenessType accordingly |
|||
* Returns the set of states for which a scheduler exists that induces finite reward for all objectives |
|||
*/ |
|||
static void checkRewardFiniteness(ReturnType& result, storm::storage::BitVector const& finiteRewardCheckObjectives, storm::storage::SparseMatrix<ValueType> const& backwardTransitions); |
|||
|
|||
/*! |
|||
* Finds an upper bound for the expected reward of the objective with the given index (assuming it considers an expected reward objective) |
|||
*/ |
|||
static boost::optional<ValueType> computeUpperResultBound(ReturnType const& result, uint64_t objIndex, storm::storage::SparseMatrix<ValueType> const& backwardTransitions); |
|||
|
|||
|
|||
}; |
|||
|
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
@ -0,0 +1,95 @@ |
|||
#pragma once |
|||
|
|||
#include <vector> |
|||
#include <memory> |
|||
#include <boost/optional.hpp> |
|||
|
|||
#include "storm/logic/Formulas.h" |
|||
#include "storm/modelchecker/multiobjective/Objective.h" |
|||
#include "storm/storage/BitVector.h" |
|||
|
|||
#include "storm/exceptions/UnexpectedException.h" |
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace multiobjective { |
|||
namespace preprocessing { |
|||
|
|||
template <class SparseModelType> |
|||
struct SparseMultiObjectivePreprocessorResult { |
|||
|
|||
enum class QueryType { Achievability, Quantitative, Pareto }; |
|||
|
|||
// Original data |
|||
storm::logic::MultiObjectiveFormula const& originalFormula; |
|||
SparseModelType const& originalModel; |
|||
|
|||
// The preprocessed model and objectives |
|||
std::shared_ptr<SparseModelType> preprocessedModel; |
|||
std::vector<Objective<typename SparseModelType::ValueType>> objectives; |
|||
|
|||
// Data about the query |
|||
QueryType queryType; |
|||
|
|||
// Indices of the objectives that can potentially yield infinite reward |
|||
storm::storage::BitVector maybeInfiniteRewardObjectives; |
|||
|
|||
SparseMultiObjectivePreprocessorResult(storm::logic::MultiObjectiveFormula const& originalFormula, SparseModelType const& originalModel) : originalFormula(originalFormula), originalModel(originalModel) { |
|||
// Intentionally left empty |
|||
} |
|||
|
|||
bool containsOnlyTrivialObjectives() const { |
|||
// Trivial objectives are either total reward formulas or single-dimensional step- or time-bounded cumulative reward formulas |
|||
for (auto const& obj : objectives) { |
|||
if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isTotalRewardFormula()) { |
|||
continue; |
|||
} |
|||
if (obj.formula->isRewardOperatorFormula() && obj.formula->getSubformula().isCumulativeRewardFormula()) { |
|||
auto const& subf = obj.formula->getSubformula().asCumulativeRewardFormula(); |
|||
if (!subf.isMultiDimensional() && (subf.getTimeBoundReference().isTimeBound() || subf.getTimeBoundReference().isStepBound())) { |
|||
continue; |
|||
} |
|||
} |
|||
// Reaching this point means that the objective is considered as non-trivial |
|||
return false; |
|||
} |
|||
return true; |
|||
} |
|||
|
|||
void printToStream(std::ostream& out) const { |
|||
out << std::endl; |
|||
out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl; |
|||
out << " Multi-objective Query " << std::endl; |
|||
out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl; |
|||
out << std::endl; |
|||
out << "Original Formula: " << std::endl; |
|||
out << "--------------------------------------------------------------" << std::endl; |
|||
out << "\t" << originalFormula << std::endl; |
|||
out << std::endl; |
|||
out << "Objectives:" << std::endl; |
|||
out << "--------------------------------------------------------------" << std::endl; |
|||
for (auto const& obj : objectives) { |
|||
obj.printToStream(out); |
|||
} |
|||
out << "--------------------------------------------------------------" << std::endl; |
|||
out << std::endl; |
|||
out << "Original Model Information:" << std::endl; |
|||
originalModel.printModelInformationToStream(out); |
|||
out << std::endl; |
|||
out << "Preprocessed Model Information:" << std::endl; |
|||
preprocessedModel->printModelInformationToStream(out); |
|||
out << std::endl; |
|||
out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl; |
|||
} |
|||
|
|||
friend std::ostream& operator<<(std::ostream& out, SparseMultiObjectivePreprocessorResult<SparseModelType> const& ret) { |
|||
ret.printToStream(out); |
|||
return out; |
|||
} |
|||
|
|||
}; |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
@ -0,0 +1,230 @@ |
|||
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveRewardAnalysis.h"
|
|||
|
|||
#include <algorithm>
|
|||
#include <set>
|
|||
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h"
|
|||
|
|||
#include "storm/models/sparse/Mdp.h"
|
|||
#include "storm/models/sparse/MarkovAutomaton.h"
|
|||
#include "storm/models/sparse/StandardRewardModel.h"
|
|||
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectiveMemoryIncorporation.h"
|
|||
#include "storm/modelchecker/propositional/SparsePropositionalModelChecker.h"
|
|||
#include "storm/modelchecker/prctl/helper/BaierUpperRewardBoundsComputer.h"
|
|||
#include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
|
|||
#include "storm/storage/MaximalEndComponentDecomposition.h"
|
|||
#include "storm/storage/expressions/ExpressionManager.h"
|
|||
#include "storm/transformer/EndComponentEliminator.h"
|
|||
#include "storm/utility/macros.h"
|
|||
#include "storm/utility/vector.h"
|
|||
#include "storm/utility/graph.h"
|
|||
#include "storm/settings/SettingsManager.h"
|
|||
#include "storm/settings/modules/GeneralSettings.h"
|
|||
|
|||
|
|||
#include "storm/exceptions/InvalidPropertyException.h"
|
|||
#include "storm/exceptions/UnexpectedException.h"
|
|||
#include "storm/exceptions/NotImplementedException.h"
|
|||
|
|||
namespace storm { |
|||
namespace modelchecker { |
|||
namespace multiobjective { |
|||
namespace preprocessing { |
|||
|
|||
|
|||
template<typename SparseModelType> |
|||
typename SparseMultiObjectiveRewardAnalysis<SparseModelType>::ReturnType SparseMultiObjectiveRewardAnalysis<SparseModelType>::analyze(storm::modelchecker::multiobjective::preprocessing::SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult) { |
|||
ReturnType result; |
|||
auto backwardTransitions = preprocessorResult.preprocessedModel->getBackwardTransitions(); |
|||
|
|||
setReward0States(result, preprocessorResult, backwardTransitions); |
|||
checkRewardFiniteness(result, preprocessorResult, backwardTransitions); |
|||
|
|||
return result; |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectiveRewardAnalysis<SparseModelType>::setReward0States(ReturnType& result, storm::modelchecker::multiobjective::preprocessing::SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) { |
|||
|
|||
uint_fast64_t stateCount = preprocessorResult.preprocessedModel->getNumberOfStates(); |
|||
auto const& transitions = preprocessorResult.preprocessedModel->getTransitionMatrix(); |
|||
std::vector<uint_fast64_t> const& groupIndices = transitions.getRowGroupIndices(); |
|||
storm::storage::BitVector allStates(stateCount, true); |
|||
|
|||
// Get the choices that yield non-zero reward
|
|||
storm::storage::BitVector zeroRewardChoices(preprocessorResult.preprocessedModel->getNumberOfChoices(), true); |
|||
for (auto const& obj : preprocessorResult.objectives) { |
|||
if (obj.formula->isRewardOperatorFormula()) { |
|||
STORM_LOG_WARN_COND(obj.formula->getSubformula().isTotalRewardFormula() || obj.formula->getSubformula().isCumulativeRewardFormula(), "Analyzing reachability reward formulas is not supported properly."); |
|||
auto const& rewModel = preprocessorResult.preprocessedModel->getRewardModel(obj.formula->asRewardOperatorFormula().getRewardModelName()); |
|||
zeroRewardChoices &= rewModel.getChoicesWithZeroReward(transitions); |
|||
} |
|||
} |
|||
|
|||
// Get the states that collect reward for at least one (or for all) of their choices.
|
|||
storm::storage::BitVector statesWithRewardForOneChoice = storm::storage::BitVector(stateCount, false); |
|||
storm::storage::BitVector statesWithRewardForAllChoices = storm::storage::BitVector(stateCount, true); |
|||
for (uint_fast64_t state = 0; state < stateCount; ++state) { |
|||
bool stateHasChoiceWithReward = false; |
|||
bool stateHasChoiceWithoutReward = false; |
|||
uint_fast64_t const& groupEnd = groupIndices[state + 1]; |
|||
for (uint_fast64_t choice = groupIndices[state]; choice < groupEnd; ++choice) { |
|||
if (zeroRewardChoices.get(choice)) { |
|||
stateHasChoiceWithoutReward = true; |
|||
} else { |
|||
stateHasChoiceWithReward = true; |
|||
} |
|||
} |
|||
if (stateHasChoiceWithReward) { |
|||
statesWithRewardForOneChoice.set(state, true); |
|||
} |
|||
if (stateHasChoiceWithoutReward) { |
|||
statesWithRewardForAllChoices.set(state, false); |
|||
} |
|||
} |
|||
|
|||
// get the states for which there is a scheduler yielding reward zero
|
|||
result.reward0EStates = storm::utility::graph::performProbGreater0A(transitions, groupIndices, backwardTransitions, allStates, statesWithRewardForAllChoices, false, 0, zeroRewardChoices); |
|||
result.reward0EStates.complement(); |
|||
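// reward0AStates: states from which no reward can be collected under any scheduler
|
|||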
result.reward0AStates = storm::utility::graph::performProb0A(backwardTransitions, allStates, statesWithRewardForOneChoice); |
|||
assert(result.reward0AStates.isSubsetOf(result.reward0EStates)); |
|||
} |
|||
|
|||
template<typename SparseModelType> |
|||
void SparseMultiObjectiveRewardAnalysis<SparseModelType>::checkRewardFiniteness(ReturnType& result, storm::modelchecker::multiobjective::preprocessing::SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) { |
|||
|
|||
result.rewardFinitenessType = RewardFinitenessType::AllFinite; |
|||
|
|||
auto const& transitions = preprocessorResult.preprocessedModel->getTransitionMatrix(); |
|||
std::vector<uint_fast64_t> const& groupIndices = transitions.getRowGroupIndices(); |
|||
|
|||
storm::storage::BitVector maxRewardsToCheck(preprocessorResult.preprocessedModel->getNumberOfChoices(), true); |
|||
storm::storage::BitVector minRewardsToCheck(preprocessorResult.preprocessedModel->getNumberOfChoices(), true); |
|||
for (auto const& objIndex : preprocessorResult.maybeInfiniteRewardObjectives) { |
|||
STORM_LOG_ASSERT(preprocessorResult.objectives[objIndex].formula->isRewardOperatorFormula(), "Objective needs to be checked for finite reward but has no reward operator."); |
|||
auto const& rewModel = preprocessorResult.preprocessedModel->getRewardModel(preprocessorResult.objectives[objIndex].formula->asRewardOperatorFormula().getRewardModelName()); |
|||
auto unrelevantChoices = rewModel.getChoicesWithZeroReward(transitions); |
|||
// For (upper) reward bounded cumulative reward formulas, we do not need to consider the choices where boundReward is collected.
|
|||
if (preprocessorResult.objectives[objIndex].formula->getSubformula().isCumulativeRewardFormula()) { |
|||
auto const& timeBoundReference = preprocessorResult.objectives[objIndex].formula->getSubformula().asCumulativeRewardFormula().getTimeBoundReference(); |
|||
// Only reward bounded formulas need a finiteness check
|
|||
assert(timeBoundReference.isRewardBound()); |
|||
auto const& rewModelOfBound = preprocessorResult.preprocessedModel->getRewardModel(timeBoundReference.getRewardName()); |
|||
unrelevantChoices |= ~rewModelOfBound.getChoicesWithZeroReward(transitions); |
|||
} |
|||
if (storm::solver::minimize(preprocessorResult.objectives[objIndex].formula->getOptimalityType())) { |
|||
minRewardsToCheck &= unrelevantChoices; |
|||
} else { |
|||
maxRewardsToCheck &= unrelevantChoices; |
|||
} |
|||
} |
|||
maxRewardsToCheck.complement(); |
|||
minRewardsToCheck.complement(); |
|||
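// After complementing, the vectors hold the choices that may still accumulate reward for some maximizing (resp. minimizing) objective
|
|||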
|
|||
// Check reward finiteness under all schedulers
|
|||
storm::storage::BitVector allStates(preprocessorResult.preprocessedModel->getNumberOfStates(), true); |
|||
if (storm::utility::graph::checkIfECWithChoiceExists(transitions, backwardTransitions, allStates, maxRewardsToCheck | minRewardsToCheck)) { |
|||
// Check whether there is a scheduler yielding infinite reward for a maximizing objective
|
|||
if (storm::utility::graph::checkIfECWithChoiceExists(transitions, backwardTransitions, allStates, maxRewardsToCheck)) { |
|||
result.rewardFinitenessType = RewardFinitenessType::Infinite; |
|||
} else { |
|||
// Check whether there is a scheduler under which all rewards are finite.
|
|||
result.rewardLessInfinityEStates = storm::utility::graph::performProb1E(transitions, groupIndices, backwardTransitions, allStates, result.reward0EStates); |
|||
if ((result.rewardLessInfinityEStates.get() & preprocessorResult.preprocessedModel->getInitialStates()).empty()) { |
|||
// There is no scheduler that induces finite reward for the initial state
|
|||
result.rewardFinitenessType = RewardFinitenessType::Infinite; |
|||
} else { |
|||
result.rewardFinitenessType = RewardFinitenessType::ExistsParetoFinite; |
|||
} |
|||
} |
|||
} else { |
|||
result.rewardLessInfinityEStates = allStates; |
|||
} |
|||
} |
|||
|
|||
template<typename SparseModelType>
void SparseMultiObjectiveRewardAnalysis<SparseModelType>::computeUpperResultBound(SparseModelType const& model, storm::modelchecker::multiobjective::Objective<ValueType>& objective, storm::storage::SparseMatrix<ValueType> const& backwardTransitions) {
    STORM_LOG_INFO_COND(!objective.upperResultBound.is_initialized(), "Tried to find an upper result bound for an objective, but a result bound is already there.");

    if (model.isOfType(storm::models::ModelType::Mdp)) {

        auto const& transitions = model.getTransitionMatrix();

        if (objective.formula->isRewardOperatorFormula()) {
            auto const& rewModel = model.getRewardModel(objective.formula->asRewardOperatorFormula().getRewardModelName());
            auto actionRewards = rewModel.getTotalRewardVector(transitions);

            if (objective.formula->getSubformula().isTotalRewardFormula() || objective.formula->getSubformula().isCumulativeRewardFormula()) {
                // We have to eliminate ECs here to treat zero-reward ECs

                storm::storage::BitVector allStates(model.getNumberOfStates(), true);

                // Get the set of states from which a positive reward is reachable under some scheduler
                auto nonZeroRewardStates = rewModel.getStatesWithZeroReward(transitions);
                nonZeroRewardStates.complement();
                auto expRewGreater0EStates = storm::utility::graph::performProbGreater0E(backwardTransitions, allStates, nonZeroRewardStates);

                auto zeroRewardChoices = rewModel.getChoicesWithZeroReward(transitions);

                auto ecElimRes = storm::transformer::EndComponentEliminator<ValueType>::transform(transitions, expRewGreater0EStates, zeroRewardChoices, ~allStates);

                allStates.resize(ecElimRes.matrix.getRowGroupCount());
                storm::storage::BitVector outStates(allStates.size(), false);
                std::vector<ValueType> rew0StateProbs;
                rew0StateProbs.reserve(ecElimRes.matrix.getRowCount());
                for (uint64_t state = 0; state < allStates.size(); ++state) {
                    for (uint64_t choice = ecElimRes.matrix.getRowGroupIndices()[state]; choice < ecElimRes.matrix.getRowGroupIndices()[state + 1]; ++choice) {
                        // Check whether the choice leads to a state with expected reward 0 in the original model
                        bool isOutChoice = false;
                        uint64_t originalModelChoice = ecElimRes.newToOldRowMapping[choice];
                        for (auto const& entry : transitions.getRow(originalModelChoice)) {
                            if (!expRewGreater0EStates.get(entry.getColumn())) {
                                isOutChoice = true;
                                outStates.set(state, true);
                                rew0StateProbs.push_back(storm::utility::one<ValueType>() - ecElimRes.matrix.getRowSum(choice));
                                assert(!storm::utility::isZero(rew0StateProbs.back()));
                                break;
                            }
                        }
                        if (!isOutChoice) {
                            rew0StateProbs.push_back(storm::utility::zero<ValueType>());
                        }
                    }
                }

                // An upper reward bound can only be computed if it is below infinity
                if (storm::utility::graph::performProb1A(ecElimRes.matrix, ecElimRes.matrix.getRowGroupIndices(), ecElimRes.matrix.transpose(true), allStates, outStates).full()) {

                    std::vector<ValueType> rewards;
                    rewards.reserve(ecElimRes.matrix.getRowCount());
                    for (auto row : ecElimRes.newToOldRowMapping) {
                        rewards.push_back(actionRewards[row]);
                    }

                    storm::modelchecker::helper::BaierUpperRewardBoundsComputer<ValueType> baier(ecElimRes.matrix, rewards, rew0StateProbs);
                    if (objective.upperResultBound) {
                        objective.upperResultBound = std::min(objective.upperResultBound.get(), baier.computeUpperBound());
                    } else {
                        objective.upperResultBound = baier.computeUpperBound();
                    }
                }
            }
        }

        if (objective.upperResultBound) {
            STORM_LOG_INFO("Computed upper result bound " << objective.upperResultBound.get() << " for objective " << *objective.formula << ".");
        } else {
            STORM_LOG_WARN("Could not compute upper result bound for objective " << *objective.formula << ".");
        }
    }
}

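For orientation, a hedged sketch of how the function above might be invoked. The mdp and objective objects are assumed to be provided by the surrounding preprocessing code, and sketchComputeUpperBound is a made-up name; this illustrates the call pattern rather than code taken from the patch.

// Illustrative sketch (not part of the patch): obtaining an upper result bound
// for a total-reward objective of an MDP.
void sketchComputeUpperBound(storm::models::sparse::Mdp<double> const& mdp, storm::modelchecker::multiobjective::Objective<double>& objective) {
    // The analysis expects the backward transitions of the state graph.
    storm::storage::SparseMatrix<double> backwardTransitions = mdp.getTransitionMatrix().transpose(true);
    SparseMultiObjectiveRewardAnalysis<storm::models::sparse::Mdp<double>>::computeUpperResultBound(mdp, objective, backwardTransitions);
    if (objective.upperResultBound) {
        // The bound was derived via BaierUpperRewardBoundsComputer on the EC-eliminated model.
        STORM_LOG_INFO("Upper bound: " << objective.upperResultBound.get());
    }
}
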
template class SparseMultiObjectiveRewardAnalysis<storm::models::sparse::Mdp<double>>;
template class SparseMultiObjectiveRewardAnalysis<storm::models::sparse::MarkovAutomaton<double>>;

template class SparseMultiObjectiveRewardAnalysis<storm::models::sparse::Mdp<storm::RationalNumber>>;
template class SparseMultiObjectiveRewardAnalysis<storm::models::sparse::MarkovAutomaton<storm::RationalNumber>>;
}
}
}
}
@ -0,0 +1,73 @@
#pragma once

#include <memory>
#include <string>

#include <boost/optional.hpp>

#include "storm/logic/Formulas.h"
#include "storm/storage/BitVector.h"
#include "storm/storage/SparseMatrix.h"
#include "storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessorResult.h"

namespace storm {
namespace modelchecker {
namespace multiobjective {
namespace preprocessing {

enum class RewardFinitenessType {
    AllFinite,          // The expected reward is finite for all objectives and all schedulers
    ExistsParetoFinite, // There is a Pareto optimal scheduler yielding finite rewards for all objectives
    Infinite            // All Pareto optimal schedulers yield infinite reward for at least one objective
};

/*
 * This class performs some analysis tasks regarding the occurring expected reward objectives.
 */
template <class SparseModelType>
class SparseMultiObjectiveRewardAnalysis {
public:
    typedef typename SparseModelType::ValueType ValueType;
    typedef typename SparseModelType::RewardModelType RewardModelType;

    struct ReturnType {
        RewardFinitenessType rewardFinitenessType;

        // The states of the preprocessed model for which...
        storm::storage::BitVector reward0EStates;                              // ... there is a scheduler such that all expected reward objectives have value zero
        storm::storage::BitVector reward0AStates;                              // ... all schedulers induce value 0 for all expected reward objectives
        boost::optional<storm::storage::BitVector> rewardLessInfinityEStates;  // ... there is a scheduler yielding finite reward for all expected reward objectives
    };

    /*!
     * Analyzes the reward objectives of the multi-objective query.
     * @param preprocessorResult The result from preprocessing. Must only contain expected total reward or cumulative reward objectives.
     */
    static ReturnType analyze(storm::modelchecker::multiobjective::preprocessing::SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult);

    /*!
     * Tries to find an upper bound for the expected reward of the objective (assuming it is an expected total reward objective).
     */
    static void computeUpperResultBound(SparseModelType const& model, storm::modelchecker::multiobjective::Objective<ValueType>& objective, storm::storage::SparseMatrix<ValueType> const& backwardTransitions);

private:
    /*!
     * Computes the set of states that have zero expected reward w.r.t. all expected total/cumulative reward objectives.
     */
    static void setReward0States(ReturnType& result, storm::modelchecker::multiobjective::preprocessing::SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult, storm::storage::SparseMatrix<ValueType> const& backwardTransitions);

    /*!
     * Checks whether the occurring expected rewards are finite and sets the RewardFinitenessType accordingly.
     * Also computes the set of states for which a scheduler exists that induces finite reward for all objectives.
     */
    static void checkRewardFiniteness(ReturnType& result, storm::modelchecker::multiobjective::preprocessing::SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult, storm::storage::SparseMatrix<ValueType> const& backwardTransitions);

};

}
}
}
}
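
A hedged usage sketch of the interface declared above: preprocessorResult is assumed to stem from SparseMultiObjectivePreprocessor::preprocess on an MDP, and sketchHandleRewardAnalysis is a made-up name for illustration; treat this as a sketch of the intended call pattern, not code from the patch.

// Illustrative sketch (not part of the patch): reacting to the reward analysis result.
void sketchHandleRewardAnalysis(storm::modelchecker::multiobjective::preprocessing::SparseMultiObjectivePreprocessorResult<storm::models::sparse::Mdp<double>> const& preprocessorResult) {
    using namespace storm::modelchecker::multiobjective::preprocessing;
    auto rewardAnalysis = SparseMultiObjectiveRewardAnalysis<storm::models::sparse::Mdp<double>>::analyze(preprocessorResult);
    switch (rewardAnalysis.rewardFinitenessType) {
        case RewardFinitenessType::AllFinite:
            // Every scheduler yields finite expected rewards; nothing to restrict.
            break;
        case RewardFinitenessType::ExistsParetoFinite:
            // Restrict further computations to rewardAnalysis.rewardLessInfinityEStates.get().
            break;
        case RewardFinitenessType::Infinite:
            // Every Pareto optimal scheduler yields infinite reward for some objective; report accordingly.
            break;
    }
}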