
Added preprocessing of multi-objective LRA formulas
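
Long-run average (LRA) objectives can now occur in multi-objective queries: LRA operator formulas are reduced to LRA reward formulas over a freshly added reward model that assigns reward one exactly to the states satisfying the subformula, and states that provably yield no LRA value under any scheduler are made absorbing.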

Branch: tempestpy_adaptions
Author: TimQu, 4 years ago
Commit: fa0ad07f8d
  1. src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.cpp (44 lines changed)
  2. src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h (3 lines changed)

src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.cpp (44 lines changed)

@@ -169,6 +169,12 @@ namespace storm {
        auto rewardModel = storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asTotalRewardFormula());
        storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
        absorbingStatesForSubformula = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
    } else if (pathFormula.isLongRunAverageRewardFormula()) {
        auto rewardModel = storm::utility::createFilteredRewardModel(baseRewardModel, model->isDiscreteTimeModel(), pathFormula.asLongRunAverageRewardFormula());
        storm::storage::BitVector statesWithoutReward = rewardModel.get().getStatesWithZeroReward(model->getTransitionMatrix());
        // Compute Sat(Forall F (Forall G "statesWithoutReward"))
        auto forallGloballyStatesWithoutReward = storm::utility::graph::performProb0A(backwardTransitions, statesWithoutReward, ~statesWithoutReward);
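        // The Prob1A step below yields the states from which every scheduler almost surely reaches a state where reward can never be collected again; these states contribute no further LRA reward and can safely be made absorbing.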
        absorbingStatesForSubformula = storm::utility::graph::performProb1A(model->getTransitionMatrix(), model->getNondeterministicChoiceIndices(), backwardTransitions, storm::storage::BitVector(model->getNumberOfStates(), true), forallGloballyStatesWithoutReward);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported.");
    }
@@ -179,6 +185,11 @@
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << pathFormula << " is not supported.");
    }
} else if (opFormula->isLongRunAverageOperatorFormula()) {
    auto lraStates = mc.check(pathFormula)->asExplicitQualitativeCheckResult().getTruthValuesVector();
    // Compute Sat(Forall F (Forall G not "lraStates"))
    auto forallGloballyNotLraStates = storm::utility::graph::performProb0A(backwardTransitions, ~lraStates, lraStates);
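    // States from which every scheduler almost surely ends up outside the LRA states forever have long-run average probability 0, so they can be made absorbing.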
    absorbingStatesForSubformula = storm::utility::graph::performProb1A(model->getTransitionMatrix(), model->getNondeterministicChoiceIndices(), backwardTransitions, storm::storage::BitVector(model->getNumberOfStates(), true), forallGloballyNotLraStates);
} else {
    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the subformula " << *opFormula << " of " << originalFormula << " because it is not supported");
}
@@ -288,6 +299,8 @@
    preprocessRewardOperatorFormula(formula.asRewardOperatorFormula(), opInfo, data);
} else if (formula.isTimeOperatorFormula()) {
    preprocessTimeOperatorFormula(formula.asTimeOperatorFormula(), opInfo, data);
} else if (formula.isLongRunAverageOperatorFormula()) {
    preprocessLongRunAverageOperatorFormula(formula.asLongRunAverageOperatorFormula(), opInfo, data);
} else {
    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the objective " << formula << " because it is not supported");
}
@@ -336,6 +349,8 @@
    preprocessCumulativeRewardFormula(formula.getSubformula().asCumulativeRewardFormula(), opInfo, data, rewardModelName);
} else if (formula.getSubformula().isTotalRewardFormula()) {
    preprocessTotalRewardFormula(opInfo, data, rewardModelName);
} else if (formula.getSubformula().isLongRunAverageRewardFormula()) {
    preprocessLongRunAverageRewardFormula(opInfo, data, rewardModelName);
} else {
    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
}
@@ -354,6 +369,28 @@
        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
    }
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessLongRunAverageOperatorFormula(storm::logic::LongRunAverageOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data) {
    data.objectives.back()->lowerResultBound = storm::utility::zero<ValueType>();
    data.objectives.back()->upperResultBound = storm::utility::one<ValueType>();
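    // A long-run average probability always lies in [0, 1], which gives the trivial result bounds above.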
    // Convert to a long run average reward formula
    // Create and add the new formula
    std::string rewardModelName = data.rewardModelNamePrefix + std::to_string(data.objectives.size());
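    // The objective index makes this reward model name unique among the freshly added reward models.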
    auto lraRewardFormula = std::make_shared<storm::logic::LongRunAverageRewardFormula>();
    data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(lraRewardFormula, rewardModelName, opInfo);
    // Create and add the new reward model that only gives one reward for goal states
    storm::modelchecker::SparsePropositionalModelChecker<SparseModelType> mc(*data.model);
    storm::storage::BitVector subFormulaResult = mc.check(formula.getSubformula())->asExplicitQualitativeCheckResult().getTruthValuesVector();
    std::vector<typename SparseModelType::ValueType> lraRewards(data.model->getNumberOfStates(), storm::utility::zero<typename SparseModelType::ValueType>());
    storm::utility::vector::setVectorValues(lraRewards, subFormulaResult, storm::utility::one<typename SparseModelType::ValueType>());
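    // Register the new reward model with the model so that the rewritten formula can refer to it by name.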
    data.model->addRewardModel(rewardModelName, typename SparseModelType::RewardModelType(std::move(lraRewards)));
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessUntilFormula(storm::logic::UntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, std::shared_ptr<storm::logic::Formula const> subformula) {
@@ -548,6 +585,13 @@
    data.finiteRewardCheckObjectives.set(data.objectives.size() - 1, true);
}

template<typename SparseModelType>
void SparseMultiObjectivePreprocessor<SparseModelType>::preprocessLongRunAverageRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName) {
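    // The long run average reward formula is unique, so it is simply re-created here and attached to the given reward model.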
    auto longRunAverageRewardFormula = std::make_shared<storm::logic::LongRunAverageRewardFormula>();
    data.objectives.back()->formula = std::make_shared<storm::logic::RewardOperatorFormula>(longRunAverageRewardFormula, *optionalRewardModelName, opInfo);
}

template<typename SparseModelType>
typename SparseMultiObjectivePreprocessor<SparseModelType>::ReturnType SparseMultiObjectivePreprocessor<SparseModelType>::buildResult(SparseModelType const& originalModel, storm::logic::MultiObjectiveFormula const& originalFormula, PreprocessorData& data) {
    ReturnType result(originalFormula, originalModel);
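As an illustration of the rewriting (syntax sketched from Storm's property language; the name "obj1" is hypothetical): a long-run average objective such as LRA>=0.7 [ "goal" ] would become the reward objective R{"obj1"}>=0.7 [ LRA ], where the fresh reward model "obj1" assigns reward one exactly to the states satisfying "goal" and zero elsewhere.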

src/storm/modelchecker/multiobjective/preprocessing/SparseMultiObjectivePreprocessor.h (3 lines changed)

@@ -69,13 +69,14 @@ namespace storm {
static void preprocessProbabilityOperatorFormula(storm::logic::ProbabilityOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data);
static void preprocessRewardOperatorFormula(storm::logic::RewardOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data);
static void preprocessTimeOperatorFormula(storm::logic::TimeOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data);
static void preprocessLongRunAverageOperatorFormula(storm::logic::LongRunAverageOperatorFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data);
static void preprocessUntilFormula(storm::logic::UntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, std::shared_ptr<storm::logic::Formula const> subformula = nullptr);
static void preprocessBoundedUntilFormula(storm::logic::BoundedUntilFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data);
static void preprocessGloballyFormula(storm::logic::GloballyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data);
static void preprocessEventuallyFormula(storm::logic::EventuallyFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none);
static void preprocessCumulativeRewardFormula(storm::logic::CumulativeRewardFormula const& formula, storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none);
static void preprocessTotalRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none); // The total reward formula itself does not need to be provided as it is unique.
static void preprocessLongRunAverageRewardFormula(storm::logic::OperatorInformation const& opInfo, PreprocessorData& data, boost::optional<std::string> const& optionalRewardModelName = boost::none); // The long run average reward formula itself does not need to be provided as it is unique.
/*!
 * Builds the result from preprocessing
