@@ -44,6 +44,26 @@ namespace storm {
                 return checkResult->asExplicitQualitativeCheckResult().getTruthValuesVector();
             }
             
+            template <typename ValueType>
+            std::vector<ValueType> getTotalRewardVector(storm::models::sparse::MarkovAutomaton<ValueType> const& model, storm::logic::Formula const& formula) {
+                boost::optional<std::string> rewardModelName = formula.asRewardOperatorFormula().getOptionalRewardModelName();
+                typename storm::models::sparse::MarkovAutomaton<ValueType>::RewardModelType const& rewardModel = rewardModelName.is_initialized() ? model.getRewardModel(rewardModelName.get()) : model.getUniqueRewardModel();
+                
+                // Get a reward model where the state rewards are scaled accordingly
+                std::vector<ValueType> stateRewardWeights(model.getNumberOfStates(), storm::utility::zero<ValueType>());
+                for (auto const markovianState : model.getMarkovianStates()) {
+                    stateRewardWeights[markovianState] = storm::utility::one<ValueType>() / model.getExitRate(markovianState);
+                }
+                return rewardModel.getTotalActionRewardVector(model.getTransitionMatrix(), stateRewardWeights);
+            }
+            
+            template <typename ValueType>
+            std::vector<ValueType> getTotalRewardVector(storm::models::sparse::Mdp<ValueType> const& model, storm::logic::Formula const& formula) {
+                boost::optional<std::string> rewardModelName = formula.asRewardOperatorFormula().getOptionalRewardModelName();
+                typename storm::models::sparse::Mdp<ValueType>::RewardModelType const& rewardModel = rewardModelName.is_initialized() ? model.getRewardModel(rewardModelName.get()) : model.getUniqueRewardModel();
+                return rewardModel.getTotalRewardVector(model.getTransitionMatrix());
+            }
+            
             template <typename ModelType>
             std::map<uint64_t, typename ModelType::ValueType> const& DeterministicSchedsObjectiveHelper<ModelType>::getSchedulerIndependentStateValues() const {
                 if (!schedulerIndependentStateValues) {
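The MarkovAutomaton overload added above folds state rewards into action rewards by weighting each Markovian state's reward with its expected sojourn time, 1 / exitRate. A minimal standalone sketch of that weighting, with plain doubles and illustrative names rather than Storm's classes:

#include <cassert>
#include <cstddef>
#include <vector>

// Sketch: fold per-state reward rates into expected per-visit rewards by
// weighting each Markovian state with its expected sojourn time 1 / exitRate.
// Probabilistic states are left instantaneously, so their weight stays zero.
std::vector<double> scaleStateRewards(std::vector<double> const& stateRewards,
                                      std::vector<double> const& exitRates,
                                      std::vector<bool> const& isMarkovian) {
    std::vector<double> scaled(stateRewards.size(), 0.0);
    for (std::size_t s = 0; s < stateRewards.size(); ++s) {
        if (isMarkovian[s]) {
            scaled[s] = stateRewards[s] / exitRates[s];  // reward rate * expected time
        }
    }
    return scaled;
}

int main() {
    // One Markovian state (exit rate 2.0) and one probabilistic state.
    auto scaled = scaleStateRewards({3.0, 5.0}, {2.0, 0.0}, {true, false});
    assert(scaled[0] == 1.5 && scaled[1] == 0.0);
}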
@@ -104,7 +124,8 @@ namespace storm {
                 } else if (formula.isRewardOperatorFormula() && (subformula.isTotalRewardFormula() || subformula.isEventuallyFormula())) {
                     auto const& baseRewardModel = formula.asRewardOperatorFormula().hasRewardModelName() ? model.getRewardModel(formula.asRewardOperatorFormula().getRewardModelName()) : model.getUniqueRewardModel();
                     auto rewardModel = subformula.isEventuallyFormula() ? storm::utility::createFilteredRewardModel(baseRewardModel, model.isDiscreteTimeModel(), subformula.asEventuallyFormula()) : storm::utility::createFilteredRewardModel(baseRewardModel, model.isDiscreteTimeModel(), subformula.asTotalRewardFormula());
-                    std::vector<ValueType> choiceBasedRewards = rewardModel.get().getTotalRewardVector(model.getTransitionMatrix());
+                    std::vector<ValueType> choiceBasedRewards = getTotalRewardVector(model, *objective.formula);
                     // Set entries for all non-zero reward choices at states whose value is not already known.
+                    // This relies on the fact that for goal states in reachability reward formulas, getSchedulerIndependentStateValues()[state] is set to zero.
                     auto const& rowGroupIndices = model.getTransitionMatrix().getRowGroupIndices();
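The replaced call is the core of the change: the generic rewardModel.get().getTotalRewardVector(...) ignored the sojourn-time scaling needed for Markov automata, whereas the new free function dispatches to the model-specific overload. The added comment guards the correctness of the loop that follows; a condensed sketch of that loop's pattern, with illustrative names and types rather than Storm's API:

#include <cstdint>
#include <map>
#include <vector>

// Sketch: record a value for every non-zero-reward choice whose state has no
// scheduler-independent value yet. Goal states of reachability reward formulas
// are assumed to be present in stateValues with value zero, so their choices
// are skipped.
std::map<uint64_t, double> collectChoiceValues(std::vector<double> const& choiceBasedRewards,
                                               std::vector<uint64_t> const& rowGroupIndices,
                                               std::map<uint64_t, double> const& stateValues) {
    std::map<uint64_t, double> result;
    // rowGroupIndices has one extra entry marking the end of the last group.
    for (uint64_t state = 0; state + 1 < rowGroupIndices.size(); ++state) {
        if (stateValues.count(state) == 0) {
            for (uint64_t choice = rowGroupIndices[state]; choice < rowGroupIndices[state + 1]; ++choice) {
                if (choiceBasedRewards[choice] != 0.0) {
                    result.emplace(choice, choiceBasedRewards[choice]);
                }
            }
        }
    }
    return result;
}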
@@ -128,7 +149,6 @@ namespace storm {
                         rates = &ma->getExitRates();
                         ms = &ma->getMarkovianStates();
                     }
-                    if (model.isOfType(storm::models::ModelType::Mdp)) {
                     // Set all choice offsets to one, except for the ones at states in schedulerIndependentStateValues.
                     for (uint64_t state = 0; state < model.getNumberOfStates(); ++state) {
                         if (stateValues.find(state) == stateValues.end()) {
@@ -146,7 +166,6 @@ namespace storm {
                             }
                         }
                     }
-                    }
                 } else {
                     STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The given formula " << formula << " is not supported.");
                 }
@@ -200,26 +219,6 @@ namespace storm {
                 }
             }
             
-            template <typename ValueType>
-            std::vector<ValueType> getTotalRewardVector(storm::models::sparse::MarkovAutomaton<ValueType> const& model, storm::logic::Formula const& formula) {
-                boost::optional<std::string> rewardModelName = formula.asRewardOperatorFormula().getOptionalRewardModelName();
-                typename storm::models::sparse::MarkovAutomaton<ValueType>::RewardModelType const& rewardModel = rewardModelName.is_initialized() ? model.getRewardModel(rewardModelName.get()) : model.getUniqueRewardModel();
-                
-                // Get a reward model where the state rewards are scaled accordingly
-                std::vector<ValueType> stateRewardWeights(model.getNumberOfStates(), storm::utility::zero<ValueType>());
-                for (auto const markovianState : model.getMarkovianStates()) {
-                    stateRewardWeights[markovianState] = storm::utility::one<ValueType>() / model.getExitRate(markovianState);
-                }
-                return rewardModel.getTotalActionRewardVector(model.getTransitionMatrix(), stateRewardWeights);
-            }
-            
-            template <typename ValueType>
-            std::vector<ValueType> getTotalRewardVector(storm::models::sparse::Mdp<ValueType> const& model, storm::logic::Formula const& formula) {
-                boost::optional<std::string> rewardModelName = formula.asRewardOperatorFormula().getOptionalRewardModelName();
-                typename storm::models::sparse::Mdp<ValueType>::RewardModelType const& rewardModel = rewardModelName.is_initialized() ? model.getRewardModel(rewardModelName.get()) : model.getUniqueRewardModel();
-                return rewardModel.getTotalRewardVector(model.getTransitionMatrix());
-            }
-            
             template <typename ModelType>
             typename ModelType::ValueType const& DeterministicSchedsObjectiveHelper<ModelType>::getUpperValueBoundAtState(Environment const& env, uint64_t state) const {
                 computeUpperBounds(env);
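This last hunk only deletes the two overloads from their old position further down the file; together with the first hunk it is a pure move. The move appears necessary for name lookup: the dependent call getTotalRewardVector(model, *objective.formula) inside the member template is resolved by ordinary unqualified lookup at the template's definition point, while argument-dependent lookup only searches the namespaces of the argument types (storm::models::sparse), so the overloads must be declared before getSchedulerIndependentStateValues. A toy illustration of that lookup rule, with made-up namespaces rather than Storm's:

// Minimal illustration of why the helpers must precede the template that
// calls them.
namespace models { struct Mdp {}; }

namespace checker {
    // Must be declared before use(): for the dependent call below, ordinary
    // unqualified lookup happens at the definition point of the template,
    // and ADL only searches the namespaces of the argument types (models).
    int value(models::Mdp const&) { return 1; }

    template <typename ModelType>
    int use(ModelType const& m) {
        return value(m);  // found via definition-point lookup, not via ADL
    }
}

int main() {
    models::Mdp mdp;
    return checker::use(mdp) == 1 ? 0 : 1;  // returns 0 on success
}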
|