@@ -470,7 +470,7 @@ namespace storm {
 };
 
 template <storm::dd::DdType Type, typename ValueType>
-DdPrismModelBuilder<Type, ValueType>::Options::Options() : buildAllRewardModels(true), rewardModelsToBuild(), buildAllLabels(true), labelsToBuild(), terminalStates(), negatedTerminalStates() {
+DdPrismModelBuilder<Type, ValueType>::Options::Options() : buildAllRewardModels(false), rewardModelsToBuild(), buildAllLabels(false), labelsToBuild(), terminalStates(), negatedTerminalStates() {
     // Intentionally left empty.
 }
 
@@ -482,10 +482,6 @@ namespace storm {
 
 template <storm::dd::DdType Type, typename ValueType>
 DdPrismModelBuilder<Type, ValueType>::Options::Options(std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas) : buildAllRewardModels(false), rewardModelsToBuild(), buildAllLabels(false), labelsToBuild(), terminalStates(), negatedTerminalStates() {
     if (formulas.empty()) {
         this->buildAllRewardModels = true;
         this->buildAllLabels = true;
     } else {
         for (auto const& formula : formulas) {
             this->preserveFormula(*formula);
         }
@@ -493,7 +489,6 @@ namespace storm {
             this->setTerminalStatesFromFormula(*formulas.front());
         }
     }
 }
 
 template <storm::dd::DdType Type, typename ValueType>
 void DdPrismModelBuilder<Type, ValueType>::Options::preserveFormula(storm::logic::Formula const& formula) {
@@ -553,13 +548,13 @@ namespace storm {
 
 template <storm::dd::DdType Type, typename ValueType>
 struct DdPrismModelBuilder<Type, ValueType>::SystemResult {
-    SystemResult(storm::dd::Add<Type, ValueType> const& allTransitionsDd, DdPrismModelBuilder<Type, ValueType>::ModuleDecisionDiagram const& globalModule, storm::dd::Add<Type, ValueType> const& stateActionDd) : allTransitionsDd(allTransitionsDd), globalModule(globalModule), stateActionDd(stateActionDd) {
+    SystemResult(storm::dd::Add<Type, ValueType> const& allTransitionsDd, DdPrismModelBuilder<Type, ValueType>::ModuleDecisionDiagram const& globalModule, boost::optional<storm::dd::Add<Type, ValueType>> const& stateActionDd) : allTransitionsDd(allTransitionsDd), globalModule(globalModule), stateActionDd(stateActionDd) {
         // Intentionally left empty.
     }
 
     storm::dd::Add<Type, ValueType> allTransitionsDd;
     typename DdPrismModelBuilder<Type, ValueType>::ModuleDecisionDiagram globalModule;
-    storm::dd::Add<Type, ValueType> stateActionDd;
+    boost::optional<storm::dd::Add<Type, ValueType>> stateActionDd;
 };
 
 template <storm::dd::DdType Type, typename ValueType>
@@ -1089,11 +1084,12 @@ namespace storm {
     storm::dd::Add<Type, ValueType> result = createSystemFromModule(generationInfo, system);
 
     // Create an auxiliary DD that is used later during the construction of reward models.
-    storm::dd::Add<Type, ValueType> stateActionDd = result.sumAbstract(generationInfo.columnMetaVariables);
+    boost::optional<storm::dd::Add<Type, ValueType>> stateActionDd;
 
     // For DTMCs, we normalize each row to 1 (to account for non-determinism).
     if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::DTMC) {
-        result = result / stateActionDd;
+        stateActionDd = result.sumAbstract(generationInfo.columnMetaVariables);
+        result = result / stateActionDd.get();
     } else if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::MDP) {
         // For MDPs, we need to throw away the nondeterminism variables from the generation information that
         // were never used.
@@ -1107,7 +1103,7 @@ namespace storm {
 }
 
 template <storm::dd::DdType Type, typename ValueType>
-storm::models::symbolic::StandardRewardModel<Type, ValueType> DdPrismModelBuilder<Type, ValueType>::createRewardModelDecisionDiagrams(GenerationInformation& generationInfo, storm::prism::RewardModel const& rewardModel, ModuleDecisionDiagram const& globalModule, storm::dd::Add<Type, ValueType> const& reachableStatesAdd, storm::dd::Add<Type, ValueType> const& stateActionDd) {
+storm::models::symbolic::StandardRewardModel<Type, ValueType> DdPrismModelBuilder<Type, ValueType>::createRewardModelDecisionDiagrams(GenerationInformation& generationInfo, storm::prism::RewardModel const& rewardModel, ModuleDecisionDiagram const& globalModule, storm::dd::Add<Type, ValueType> const& reachableStatesAdd, storm::dd::Add<Type, ValueType> const& transitionMatrix, boost::optional<storm::dd::Add<Type, ValueType>> stateActionDd) {
 
     // Start by creating the state reward vector.
     boost::optional<storm::dd::Add<Type, ValueType>> stateRewards;
@@ -1145,13 +1141,15 @@ namespace storm {
     }
     ActionDecisionDiagram const& actionDd = stateActionReward.isLabeled() ? globalModule.synchronizingActionToDecisionDiagramMap.at(stateActionReward.getActionIndex()) : globalModule.independentAction;
     states *= actionDd.guardDd * reachableStatesAdd;
-    storm::dd::Add<Type, ValueType> stateActionRewardDd = synchronization * states * rewards;
+    storm::dd::Add<Type, ValueType> stateActionRewardDd = synchronization * (reachableStatesAdd * states * rewards);
 
     // If we are building the state-action rewards for an MDP, we need to make sure that the encoding
     // of the nondeterminism is present in the reward vector, so we need to multiply it with the
     // legal state-actions.
     if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::MDP) {
-        stateActionRewardDd *= stateActionDd;
+        // FIXME: get synchronization encoding differently.
+        // stateActionRewardDd *= stateActionDd;
+        stateActionRewardDd *= transitionMatrix.notZero().existsAbstract(generationInfo.columnMetaVariables).template toAdd<ValueType>();
     } else if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::CTMC) {
         // For CTMCs, we need to multiply the entries with the exit rate of the corresponding action.
         stateActionRewardDd *= actionDd.transitionsDd.sumAbstract(generationInfo.columnMetaVariables);
@@ -1167,7 +1165,7 @@ namespace storm {
 
     // Scale state-action rewards for DTMCs and CTMCs.
     if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::DTMC || generationInfo.program.getModelType() == storm::prism::Program::ModelType::CTMC) {
-        stateActionRewards.get() /= stateActionDd;
+        // stateActionRewards.get() /= stateActionDd;
     }
 }
 
@@ -1215,7 +1213,7 @@ namespace storm {
 
     // Scale transition rewards for DTMCs.
     if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::DTMC) {
-        transitionRewards.get() /= stateActionDd;
+        transitionRewards.get() /= stateActionDd.get();
     }
 }
 
@@ -1240,7 +1238,7 @@ namespace storm {
         STORM_LOG_THROW(false, storm::exceptions::InvalidArgumentException, "Program still contains these undefined constants: " + stream.str());
     }
 
-    STORM_LOG_DEBUG("Building representation of program:" << std::endl << program << std::endl);
+    STORM_LOG_TRACE("Building representation of program:" << std::endl << program << std::endl);
 
     // Start by initializing the structure used for storing all information needed during the model generation.
     // In particular, this creates the meta variables used to encode the model.
@@ -1250,7 +1248,6 @@ namespace storm {
     storm::dd::Add<Type, ValueType> transitionMatrix = system.allTransitionsDd;
 
     ModuleDecisionDiagram const& globalModule = system.globalModule;
-    storm::dd::Add<Type, ValueType> stateActionDd = system.stateActionDd;
 
     // If we were asked to treat some states as terminal states, we cut away their transitions now.
     storm::dd::Bdd<Type> terminalStatesBdd = generationInfo.manager->getBddZero();
@@ -1314,7 +1311,9 @@ namespace storm {
     storm::dd::Bdd<Type> reachableStates = storm::utility::dd::computeReachableStates<Type>(initialStates, transitionMatrixBdd, generationInfo.rowMetaVariables, generationInfo.columnMetaVariables);
     storm::dd::Add<Type, ValueType> reachableStatesAdd = reachableStates.template toAdd<ValueType>();
     transitionMatrix *= reachableStatesAdd;
-    stateActionDd *= reachableStatesAdd;
+    if (system.stateActionDd) {
+        system.stateActionDd.get() *= reachableStatesAdd;
+    }
 
     // Detect deadlocks and 1) fix them if requested 2) throw an error otherwise.
     storm::dd::Bdd<Type> statesWithTransition = transitionMatrixBdd.existsAbstract(generationInfo.columnMetaVariables);
@@ -1384,7 +1383,7 @@ namespace storm {
 
     std::unordered_map<std::string, storm::models::symbolic::StandardRewardModel<Type, ValueType>> rewardModels;
     for (auto const& rewardModel : selectedRewardModels) {
-        rewardModels.emplace(rewardModel.get().getName(), createRewardModelDecisionDiagrams(generationInfo, rewardModel.get(), globalModule, reachableStatesAdd, stateActionDd));
+        rewardModels.emplace(rewardModel.get().getName(), createRewardModelDecisionDiagrams(generationInfo, rewardModel.get(), globalModule, reachableStatesAdd, transitionMatrix, system.stateActionDd));
     }
 
     // Build the labels that can be accessed as a shortcut.