
applying the same performance improvements for explicit JANI model building

tempestpy_adaptions
dehnert, 7 years ago
commit e8dc6ee05d
1 changed file: src/storm/generator/JaniNextStateGenerator.cpp (41 changed lines)
@@ -6,7 +6,6 @@
 #include "storm/solver/SmtSolver.h"
 #include "storm/storage/jani/Edge.h"
 #include "storm/storage/jani/EdgeDestination.h"
 #include "storm/storage/jani/Model.h"
@@ -16,6 +15,8 @@
 #include "storm/storage/jani/ParallelComposition.h"
 #include "storm/storage/jani/CompositionInformationVisitor.h"
+#include "storm/builder/jit/Distribution.h"
 #include "storm/utility/constants.h"
 #include "storm/utility/macros.h"
 #include "storm/utility/solver.h"
@@ -414,36 +415,32 @@ namespace storm {
                 iteratorList[i] = edgeCombination[i].second.cbegin();
             }
+            storm::builder::jit::Distribution<CompressedState, ValueType> currentDistribution;
+            storm::builder::jit::Distribution<CompressedState, ValueType> nextDistribution;
             // As long as there is one feasible combination of commands, keep on expanding it.
             bool done = false;
             while (!done) {
-                boost::container::flat_map<CompressedState, ValueType>* currentTargetStates = new boost::container::flat_map<CompressedState, ValueType>();
-                boost::container::flat_map<CompressedState, ValueType>* newTargetStates = new boost::container::flat_map<CompressedState, ValueType>();
                 std::vector<ValueType> stateActionRewards(rewardVariables.size(), storm::utility::zero<ValueType>());
-                currentTargetStates->emplace(state, storm::utility::one<ValueType>());
+                currentDistribution.add(state, storm::utility::one<ValueType>());
                 for (uint_fast64_t i = 0; i < iteratorList.size(); ++i) {
                     storm::jani::Edge const& edge = **iteratorList[i];
                     for (auto const& destination : edge.getDestinations()) {
-                        for (auto const& stateProbabilityPair : *currentTargetStates) {
+                        for (auto const& stateProbability : currentDistribution) {
                             // Compute the new state under the current update and add it to the set of new target states.
-                            CompressedState newTargetState = applyUpdate(stateProbabilityPair.first, destination, this->variableInformation.locationVariables[edgeCombination[i].first]);
+                            CompressedState newTargetState = applyUpdate(stateProbability.getState(), destination, this->variableInformation.locationVariables[edgeCombination[i].first]);
                             // If the new state was already found as a successor state, update the probability
                             // and otherwise insert it.
-                            ValueType probability = stateProbabilityPair.second * this->evaluator->asRational(destination.getProbability());
+                            ValueType probability = stateProbability.getValue() * this->evaluator->asRational(destination.getProbability());
                             if (edge.hasRate()) {
                                 probability *= this->evaluator->asRational(edge.getRate());
                             }
                             if (probability != storm::utility::zero<ValueType>()) {
-                                auto targetStateIt = newTargetStates->find(newTargetState);
-                                if (targetStateIt != newTargetStates->end()) {
-                                    targetStateIt->second += probability;
-                                } else {
-                                    newTargetStates->emplace(newTargetState, probability);
-                                }
+                                nextDistribution.add(newTargetState, probability);
                             }
                         }
@@ -452,11 +449,11 @@ namespace storm {
                         performTransientAssignments(edge.getAssignments().getTransientAssignments(), [&valueIt] (ValueType const& value) { *valueIt += value; ++valueIt; } );
                     }
+                    nextDistribution.compress();
                     // If there is one more command to come, shift the target states one time step back.
                     if (i < iteratorList.size() - 1) {
-                        delete currentTargetStates;
-                        currentTargetStates = newTargetStates;
-                        newTargetStates = new boost::container::flat_map<CompressedState, ValueType>();
+                        currentDistribution = std::move(nextDistribution);
                     }
                 }
@@ -473,12 +470,12 @@ namespace storm {
                 // Add the probabilities/rates to the newly created choice.
                 ValueType probabilitySum = storm::utility::zero<ValueType>();
-                for (auto const& stateProbabilityPair : *newTargetStates) {
-                    StateType actualIndex = stateToIdCallback(stateProbabilityPair.first);
-                    choice.addProbability(actualIndex, stateProbabilityPair.second);
+                for (auto const& stateProbability : nextDistribution) {
+                    StateType actualIndex = stateToIdCallback(stateProbability.getState());
+                    choice.addProbability(actualIndex, stateProbability.getValue());
                     if (this->options.isExplorationChecksSet()) {
-                        probabilitySum += stateProbabilityPair.second;
+                        probabilitySum += stateProbability.getValue();
                     }
                 }
@@ -487,10 +484,6 @@ namespace storm {
                     STORM_LOG_THROW(!this->isDiscreteTimeModel() || !this->comparator.isConstant(probabilitySum) || this->comparator.isOne(probabilitySum), storm::exceptions::WrongFormatException, "Sum of update probabilities do not sum to one for some command (actually sum to " << probabilitySum << ").");
                 }
-                // Dispose of the temporary maps.
-                delete currentTargetStates;
-                delete newTargetStates;
                 // Now, check whether there is one more command combination to consider.
                 bool movedIterator = false;
                 for (uint64_t j = 0; !movedIterator && j < iteratorList.size(); ++j) {
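
The change swaps the heap-allocated boost::container::flat_map successor maps for the storm::builder::jit::Distribution container already used by the JIT builder: successors are appended with add() and duplicates are merged in a single compress() pass per edge, instead of a map lookup per destination. The header storm/builder/jit/Distribution.h is not part of this commit, so the following is only a minimal sketch of the interface the diff appears to rely on (add(), compress(), and iteration over entries exposing getState()/getValue()); the actual class may differ in details.

// Hypothetical sketch of the Distribution interface assumed by the diff above;
// not the real storm::builder::jit::Distribution from storm/builder/jit/Distribution.h.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

template<typename StateType, typename ValueType>
class DistributionEntry {
public:
    DistributionEntry(StateType state, ValueType value) : state(state), value(value) {}
    StateType const& getState() const { return state; }
    ValueType const& getValue() const { return value; }
    void addToValue(ValueType const& value) { this->value += value; }
private:
    StateType state;
    ValueType value;
};

template<typename StateType, typename ValueType>
class Distribution {
public:
    typedef std::vector<DistributionEntry<StateType, ValueType>> ContainerType;

    // Appends an entry without checking for duplicate states; duplicates are merged by compress().
    void add(StateType const& state, ValueType const& value) {
        storage.emplace_back(state, value);
        compressed = storage.size() <= 1;
    }

    // Sorts the entries by state and merges entries with equal states by summing their values.
    void compress() {
        if (compressed) {
            return;
        }
        std::sort(storage.begin(), storage.end(), [] (auto const& a, auto const& b) { return a.getState() < b.getState(); });
        ContainerType merged;
        for (auto const& entry : storage) {
            if (!merged.empty() && merged.back().getState() == entry.getState()) {
                merged.back().addToValue(entry.getValue());
            } else {
                merged.push_back(entry);
            }
        }
        storage = std::move(merged);
        compressed = true;
    }

    typename ContainerType::const_iterator begin() const { return storage.begin(); }
    typename ContainerType::const_iterator end() const { return storage.end(); }

private:
    ContainerType storage;
    bool compressed = true;
};

int main() {
    // Mirrors the usage pattern in the diff: add successors, compress, then iterate.
    Distribution<uint64_t, double> nextDistribution;
    nextDistribution.add(3, 0.25);
    nextDistribution.add(1, 0.5);
    nextDistribution.add(3, 0.25);
    nextDistribution.compress();
    for (auto const& stateProbability : nextDistribution) {
        std::cout << stateProbability.getState() << " -> " << stateProbability.getValue() << "\n";
    }
    return 0;
}

Compared with the old flat_map find-or-emplace per destination, this avoids the manual new/delete bookkeeping that the removed "Dispose of the temporary maps" block required and defers duplicate merging to one sort-and-merge pass per edge.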
