Browse Source

use cached memory for the epoch results

tempestpy_adaptions
TimQu 7 years ago
parent
commit
cf91a34970
  1. 22
      src/storm/modelchecker/multiobjective/pcaa/SparseMdpRewardBoundedPcaaWeightVectorChecker.cpp
  2. 12
      src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.cpp
  3. 8
      src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.h

22
src/storm/modelchecker/multiobjective/pcaa/SparseMdpRewardBoundedPcaaWeightVectorChecker.cpp

@@ -46,10 +46,11 @@ namespace storm {
template <class SparseMdpModelType>
void SparseMdpRewardBoundedPcaaWeightVectorChecker<SparseMdpModelType>::computeEpochSolution(typename MultiDimensionalRewardUnfolding<ValueType, false>::Epoch const& epoch, std::vector<ValueType> const& weightVector) {
auto const& epochModel = rewardUnfolding.setCurrentEpoch(epoch);
auto& epochModel = rewardUnfolding.setCurrentEpoch(epoch);
swEqBuilding.start();
std::vector<typename MultiDimensionalRewardUnfolding<ValueType, false>::SolutionType> result(epochModel.inStates.getNumberOfSetBits());
uint64_t stateSolutionSize = this->objectives.size() + 1;
auto& result = epochModel.inStateSolutions;
result.resize(epochModel.epochInStates.getNumberOfSetBits(), typename MultiDimensionalRewardUnfolding<ValueType, false>::SolutionType(stateSolutionSize));
// Formulate a min-max equation system max(A*x+b)=x for the weighted sum of the objectives
std::vector<ValueType> b(epochModel.epochMatrix.getRowCount(), storm::utility::zero<ValueType>());
@@ -81,10 +82,8 @@ namespace storm {
swMinMaxSolving.stop();
swEqBuilding.start();
auto resultIt = result.begin();
uint64_t solSize = this->objectives.size() + 1;
for (auto const& state : epochModel.inStates) {
resultIt->reserve(solSize);
resultIt->push_back(x[state]);
for (auto const& state : epochModel.epochInStates) {
resultIt->front() = x[state];
++resultIt;
}
@@ -115,14 +114,15 @@ namespace storm {
linEqSolver->solveEquations(x, b);
swLinEqSolving.stop();
swEqBuilding.start();
auto resultIt = result.begin();
for (auto const& state : epochModel.inStates) {
resultIt->push_back(x[state]);
resultIt = result.begin();
for (auto const& state : epochModel.epochInStates) {
(*resultIt)[objIndex + 1] = x[state];
++resultIt;
}
}
swEqBuilding.stop();
rewardUnfolding.setSolutionForCurrentEpoch(result);
rewardUnfolding.setSolutionForCurrentEpoch();
}
template <class SparseMdpModelType>

12
src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.cpp

@@ -182,7 +182,7 @@ namespace storm {
}
template<typename ValueType, bool SingleObjectiveMode>
typename MultiDimensionalRewardUnfolding<ValueType, SingleObjectiveMode>::EpochModel const& MultiDimensionalRewardUnfolding<ValueType, SingleObjectiveMode>::setCurrentEpoch(Epoch const& epoch) {
typename MultiDimensionalRewardUnfolding<ValueType, SingleObjectiveMode>::EpochModel& MultiDimensionalRewardUnfolding<ValueType, SingleObjectiveMode>::setCurrentEpoch(Epoch const& epoch) {
// Check if we need to update the current epoch class
if (!currentEpoch || getClassOfEpoch(epoch) != getClassOfEpoch(currentEpoch.get())) {
setCurrentEpochClass(epoch);
@@ -334,9 +334,9 @@ namespace storm {
epochModel.objectiveRewards.push_back(std::move(reducedModelObjRewards));
}
epochModel.inStates = storm::storage::BitVector(epochModel.epochMatrix.getRowGroupCount(), false);
epochModel.epochInStates = storm::storage::BitVector(epochModel.epochMatrix.getRowGroupCount(), false);
for (auto const& productState : productInStates) {
epochModel.inStates.set(ecElimResult.oldToNewStateMapping[productState], true);
epochModel.epochInStates.set(ecElimResult.oldToNewStateMapping[productState], true);
}
swSetEpochClass.stop();
@@ -443,12 +443,12 @@ namespace storm {
}
template<typename ValueType, bool SingleObjectiveMode>
void MultiDimensionalRewardUnfolding<ValueType, SingleObjectiveMode>::setSolutionForCurrentEpoch(std::vector<SolutionType> const& inStateSolutions) {
void MultiDimensionalRewardUnfolding<ValueType, SingleObjectiveMode>::setSolutionForCurrentEpoch() {
swInsertSol.start();
for (uint64_t productState = 0; productState < memoryProduct.getProduct().getNumberOfStates(); ++productState) {
uint64_t reducedModelState = ecElimResult.oldToNewStateMapping[productState];
if (reducedModelState < epochModel.epochMatrix.getRowGroupCount() && epochModel.inStates.get(reducedModelState)) {
setSolutionForCurrentEpoch(productState, inStateSolutions[epochModel.inStates.getNumberOfSetBitsBeforeIndex(reducedModelState)]);
if (reducedModelState < epochModel.epochMatrix.getRowGroupCount() && epochModel.epochInStates.get(reducedModelState)) {
setSolutionForCurrentEpoch(productState, epochModel.inStateSolutions[epochModel.epochInStates.getNumberOfSetBitsBeforeIndex(reducedModelState)]);
}
}
swInsertSol.stop();

8
src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.h

@@ -33,7 +33,9 @@ namespace storm {
std::vector<SolutionType> stepSolutions;
std::vector<std::vector<ValueType>> objectiveRewards;
std::vector<storm::storage::BitVector> objectiveRewardFilter;
storm::storage::BitVector inStates;
storm::storage::BitVector epochInStates;
std::vector<SolutionType> inStateSolutions;
};
/*
@@ -75,9 +77,9 @@ namespace storm {
Epoch getStartEpoch();
std::vector<Epoch> getEpochComputationOrder(Epoch const& startEpoch);
EpochModel const& setCurrentEpoch(Epoch const& epoch);
EpochModel& setCurrentEpoch(Epoch const& epoch);
void setSolutionForCurrentEpoch(std::vector<SolutionType> const& inStateSolutions);
void setSolutionForCurrentEpoch();
SolutionType const& getInitialStateResult(Epoch const& epoch);

Loading…
Cancel
Save