
LraViHelper now also computes all choiceValues

Branch: tempestpy_adaptions
Author: Stefan Pranger, 3 years ago
Commit: 01335948d4
Changed files:
  1. src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp (2 lines changed)
  2. src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp (38 lines changed)
  3. src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h (6 lines changed)

src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp (2 lines changed)

@@ -183,7 +183,7 @@ namespace storm {
             } else {
                 // We assume an MDP (with nondeterministic timed states and no instant states)
                 storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor);
-                return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
+                return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices, choiceValues);
             }
         }
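
The call site above only changes by threading a new optional out-parameter through. A minimal standalone sketch of that pattern (plain C++; `iterate` and its values are made up for illustration, not storm API):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // The callee fills choiceValues only if the caller passed a non-null pointer,
    // so existing call sites without the extra argument keep working unchanged.
    double iterate(std::vector<std::uint64_t>* choices = nullptr,
                   std::vector<double>* choiceValues = nullptr) {
        if (choices) { *choices = {1, 0}; }            // one choice index per state
        if (choiceValues) { *choiceValues = {0.3, 0.7, 0.5}; } // one value per choice (row)
        return 0.7; // stand-in for the long-run average value
    }

    int main() {
        std::vector<std::uint64_t> optimalChoices;
        std::vector<double> choiceValues;
        double lra = iterate(&optimalChoices, &choiceValues); // new call shape, cf. the hunk above
        std::cout << lra << ' ' << choiceValues.size() << '\n';
        return 0;
    }

Because the new parameter defaults to nullptr in the declarations below, existing callers compile unchanged.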

src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp (38 lines changed)

@@ -159,7 +159,7 @@ namespace storm {
         template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
-        ValueType LraViHelper<ValueType, ComponentType, TransitionsType>::performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
+        ValueType LraViHelper<ValueType, ComponentType, TransitionsType>::performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices, std::vector<ValueType>* choiceValues) {
             initializeNewValues(stateValueGetter, actionValueGetter, exitRates);
             ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision());
             bool relative = env.solver().lra().getRelativeTerminationCriterion();
@@ -219,7 +219,7 @@ namespace storm {
                 if(!gameNondetTs()) {
                     prepareNextIteration(env);
                 }
-                performIterationStep(env, dir, choices);
+                performIterationStep(env, dir, choices, choiceValues);
             }
             if(gameNondetTs()) {
                 storm::utility::vector::applyPointwise<ValueType, ValueType>(xNew(), xNew(), [&iter] (ValueType const& x_i) -> ValueType { return x_i / (double)iter; });
@@ -358,7 +358,20 @@ namespace storm {
         }

         template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
-        void LraViHelper<ValueType, ComponentType, TransitionsType>::performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
+        void LraViHelper<ValueType, ComponentType, TransitionsType>::setInputModelChoiceValues(std::vector<ValueType>& choiceValues, std::vector<ValueType> const& localMecChoiceValues) const {
+            // Transform the local choiceValues (within this mec) to choice values for the input model
+            uint64_t localState = 0;
+            for (auto const& element : _component) {
+                uint64_t elementState = element.first;
+                uint64_t rowIndex = _transitionMatrix.getRowGroupIndices()[elementState];
+                uint64_t rowGroupSize = _transitionMatrix.getRowGroupEntryCount(elementState);
+                std::copy(localMecChoiceValues.begin(), localMecChoiceValues.begin() + rowGroupSize, &choiceValues.at(rowIndex));
+                localState++;
+            }
+        }
+
+        template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+        void LraViHelper<ValueType, ComponentType, TransitionsType>::performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices, std::vector<ValueType>* choiceValues) {
             STORM_LOG_ASSERT(!((nondetTs() || nondetIs()) && dir == nullptr), "No optimization direction provided for model with nondeterminism");
             // Initialize value vectors, multiplers, and solver if this has not been done, yet
             if (!_TsMultiplier) {
@@ -378,19 +391,34 @@ namespace storm {
             } else {
                 // Also keep track of the choices made.
                 std::vector<uint64_t> tsChoices(_TsTransitions.getRowGroupCount());
-                _TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), &tsChoices);
+                std::vector<ValueType> resultChoiceValues(_TsTransitions.getRowCount());
+                _TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, resultChoiceValues);
+                auto rowGroupIndices = this->_TsTransitions.getRowGroupIndices();
+                rowGroupIndices.erase(rowGroupIndices.begin());
+                _TsMultiplier->reduce(env, *dir, rowGroupIndices, resultChoiceValues, xNew(), &tsChoices);
                 // Note that nondeterminism within the timed states means that there can not be instant states (We either have MDPs or MAs)
                 // Hence, in this branch we don't have to care for choices at instant states.
                 STORM_LOG_ASSERT(!_hasInstantStates, "Nondeterministic timed states are only supported if there are no instant states.");
                 setInputModelChoices(*choices, tsChoices);
+                setInputModelChoiceValues(*choiceValues, resultChoiceValues);
             }
         } else if(gameNondetTs()) { // TODO DRYness? exact same behaviour as case above?
             if (choices == nullptr) {
                 _TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), nullptr, _statesOfCoalition);
             } else {
+                // Also keep track of the choices made.
                 std::vector<uint64_t> tsChoices(_TsTransitions.getRowGroupCount());
-                _TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), &tsChoices, _statesOfCoalition);
+                std::vector<ValueType> resultChoiceValues(_TsTransitions.getRowCount());
+                _TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, resultChoiceValues);
+                auto rowGroupIndices = this->_TsTransitions.getRowGroupIndices();
+                rowGroupIndices.erase(rowGroupIndices.begin());
+                _TsMultiplier->reduce(env, *dir, rowGroupIndices, resultChoiceValues, xNew(), &tsChoices);
                 setInputModelChoices(*choices, tsChoices); // no components -> no need for that call?
+                setInputModelChoiceValues(*choiceValues, resultChoiceValues);
             }
         } else {
             _TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, xNew());
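
The key change in both nondeterministic branches: the former single multiplyAndReduce call is split into multiply followed by reduce, so the per-row products survive in resultChoiceValues before being collapsed to one value per state. A standalone sketch of that reduce step over row groups (plain C++ with made-up data; storm's Multiplier API is not used here):

    #include <cstdint>
    #include <limits>
    #include <vector>

    int main() {
        // Per-row (per-choice) values as the multiply step would leave them:
        // choiceValue[row] = offset[row] + sum_j A[row][j] * x[j].
        std::vector<double> choiceValues = {0.3, 0.7, 0.5};   // rows 0,1 belong to state 0; row 2 to state 1
        std::vector<std::size_t> rowGroupIndices = {0, 2, 3}; // state s owns rows [rgi[s], rgi[s+1])
        bool maximize = true;

        std::vector<double> xNew(rowGroupIndices.size() - 1);
        std::vector<std::uint64_t> choices(xNew.size());
        for (std::size_t state = 0; state + 1 < rowGroupIndices.size(); ++state) {
            double best = maximize ? -std::numeric_limits<double>::infinity()
                                   : std::numeric_limits<double>::infinity();
            for (std::size_t row = rowGroupIndices[state]; row < rowGroupIndices[state + 1]; ++row) {
                if (maximize ? choiceValues[row] > best : choiceValues[row] < best) {
                    best = choiceValues[row];
                    choices[state] = row - rowGroupIndices[state]; // choice index local to the state
                }
            }
            xNew[state] = best; // one reduced value per state, as xNew() receives above
        }
        // choiceValues still holds the value of every individual choice,
        // which is exactly what the new choiceValues out-parameter exposes.
        return 0;
    }

The choice recorded per state is the argmax (or argmin) row within that state's row group, matching what tsChoices receives in the diff above.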

src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h (6 lines changed)

@@ -68,7 +68,7 @@ namespace storm {
             * @return The (optimal) long run average value of the specified component.
             * @note it is possible to call this method multiple times with different values. However, other changes to the environment or the optimization direction might not have the expected effect due to caching.
             */
-            ValueType performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr);
+            ValueType performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr, std::vector<ValueType>* choiceValues = nullptr);

         private:
@@ -89,7 +89,7 @@ namespace storm {
             * Note that these choices will be inserted w.r.t. the original model states/choices, i.e. the size of the vector should match the state-count of the input model
             * @pre when calling this the first time, initializeNewValues must have been called before. Moreover, prepareNextIteration must be called between two calls of this.
             */
-            void performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr);
+            void performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr, std::vector<ValueType>* choiceValues = nullptr);

            struct ConvergenceCheckResult {
                bool isPrecisionAchieved;
@@ -111,6 +111,8 @@ namespace storm {
            void setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToMarkovianStates = false, bool setChoiceZeroToProbabilisticStates = false) const;
+            void setInputModelChoiceValues(std::vector<ValueType>& choiceValues, std::vector<ValueType> const& localMecChoiceValues) const;
+
            /// Returns true iff the given state is a timed state
            bool isTimedState(uint64_t const& inputModelStateIndex) const;
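
The new helper declared here scatters component-local choice values back to the rows of the full input model. A standalone sketch of that mapping (plain C++, made-up model shape; the sketch advances a running offset into the local vector so each state copies its own block):

    #include <cstdint>
    #include <vector>

    int main() {
        // Hypothetical input model: 3 states with 2, 1, and 2 choices (rows 0-4).
        std::vector<std::size_t> rowGroupIndices = {0, 2, 3, 5};    // state s owns rows [rgi[s], rgi[s+1])
        std::vector<std::uint64_t> componentStates = {0, 2};        // states inside the component
        std::vector<double> localChoiceValues = {0.3, 0.7, 0.5, 0.1}; // their choice values, concatenated

        std::vector<double> choiceValues(5, 0.0); // one entry per row of the input model
        std::size_t localOffset = 0;              // running position in localChoiceValues
        for (std::uint64_t state : componentStates) {
            std::size_t firstRow = rowGroupIndices[state];
            std::size_t groupSize = rowGroupIndices[state + 1] - firstRow;
            for (std::size_t i = 0; i < groupSize; ++i) {
                // Scatter this state's block to its global rows in the input model.
                choiceValues[firstRow + i] = localChoiceValues[localOffset + i];
            }
            localOffset += groupSize; // advance past this state's block
        }
        return 0;
    }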
