diff --git a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
index 1fc6aa295..907d76ecb 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/SparseNondeterministicInfiniteHorizonHelper.cpp
@@ -183,19 +183,19 @@ namespace storm {
                } else {
                    // We assume an MDP (with nondeterministic timed states and no instant states)
                    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent, storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs> viHelper(mec, this->_transitionMatrix, aperiodicFactor);
-                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices);
+                    return viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter, nullptr, &this->getOptimizationDirection(), optimalChoices, choiceValues);
                }
            }
-            
+
            template <typename ValueType>
            ValueType SparseNondeterministicInfiniteHorizonHelper<ValueType>::computeLraForMecLp(Environment const& env, ValueGetter const& stateRewardsGetter, ValueGetter const& actionRewardsGetter, storm::storage::MaximalEndComponent const& mec) {
                // Create an LP solver
                auto solver = storm::utility::solver::LpSolverFactory<ValueType>().create("LRA for MEC");
-                
+
                // Now build the LP formulation as described in:
                // Guck et al.: Modelling and Analysis of Markov Reward Automata (ATVA'14), https://doi.org/10.1007/978-3-319-11936-6_13
                solver->setOptimizationDirection(invert(this->getOptimizationDirection()));
-                
+
                // Create variables
                // TODO: Investigate whether we can easily make the variables bounded
                std::map<uint_fast64_t, storm::expressions::Variable> stateToVariableMap;
diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
index 15c12ba95..83181ca37 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.cpp
@@ -159,7 +159,7 @@ namespace storm {
 
                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
-                ValueType LraViHelper<ValueType, ComponentType, TransitionsType>::performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
+                ValueType LraViHelper<ValueType, ComponentType, TransitionsType>::performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices, std::vector<ValueType>* choiceValues) {
                    initializeNewValues(stateValueGetter, actionValueGetter, exitRates);
                    ValueType precision = storm::utility::convertNumber<ValueType>(env.solver().lra().getPrecision());
                    bool relative = env.solver().lra().getRelativeTerminationCriterion();
@@ -219,7 +219,7 @@ namespace storm {
                        if(!gameNondetTs()) {
                            prepareNextIteration(env);
                        }
-                        performIterationStep(env, dir, choices);
+                        performIterationStep(env, dir, choices, choiceValues);
                    }
                    if(gameNondetTs()) {
                        storm::utility::vector::applyPointwise(xNew(), xNew(), [&iter] (ValueType const& x_i) -> ValueType { return x_i / (double)iter; });
@@ -358,7 +358,20 @@ namespace storm {
                }
 
                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
-                void LraViHelper<ValueType, ComponentType, TransitionsType>::performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices) {
+                void LraViHelper<ValueType, ComponentType, TransitionsType>::setInputModelChoiceValues(std::vector<ValueType>& choiceValues, std::vector<ValueType> const& localMecChoiceValues) const {
+                    // Transform the local choiceValues (within this mec) to choice values for the input model
+                    uint64_t localChoiceOffset = 0;
+                    for (auto const& element : _component) {
+                        uint64_t elementState = element.first;
+                        uint64_t rowIndex = _transitionMatrix.getRowGroupIndices()[elementState];
+                        uint64_t rowGroupSize = _transitionMatrix.getRowGroupSize(elementState);
+                        // Copy the values of this state's choices to the corresponding rows of the input model
+                        std::copy(localMecChoiceValues.begin() + localChoiceOffset, localMecChoiceValues.begin() + localChoiceOffset + rowGroupSize, &choiceValues.at(rowIndex));
+                        localChoiceOffset += rowGroupSize;
+                    }
+                }
+
+                template <typename ValueType, typename ComponentType, LraViTransitionsType TransitionsType>
+                void LraViHelper<ValueType, ComponentType, TransitionsType>::performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir, std::vector<uint64_t>* choices, std::vector<ValueType>* choiceValues) {
                    STORM_LOG_ASSERT(!((nondetTs() || nondetIs()) && dir == nullptr), "No optimization direction provided for model with nondeterminism");
                    // Initialize value vectors, multiplers, and solver if this has not been done, yet
                    if (!_TsMultiplier) {
@@ -378,19 +391,34 @@ namespace storm {
                        } else {
                            // Also keep track of the choices made.
                            std::vector<uint64_t> tsChoices(_TsTransitions.getRowGroupCount());
-                            _TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), &tsChoices);
+                            std::vector<ValueType> resultChoiceValues(_TsTransitions.getRowCount());
+
+                            _TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, resultChoiceValues);
+                            auto rowGroupIndices = this->_TsTransitions.getRowGroupIndices();
+                            rowGroupIndices.erase(rowGroupIndices.begin());
+                            _TsMultiplier->reduce(env, *dir, rowGroupIndices, resultChoiceValues, xNew(), &tsChoices);
+
                            // Note that nondeterminism within the timed states means that there can not be instant states (We either have MDPs or MAs)
                            // Hence, in this branch we don't have to care for choices at instant states.
                            STORM_LOG_ASSERT(!_hasInstantStates, "Nondeterministic timed states are only supported if there are no instant states.");
                            setInputModelChoices(*choices, tsChoices);
+                            setInputModelChoiceValues(*choiceValues, resultChoiceValues);
                        }
                    } else if(gameNondetTs()) { // TODO DRYness? exact same behaviour as case above?
                        if (choices == nullptr) {
                            _TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), nullptr, _statesOfCoalition);
                        } else {
+                            // Also keep track of the choices made.
                            std::vector<uint64_t> tsChoices(_TsTransitions.getRowGroupCount());
-                            _TsMultiplier->multiplyAndReduce(env, *dir, xOld(), &_TsChoiceValues, xNew(), &tsChoices, _statesOfCoalition);
+                            std::vector<ValueType> resultChoiceValues(_TsTransitions.getRowCount());
+
+                            _TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, resultChoiceValues);
+                            auto rowGroupIndices = this->_TsTransitions.getRowGroupIndices();
+                            rowGroupIndices.erase(rowGroupIndices.begin());
+                            _TsMultiplier->reduce(env, *dir, rowGroupIndices, resultChoiceValues, xNew(), &tsChoices);
+                            setInputModelChoices(*choices, tsChoices); // no components -> no need for that call?
+                            setInputModelChoiceValues(*choiceValues, resultChoiceValues);
                        }
                    } else {
                        _TsMultiplier->multiply(env, xOld(), &_TsChoiceValues, xNew());
diff --git a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
index abd1576de..369d718b8 100644
--- a/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
+++ b/src/storm/modelchecker/helper/infinitehorizon/internal/LraViHelper.h
@@ -68,7 +68,7 @@ namespace storm {
                 * @return The (optimal) long run average value of the specified component.
                 * @note it is possible to call this method multiple times with different values. However, other changes to the environment or the optimization direction might not have the expected effect due to caching.
                 */
-                ValueType performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr);
+                ValueType performValueIteration(Environment const& env, ValueGetter const& stateValueGetter, ValueGetter const& actionValueGetter, std::vector<ValueType> const* exitRates = nullptr, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr, std::vector<ValueType>* choiceValues = nullptr);
 
            private:
 
@@ -89,7 +89,7 @@ namespace storm {
                 * Note that these choices will be inserted w.r.t. the original model states/choices, i.e. the size of the vector should match the state-count of the input model
                 * @pre when calling this the first time, initializeNewValues must have been called before. Moreover, prepareNextIteration must be called between two calls of this.
                 */
-                void performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr);
+                void performIterationStep(Environment const& env, storm::solver::OptimizationDirection const* dir = nullptr, std::vector<uint64_t>* choices = nullptr, std::vector<ValueType>* choiceValues = nullptr);
 
                struct ConvergenceCheckResult {
                    bool isPrecisionAchieved;
@@ -111,6 +111,8 @@ namespace storm {
                void setInputModelChoices(std::vector<uint64_t>& choices, std::vector<uint64_t> const& localMecChoices, bool setChoiceZeroToMarkovianStates = false, bool setChoiceZeroToProbabilisticStates = false) const;
 
+                void setInputModelChoiceValues(std::vector<ValueType>& choiceValues, std::vector<ValueType> const& localMecChoiceValues) const;
+
                /// Returns true iff the given state is a timed state
                bool isTimedState(uint64_t const& inputModelStateIndex) const;
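
Usage note (not part of the patch): the new choiceValues out-parameter defaults to nullptr and, when supplied, is overwritten in every iteration step with the choice values of that step mapped back to the rows of the input transition matrix, so after performValueIteration returns it holds the values of the last iteration. Since the branches above dereference *choiceValues whenever choices is non-null, a caller that requests optimal choices apparently has to pass choiceValues as well. The sketch below mirrors the MDP call from the first hunk and is purely hypothetical: transitionMatrix, mec, aperiodicFactor, dir, env and the reward getters are assumed to be in scope; only the extended performValueIteration signature comes from this patch.

    // Hypothetical caller sketch: one entry per row group for the optimal choices,
    // one entry per row of the input transition matrix for the choice values.
    std::vector<uint64_t> optimalChoices(transitionMatrix.getRowGroupCount());
    std::vector<ValueType> choiceValues(transitionMatrix.getRowCount());

    storm::modelchecker::helper::internal::LraViHelper<ValueType, storm::storage::MaximalEndComponent,
            storm::modelchecker::helper::internal::LraViTransitionsType::NondetTsNoIs>
            viHelper(mec, transitionMatrix, aperiodicFactor);

    // The LRA value is returned as before; optimalChoices and choiceValues are filled as a side effect.
    ValueType lraValue = viHelper.performValueIteration(env, stateRewardsGetter, actionRewardsGetter,
            /* exitRates */ nullptr, &dir, &optimalChoices, &choiceValues);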