From 721dd37a62acdca0fb1befe0d536b9d60f878cd2 Mon Sep 17 00:00:00 2001
From: TimQu
Date: Fri, 1 Sep 2017 14:12:33 +0200
Subject: [PATCH] Moved reduction of the preprocessed model into the weightvectorchecker

---
 .../SparseMultiObjectivePreprocessor.cpp      |  31 ---
 .../SparseMultiObjectivePreprocessorResult.h  |   2 +-
 .../SparseCbAchievabilityQuery.cpp            |  38 ++--
 .../SparseCbAchievabilityQuery.h              |   2 +-
 .../constraintbased/SparseCbQuery.cpp         |  32 ++-
 .../constraintbased/SparseCbQuery.h           |   8 +-
 .../pcaa/PcaaWeightVectorChecker.cpp          |  25 +--
 .../pcaa/PcaaWeightVectorChecker.h            |  16 +-
 .../pcaa/SparseMaPcaaWeightVectorChecker.cpp  |  77 +++----
 .../pcaa/SparseMaPcaaWeightVectorChecker.h    |  13 +-
 .../pcaa/SparseMdpPcaaWeightVectorChecker.cpp | 150 ++------------
 .../pcaa/SparseMdpPcaaWeightVectorChecker.h   |  31 +--
 .../multiobjective/pcaa/SparsePcaaQuery.cpp   |   8 +-
 .../multiobjective/pcaa/SparsePcaaQuery.h     |   1 -
 .../pcaa/SparsePcaaWeightVectorChecker.cpp    | 195 +++++++++++-------
 .../pcaa/SparsePcaaWeightVectorChecker.h      |  47 +++--
 16 files changed, 293 insertions(+), 383 deletions(-)

diff --git a/src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.cpp b/src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.cpp
index 7e426e0cd..f541372bc 100644
--- a/src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.cpp
+++ b/src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessor.cpp
@@ -12,7 +12,6 @@
 #include "storm/storage/memorystructure/MemoryStructureBuilder.h"
 #include "storm/storage/memorystructure/SparseModelMemoryProduct.h"
 #include "storm/storage/expressions/ExpressionManager.h"
-#include "storm/transformer/GoalStateMerger.h"
 #include "storm/utility/macros.h"
 #include "storm/utility/vector.h"
 #include "storm/utility/graph.h"
@@ -388,36 +387,6 @@ namespace storm {
                 setReward0States(result, backwardTransitions);
                 checkRewardFiniteness(result, data.finiteRewardCheckObjectives, backwardTransitions);
-                /////
-                std::set<std::string> relevantRewardModels;
-                for (auto const& obj : result.objectives) {
-                    obj.formula->gatherReferencedRewardModels(relevantRewardModels);
-                }
-
-                STORM_LOG_THROW(result.rewardFinitenessType != ReturnType::RewardFinitenessType::Infinite, storm::exceptions::InvalidPropertyException, "Infinite rewards unsupported.");
-
-                if (result.containsOnlyRewardObjectives()) {
-                    // Build a subsystem that discards states that yield infinite reward for all schedulers.
-                    // We can also merge the states that will have reward zero anyway.
-                    storm::storage::BitVector maybeStates = result.rewardLessInfinityEStates.get() & ~result.reward0AStates;
-                    storm::transformer::GoalStateMerger<SparseModelType> merger(*result.preprocessedModel);
-                    typename storm::transformer::GoalStateMerger<SparseModelType>::ReturnType mergerResult = merger.mergeTargetAndSinkStates(maybeStates, result.reward0AStates, storm::storage::BitVector(maybeStates.size(), false), std::vector<std::string>(relevantRewardModels.begin(), relevantRewardModels.end()));
-
-                    result.preprocessedModel = mergerResult.model;
-                    result.reward0EStates = result.reward0EStates % maybeStates;
-                    if (mergerResult.targetState) {
-                        storm::storage::BitVector targetStateAsVector(result.preprocessedModel->getNumberOfStates(), false);
-                        targetStateAsVector.set(*mergerResult.targetState, true);
-                        // The overapproximation for the possible ec choices consists of the states that can reach the target states with prob. 0 and the target state itself.
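// A consolidated sketch of the reduction step that the block removed above performs and that this
// commit relocates into the weight vector checker and the constraint-based query (see the later hunks).
// It is illustrative only: the template arguments and the fields of GoalStateMerger::ReturnType are
// inferred from the surrounding hunks, and the free names (objectives, preprocessedModel,
// rewardLessInfinityEStates, reward0AStates, reward0EStates) stand for the corresponding
// preprocessor-result members.
std::set<std::string> relevantRewardModels;
for (auto const& obj : objectives) {
    // Only reward models referenced by some objective need to survive the reduction.
    obj.formula->gatherReferencedRewardModels(relevantRewardModels);
}
// Keep exactly the states that admit finite reward under some scheduler but do not have reward 0 under all schedulers.
storm::storage::BitVector maybeStates = rewardLessInfinityEStates & ~reward0AStates;
storm::transformer::GoalStateMerger<SparseModelType> merger(*preprocessedModel);
auto mergerResult = merger.mergeTargetAndSinkStates(
        maybeStates,                                           // states kept in the reduced model
        reward0AStates,                                        // states collapsed into a single target state
        storm::storage::BitVector(maybeStates.size(), false),  // no states are collapsed into a sink here
        std::vector<std::string>(relevantRewardModels.begin(), relevantRewardModels.end()));
// State sets indexed over the original model are re-indexed w.r.t. the kept states; the fresh target
// state (if any) is appended at the end and trivially has reward 0 under every scheduler.
storm::storage::BitVector reward0EStatesReduced = reward0EStates % maybeStates;
if (mergerResult.targetState) {
    reward0EStatesReduced.resize(reward0EStatesReduced.size() + 1, true);
}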
-                        //result.ecChoicesHint = result.preprocessedModel->getTransitionMatrix().getRowFilter(storm::utility::graph::performProb0E(*result.preprocessedModel, result.preprocessedModel->getBackwardTransitions(), storm::storage::BitVector(targetStateAsVector.size(), true), targetStateAsVector));
-                        //result.ecChoicesHint->set(result.preprocessedModel->getTransitionMatrix().getRowGroupIndices()[*mergerResult.targetState], true);
-                        // There is an additional state in the result
-                        result.reward0EStates.resize(result.reward0EStates.size() + 1, true);
-                    }
-                    assert(result.reward0EStates.size() == result.preprocessedModel->getNumberOfStates());
-                }
-
                 return result;
             }
diff --git a/src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorResult.h b/src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorResult.h
index 3486e38ce..91b509847 100644
--- a/src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorResult.h
+++ b/src/storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorResult.h
@@ -49,7 +49,7 @@ namespace storm {
                     // Intentionally left empty
                 }
-                bool containsOnlyRewardObjectives() {
+                bool containsOnlyRewardObjectives() const {
                     for (auto const& obj : objectives) {
                         if (!(obj.formula->isRewardOperatorFormula() && (obj.formula->getSubformula().isTotalRewardFormula() || obj.formula->getSubformula().isCumulativeRewardFormula()))) {
                             return false;
diff --git a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbAchievabilityQuery.cpp b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbAchievabilityQuery.cpp
index dd20095f2..ef33a1ab4 100644
--- a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbAchievabilityQuery.cpp
+++ b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbAchievabilityQuery.cpp
@@ -25,7 +25,7 @@ namespace storm {
             template <typename SparseModelType>
-            SparseCbAchievabilityQuery<SparseModelType>::SparseCbAchievabilityQuery(SparseMultiObjectivePreprocessorResult<SparseModelType>& preprocessorResult) : SparseCbQuery<SparseModelType>(preprocessorResult) {
+            SparseCbAchievabilityQuery<SparseModelType>::SparseCbAchievabilityQuery(SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult) : SparseCbQuery<SparseModelType>(preprocessorResult) {
                 STORM_LOG_ASSERT(preprocessorResult.queryType == SparseMultiObjectivePreprocessorResult<SparseModelType>::QueryType::Achievability, "Invalid query Type");
                 solver = storm::utility::solver::SmtSolverFactory().create(*this->expressionManager);
             }
@@ -42,7 +42,7 @@
             template <typename SparseModelType>
             bool SparseCbAchievabilityQuery<SparseModelType>::checkAchievability() {
                 STORM_LOG_INFO("Building constraint system to check achievability.");
-                //this->preprocessedModel.writeDotToStream(std::cout);
+                //this->preprocessedModel->writeDotToStream(std::cout);
                 storm::utility::Stopwatch swInitialization(true);
                 initializeConstraintSystem();
                 STORM_LOG_INFO("Constraint system consists of " << expectedChoiceVariables.size() << " + " << bottomStateVariables.size() << " variables");
@@ -78,9 +78,9 @@
             template <typename SparseModelType>
             void SparseCbAchievabilityQuery<SparseModelType>::initializeConstraintSystem() {
-                uint_fast64_t numStates = this->preprocessedModel.getNumberOfStates();
-                uint_fast64_t numChoices = this->preprocessedModel.getNumberOfChoices();
-                uint_fast64_t numBottomStates = this->possibleBottomStates.getNumberOfSetBits();
+                uint_fast64_t numStates = this->preprocessedModel->getNumberOfStates();
+                uint_fast64_t numChoices = this->preprocessedModel->getNumberOfChoices();
+                uint_fast64_t numBottomStates = this->reward0EStates.getNumberOfSetBits();
                 storm::expressions::Expression zero =
this->expressionManager->rational(storm::utility::zero()); storm::expressions::Expression one = this->expressionManager->rational(storm::utility::one()); @@ -107,20 +107,20 @@ namespace storm { solver->add(bottomStateSum == one); // assert that the "incoming" value of each state equals the "outgoing" value - storm::storage::SparseMatrix backwardsTransitions = this->preprocessedModel.getTransitionMatrix().transpose(); + storm::storage::SparseMatrix backwardsTransitions = this->preprocessedModel->getTransitionMatrix().transpose(); auto bottomStateVariableIt = bottomStateVariables.begin(); for (uint_fast64_t state = 0; state < numStates; ++state) { // get the "incomming" value - storm::expressions::Expression value = this->preprocessedModel.getInitialStates().get(state) ? one : zero; + storm::expressions::Expression value = this->preprocessedModel->getInitialStates().get(state) ? one : zero; for (auto const& backwardsEntry : backwardsTransitions.getRow(state)) { value = value + (this->expressionManager->rational(backwardsEntry.getValue()) * expectedChoiceVariables[backwardsEntry.getColumn()].getExpression()); } // subtract the "outgoing" value - for (uint_fast64_t choice = this->preprocessedModel.getTransitionMatrix().getRowGroupIndices()[state]; choice < this->preprocessedModel.getTransitionMatrix().getRowGroupIndices()[state + 1]; ++choice) { + for (uint_fast64_t choice = this->preprocessedModel->getTransitionMatrix().getRowGroupIndices()[state]; choice < this->preprocessedModel->getTransitionMatrix().getRowGroupIndices()[state + 1]; ++choice) { value = value - expectedChoiceVariables[choice]; } - if (this->possibleBottomStates.get(state)) { + if (this->reward0EStates.get(state)) { value = value - (*bottomStateVariableIt); ++bottomStateVariableIt; } @@ -167,18 +167,18 @@ namespace storm { template <> std::vector SparseCbAchievabilityQuery>::getActionBasedExpectedRewards(std::string const& rewardModelName) const { - return this->preprocessedModel.getRewardModel(rewardModelName).getTotalRewardVector(this->preprocessedModel.getTransitionMatrix()); + return this->preprocessedModel->getRewardModel(rewardModelName).getTotalRewardVector(this->preprocessedModel->getTransitionMatrix()); } template <> std::vector SparseCbAchievabilityQuery>::getActionBasedExpectedRewards(std::string const& rewardModelName) const { - auto const& rewModel = this->preprocessedModel.getRewardModel(rewardModelName); + auto const& rewModel = this->preprocessedModel->getRewardModel(rewardModelName); STORM_LOG_ASSERT(!rewModel.hasTransitionRewards(), "Preprocessed Reward model has transition rewards which is not expected."); - std::vector result = rewModel.hasStateActionRewards() ? rewModel.getStateActionRewardVector() : std::vector(this->preprocessedModel.getNumberOfChoices(), storm::utility::zero()); + std::vector result = rewModel.hasStateActionRewards() ? 
rewModel.getStateActionRewardVector() : std::vector(this->preprocessedModel->getNumberOfChoices(), storm::utility::zero()); if(rewModel.hasStateRewards()) { // Note that state rewards are earned over time and thus play no role for probabilistic states - for(auto markovianState : this->preprocessedModel.getMarkovianStates()) { - result[this->preprocessedModel.getTransitionMatrix().getRowGroupIndices()[markovianState]] += rewModel.getStateReward(markovianState) / this->preprocessedModel.getExitRate(markovianState); + for(auto markovianState : this->preprocessedModel->getMarkovianStates()) { + result[this->preprocessedModel->getTransitionMatrix().getRowGroupIndices()[markovianState]] += rewModel.getStateReward(markovianState) / this->preprocessedModel->getExitRate(markovianState); } } return result; @@ -186,13 +186,13 @@ namespace storm { template <> std::vector SparseCbAchievabilityQuery>::getActionBasedExpectedRewards(std::string const& rewardModelName) const { - auto const& rewModel = this->preprocessedModel.getRewardModel(rewardModelName); + auto const& rewModel = this->preprocessedModel->getRewardModel(rewardModelName); STORM_LOG_ASSERT(!rewModel.hasTransitionRewards(), "Preprocessed Reward model has transition rewards which is not expected."); - std::vector result = rewModel.hasStateActionRewards() ? rewModel.getStateActionRewardVector() : std::vector(this->preprocessedModel.getNumberOfChoices(), storm::utility::zero()); + std::vector result = rewModel.hasStateActionRewards() ? rewModel.getStateActionRewardVector() : std::vector(this->preprocessedModel->getNumberOfChoices(), storm::utility::zero()); if(rewModel.hasStateRewards()) { // Note that state rewards are earned over time and thus play no role for probabilistic states - for(auto markovianState : this->preprocessedModel.getMarkovianStates()) { - result[this->preprocessedModel.getTransitionMatrix().getRowGroupIndices()[markovianState]] += rewModel.getStateReward(markovianState) / this->preprocessedModel.getExitRate(markovianState); + for(auto markovianState : this->preprocessedModel->getMarkovianStates()) { + result[this->preprocessedModel->getTransitionMatrix().getRowGroupIndices()[markovianState]] += rewModel.getStateReward(markovianState) / this->preprocessedModel->getExitRate(markovianState); } } return result; @@ -200,7 +200,7 @@ namespace storm { template <> std::vector SparseCbAchievabilityQuery>::getActionBasedExpectedRewards(std::string const& rewardModelName) const { - return this->preprocessedModel.getRewardModel(rewardModelName).getTotalRewardVector(this->preprocessedModel.getTransitionMatrix()); + return this->preprocessedModel->getRewardModel(rewardModelName).getTotalRewardVector(this->preprocessedModel->getTransitionMatrix()); } diff --git a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbAchievabilityQuery.h b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbAchievabilityQuery.h index 2fc2f8f7d..3ccb69b40 100644 --- a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbAchievabilityQuery.h +++ b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbAchievabilityQuery.h @@ -19,7 +19,7 @@ namespace storm { typedef typename SparseModelType::ValueType ValueType; - SparseCbAchievabilityQuery(SparseMultiObjectivePreprocessorResult& preprocessorResult); + SparseCbAchievabilityQuery(SparseMultiObjectivePreprocessorResult const& preprocessorResult); virtual ~SparseCbAchievabilityQuery() = default; diff --git a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.cpp 
b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.cpp index 529531c2d..942684376 100644 --- a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.cpp +++ b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.cpp @@ -9,8 +9,10 @@ #include "storm/settings/modules/MultiObjectiveSettings.h" #include "storm/utility/constants.h" #include "storm/utility/vector.h" +#include "storm/transformer/GoalStateMerger.h" #include "storm/exceptions/UnexpectedException.h" +#include "storm/exceptions/NotSupportedException.h" namespace storm { namespace modelchecker { @@ -18,13 +20,31 @@ namespace storm { template - SparseCbQuery::SparseCbQuery(SparseMultiObjectivePreprocessorResult& preprocessorResult) : - originalModel(preprocessorResult.originalModel), originalFormula(preprocessorResult.originalFormula), - preprocessedModel(std::move(*preprocessorResult.preprocessedModel)), objectives(std::move(preprocessorResult.objectives)), - possibleBottomStates(std::move(preprocessorResult.reward0EStates)) { + SparseCbQuery::SparseCbQuery(SparseMultiObjectivePreprocessorResult const& preprocessorResult) : + originalModel(preprocessorResult.originalModel), originalFormula(preprocessorResult.originalFormula), objectives(std::move(preprocessorResult.objectives)) { + + STORM_LOG_THROW(preprocessorResult.rewardFinitenessType != SparseMultiObjectivePreprocessorResult::RewardFinitenessType::Infinite, storm::exceptions::NotSupportedException, "There is no Pareto optimal scheduler that yields finite reward for all objectives. This is not supported."); + STORM_LOG_THROW(preprocessorResult.rewardLessInfinityEStates, storm::exceptions::UnexpectedException, "The set of states with reward < infinity for some scheduler has not been computed during preprocessing."); + STORM_LOG_THROW(preprocessorResult.containsOnlyRewardObjectives(), storm::exceptions::NotSupportedException, "At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. This is not supported by the considered weight vector checker."); + STORM_LOG_THROW(preprocessorResult.preprocessedModel->getInitialStates().getNumberOfSetBits() == 1, storm::exceptions::NotSupportedException, "The model has multiple initial states."); + + // Build a subsystem of the preprocessor result model that discards states that yield infinite reward for all schedulers. + // We can also merge the states that will have reward zero anyway. 
+ storm::storage::BitVector maybeStates = preprocessorResult.rewardLessInfinityEStates.get() & ~preprocessorResult.reward0AStates; + std::set relevantRewardModels; + for (auto const& obj : this->objectives) { + obj.formula->gatherReferencedRewardModels(relevantRewardModels); + } + storm::transformer::GoalStateMerger merger(*preprocessorResult.preprocessedModel); + auto mergerResult = merger.mergeTargetAndSinkStates(maybeStates, preprocessorResult.reward0AStates, storm::storage::BitVector(maybeStates.size(), false), std::vector(relevantRewardModels.begin(), relevantRewardModels.end())); + + preprocessedModel = mergerResult.model; + reward0EStates = preprocessorResult.reward0EStates % maybeStates; + if (mergerResult.targetState) { + // There is an additional state in the result + reward0EStates.resize(reward0EStates.size() + 1, true); + } expressionManager = std::make_shared(); - STORM_LOG_WARN("TODO"); - std::cout << "TODO" << std::endl; } diff --git a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.h b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.h index fa3e13119..8d22580aa 100644 --- a/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.h +++ b/src/storm/modelchecker/multiobjective/constraintbased/SparseCbQuery.h @@ -27,17 +27,15 @@ namespace storm { protected: - SparseCbQuery(SparseMultiObjectivePreprocessorResult& preprocessorResult); + SparseCbQuery(SparseMultiObjectivePreprocessorResult const& preprocessorResult); SparseModelType const& originalModel; storm::logic::MultiObjectiveFormula const& originalFormula; - - SparseModelType preprocessedModel; std::vector> objectives; - + std::shared_ptr preprocessedModel; + storm::storage::BitVector reward0EStates; std::shared_ptr expressionManager; - storm::storage::BitVector possibleBottomStates; }; diff --git a/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.cpp index 03568318e..e201b595c 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.cpp @@ -11,7 +11,7 @@ namespace storm { namespace multiobjective { template - PcaaWeightVectorChecker::PcaaWeightVectorChecker(ModelType const& model, std::vector> const& objectives) : model(model), objectives(objectives) { + PcaaWeightVectorChecker::PcaaWeightVectorChecker(std::vector> const& objectives) : objectives(objectives) { // Intentionally left empty } @@ -32,21 +32,14 @@ namespace storm { template template>::value, int>::type> - std::unique_ptr> WeightVectorCheckerFactory::create(ModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates) { - return std::make_unique>(model, objectives, possibleECActions, possibleBottomStates); + std::unique_ptr> WeightVectorCheckerFactory::create(SparseMultiObjectivePreprocessorResult const& preprocessorResult) { + return std::make_unique>(preprocessorResult); } template template>::value, int>::type> - std::unique_ptr> WeightVectorCheckerFactory::create(ModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates) { - - return std::make_unique>(model, objectives, possibleECActions, possibleBottomStates); + std::unique_ptr> WeightVectorCheckerFactory::create(SparseMultiObjectivePreprocessorResult 
const& preprocessorResult) { + return std::make_unique>(preprocessorResult); } template class PcaaWeightVectorChecker>; @@ -59,10 +52,10 @@ namespace storm { template class WeightVectorCheckerFactory>; template class WeightVectorCheckerFactory>; - template std::unique_ptr>> WeightVectorCheckerFactory>::create(storm::models::sparse::Mdp const& model, std::vector> const& objectives, storm::storage::BitVector const& possibleECActions, storm::storage::BitVector const& possibleBottomStates); - template std::unique_ptr>> WeightVectorCheckerFactory>::create(storm::models::sparse::Mdp const& model, std::vector> const& objectives, storm::storage::BitVector const& possibleECActions, storm::storage::BitVector const& possibleBottomStates); - template std::unique_ptr>> WeightVectorCheckerFactory>::create(storm::models::sparse::MarkovAutomaton const& model, std::vector> const& objectives, storm::storage::BitVector const& possibleECActions, storm::storage::BitVector const& possibleBottomStates); - template std::unique_ptr>> WeightVectorCheckerFactory>::create(storm::models::sparse::MarkovAutomaton const& model, std::vector> const& objectives, storm::storage::BitVector const& possibleECActions, storm::storage::BitVector const& possibleBottomStates); + template std::unique_ptr>> WeightVectorCheckerFactory>::create(SparseMultiObjectivePreprocessorResult> const& preprocessorResult); + template std::unique_ptr>> WeightVectorCheckerFactory>::create(SparseMultiObjectivePreprocessorResult> const& preprocessorResult); + template std::unique_ptr>> WeightVectorCheckerFactory>::create(SparseMultiObjectivePreprocessorResult> const& preprocessorResult); + template std::unique_ptr>> WeightVectorCheckerFactory>::create(SparseMultiObjectivePreprocessorResult> const& preprocessorResult); } diff --git a/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.h b/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.h index c6e7f96eb..6d547de58 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.h +++ b/src/storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.h @@ -31,7 +31,7 @@ namespace storm { * */ - PcaaWeightVectorChecker(ModelType const& model, std::vector> const& objectives); + PcaaWeightVectorChecker(std::vector> const& objectives); virtual ~PcaaWeightVectorChecker() = default; @@ -70,10 +70,8 @@ namespace storm { protected: - // The (preprocessed) model - ModelType const& model; // The (preprocessed) objectives - std::vector> const& objectives; + std::vector> objectives; // The precision of this weight vector checker. 
ValueType weightedPrecision; @@ -84,16 +82,10 @@ namespace storm { public: template>::value, int>::type = 0> - static std::unique_ptr> create(ModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates); + static std::unique_ptr> create(SparseMultiObjectivePreprocessorResult const& preprocessorResult); template>::value, int>::type = 0> - static std::unique_ptr> create(ModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates); + static std::unique_ptr> create(SparseMultiObjectivePreprocessorResult const& preprocessorResult); }; } diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparseMaPcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparseMaPcaaWeightVectorChecker.cpp index e695fa8db..6325a9d66 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparseMaPcaaWeightVectorChecker.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/SparseMaPcaaWeightVectorChecker.cpp @@ -20,29 +20,40 @@ namespace storm { template - SparseMaPcaaWeightVectorChecker::SparseMaPcaaWeightVectorChecker(SparseMaModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates) : - SparsePcaaWeightVectorChecker(model, objectives, possibleECActions, possibleBottomStates) { + SparseMaPcaaWeightVectorChecker::SparseMaPcaaWeightVectorChecker(SparseMultiObjectivePreprocessorResult const& preprocessorResult) : + SparsePcaaWeightVectorChecker(preprocessorResult) { + this->initialize(preprocessorResult); + } + + template + void SparseMaPcaaWeightVectorChecker::initializeModelTypeSpecificData(SparseMaModelType const& model) { + + markovianStates = model.getMarkovianStates(); + exitRates = model.getExitRates(); + // Set the (discretized) state action rewards. - this->discreteActionRewards.resize(objectives.size()); - for (auto objIndex : this->objectivesWithNoUpperTimeBound) { - auto const& formula = *objectives[objIndex].formula; + this->actionRewards.resize(this->objectives.size()); + for (uint64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { + auto const& formula = *this->objectives[objIndex].formula; STORM_LOG_THROW(formula.isRewardOperatorFormula() && formula.asRewardOperatorFormula().hasRewardModelName(), storm::exceptions::UnexpectedException, "Unexpected type of operator formula: " << formula); - STORM_LOG_THROW(formula.getSubformula().isTotalRewardFormula() || (formula.getSubformula().isCumulativeRewardFormula() && formula.getSubformula().asCumulativeRewardFormula().isTimeBounded()), storm::exceptions::UnexpectedException, "Unexpected type of sub-formula: " << formula.getSubformula()); - STORM_LOG_WARN_COND(!formula.getSubformula().isCumulativeRewardFormula() || (objectives[objIndex].originalFormula->isProbabilityOperatorFormula() && objectives[objIndex].originalFormula->asProbabilityOperatorFormula().getSubformula().isBoundedUntilFormula()), "Objective " << objectives[objIndex].originalFormula << " was simplified to a cumulative reward formula. 
Correctness of the algorithm is unknown for this type of property."); - typename SparseMaModelType::RewardModelType const& rewModel = this->model.getRewardModel(formula.asRewardOperatorFormula().getRewardModelName()); + typename SparseMaModelType::RewardModelType const& rewModel = model.getRewardModel(formula.asRewardOperatorFormula().getRewardModelName()); STORM_LOG_ASSERT(!rewModel.hasTransitionRewards(), "Preprocessed Reward model has transition rewards which is not expected."); - this->discreteActionRewards[objIndex] = rewModel.hasStateActionRewards() ? rewModel.getStateActionRewardVector() : std::vector(this->model.getTransitionMatrix().getRowCount(), storm::utility::zero()); - if (rewModel.hasStateRewards()) { - // Note that state rewards are earned over time and thus play no role for probabilistic states - for (auto markovianState : this->model.getMarkovianStates()) { - this->discreteActionRewards[objIndex][this->model.getTransitionMatrix().getRowGroupIndices()[markovianState]] += rewModel.getStateReward(markovianState) / this->model.getExitRate(markovianState); + this->actionRewards[objIndex] = rewModel.hasStateActionRewards() ? rewModel.getStateActionRewardVector() : std::vector(model.getTransitionMatrix().getRowCount(), storm::utility::zero()); + if (formula.getSubformula().isTotalRewardFormula()) { + if (rewModel.hasStateRewards()) { + // Note that state rewards are earned over time and thus play no role for probabilistic states + for (auto markovianState : markovianStates) { + this->actionRewards[objIndex][model.getTransitionMatrix().getRowGroupIndices()[markovianState]] += rewModel.getStateReward(markovianState) / exitRates[markovianState]; + } } + } else { + STORM_LOG_THROW(formula.getSubformula().isCumulativeRewardFormula() && formula.getSubformula().asCumulativeRewardFormula().isTimeBounded(), storm::exceptions::UnexpectedException, "Unexpected type of sub-formula: " << formula.getSubformula()); + STORM_LOG_THROW(!rewModel.hasStateRewards(), storm::exceptions::InvalidPropertyException, "Found state rewards for time bounded objective " << this->objectives[objIndex].originalFormula << ". This is not supported."); + STORM_LOG_WARN_COND(this->objectives[objIndex].originalFormula->isProbabilityOperatorFormula() && this->objectives[objIndex].originalFormula->asProbabilityOperatorFormula().getSubformula().isBoundedUntilFormula(), "Objective " << this->objectives[objIndex].originalFormula << " was simplified to a cumulative reward formula. Correctness of the algorithm is unknown for this type of property."); } } } + template void SparseMaPcaaWeightVectorChecker::boundedPhase(std::vector const& weightVector, std::vector& weightedRewardVector) { @@ -106,33 +117,27 @@ namespace storm { typename SparseMaPcaaWeightVectorChecker::SubModel SparseMaPcaaWeightVectorChecker::createSubModel(bool createMS, std::vector const& weightedRewardVector) const { SubModel result; - storm::storage::BitVector probabilisticStates = ~this->model.getMarkovianStates(); - result.states = createMS ? this->model.getMarkovianStates() : probabilisticStates; - result.choices = this->model.getTransitionMatrix().getRowFilter(result.states); + storm::storage::BitVector probabilisticStates = ~markovianStates; + result.states = createMS ? 
markovianStates : probabilisticStates; + result.choices = this->transitionMatrix.getRowFilter(result.states); STORM_LOG_ASSERT(!createMS || result.states.getNumberOfSetBits() == result.choices.getNumberOfSetBits(), "row groups for Markovian states should consist of exactly one row"); //We need to add diagonal entries for selfloops on Markovian states. - result.toMS = this->model.getTransitionMatrix().getSubmatrix(true, result.states, this->model.getMarkovianStates(), createMS); - result.toPS = this->model.getTransitionMatrix().getSubmatrix(true, result.states, probabilisticStates, false); + result.toMS = this->transitionMatrix.getSubmatrix(true, result.states, markovianStates, createMS); + result.toPS = this->transitionMatrix.getSubmatrix(true, result.states, probabilisticStates, false); STORM_LOG_ASSERT(result.getNumberOfStates() == result.states.getNumberOfSetBits() && result.getNumberOfStates() == result.toMS.getRowGroupCount() && result.getNumberOfStates() == result.toPS.getRowGroupCount(), "Invalid state count for subsystem"); STORM_LOG_ASSERT(result.getNumberOfChoices() == result.choices.getNumberOfSetBits() && result.getNumberOfChoices() == result.toMS.getRowCount() && result.getNumberOfChoices() == result.toPS.getRowCount(), "Invalid choice count for subsystem"); result.weightedRewardVector.resize(result.getNumberOfChoices()); storm::utility::vector::selectVectorValues(result.weightedRewardVector, result.choices, weightedRewardVector); - result.objectiveRewardVectors.resize(this->objectives.size()); for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { - std::vector& objVector = result.objectiveRewardVectors[objIndex]; - objVector = std::vector(result.weightedRewardVector.size(), storm::utility::zero()); - if (this->objectivesWithNoUpperTimeBound.get(objIndex)) { - storm::utility::vector::selectVectorValues(objVector, result.choices, this->discreteActionRewards[objIndex]); - } else { - typename SparseMaModelType::RewardModelType const& rewModel = this->model.getRewardModel(this->objectives[objIndex].formula->asRewardOperatorFormula().getRewardModelName()); - STORM_LOG_ASSERT(!rewModel.hasTransitionRewards(), "Preprocessed Reward model has transition rewards which is not expected."); - STORM_LOG_ASSERT(!rewModel.hasStateRewards(), "State rewards for bounded objectives for MAs are not expected (bounded rewards are not supported)."); - if (rewModel.hasStateActionRewards()) { - storm::utility::vector::selectVectorValues(objVector, result.choices, rewModel.getStateActionRewardVector()); - } + std::vector const& objRewards = this->actionRewards[objIndex]; + std::vector subModelObjRewards; + subModelObjRewards.reserve(result.getNumberOfChoices()); + for (auto const& choice : result.choices) { + subModelObjRewards.push_back(objRewards[choice]); } + result.objectiveRewardVectors.push_back(std::move(subModelObjRewards)); } result.weightedSolutionVector.resize(result.getNumberOfStates()); @@ -161,7 +166,7 @@ namespace storm { // ) <= this->maximumLowerUpperDistance // Initialize some data for fast and easy access - VT const maxRate = this->model.getMaximalExitRate(); + VT const maxRate = storm::utility::vector::max_if(exitRates, markovianStates); std::vector timeBounds; std::vector eToPowerOfMinusMaxRateTimesBound; VT smallestNonZeroBound = storm::utility::zero(); @@ -229,7 +234,7 @@ namespace storm { template ::SupportsExponential, int>::type> void SparseMaPcaaWeightVectorChecker::digitize(SubModel& MS, VT const& digitizationConstant) const { std::vector 
rateVector(MS.getNumberOfChoices()); - storm::utility::vector::selectVectorValues(rateVector, MS.states, this->model.getExitRates()); + storm::utility::vector::selectVectorValues(rateVector, MS.states, exitRates); for (uint_fast64_t row = 0; row < rateVector.size(); ++row) { VT const eToMinusRateTimesDelta = std::exp(-rateVector[row] * digitizationConstant); for (auto& entry : MS.toMS.getRow(row)) { @@ -258,7 +263,7 @@ namespace storm { template ::SupportsExponential, int>::type> void SparseMaPcaaWeightVectorChecker::digitizeTimeBounds(TimeBoundMap& upperTimeBounds, VT const& digitizationConstant) { - VT const maxRate = this->model.getMaximalExitRate(); + VT const maxRate = storm::utility::vector::max_if(exitRates, markovianStates); for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { auto const& obj = this->objectives[objIndex]; VT errorTowardsZero = storm::utility::zero(); diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparseMaPcaaWeightVectorChecker.h b/src/storm/modelchecker/multiobjective/pcaa/SparseMaPcaaWeightVectorChecker.h index d3baaca42..c2940612f 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparseMaPcaaWeightVectorChecker.h +++ b/src/storm/modelchecker/multiobjective/pcaa/SparseMaPcaaWeightVectorChecker.h @@ -24,13 +24,13 @@ namespace storm { public: typedef typename SparseMaModelType::ValueType ValueType; - SparseMaPcaaWeightVectorChecker(SparseMaModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates); + SparseMaPcaaWeightVectorChecker(SparseMultiObjectivePreprocessorResult const& preprocessorResult); virtual ~SparseMaPcaaWeightVectorChecker() = default; + protected: + virtual void initializeModelTypeSpecificData(SparseMaModelType const& model) override; + private: /* @@ -151,6 +151,11 @@ namespace storm { */ void performMSStep(SubModel& MS, SubModel const& PS, storm::storage::BitVector const& consideredObjectives, std::vector const& weightVector) const; + + // Data regarding the given Markov automaton + storm::storage::BitVector markovianStates; + std::vector exitRates; + }; } diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp index 8b1fad20e..4588779fe 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp @@ -21,48 +21,31 @@ namespace storm { namespace multiobjective { template - SparseMdpPcaaWeightVectorChecker::SparseMdpPcaaWeightVectorChecker(SparseMdpModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates) : - SparsePcaaWeightVectorChecker(model, objectives, possibleECActions, possibleBottomStates) { - // set the state action rewards. Also do some sanity checks on the objectives. 
- for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { - auto const& formula = *objectives[objIndex].formula; - if (!(formula.isProbabilityOperatorFormula() && (formula.getSubformula().isBoundedUntilFormula() || formula.getSubformula().isMultiObjectiveFormula()))) { - STORM_LOG_THROW(formula.isRewardOperatorFormula() && formula.asRewardOperatorFormula().hasRewardModelName(), storm::exceptions::UnexpectedException, "Unexpected type of operator formula: " << formula); - STORM_LOG_THROW(formula.getSubformula().isCumulativeRewardFormula() || formula.getSubformula().isTotalRewardFormula(), storm::exceptions::UnexpectedException, "Unexpected type of sub-formula: " << formula.getSubformula()); - typename SparseMdpModelType::RewardModelType const& rewModel = this->model.getRewardModel(formula.asRewardOperatorFormula().getRewardModelName()); - STORM_LOG_THROW(!rewModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Reward model has transition rewards which is not expected."); - this->discreteActionRewards[objIndex] = rewModel.getTotalRewardVector(this->model.getTransitionMatrix()); - } - } + SparseMdpPcaaWeightVectorChecker::SparseMdpPcaaWeightVectorChecker(SparseMultiObjectivePreprocessorResult const& preprocessorResult) : + SparsePcaaWeightVectorChecker(preprocessorResult) { + this->initialize(preprocessorResult); } template - void SparseMdpPcaaWeightVectorChecker::boundedPhase(std::vector const& weightVector, std::vector& weightedRewardVector) { - // Currently, only step bounds are considered. - bool containsRewardBoundedObjectives = false; + void SparseMdpPcaaWeightVectorChecker::initializeModelTypeSpecificData(SparseMdpModelType const& model) { + // set the state action rewards. Also do some sanity checks on the objectives. 
+ this->actionRewards.resize(this->objectives.size()); for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { - if (this->objectives[objIndex].formula->isProbabilityOperatorFormula()) { - containsRewardBoundedObjectives = true; - break; - } - } - - if (containsRewardBoundedObjectives) { - boundedPhaseWithRewardBounds(weightVector, weightedRewardVector); - } else { - boundedPhaseOnlyStepBounds(weightVector, weightedRewardVector); + auto const& formula = *this->objectives[objIndex].formula; + STORM_LOG_THROW(formula.isRewardOperatorFormula() && formula.asRewardOperatorFormula().hasRewardModelName(), storm::exceptions::UnexpectedException, "Unexpected type of operator formula: " << formula); + STORM_LOG_THROW(formula.getSubformula().isCumulativeRewardFormula() || formula.getSubformula().isTotalRewardFormula(), storm::exceptions::UnexpectedException, "Unexpected type of sub-formula: " << formula.getSubformula()); + typename SparseMdpModelType::RewardModelType const& rewModel = model.getRewardModel(formula.asRewardOperatorFormula().getRewardModelName()); + STORM_LOG_THROW(!rewModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Reward model has transition rewards which is not expected."); + this->actionRewards[objIndex] = rewModel.getTotalRewardVector(model.getTransitionMatrix()); } } - + template - void SparseMdpPcaaWeightVectorChecker::boundedPhaseOnlyStepBounds(std::vector const& weightVector, std::vector& weightedRewardVector) { + void SparseMdpPcaaWeightVectorChecker::boundedPhase(std::vector const& weightVector, std::vector& weightedRewardVector) { // Allocate some memory so this does not need to happen for each time epoch - std::vector optimalChoicesInCurrentEpoch(this->model.getNumberOfStates()); + std::vector optimalChoicesInCurrentEpoch(this->transitionMatrix.getRowGroupCount()); std::vector choiceValues(weightedRewardVector.size()); - std::vector temporaryResult(this->model.getNumberOfStates()); + std::vector temporaryResult(this->transitionMatrix.getRowGroupCount()); // Get for each occurring timeBound the indices of the objectives with that bound. std::map> stepBounds; for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { @@ -94,29 +77,29 @@ namespace storm { for(auto objIndex : stepBoundIt->second) { // This objective now plays a role in the weighted sum ValueType factor = storm::solver::minimize(this->objectives[objIndex].formula->getOptimalityType()) ? 
-weightVector[objIndex] : weightVector[objIndex]; - storm::utility::vector::addScaledVector(weightedRewardVector, this->discreteActionRewards[objIndex], factor); + storm::utility::vector::addScaledVector(weightedRewardVector, this->actionRewards[objIndex], factor); } ++stepBoundIt; } // Get values and scheduler for weighted sum of objectives - this->model.getTransitionMatrix().multiplyWithVector(this->weightedResult, choiceValues); + this->transitionMatrix.multiplyWithVector(this->weightedResult, choiceValues); storm::utility::vector::addVectors(choiceValues, weightedRewardVector, choiceValues); - storm::utility::vector::reduceVectorMax(choiceValues, this->weightedResult, this->model.getTransitionMatrix().getRowGroupIndices(), &optimalChoicesInCurrentEpoch); + storm::utility::vector::reduceVectorMax(choiceValues, this->weightedResult, this->transitionMatrix.getRowGroupIndices(), &optimalChoicesInCurrentEpoch); // get values for individual objectives // TODO we could compute the result for one of the objectives from the weighted result, the given weight vector, and the remaining objective results. for (auto objIndex : consideredObjectives) { std::vector& objectiveResult = this->objectiveResults[objIndex]; - std::vector const& objectiveRewards = this->discreteActionRewards[objIndex]; - auto rowGroupIndexIt = this->model.getTransitionMatrix().getRowGroupIndices().begin(); + std::vector const& objectiveRewards = this->actionRewards[objIndex]; + auto rowGroupIndexIt = this->transitionMatrix.getRowGroupIndices().begin(); auto optimalChoiceIt = optimalChoicesInCurrentEpoch.begin(); for(ValueType& stateValue : temporaryResult){ uint_fast64_t row = (*rowGroupIndexIt) + (*optimalChoiceIt); ++rowGroupIndexIt; ++optimalChoiceIt; stateValue = objectiveRewards[row]; - for(auto const& entry : this->model.getTransitionMatrix().getRow(row)) { + for(auto const& entry : this->transitionMatrix.getRow(row)) { stateValue += entry.getValue() * objectiveResult[entry.getColumn()]; } } @@ -126,95 +109,6 @@ namespace storm { } } - template - void SparseMdpPcaaWeightVectorChecker::boundedPhaseWithRewardBounds(std::vector const& weightVector, std::vector& weightedRewardVector) { - if (!rewardUnfolding) { - rewardUnfolding = std::make_unique>(this->model, this->objectives, this->possibleECActions, this->possibleBottomStates); - } - auto initEpoch = rewardUnfolding->getStartEpoch(); - auto epochOrder = rewardUnfolding->getEpochComputationOrder(initEpoch); - for (auto const& epoch : epochOrder) { - computeEpochSolution(epoch, weightVector); - } - - auto solution = rewardUnfolding->getInitialStateResult(initEpoch); - this->weightedResult[*this->model.getInitialStates().begin()] = solution.weightedValue; - for (uint64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { - this->objectiveResults[objIndex][*this->model.getInitialStates().begin()] = solution.objectiveValues[objIndex]; - // Todo: we currently assume precise results... 
- this->offsetsToUnderApproximation[objIndex] = storm::utility::zero(); - this->offsetsToOverApproximation[objIndex] = storm::utility::zero(); - } - - } - - template - void SparseMdpPcaaWeightVectorChecker::computeEpochSolution(typename MultiDimensionalRewardUnfolding::Epoch const& epoch, std::vector const& weightVector) { - auto const& epochModel = rewardUnfolding->setCurrentEpoch(epoch); - std::vector::SolutionType> result(epochModel.epochMatrix.getRowGroupCount()); - - - // Formulate a min-max equation system max(A*x+b)=x for the weighted sum of the objectives - std::vector b(epochModel.epochMatrix.getRowCount(), storm::utility::zero()); - for (uint64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { - ValueType weight = storm::solver::minimize(this->objectives[objIndex].formula->getOptimalityType()) ? -weightVector[objIndex] : weightVector[objIndex]; - if (!storm::utility::isZero(weight)) { - std::vector const& objectiveReward = epochModel.objectiveRewards[objIndex]; - for (auto const& choice : epochModel.objectiveRewardFilter[objIndex]) { - b[choice] += weight * objectiveReward[choice]; - } - } - } - auto stepSolutionIt = epochModel.stepSolutions.begin(); - for (auto const& choice : epochModel.stepChoices) { - b[choice] += stepSolutionIt->weightedValue; - ++stepSolutionIt; - } - - // Invoke the min max solver - storm::solver::GeneralMinMaxLinearEquationSolverFactory minMaxSolverFactory; - auto minMaxSolver = minMaxSolverFactory.create(epochModel.epochMatrix); - minMaxSolver->setOptimizationDirection(storm::solver::OptimizationDirection::Maximize); - minMaxSolver->setTrackScheduler(true); - //minMaxSolver->setCachingEnabled(true); - std::vector x(result.size(), storm::utility::zero()); - minMaxSolver->solveEquations(x, b); - for (uint64_t state = 0; state < x.size(); ++state) { - result[state].weightedValue = x[state]; - } - - // Formulate for each objective the linear equation system induced by the performed choices - auto const& choices = minMaxSolver->getSchedulerChoices(); - storm::storage::SparseMatrix subMatrix = epochModel.epochMatrix.selectRowsFromRowGroups(choices, true); - subMatrix.convertToEquationSystem(); - storm::solver::GeneralLinearEquationSolverFactory linEqSolverFactory; - auto linEqSolver = linEqSolverFactory.create(std::move(subMatrix)); - b.resize(choices.size()); - // TODO: start with a better initial guess - x.resize(choices.size()); - for (uint64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { - std::vector const& objectiveReward = epochModel.objectiveRewards[objIndex]; - for (uint64_t state = 0; state < choices.size(); ++state) { - uint64_t choice = epochModel.epochMatrix.getRowGroupIndices()[state] + choices[state]; - if (epochModel.objectiveRewardFilter[objIndex].get(choice)) { - b[state] = objectiveReward[choice]; - } else { - b[state] = storm::utility::zero(); - } - if (epochModel.stepChoices.get(choice)) { - b[state] += epochModel.stepSolutions[epochModel.stepChoices.getNumberOfSetBitsBeforeIndex(choice)].objectiveValues[objIndex]; - } - } - linEqSolver->solveEquations(x, b); - for (uint64_t state = 0; state < choices.size(); ++state) { - result[state].objectiveValues.push_back(x[state]); - } - } - - rewardUnfolding->setSolutionForCurrentEpoch(result); - } - - template class SparseMdpPcaaWeightVectorChecker>; #ifdef STORM_HAVE_CARL template class SparseMdpPcaaWeightVectorChecker>; diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h 
b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h index 134228a0c..468175f71 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h +++ b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h @@ -32,17 +32,17 @@ namespace storm { * */ - SparseMdpPcaaWeightVectorChecker(SparseMdpModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates); + SparseMdpPcaaWeightVectorChecker(SparseMultiObjectivePreprocessorResult const& preprocessorResult); virtual ~SparseMdpPcaaWeightVectorChecker() = default; + protected: + virtual void initializeModelTypeSpecificData(SparseMdpModelType const& model) override; + private: /*! - * Computes the maximizing scheduler for the weighted sum of the objectives, including also step or reward bounded objectives. + * Computes the maximizing scheduler for the weighted sum of the objectives, including also step bounded objectives. * Moreover, the values of the individual objectives are computed w.r.t. this scheduler. * * @param weightVector the weight vector of the current check @@ -50,27 +50,6 @@ namespace storm { */ virtual void boundedPhase(std::vector const& weightVector, std::vector& weightedRewardVector) override; - /*! - * Computes the bounded phase for the case that only step bounded objectives are considered. - * - * @param weightVector the weight vector of the current check - * @param weightedRewardVector the weighted rewards considering the unbounded objectives. Will be invalidated after calling this. - */ - void boundedPhaseOnlyStepBounds(std::vector const& weightVector, std::vector& weightedRewardVector); - - /*! - * Computes the bounded phase for the case that also reward bounded objectives occurr. - * - * @param weightVector the weight vector of the current check - * @param weightedRewardVector the weighted rewards considering the unbounded objectives. Will be invalidated after calling this. 
- */ - void boundedPhaseWithRewardBounds(std::vector const& weightVector, std::vector& weightedRewardVector); - - void computeEpochSolution(typename MultiDimensionalRewardUnfolding::Epoch const& epoch, std::vector const& weightVector); - - std::unique_ptr> rewardUnfolding; - - }; } diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.cpp index 55fdf5d6d..eb888c92c 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.cpp @@ -21,13 +21,9 @@ namespace storm { template SparsePcaaQuery::SparsePcaaQuery(SparseMultiObjectivePreprocessorResult& preprocessorResult) : - originalModel(preprocessorResult.originalModel), originalFormula(preprocessorResult.originalFormula), - preprocessedModel(std::move(*preprocessorResult.preprocessedModel)), objectives(std::move(preprocessorResult.objectives)) { - - STORM_LOG_WARN("TODO"); - std::cout << "TODO" << std::endl; + originalModel(preprocessorResult.originalModel), originalFormula(preprocessorResult.originalFormula), objectives(preprocessorResult.objectives) { - this->weightVectorChecker = WeightVectorCheckerFactory::create(preprocessedModel, objectives, storm::storage::BitVector(preprocessedModel.getNumberOfChoices(), true), preprocessorResult.reward0EStates); + this->weightVectorChecker = WeightVectorCheckerFactory::create(preprocessorResult); this->diracWeightVectorsToBeChecked = storm::storage::BitVector(this->objectives.size(), true); this->overApproximation = storm::storage::geometry::Polytope::createUniversalPolytope(); diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.h b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.h index f6996e493..a85222ddf 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.h +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaQuery.h @@ -96,7 +96,6 @@ namespace storm { SparseModelType const& originalModel; storm::logic::MultiObjectiveFormula const& originalFormula; - SparseModelType preprocessedModel; std::vector> objectives; // The corresponding weight vector checker diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.cpp index 55358c68a..05f7318fb 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.cpp +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.cpp @@ -1,6 +1,7 @@ #include "storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.h" #include +#include #include "storm/adapters/RationalFunctionAdapter.h" #include "storm/models/sparse/Mdp.h" @@ -12,10 +13,12 @@ #include "storm/utility/macros.h" #include "storm/utility/vector.h" #include "storm/logic/Formulas.h" +#include "storm/transformer/GoalStateMerger.h" #include "storm/exceptions/IllegalFunctionCallException.h" #include "storm/exceptions/UnexpectedException.h" #include "storm/exceptions/NotImplementedException.h" +#include "storm/exceptions/NotSupportedException.h" namespace storm { namespace modelchecker { @@ -23,39 +26,73 @@ namespace storm { template - SparsePcaaWeightVectorChecker::SparsePcaaWeightVectorChecker(SparseModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates) : - PcaaWeightVectorChecker(model, objectives), - 
                possibleECActions(possibleECActions),
-                actionsWithoutRewardInUnboundedPhase(this->model.getNumberOfChoices(), true),
-                possibleBottomStates(possibleBottomStates),
-                objectivesWithNoUpperTimeBound(objectives.size(), false),
-                discreteActionRewards(objectives.size()),
-                checkHasBeenCalled(false),
-                objectiveResults(objectives.size()),
-                offsetsToUnderApproximation(objectives.size()),
-                offsetsToOverApproximation(objectives.size()),
-                optimalChoices(this->model.getNumberOfStates(), 0) {
+            SparsePcaaWeightVectorChecker<SparseModelType>::SparsePcaaWeightVectorChecker(SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult) :
+                PcaaWeightVectorChecker<SparseModelType>(preprocessorResult.objectives) {
+
+                STORM_LOG_THROW(preprocessorResult.rewardFinitenessType != SparseMultiObjectivePreprocessorResult<SparseModelType>::RewardFinitenessType::Infinite, storm::exceptions::NotSupportedException, "There is no Pareto optimal scheduler that yields finite reward for all objectives. This is not supported.");
+                STORM_LOG_THROW(preprocessorResult.rewardLessInfinityEStates, storm::exceptions::UnexpectedException, "The set of states with reward < infinity for some scheduler has not been computed during preprocessing.");
+                STORM_LOG_THROW(preprocessorResult.containsOnlyRewardObjectives(), storm::exceptions::NotSupportedException, "At least one objective was not reduced to an expected (total or cumulative) reward objective during preprocessing. This is not supported by the considered weight vector checker.");
+                STORM_LOG_THROW(preprocessorResult.preprocessedModel->getInitialStates().getNumberOfSetBits() == 1, storm::exceptions::NotSupportedException, "The model has multiple initial states.");
+
+            }
+
+            template <typename SparseModelType>
+            void SparsePcaaWeightVectorChecker<SparseModelType>::initialize(SparseMultiObjectivePreprocessorResult<SparseModelType> const& preprocessorResult) {
+                // Build a subsystem of the preprocessor result model that discards states that yield infinite reward for all schedulers.
+                // We can also merge the states that will have reward zero anyway.
+                storm::storage::BitVector maybeStates = preprocessorResult.rewardLessInfinityEStates.get() & ~preprocessorResult.reward0AStates;
+                std::set<std::string> relevantRewardModels;
+                for (auto const& obj : this->objectives) {
+                    obj.formula->gatherReferencedRewardModels(relevantRewardModels);
+                }
+                storm::transformer::GoalStateMerger<SparseModelType> merger(*preprocessorResult.preprocessedModel);
+                auto mergerResult = merger.mergeTargetAndSinkStates(maybeStates, preprocessorResult.reward0AStates, storm::storage::BitVector(maybeStates.size(), false), std::vector<std::string>(relevantRewardModels.begin(), relevantRewardModels.end()));
+
+                // Initialize data specific for the considered model type
+                initializeModelTypeSpecificData(*mergerResult.model);
+
+                // Initialize general data of the model
+                transitionMatrix = std::move(mergerResult.model->getTransitionMatrix());
+                initialState = *mergerResult.model->getInitialStates().begin();
+                reward0EStates = preprocessorResult.reward0EStates % maybeStates;
+                if (mergerResult.targetState) {
+                    // There is an additional state in the result
+                    reward0EStates.resize(reward0EStates.size() + 1, true);
+
+                    // The overapproximation for the possible ec choices consists of the states that can reach the target states with prob. 0 and the target state itself.
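// The assignment `reward0EStates = preprocessorResult.reward0EStates % maybeStates;` above projects a
// bit vector indexed over the original state space onto the reduced state space kept by the merger.
// A small stand-alone sketch of that projection; `projectOnto` is a hypothetical helper written here
// only to spell out the semantics assumed for storm's BitVector operator%:
#include <cstddef>
#include <vector>

std::vector<bool> projectOnto(std::vector<bool> const& original, std::vector<bool> const& kept) {
    // One result entry per kept state, in ascending order of original indices;
    // entry i is set iff `original` is set at the i-th kept state.
    std::vector<bool> result;
    for (std::size_t state = 0; state < kept.size(); ++state) {
        if (kept[state]) {
            result.push_back(original[state]);
        }
    }
    return result;
}
// If the merger introduces a fresh target state, it is appended behind the kept states, which is why
// one additional `true` entry is appended right after the projection: the target state trivially has
// reward 0 under every scheduler.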
+ storm::storage::BitVector targetStateAsVector(transitionMatrix.getRowGroupCount(), false); + targetStateAsVector.set(*mergerResult.targetState, true); + ecChoicesHint = transitionMatrix.getRowFilter(storm::utility::graph::performProb0E(transitionMatrix, transitionMatrix.getRowGroupIndices(), transitionMatrix.transpose(true), storm::storage::BitVector(targetStateAsVector.size(), true), targetStateAsVector)); + ecChoicesHint.set(transitionMatrix.getRowGroupIndices()[*mergerResult.targetState], true); + } else { + ecChoicesHint = storm::storage::BitVector(transitionMatrix.getRowCount(), true); + } // set data for unbounded objectives - for(uint_fast64_t objIndex = 0; objIndex < objectives.size(); ++objIndex) { + objectivesWithNoUpperTimeBound = storm::storage::BitVector(this->objectives.size(), false); + actionsWithoutRewardInUnboundedPhase = storm::storage::BitVector(transitionMatrix.getRowCount(), true); + for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { auto const& formula = *this->objectives[objIndex].formula; if (formula.getSubformula().isTotalRewardFormula()) { objectivesWithNoUpperTimeBound.set(objIndex, true); - STORM_LOG_ASSERT(formula.isRewardOperatorFormula() && formula.asRewardOperatorFormula().hasRewardModelName(), "Unexpected type of operator formula."); - actionsWithoutRewardInUnboundedPhase &= this->model.getRewardModel(formula.asRewardOperatorFormula().getRewardModelName()).getChoicesWithZeroReward(this->model.getTransitionMatrix()); + actionsWithoutRewardInUnboundedPhase &= storm::utility::vector::filterZero(actionRewards[objIndex]); } } + + // initialize data for the results + checkHasBeenCalled = false; + objectiveResults.resize(this->objectives.size()); + offsetsToUnderApproximation.resize(this->objectives.size(), storm::utility::zero()); + offsetsToOverApproximation.resize(this->objectives.size(), storm::utility::zero()); + optimalChoices.resize(transitionMatrix.getRowGroupCount(), 0); } - + template void SparsePcaaWeightVectorChecker::check(std::vector const& weightVector) { checkHasBeenCalled = true; STORM_LOG_INFO("Invoked WeightVectorChecker with weights " << std::endl << "\t" << storm::utility::vector::toString(storm::utility::vector::convertNumericVector(weightVector))); - std::vector weightedRewardVector(this->model.getTransitionMatrix().getRowCount(), storm::utility::zero()); + std::vector weightedRewardVector(transitionMatrix.getRowCount(), storm::utility::zero()); boost::optional weightedLowerResultBound = storm::utility::zero(); boost::optional weightedUpperResultBound = storm::utility::zero(); for (auto objIndex : objectivesWithNoUpperTimeBound) { @@ -71,7 +108,7 @@ namespace storm { } else { weightedLowerResultBound = boost::none; } - storm::utility::vector::addScaledVector(weightedRewardVector, discreteActionRewards[objIndex], -weightVector[objIndex]); + storm::utility::vector::addScaledVector(weightedRewardVector, actionRewards[objIndex], -weightVector[objIndex]); } else { if (obj.lowerResultBound && weightedLowerResultBound) { weightedLowerResultBound.get() += weightVector[objIndex] * obj.lowerResultBound.get(); @@ -83,7 +120,7 @@ namespace storm { } else { weightedUpperResultBound = boost::none; } - storm::utility::vector::addScaledVector(weightedRewardVector, discreteActionRewards[objIndex], weightVector[objIndex]); + storm::utility::vector::addScaledVector(weightedRewardVector, actionRewards[objIndex], weightVector[objIndex]); } } @@ -107,11 +144,10 @@ namespace storm { template std::vector::ValueType> 
SparsePcaaWeightVectorChecker::getUnderApproximationOfInitialStateResults() const { STORM_LOG_THROW(checkHasBeenCalled, storm::exceptions::IllegalFunctionCallException, "Tried to retrieve results but check(..) has not been called before."); - uint_fast64_t initstate = *this->model.getInitialStates().begin(); std::vector res; res.reserve(this->objectives.size()); - for(uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { - res.push_back(this->objectiveResults[objIndex][initstate] + this->offsetsToUnderApproximation[objIndex]); + for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { + res.push_back(this->objectiveResults[objIndex][initialState] + this->offsetsToUnderApproximation[objIndex]); } return res; } @@ -119,11 +155,10 @@ namespace storm { template std::vector::ValueType> SparsePcaaWeightVectorChecker::getOverApproximationOfInitialStateResults() const { STORM_LOG_THROW(checkHasBeenCalled, storm::exceptions::IllegalFunctionCallException, "Tried to retrieve results but check(..) has not been called before."); - uint_fast64_t initstate = *this->model.getInitialStates().begin(); std::vector res; res.reserve(this->objectives.size()); - for(uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { - res.push_back(this->objectiveResults[objIndex][initstate] + this->offsetsToOverApproximation[objIndex]); + for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) { + res.push_back(this->objectiveResults[objIndex][initialState] + this->offsetsToOverApproximation[objIndex]); } return res; } @@ -148,19 +183,17 @@ namespace storm { void SparsePcaaWeightVectorChecker::unboundedWeightedPhase(std::vector const& weightedRewardVector, boost::optional const& lowerResultBound, boost::optional const& upperResultBound) { if (this->objectivesWithNoUpperTimeBound.empty() || !storm::utility::vector::hasNonZeroEntry(weightedRewardVector)) { - this->weightedResult = std::vector(this->model.getNumberOfStates(), storm::utility::zero()); - this->optimalChoices = std::vector(this->model.getNumberOfStates(), 0); + this->weightedResult = std::vector(transitionMatrix.getRowGroupCount(), storm::utility::zero()); + this->optimalChoices = std::vector(transitionMatrix.getRowGroupCount(), 0); return; } - updateEcElimResult(weightedRewardVector); + updateEcQuotient(weightedRewardVector); - std::vector subRewardVector(cachedEcElimResult->newToOldRowMapping.size()); - storm::utility::vector::selectVectorValues(subRewardVector, cachedEcElimResult->newToOldRowMapping, weightedRewardVector); - std::vector subResult(cachedEcElimResult->matrix.getRowGroupCount()); + storm::utility::vector::selectVectorValues(ecQuotient->auxChoiceValues, ecQuotient->ecqToOriginalChoiceMapping, weightedRewardVector); storm::solver::GeneralMinMaxLinearEquationSolverFactory solverFactory; - std::unique_ptr> solver = solverFactory.create(cachedEcElimResult->matrix); + std::unique_ptr> solver = solverFactory.create(ecQuotient->matrix); solver->setOptimizationDirection(storm::solver::OptimizationDirection::Maximize); solver->setTrackScheduler(true); if (lowerResultBound) { @@ -169,11 +202,15 @@ namespace storm { if (upperResultBound) { solver->setUpperBound(*upperResultBound); } - solver->solveEquations(subResult, subRewardVector); + + // Use the (0...0) vector as initial guess for the solution. 
+ std::fill(ecQuotient->auxStateValues.begin(), ecQuotient->auxStateValues.end(), storm::utility::zero()); + + solver->solveEquations(ecQuotient->auxStateValues, ecQuotient->auxChoiceValues); - this->weightedResult = std::vector(this->model.getNumberOfStates()); + this->weightedResult = std::vector(transitionMatrix.getRowGroupCount()); - transformReducedSolutionToOriginalModel(cachedEcElimResult->matrix, subResult, solver->getSchedulerChoices(), cachedEcElimResult->newToOldRowMapping, cachedEcElimResult->oldToNewStateMapping, this->weightedResult, this->optimalChoices); + transformReducedSolutionToOriginalModel(ecQuotient->matrix, ecQuotient->auxStateValues, solver->getSchedulerChoices(), ecQuotient->ecqToOriginalChoiceMapping, ecQuotient->originalToEcqStateMapping, this->weightedResult, this->optimalChoices); } template @@ -186,11 +223,11 @@ namespace storm { } for (uint_fast64_t objIndex2 = 0; objIndex2 < this->objectives.size(); ++objIndex2) { if (objIndex != objIndex2) { - objectiveResults[objIndex2] = std::vector(this->model.getNumberOfStates(), storm::utility::zero()); + objectiveResults[objIndex2] = std::vector(transitionMatrix.getRowGroupCount(), storm::utility::zero()); } } } else { - storm::storage::SparseMatrix deterministicMatrix = this->model.getTransitionMatrix().selectRowsFromRowGroups(this->optimalChoices, true); + storm::storage::SparseMatrix deterministicMatrix = transitionMatrix.selectRowsFromRowGroups(this->optimalChoices, true); storm::storage::SparseMatrix deterministicBackwardTransitions = deterministicMatrix.transpose(); std::vector deterministicStateRewards(deterministicMatrix.getRowCount()); storm::solver::GeneralLinearEquationSolverFactory linearEquationSolverFactory; @@ -205,7 +242,7 @@ namespace storm { if (objectivesWithNoUpperTimeBound.get(objIndex)) { offsetsToUnderApproximation[objIndex] = storm::utility::zero(); offsetsToOverApproximation[objIndex] = storm::utility::zero(); - storm::utility::vector::selectVectorValues(deterministicStateRewards, this->optimalChoices, this->model.getTransitionMatrix().getRowGroupIndices(), discreteActionRewards[objIndex]); + storm::utility::vector::selectVectorValues(deterministicStateRewards, this->optimalChoices, transitionMatrix.getRowGroupIndices(), actionRewards[objIndex]); storm::storage::BitVector statesWithRewards = ~storm::utility::vector::filterZero(deterministicStateRewards); // As maybestates we pick the states from which a state with reward is reachable storm::storage::BitVector maybeStates = storm::utility::graph::performProbGreater0(deterministicBackwardTransitions, storm::storage::BitVector(deterministicMatrix.getRowCount(), true), statesWithRewards); @@ -221,7 +258,7 @@ namespace storm { storm::utility::vector::clip(objectiveResults[objIndex], obj.lowerResultBound, obj.upperResultBound); } // Make sure that the objectiveResult is initialized correctly - objectiveResults[objIndex].resize(this->model.getNumberOfStates(), storm::utility::zero()); + objectiveResults[objIndex].resize(transitionMatrix.getRowGroupCount(), storm::utility::zero()); if (!maybeStates.empty()) { storm::storage::SparseMatrix submatrix = deterministicMatrix.getSubmatrix( @@ -255,32 +292,40 @@ namespace storm { sumOfWeightsOfUncheckedObjectives -= weightVector[objIndex]; } } else { - objectiveResults[objIndex] = std::vector(this->model.getNumberOfStates(), storm::utility::zero()); + objectiveResults[objIndex] = std::vector(transitionMatrix.getRowGroupCount(), storm::utility::zero()); } } } } template - void 
SparsePcaaWeightVectorChecker::updateEcElimResult(std::vector const& weightedRewardVector) { + void SparsePcaaWeightVectorChecker::updateEcQuotient(std::vector const& weightedRewardVector) { // Check whether we need to update the currently cached ecElimResult - storm::storage::BitVector newZeroRewardChoices = storm::utility::vector::filterZero(weightedRewardVector); - if (!cachedZeroRewardChoices || cachedZeroRewardChoices.get() != newZeroRewardChoices) { - cachedZeroRewardChoices = std::move(newZeroRewardChoices); + storm::storage::BitVector newReward0Choices = storm::utility::vector::filterZero(weightedRewardVector); + if (!ecQuotient || ecQuotient->origReward0Choices != newReward0Choices) { // It is sufficient to consider the states from which a transition with non-zero reward is reachable. (The remaining states always have reward zero). - storm::storage::BitVector nonZeroRewardStates(this->model.getNumberOfStates(), false); - for (uint_fast64_t state = 0; state < this->model.getNumberOfStates(); ++state){ - if (cachedZeroRewardChoices->getNextUnsetIndex(this->model.getTransitionMatrix().getRowGroupIndices()[state]) < this->model.getTransitionMatrix().getRowGroupIndices()[state+1]) { + storm::storage::BitVector nonZeroRewardStates(transitionMatrix.getRowGroupCount(), false); + for (uint_fast64_t state = 0; state < transitionMatrix.getRowGroupCount(); ++state){ + if (newReward0Choices.getNextUnsetIndex(transitionMatrix.getRowGroupIndices()[state]) < transitionMatrix.getRowGroupIndices()[state+1]) { nonZeroRewardStates.set(state); } } - storm::storage::BitVector subsystemStates = storm::utility::graph::performProbGreater0E(this->model.getTransitionMatrix().transpose(true), storm::storage::BitVector(this->model.getNumberOfStates(), true), nonZeroRewardStates); + storm::storage::BitVector subsystemStates = storm::utility::graph::performProbGreater0E(transitionMatrix.transpose(true), storm::storage::BitVector(transitionMatrix.getRowGroupCount(), true), nonZeroRewardStates); // Remove neutral end components, i.e., ECs in which no reward is earned. 
- cachedEcElimResult = storm::transformer::EndComponentEliminator::transform(this->model.getTransitionMatrix(), subsystemStates, possibleECActions & cachedZeroRewardChoices.get(), possibleBottomStates); + auto ecElimResult = storm::transformer::EndComponentEliminator::transform(transitionMatrix, subsystemStates, ecChoicesHint & newReward0Choices, reward0EStates); + + ecQuotient = EcQuotient(); + ecQuotient->matrix = std::move(ecElimResult.matrix); + ecQuotient->ecqToOriginalChoiceMapping = std::move(ecElimResult.newToOldRowMapping); + ecQuotient->originalToEcqStateMapping = std::move(ecElimResult.oldToNewStateMapping); + ecQuotient->origReward0Choices = std::move(newReward0Choices); + ecQuotient->auxStateValues.reserve(transitionMatrix.getRowGroupCount()); + ecQuotient->auxStateValues.resize(ecQuotient->matrix.getRowGroupCount()); + ecQuotient->auxChoiceValues.reserve(transitionMatrix.getRowCount()); + ecQuotient->auxChoiceValues.resize(ecQuotient->matrix.getRowCount()); } - STORM_LOG_ASSERT(cachedEcElimResult, "Updating the ecElimResult was not successfull."); } @@ -293,32 +338,32 @@ namespace storm { std::vector& originalSolution, std::vector& originalOptimalChoices) const { - storm::storage::BitVector bottomStates(this->model.getTransitionMatrix().getRowGroupCount(), false); - storm::storage::BitVector statesThatShouldStayInTheirEC(this->model.getTransitionMatrix().getRowGroupCount(), false); - storm::storage::BitVector statesWithUndefSched(this->model.getTransitionMatrix().getRowGroupCount(), false); + storm::storage::BitVector bottomStates(transitionMatrix.getRowGroupCount(), false); + storm::storage::BitVector statesThatShouldStayInTheirEC(transitionMatrix.getRowGroupCount(), false); + storm::storage::BitVector statesWithUndefSched(transitionMatrix.getRowGroupCount(), false); // Handle all the states for which the choice in the original model is uniquely given by the choice in the reduced model // Also store some information regarding the remaining states - for(uint_fast64_t state = 0; state < this->model.getTransitionMatrix().getRowGroupCount(); ++state) { + for (uint_fast64_t state = 0; state < transitionMatrix.getRowGroupCount(); ++state) { // Check if the state exists in the reduced model, i.e., the mapping retrieves a valid index uint_fast64_t stateInReducedModel = originalToReducedStateMapping[state]; - if(stateInReducedModel < reducedMatrix.getRowGroupCount()) { + if (stateInReducedModel < reducedMatrix.getRowGroupCount()) { originalSolution[state] = reducedSolution[stateInReducedModel]; uint_fast64_t chosenRowInReducedModel = reducedMatrix.getRowGroupIndices()[stateInReducedModel] + reducedOptimalChoices[stateInReducedModel]; uint_fast64_t chosenRowInOriginalModel = reducedToOriginalChoiceMapping[chosenRowInReducedModel]; // Check if the state is a bottom state, i.e., the chosen row stays inside its EC. 
- bool stateIsBottom = possibleBottomStates.get(state); - for(auto const& entry : this->model.getTransitionMatrix().getRow(chosenRowInOriginalModel)) { + bool stateIsBottom = reward0EStates.get(state); + for (auto const& entry : transitionMatrix.getRow(chosenRowInOriginalModel)) { stateIsBottom &= originalToReducedStateMapping[entry.getColumn()] == stateInReducedModel; } - if(stateIsBottom) { + if (stateIsBottom) { bottomStates.set(state); statesThatShouldStayInTheirEC.set(state); } else { // Check if the chosen row originaly belonged to the current state (and not to another state of the EC) - if(chosenRowInOriginalModel >= this->model.getTransitionMatrix().getRowGroupIndices()[state] && - chosenRowInOriginalModel < this->model.getTransitionMatrix().getRowGroupIndices()[state+1]) { - originalOptimalChoices[state] = chosenRowInOriginalModel - this->model.getTransitionMatrix().getRowGroupIndices()[state]; + if (chosenRowInOriginalModel >= transitionMatrix.getRowGroupIndices()[state] && + chosenRowInOriginalModel < transitionMatrix.getRowGroupIndices()[state+1]) { + originalOptimalChoices[state] = chosenRowInOriginalModel - transitionMatrix.getRowGroupIndices()[state]; } else { statesWithUndefSched.set(state); statesThatShouldStayInTheirEC.set(state); @@ -329,7 +374,7 @@ namespace storm { originalSolution[state] = storm::utility::zero(); // However, it might be the case that infinite reward is induced for an objective with weight 0. // To avoid this, all possible bottom states are made bottom and the remaining states have to reach a bottom state with prob. one - if(possibleBottomStates.get(state)) { + if (reward0EStates.get(state)) { bottomStates.set(state); } else { statesWithUndefSched.set(state); @@ -338,21 +383,21 @@ namespace storm { } // Handle bottom states - for(auto state : bottomStates) { + for (auto state : bottomStates) { bool foundRowForState = false; // Find a row with zero rewards that only leads to bottom states. // If the state should stay in its EC, we also need to make sure that all successors map to the same state in the reduced model uint_fast64_t stateInReducedModel = originalToReducedStateMapping[state]; - for(uint_fast64_t row = this->model.getTransitionMatrix().getRowGroupIndices()[state]; row < this->model.getTransitionMatrix().getRowGroupIndices()[state+1]; ++row) { + for (uint_fast64_t row = transitionMatrix.getRowGroupIndices()[state]; row < transitionMatrix.getRowGroupIndices()[state+1]; ++row) { bool rowOnlyLeadsToBottomStates = true; bool rowStaysInEC = true; - for( auto const& entry : this->model.getTransitionMatrix().getRow(row)) { + for ( auto const& entry : transitionMatrix.getRow(row)) { rowOnlyLeadsToBottomStates &= bottomStates.get(entry.getColumn()); rowStaysInEC &= originalToReducedStateMapping[entry.getColumn()] == stateInReducedModel; } - if(rowOnlyLeadsToBottomStates && (rowStaysInEC || !statesThatShouldStayInTheirEC.get(state)) && actionsWithoutRewardInUnboundedPhase.get(row)) { + if (rowOnlyLeadsToBottomStates && (rowStaysInEC || !statesThatShouldStayInTheirEC.get(state)) && actionsWithoutRewardInUnboundedPhase.get(row)) { foundRowForState = true; - originalOptimalChoices[state] = row - this->model.getTransitionMatrix().getRowGroupIndices()[state]; + originalOptimalChoices[state] = row - transitionMatrix.getRowGroupIndices()[state]; break; } } @@ -361,18 +406,18 @@ namespace storm { // Handle remaining states with still undef. 
scheduler (either EC states or non-subsystem states) while(!statesWithUndefSched.empty()) { - for(auto state : statesWithUndefSched) { + for (auto state : statesWithUndefSched) { // Iteratively Try to find a choice such that at least one successor has a defined scheduler. uint_fast64_t stateInReducedModel = originalToReducedStateMapping[state]; - for(uint_fast64_t row = this->model.getTransitionMatrix().getRowGroupIndices()[state]; row < this->model.getTransitionMatrix().getRowGroupIndices()[state+1]; ++row) { + for (uint_fast64_t row = transitionMatrix.getRowGroupIndices()[state]; row < transitionMatrix.getRowGroupIndices()[state+1]; ++row) { bool rowStaysInEC = true; bool rowLeadsToDefinedScheduler = false; - for(auto const& entry : this->model.getTransitionMatrix().getRow(row)) { + for (auto const& entry : transitionMatrix.getRow(row)) { rowStaysInEC &= ( stateInReducedModel == originalToReducedStateMapping[entry.getColumn()]); rowLeadsToDefinedScheduler |= !statesWithUndefSched.get(entry.getColumn()); } - if(rowLeadsToDefinedScheduler && (rowStaysInEC || !statesThatShouldStayInTheirEC.get(state))) { - originalOptimalChoices[state] = row - this->model.getTransitionMatrix().getRowGroupIndices()[state]; + if (rowLeadsToDefinedScheduler && (rowStaysInEC || !statesThatShouldStayInTheirEC.get(state))) { + originalOptimalChoices[state] = row - transitionMatrix.getRowGroupIndices()[state]; statesWithUndefSched.set(state, false); } } diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.h b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.h index a30227066..c4316d480 100644 --- a/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.h +++ b/src/storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.h @@ -6,6 +6,7 @@ #include "storm/transformer/EndComponentEliminator.h" #include "storm/modelchecker/multiobjective/Objective.h" #include "storm/modelchecker/multiobjective/pcaa/PcaaWeightVectorChecker.h" +#include "storm/modelchecker/multiobjective/SparseMultiObjectivePreprocessorResult.h" #include "storm/utility/vector.h" namespace storm { @@ -33,10 +34,7 @@ namespace storm { * */ - SparsePcaaWeightVectorChecker(SparseModelType const& model, - std::vector> const& objectives, - storm::storage::BitVector const& possibleECActions, - storm::storage::BitVector const& possibleBottomStates); + SparsePcaaWeightVectorChecker(SparseMultiObjectivePreprocessorResult const& preprocessorResult); virtual ~SparsePcaaWeightVectorChecker() = default; @@ -66,6 +64,9 @@ namespace storm { protected: + void initialize(SparseMultiObjectivePreprocessorResult const& preprocessorResult); + virtual void initializeModelTypeSpecificData(SparseModelType const& model) = 0; + /*! * Determines the scheduler that optimizes the weighted reward vector of the unbounded objectives * @@ -91,7 +92,7 @@ namespace storm { */ virtual void boundedPhase(std::vector const& weightVector, std::vector& weightedRewardVector) = 0; - void updateEcElimResult(std::vector const& weightedRewardVector); + void updateEcQuotient(std::vector const& weightedRewardVector); /*! 
* Transforms the results of a min-max-solver that considers a reduced model (without end components) to a result for the original (unreduced) model @@ -105,18 +106,23 @@ namespace storm { std::vector& originalOptimalChoices) const; + // Data regarding the given model + // The transition matrix of the considered model + storm::storage::SparseMatrix transitionMatrix; + // The initial state of the considered model + uint64_t initialState; // Overapproximation of the set of choices that are part of an end component. - storm::storage::BitVector possibleECActions; + storm::storage::BitVector ecChoicesHint; // The actions that have reward assigned for at least one objective without upper timeBound storm::storage::BitVector actionsWithoutRewardInUnboundedPhase; - // The states for which it is allowed to visit them infinitely often - // Put differently, if one of the states is part of a neutral EC, it is possible to - // stay in this EC forever (withoud inducing infinite reward for some objective). - storm::storage::BitVector possibleBottomStates; + // The states for which there is a scheduler yielding reward 0 for each objective + storm::storage::BitVector reward0EStates; + // stores the state action rewards for each objective. + std::vector> actionRewards; + // stores the indices of the objectives for which there is no upper time bound storm::storage::BitVector objectivesWithNoUpperTimeBound; - // stores the (discretized) state action rewards for each objective. - std::vector> discreteActionRewards; + // Memory for the solution of the most recent call of check(..) // becomes true after the first call of check(..) bool checkHasBeenCalled; @@ -130,10 +136,19 @@ namespace storm { std::vector offsetsToOverApproximation; // The scheduler choices that optimize the weighted rewards of undounded objectives. std::vector optimalChoices; - // Caches the result of the ec elimination (avoiding recomputations for each weightvector) - boost::optional::EndComponentEliminatorReturnType> cachedEcElimResult; - // Stores which choices are considered to have zero reward in the current cachedEcElimiResult. - boost::optional cachedZeroRewardChoices; + + struct EcQuotient { + storm::storage::SparseMatrix matrix; + std::vector ecqToOriginalChoiceMapping; + std::vector originalToEcqStateMapping; + storm::storage::BitVector origReward0Choices; + + std::vector auxStateValues; + std::vector auxChoiceValues; + + }; + + boost::optional ecQuotient; };