From 0f0c21840f0ac487563db444ba19b2d0fe5d90d0 Mon Sep 17 00:00:00 2001 From: TimQu Date: Thu, 30 Jun 2016 21:43:38 +0200 Subject: [PATCH] correct handling of state rewards of markovian states Former-commit-id: 9dc50559fca2e926d06060595e8e0e44461033fb --- ...rseMaMultiObjectiveWeightVectorChecker.cpp | 24 ++++--- ...parseMaMultiObjectiveWeightVectorChecker.h | 6 -- ...seMdpMultiObjectiveWeightVectorChecker.cpp | 18 ++--- ...arseMdpMultiObjectiveWeightVectorChecker.h | 6 -- .../SparseMultiObjectivePreprocessor.cpp | 69 +++++++++++-------- .../SparseMultiObjectivePreprocessorData.h | 4 +- ...parseMultiObjectiveWeightVectorChecker.cpp | 6 +- .../SparseMultiObjectiveWeightVectorChecker.h | 8 +-- 8 files changed, 70 insertions(+), 71 deletions(-) diff --git a/src/modelchecker/multiobjective/helper/SparseMaMultiObjectiveWeightVectorChecker.cpp b/src/modelchecker/multiobjective/helper/SparseMaMultiObjectiveWeightVectorChecker.cpp index 18ac306da..4c7fd35a5 100644 --- a/src/modelchecker/multiobjective/helper/SparseMaMultiObjectiveWeightVectorChecker.cpp +++ b/src/modelchecker/multiobjective/helper/SparseMaMultiObjectiveWeightVectorChecker.cpp @@ -13,15 +13,19 @@ namespace storm { template SparseMaMultiObjectiveWeightVectorChecker::SparseMaMultiObjectiveWeightVectorChecker(PreprocessorData const& data) : SparseMultiObjectiveWeightVectorChecker(data) { - // Intentionally left empty - } - - template - std::vector::ValueType> SparseMaMultiObjectiveWeightVectorChecker::getObjectiveRewardAsDiscreteActionRewards(uint_fast64_t objectiveIndex) const { - // Assert that the state and transition rewards have already been removed in prerpocessing - STORM_LOG_ASSERT(!this->data.preprocessedModel.getRewardModel(this->data.objectives[objectiveIndex].rewardModelName).hasStateRewards(), "Reward model has state rewards which is not expected."); - STORM_LOG_ASSERT(!this->data.preprocessedModel.getRewardModel(this->data.objectives[objectiveIndex].rewardModelName).hasTransitionRewards(), "Reward model has transition rewards which is not expected."); - return this->data.preprocessedModel.getRewardModel(this->data.objectives[objectiveIndex].rewardModelName).getStateActionRewardVector(); + // Set the (discretized) state action rewards. 
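The constructor body added below folds the reward of each Markovian state into the state-action reward of its single Markovian choice, divided by the state's exit rate. A minimal standalone sketch of that discretization, using plain vectors with illustrative names rather than Storm's actual model classes:

    #include <cstdint>
    #include <vector>

    // Fold the reward r(s) of every Markovian state s into the reward of its
    // (unique) Markovian choice as r(s) / exitRate(s). rowGroupIndices[s] is the
    // index of the first choice of state s, as in a row-grouped sparse matrix.
    void discretizeMarkovianStateRewards(std::vector<double>& actionRewards,
                                         std::vector<double> const& stateRewards,
                                         std::vector<double> const& exitRates,
                                         std::vector<uint64_t> const& markovianStates,
                                         std::vector<uint64_t> const& rowGroupIndices) {
        for (uint64_t s : markovianStates) {
            actionRewards[rowGroupIndices[s]] += stateRewards[s] / exitRates[s];
        }
    }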
+ this->discreteActionRewards.resize(data.objectives.size()); + for(auto objIndex : this->unboundedObjectives) { + STORM_LOG_ASSERT(!this->data.preprocessedModel.getRewardModel(this->data.objectives[objIndex].rewardModelName).hasTransitionRewards(), "Preprocessed reward model has transition rewards which is not expected."); + this->discreteActionRewards[objIndex] = this->data.preprocessedModel.getRewardModel(this->data.objectives[objIndex].rewardModelName).getStateActionRewardVector(); + if(this->data.preprocessedModel.getRewardModel(this->data.objectives[objIndex].rewardModelName).hasStateRewards()) { + auto const& stateRewards = this->data.preprocessedModel.getRewardModel(this->data.objectives[objIndex].rewardModelName).getStateRewardVector(); + for(auto markovianState : this->data.getMarkovianStatesOfPreprocessedModel()) { + this->discreteActionRewards[objIndex][this->data.preprocessedModel.getTransitionMatrix().getRowGroupIndices()[markovianState]] += stateRewards[markovianState] / this->data.preprocessedModel.getExitRate(markovianState); + } + } + + } } template @@ -46,7 +50,7 @@ namespace storm { if(timeBoundIt != timeBounds.end() && currentEpoch == timeBoundIt->first) { objectivesAtCurrentEpoch |= timeBoundIt->second; for(auto objIndex : timeBoundIt->second) { - storm::utility::vector::addScaledVector(weightedRewardVector, getObjectiveRewardAsDiscreteActionRewards(objIndex), weightVector[objIndex]); + storm::utility::vector::addScaledVector(weightedRewardVector, this->discreteActionRewards[objIndex], weightVector[objIndex]); } ++timeBoundIt; } diff --git a/src/modelchecker/multiobjective/helper/SparseMaMultiObjectiveWeightVectorChecker.h b/src/modelchecker/multiobjective/helper/SparseMaMultiObjectiveWeightVectorChecker.h index dcc007bfe..f94a4094b 100644 --- a/src/modelchecker/multiobjective/helper/SparseMaMultiObjectiveWeightVectorChecker.h +++ b/src/modelchecker/multiobjective/helper/SparseMaMultiObjectiveWeightVectorChecker.h @@ -25,12 +25,6 @@ namespace storm { private: - - /*! - * Retrieves the rewards for the objective with the given index as state action reward vector. - */ - virtual std::vector<ValueType> getObjectiveRewardAsDiscreteActionRewards(uint_fast64_t objectiveIndex) const override; - /*!
* * @param weightVector the weight vector of the current check diff --git a/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectiveWeightVectorChecker.cpp b/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectiveWeightVectorChecker.cpp index 3c1137721..0bd726fda 100644 --- a/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectiveWeightVectorChecker.cpp +++ b/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectiveWeightVectorChecker.cpp @@ -13,15 +13,11 @@ namespace storm { template SparseMdpMultiObjectiveWeightVectorChecker::SparseMdpMultiObjectiveWeightVectorChecker(PreprocessorData const& data) : SparseMultiObjectiveWeightVectorChecker(data) { - // Intentionally left empty - } - - template - std::vector::ValueType> SparseMdpMultiObjectiveWeightVectorChecker::getObjectiveRewardAsDiscreteActionRewards(uint_fast64_t objectiveIndex) const { - // Assert that the state and transition rewards have already been removed in prerpocessing - STORM_LOG_ASSERT(!this->data.preprocessedModel.getRewardModel(this->data.objectives[objectiveIndex].rewardModelName).hasStateRewards(), "Reward model has state rewards which is not expected."); - STORM_LOG_ASSERT(!this->data.preprocessedModel.getRewardModel(this->data.objectives[objectiveIndex].rewardModelName).hasTransitionRewards(), "Reward model has transition rewards which is not expected."); - return this->data.preprocessedModel.getRewardModel(this->data.objectives[objectiveIndex].rewardModelName).getStateActionRewardVector(); + // set the state action rewards + for(uint_fast64_t objIndex = 0; objIndex < data.objectives.size(); ++objIndex) { + STORM_LOG_ASSERT(!this->data.preprocessedModel.getRewardModel(this->data.objectives[objIndex].rewardModelName).hasTransitionRewards(), "Reward model has transition rewards which is not expected."); + this->discreteActionRewards[objIndex] = this->data.preprocessedModel.getRewardModel(this->data.objectives[objIndex].rewardModelName).getTotalRewardVector(this->data.preprocessedModel.getTransitionMatrix()); + } } template @@ -44,7 +40,7 @@ namespace storm { if(timeBoundIt != timeBounds.end() && currentEpoch == timeBoundIt->first) { objectivesAtCurrentEpoch |= timeBoundIt->second; for(auto objIndex : timeBoundIt->second) { - storm::utility::vector::addScaledVector(weightedRewardVector, getObjectiveRewardAsDiscreteActionRewards(objIndex), weightVector[objIndex]); + storm::utility::vector::addScaledVector(weightedRewardVector, this->discreteActionRewards[objIndex], weightVector[objIndex]); } ++timeBoundIt; } @@ -58,7 +54,7 @@ namespace storm { // TODO we could compute the result for one of the objectives from the weighted result, the given weight vector, and the remaining objective results.
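For the MDP case, the constructor above now precomputes, for every objective, one reward value per choice via getTotalRewardVector, i.e. state, state-action and transition rewards are combined into a single state-action reward vector. A minimal sketch of such a combination over a CSR-style matrix (the names and layout below are illustrative assumptions, not Storm's API):

    #include <cstdint>
    #include <vector>

    // One total reward per choice (row): state reward of the owning state, plus
    // the row's action reward, plus the expected transition reward of the row.
    std::vector<double> totalRewardVector(std::vector<double> const& stateRewards,      // per state
                                          std::vector<double> const& actionRewards,     // per row
                                          std::vector<double> const& transitionRewards, // per matrix entry
                                          std::vector<double> const& probabilities,     // per matrix entry
                                          std::vector<uint64_t> const& rowStart,        // row -> first entry
                                          std::vector<uint64_t> const& rowGroupIndices) // state -> first row
    {
        std::vector<double> result(actionRewards);
        for (uint64_t state = 0; state + 1 < rowGroupIndices.size(); ++state) {
            for (uint64_t row = rowGroupIndices[state]; row < rowGroupIndices[state + 1]; ++row) {
                result[row] += stateRewards[state];
                for (uint64_t entry = rowStart[row]; entry < rowStart[row + 1]; ++entry) {
                    result[row] += probabilities[entry] * transitionRewards[entry];
                }
            }
        }
        return result;
    }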
for(auto objIndex : objectivesAtCurrentEpoch) { std::vector& objectiveResult = this->objectiveResults[objIndex]; - std::vector objectiveRewards = getObjectiveRewardAsDiscreteActionRewards(objIndex); + std::vector objectiveRewards = this->discreteActionRewards[objIndex]; auto rowGroupIndexIt = this->data.preprocessedModel.getTransitionMatrix().getRowGroupIndices().begin(); auto optimalChoiceIt = optimalChoicesInCurrentEpoch.begin(); for(ValueType& stateValue : temporaryResult){ diff --git a/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectiveWeightVectorChecker.h b/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectiveWeightVectorChecker.h index ab67bab22..93d67c996 100644 --- a/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectiveWeightVectorChecker.h +++ b/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectiveWeightVectorChecker.h @@ -25,12 +25,6 @@ namespace storm { private: - - /*! - * Retrieves the rewards for the objective with the given index as state action reward vector. - */ - virtual std::vector getObjectiveRewardAsDiscreteActionRewards(uint_fast64_t objectiveIndex) const override; - /*! * For each time epoch (starting with the maximal stepBound occurring in the objectives), this method * - determines the objectives that are relevant in the current time epoch diff --git a/src/modelchecker/multiobjective/helper/SparseMultiObjectivePreprocessor.cpp b/src/modelchecker/multiobjective/helper/SparseMultiObjectivePreprocessor.cpp index a4149ff58..75cebba30 100644 --- a/src/modelchecker/multiobjective/helper/SparseMultiObjectivePreprocessor.cpp +++ b/src/modelchecker/multiobjective/helper/SparseMultiObjectivePreprocessor.cpp @@ -49,7 +49,7 @@ namespace storm { // set solution for objectives for which there are no rewards left for(uint_fast64_t objIndex = 0; objIndex < data.objectives.size(); ++objIndex) { if(data.solutionsFromPreprocessing[objIndex] == PreprocessorData::PreprocessorObjectiveSolution::None && - !storm::utility::vector::hasNonZeroEntry(data.preprocessedModel.getRewardModel(data.objectives[objIndex].rewardModelName).getStateActionRewardVector())) { + data.preprocessedModel.getRewardModel(data.objectives[objIndex].rewardModelName).isAllZero()) { if(data.objectives[objIndex].threshold) { if(storm::utility::zero() > *data.objectives[objIndex].threshold || (!data.objectives[objIndex].thresholdIsStrict && storm::utility::zero() >= *data.objectives[objIndex].threshold) ){ @@ -306,30 +306,36 @@ namespace storm { updatePreprocessedModel(data, *duplicatorResult.model, duplicatorResult.newToOldStateIndexMapping); // Add a reward model that gives zero reward to the actions of states of the second copy. - std::vector objectiveRewards; + RewardModelType objectiveRewards(boost::none); if(formula.isReachabilityRewardFormula()) { - objectiveRewards = data.preprocessedModel.getRewardModel(optionalRewardModelName ? optionalRewardModelName.get() : "").getTotalRewardVector(data.preprocessedModel.getTransitionMatrix()); + objectiveRewards = data.preprocessedModel.getRewardModel(optionalRewardModelName ? 
optionalRewardModelName.get() : ""); + objectiveRewards.reduceToStateBasedRewards(data.preprocessedModel.getTransitionMatrix(), false); + if(objectiveRewards.hasStateRewards()) { + storm::utility::vector::setVectorValues(objectiveRewards.getStateRewardVector(), duplicatorResult.secondCopy, storm::utility::zero()); + } + if(objectiveRewards.hasStateActionRewards()) { + for(auto secondCopyState : duplicatorResult.secondCopy) { + std::fill_n(objectiveRewards.getStateActionRewardVector().begin() + data.preprocessedModel.getTransitionMatrix().getRowGroupIndices()[secondCopyState], data.preprocessedModel.getTransitionMatrix().getRowGroupSize(secondCopyState), storm::utility::zero()); + } + } } else if(formula.isReachabilityTimeFormula() && data.preprocessedModel.isOfType(storm::models::ModelType::MarkovAutomaton)) { - objectiveRewards = std::vector(data.preprocessedModel.getTransitionMatrix().getRowCount(), storm::utility::zero()); - storm::utility::vector::setVectorValues(objectiveRewards, data.getMarkovianStatesOfPreprocessedModel(), storm::utility::one()); + objectiveRewards = RewardModelType(std::vector(data.preprocessedModel.getTransitionMatrix().getRowCount(), storm::utility::zero())); + storm::utility::vector::setVectorValues(objectiveRewards.getStateRewardVector(), data.getMarkovianStatesOfPreprocessedModel() & duplicatorResult.firstCopy, storm::utility::one()); } else { STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The formula " << formula << " neither considers reachability probabilities nor reachability rewards " << (data.preprocessedModel.isOfType(storm::models::ModelType::MarkovAutomaton) ? "nor reachability time" : "") << ". This is not supported."); } - for(auto secondCopyState : duplicatorResult.secondCopy) { - for(uint_fast64_t choice = data.preprocessedModel.getTransitionMatrix().getRowGroupIndices()[secondCopyState]; choice < data.preprocessedModel.getTransitionMatrix().getRowGroupIndices()[secondCopyState+1]; ++choice) { - objectiveRewards[choice] = storm::utility::zero(); - } - } - storm::storage::BitVector positiveRewards = storm::utility::vector::filterGreaterZero(objectiveRewards); - storm::storage::BitVector nonNegativeRewards = positiveRewards | storm::utility::vector::filterZero(objectiveRewards); - STORM_LOG_THROW(nonNegativeRewards.full() || positiveRewards.empty(), storm::exceptions::InvalidPropertyException, "The reward model for the formula " << formula << " has positive and negative rewards which is not supported."); if(!currentObjective.rewardsArePositive){ - storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one()); + if(objectiveRewards.hasStateRewards()) { + storm::utility::vector::scaleVectorInPlace(objectiveRewards.getStateRewardVector(), -storm::utility::one()); + } + if(objectiveRewards.hasStateActionRewards()) { + storm::utility::vector::scaleVectorInPlace(objectiveRewards.getStateActionRewardVector(), -storm::utility::one()); + } } - data.preprocessedModel.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards)); + data.preprocessedModel.addRewardModel(currentObjective.rewardModelName, std::move(objectiveRewards)); // States of the first copy from which the second copy is not reachable with prob 1 under any scheduler can - // be removed as the expected reward is not defined for these states. + // be removed as the expected reward/time is not defined for these states. // We also need to enforce that the second copy will be reached eventually with prob 1. 
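The reachability-reward preprocessing above keeps the original reward model and merely clears the rewards of all choices that belong to states of the second copy. A minimal sketch of that per-state clearing over a row-grouped state-action reward vector (illustrative names, not Storm's API):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Zero the rewards of every choice belonging to the given states.
    // rowGroupIndices has one entry per state plus a final end marker.
    void zeroRewardsOfStates(std::vector<double>& actionRewards,
                             std::vector<uint64_t> const& statesToClear,
                             std::vector<uint64_t> const& rowGroupIndices) {
        for (uint64_t state : statesToClear) {
            std::fill(actionRewards.begin() + rowGroupIndices[state],
                      actionRewards.begin() + rowGroupIndices[state + 1],
                      0.0);
        }
    }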
data.preprocessedModel.getStateLabeling().setStates(data.prob1StatesLabel, data.preprocessedModel.getStateLabeling().getStates(data.prob1StatesLabel) & duplicatorResult.secondCopy); storm::storage::BitVector subsystemStates = duplicatorResult.secondCopy | storm::utility::graph::performProb1E(data.preprocessedModel, data.preprocessedModel.getBackwardTransitions(), duplicatorResult.firstCopy, duplicatorResult.gateStates); @@ -348,23 +354,32 @@ namespace storm { STORM_LOG_THROW(formula.getDiscreteTimeBound() > 0, storm::exceptions::InvalidPropertyException, "Got a cumulativeRewardFormula with time bound 0. This is not supported."); currentObjective.timeBounds = formula.getDiscreteTimeBound(); - std::vector objectiveRewards = data.preprocessedModel.getRewardModel(optionalRewardModelName ? optionalRewardModelName.get() : "").getTotalRewardVector(data.preprocessedModel.getTransitionMatrix()); + RewardModelType objectiveRewards = data.preprocessedModel.getRewardModel(optionalRewardModelName ? optionalRewardModelName.get() : ""); + objectiveRewards.reduceToStateBasedRewards(data.preprocessedModel.getTransitionMatrix(), false); if(!currentObjective.rewardsArePositive){ - storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one()); + if(objectiveRewards.hasStateRewards()) { + storm::utility::vector::scaleVectorInPlace(objectiveRewards.getStateRewardVector(), -storm::utility::one()); + } + if(objectiveRewards.hasStateActionRewards()) { + storm::utility::vector::scaleVectorInPlace(objectiveRewards.getStateActionRewardVector(), -storm::utility::one()); + } } - data.preprocessedModel.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards)); + data.preprocessedModel.addRewardModel(currentObjective.rewardModelName, std::move(objectiveRewards)); } template void SparseMultiObjectivePreprocessor::preprocessFormula(storm::logic::TotalRewardFormula const& formula, PreprocessorData& data, ObjectiveInformation& currentObjective, boost::optional const& optionalRewardModelName) { - std::vector objectiveRewards = data.preprocessedModel.getRewardModel(optionalRewardModelName ? optionalRewardModelName.get() : "").getTotalRewardVector(data.preprocessedModel.getTransitionMatrix()); - storm::storage::BitVector positiveRewards = storm::utility::vector::filterGreaterZero(objectiveRewards); - storm::storage::BitVector nonNegativeRewards = positiveRewards | storm::utility::vector::filterZero(objectiveRewards); - STORM_LOG_THROW(nonNegativeRewards.full() || positiveRewards.empty(), storm::exceptions::InvalidPropertyException, "The reward model for the formula " << formula << " has positive and negative rewards which is not supported."); + RewardModelType objectiveRewards = data.preprocessedModel.getRewardModel(optionalRewardModelName ? 
optionalRewardModelName.get() : ""); + objectiveRewards.reduceToStateBasedRewards(data.preprocessedModel.getTransitionMatrix(), false); if(!currentObjective.rewardsArePositive){ - storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one()); + if(objectiveRewards.hasStateRewards()) { + storm::utility::vector::scaleVectorInPlace(objectiveRewards.getStateRewardVector(), -storm::utility::one()); + } + if(objectiveRewards.hasStateActionRewards()) { + storm::utility::vector::scaleVectorInPlace(objectiveRewards.getStateActionRewardVector(), -storm::utility::one()); + } } - data.preprocessedModel.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards)); + data.preprocessedModel.addRewardModel(currentObjective.rewardModelName, std::move(objectiveRewards)); } @@ -397,7 +412,7 @@ namespace storm { for(uint_fast64_t objIndex = 0; objIndex < data.objectives.size(); ++objIndex) { if (!data.objectives[objIndex].rewardFinitenessChecked && !data.objectives[objIndex].rewardsArePositive) { data.objectives[objIndex].rewardFinitenessChecked = true; - actionsWithNonNegativeReward &= storm::utility::vector::filterZero(data.preprocessedModel.getRewardModel(data.objectives[objIndex].rewardModelName).getStateActionRewardVector()); + actionsWithNonNegativeReward &= storm::utility::vector::filterZero(data.preprocessedModel.getRewardModel(data.objectives[objIndex].rewardModelName).getTotalRewardVector(data.preprocessedModel.getTransitionMatrix())); objectivesWithNegativeReward.set(objIndex, true); } } @@ -447,7 +462,7 @@ namespace storm { if (!obj.rewardFinitenessChecked && obj.rewardsArePositive) { obj.rewardFinitenessChecked = true; // Find maximal end components that contain a state with positive reward - storm::storage::BitVector actionsWithPositiveRewards = storm::utility::vector::filterGreaterZero(data.preprocessedModel.getRewardModel(obj.rewardModelName).getStateActionRewardVector()); + storm::storage::BitVector actionsWithPositiveRewards = storm::utility::vector::filterGreaterZero(data.preprocessedModel.getRewardModel(obj.rewardModelName).getTotalRewardVector(data.preprocessedModel.getTransitionMatrix())); for(auto const& mec : mecDecomposition) { bool ecHasActionWithPositiveReward = false; for(auto const& stateActionsPair : mec) { diff --git a/src/modelchecker/multiobjective/helper/SparseMultiObjectivePreprocessorData.h b/src/modelchecker/multiobjective/helper/SparseMultiObjectivePreprocessorData.h index 6077758b4..8b6a38a55 100644 --- a/src/modelchecker/multiobjective/helper/SparseMultiObjectivePreprocessorData.h +++ b/src/modelchecker/multiobjective/helper/SparseMultiObjectivePreprocessorData.h @@ -51,13 +51,13 @@ namespace storm { template typename std::enable_if>::value, storm::storage::BitVector const&>::type - getMarkovianStatesOfPreprocessedModel() { + getMarkovianStatesOfPreprocessedModel() const { return preprocessedModel.getMarkovianStates(); } template typename std::enable_if>::value, storm::storage::BitVector const&>::type - getMarkovianStatesOfPreprocessedModel() { + getMarkovianStatesOfPreprocessedModel() const { STORM_LOG_THROW(false, storm::exceptions::UnexpectedException, "Tried to retrieve Markovian states but the considered model is not an MA."); } diff --git a/src/modelchecker/multiobjective/helper/SparseMultiObjectiveWeightVectorChecker.cpp b/src/modelchecker/multiobjective/helper/SparseMultiObjectiveWeightVectorChecker.cpp index 5bf5e3547..51adf5ee5 100644 --- 
a/src/modelchecker/multiobjective/helper/SparseMultiObjectiveWeightVectorChecker.cpp +++ b/src/modelchecker/multiobjective/helper/SparseMultiObjectiveWeightVectorChecker.cpp @@ -22,7 +22,7 @@ namespace storm { template - SparseMultiObjectiveWeightVectorChecker::SparseMultiObjectiveWeightVectorChecker(PreprocessorData const& data) : data(data), unboundedObjectives(data.objectives.size()),checkHasBeenCalled(false), objectiveResults(data.objectives.size()){ + SparseMultiObjectiveWeightVectorChecker::SparseMultiObjectiveWeightVectorChecker(PreprocessorData const& data) : data(data), unboundedObjectives(data.objectives.size()), discreteActionRewards(data.objectives.size()), checkHasBeenCalled(false), objectiveResults(data.objectives.size()){ // set the unbounded objectives for(uint_fast64_t objIndex = 0; objIndex < data.objectives.size(); ++objIndex) { @@ -39,7 +39,7 @@ namespace storm { STORM_LOG_DEBUG("Invoked WeightVectorChecker with weights " << std::endl << "\t" << storm::utility::vector::convertNumericVector(weightVector)); std::vector weightedRewardVector(data.preprocessedModel.getTransitionMatrix().getRowCount(), storm::utility::zero()); for(auto objIndex : unboundedObjectives) { - storm::utility::vector::addScaledVector(weightedRewardVector, getObjectiveRewardAsDiscreteActionRewards(objIndex), weightVector[objIndex]); + storm::utility::vector::addScaledVector(weightedRewardVector, discreteActionRewards[objIndex], weightVector[objIndex]); } unboundedWeightedPhase(weightedRewardVector); STORM_LOG_DEBUG("Unbounded weighted phase result: " << weightedResult[data.preprocessedModel.getInitialStates().getNextSetIndex(0)] << " (value in initial state)."); @@ -147,7 +147,7 @@ namespace storm { //one check can be omitted as the result can be computed back from the weighed result and the results from the remaining objectives for(uint_fast64_t objIndex = 0; objIndex < data.objectives.size(); ++objIndex) { if(unboundedObjectives.get(objIndex)){ - storm::utility::vector::selectVectorValues(deterministicStateRewards, this->scheduler.getChoices(), data.preprocessedModel.getTransitionMatrix().getRowGroupIndices(), getObjectiveRewardAsDiscreteActionRewards(objIndex)); + storm::utility::vector::selectVectorValues(deterministicStateRewards, this->scheduler.getChoices(), data.preprocessedModel.getTransitionMatrix().getRowGroupIndices(), discreteActionRewards[objIndex]); storm::storage::BitVector statesWithRewards = ~storm::utility::vector::filterZero(deterministicStateRewards); // As target states, we pick the states from which no reward is reachable. storm::storage::BitVector targetStates = ~storm::utility::graph::performProbGreater0(deterministicBackwardTransitions, storm::storage::BitVector(deterministicMatrix.getRowCount(), true), statesWithRewards); diff --git a/src/modelchecker/multiobjective/helper/SparseMultiObjectiveWeightVectorChecker.h b/src/modelchecker/multiobjective/helper/SparseMultiObjectiveWeightVectorChecker.h index f8e8af92e..f93acfb48 100644 --- a/src/modelchecker/multiobjective/helper/SparseMultiObjectiveWeightVectorChecker.h +++ b/src/modelchecker/multiobjective/helper/SparseMultiObjectiveWeightVectorChecker.h @@ -45,12 +45,6 @@ namespace storm { protected: - /*! - * Retrieves the rewards for the objective with the given index as state action reward vector. - * Note that this discretizes state rewards of Markovian states by dividing them with the exit rate. - */ - virtual std::vector getObjectiveRewardAsDiscreteActionRewards(uint_fast64_t objectiveIndex) const = 0 ; - /*! 
* Determines the scheduler that maximizes the weighted reward vector of the unbounded objectives * @@ -80,6 +74,8 @@ namespace storm { PreprocessorData const& data; // stores the indices of the objectives for which there is no time bound storm::storage::BitVector unboundedObjectives; + // stores the (discretized) state action rewards for each objective. + std::vector<std::vector<ValueType>> discreteActionRewards; // stores the set of states for which it is allowed to visit them infinitely often // This means that, if one of the states is part of a neutral EC, it is allowed to