diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp
index 0916fd94c..f7a1a8467 100644
--- a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp
+++ b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp
@@ -124,6 +124,57 @@ namespace storm {
                 STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Multi-objective model checking with reward bounded objectives is not supported.");
             }
             
+            template <class SparseMdpModelType>
+            void SparseMdpPcaaWeightVectorChecker<SparseMdpModelType>::discretizeRewardBounds() {
+                
+                std::map<std::string, uint_fast64_t> rewardModelNameToDimension;
+                objIndexToRewardDimensionMapping = std::vector<uint_fast64_t>(this->objectives.size(), std::numeric_limits<uint_fast64_t>::max());
+                
+                // Collect the reward models that occur as a reward bound
+                uint_fast64_t dimensionCount = 0;
+                for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) {
+                    auto const& obj = this->objectives[objIndex];
+                    if (obj.timeBoundReference && obj.timeBoundReference->isRewardBound()) {
+                        auto insertionRes = rewardModelNameToDimension.insert(std::make_pair(obj.timeBoundReference->getRewardName(), dimensionCount));
+                        objIndexToRewardDimensionMapping[objIndex] = insertionRes.first->second;
+                        if (insertionRes.second) {
+                            ++dimensionCount;
+                        }
+                    }
+                }
+                
+                // Now discretize each reward
+                discretizedActionRewards.reserve(dimensionCount);
+                discretizedRewardBounds = std::vector<uint_fast64_t>(this->objectives.size(), 0);
+                for (auto const& rewardName : rewardModelNameToDimension) {
+                    STORM_LOG_THROW(this->model.hasRewardModel(rewardName.first), storm::exceptions::IllegalArgumentException, "No reward model with name '" << rewardName.first << "' found.");
+                    auto const& rewardModel = this->model.getRewardModel(rewardName.first);
+                    STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are currently not supported.");
+                    std::vector<typename RewardModelType::ValueType> actionRewards = rewardModel.getTotalRewardVector(this->model.getTransitionMatrix());
+                    auto discretizedRewardsAndFactor = storm::utility::vector::toIntegralVector<typename RewardModelType::ValueType, uint_fast64_t>(actionRewards);
+                    
+                    discretizedActionRewards.push_back(std::move(discretizedRewardsAndFactor.first));
+                    // Find all objectives that refer to this reward model and apply the discretization factor to the reward bounds
+                    for (uint_fast64_t objIndex = 0; objIndex < objIndexToRewardDimensionMapping.size(); ++objIndex) {
+                        if (objIndexToRewardDimensionMapping[objIndex] == rewardName.second) {
+                            auto const& obj = this->objectives[objIndex];
+                            STORM_LOG_THROW(!obj.lowerTimeBound, storm::exceptions::NotSupportedException, "Lower time bounds are not supported.");
+                            STORM_LOG_THROW(obj.upperTimeBound, storm::exceptions::UnexpectedException, "Got formula with a bound reference but no bound.");
+                            STORM_LOG_THROW(!obj.upperTimeBound->getBound().containsVariables(), storm::exceptions::UnexpectedException, "The upper bound of a reward bounded formula still contains variables.");
+                            typename RewardModelType::ValueType discretizedBound = storm::utility::convertNumber<typename RewardModelType::ValueType>(obj.upperTimeBound->getBound().evaluateAsRational());
+                            discretizedBound /= discretizedRewardsAndFactor.second;
+                            // For a strict bound that lands exactly on the discretization grid, step one unit down.
+                            if (obj.upperTimeBound->isStrict() && discretizedBound == storm::utility::floor(discretizedBound)) {
+                                discretizedBound = storm::utility::floor(discretizedBound) - storm::utility::one<typename RewardModelType::ValueType>();
+                            } else {
+                                discretizedBound = storm::utility::floor(discretizedBound);
+                            }
+                            discretizedRewardBounds[objIndex] = storm::utility::convertNumber<uint_fast64_t>(discretizedBound);
+                        }
+                    }
+                }
+            }
+            
             template class SparseMdpPcaaWeightVectorChecker<storm::models::sparse::Mdp<double>>;
             
 #ifdef STORM_HAVE_CARL
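The patch relies on `storm::utility::vector::toIntegralVector` returning a pair `(integralRewards, factor)` with `actionRewards[i] == integralRewards[i] * factor`, and then rescales each objective's reward bound by the same factor. The following standalone sketch (hypothetical helper names, not Storm code) mimics that assumed contract and the strict-bound adjustment above, so the corner case where a strict bound `< b` lands exactly on the grid can be checked in isolation:

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <utility>
#include <vector>

// Hypothetical stand-in for storm::utility::vector::toIntegralVector: assumes
// each reward is (approximately) a multiple of 1/precision and returns integral
// rewards together with a factor such that rewards[i] == result.first[i] * result.second.
std::pair<std::vector<uint64_t>, double> toIntegralVectorSketch(std::vector<double> const& rewards, uint64_t precision = 1000000) {
    std::vector<uint64_t> scaled;
    scaled.reserve(rewards.size());
    uint64_t g = 0;
    for (double r : rewards) {
        uint64_t v = static_cast<uint64_t>(r * precision + 0.5);
        g = std::gcd(g, v);
        scaled.push_back(v);
    }
    if (g == 0) { g = 1; } // all rewards are zero
    for (auto& v : scaled) { v /= g; }
    return {std::move(scaled), static_cast<double>(g) / precision};
}

// Mirrors the bound adjustment in discretizeRewardBounds(): a strict bound
// that hits the discretization grid exactly is decreased by one unit.
uint64_t discretizeUpperBound(double bound, double factor, bool strict) {
    double scaled = bound / factor;
    double floored = std::floor(scaled);
    if (strict && scaled == floored) {
        floored -= 1.0;
    }
    return static_cast<uint64_t>(floored);
}

int main() {
    auto [integralRewards, factor] = toIntegralVectorSketch({0.5, 1.5, 2.0});
    std::cout << "factor = " << factor << "\n";                    // 0.5, integral rewards {1, 3, 4}
    std::cout << discretizeUpperBound(2.0, factor, false) << "\n"; // bound <= 2.0 -> 4 discretized steps
    std::cout << discretizeUpperBound(2.0, factor, true) << "\n";  // bound <  2.0 -> 3 discretized steps
}
```

With rewards {0.5, 1.5, 2.0} the common factor is 0.5, so a non-strict bound of 2.0 admits four discretized steps while the strict bound admits only three, which is exactly what the `floor(...) - one()` branch in the patch encodes.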
diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h
index a04bd9455..4e6709680 100644
--- a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h
+++ b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h
@@ -65,7 +65,19 @@ namespace storm {
              */
             void boundedPhaseWithRewardBounds(std::vector<ValueType> const& weightVector, std::vector<ValueType>& weightedRewardVector);
-            
+            
+            /*!
+             * Discretizes the reward models that occur in some reward bounded formula.
+             */
+            void discretizeRewardBounds();
+            
+            // Maps each objective index to the dimension of its reward bound
+            // (uint_fast64_t max if the objective has no reward bound).
+            std::vector<uint_fast64_t> objIndexToRewardDimensionMapping;
+            // One discretized (integral) action reward vector per dimension.
+            std::vector<std::vector<uint_fast64_t>> discretizedActionRewards;
+            // The discretized upper reward bound of each objective (0 if not reward bounded).
+            std::vector<uint_fast64_t> discretizedRewardBounds;
+            
         };
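How `boundedPhaseWithRewardBounds` consumes these members is outside this patch. As a reading aid only (hypothetical function and parameter names, not part of the change), a consumer would plausibly keep one accumulated integral reward counter per dimension and treat an objective's bound as still satisfiable while its counter stays within the discretized bound, using the `std::numeric_limits<uint_fast64_t>::max()` sentinel to skip objectives without a reward bound:

```cpp
#include <cstdint>
#include <limits>
#include <vector>

// Hypothetical reading aid: decides whether the reward bound of objective
// objIndex is still respected given the integral rewards accumulated so far
// in each dimension.
bool objectiveStillWithinBound(uint_fast64_t objIndex,
                               std::vector<uint_fast64_t> const& objIndexToRewardDimensionMapping,
                               std::vector<uint_fast64_t> const& accumulatedRewards,
                               std::vector<uint_fast64_t> const& discretizedRewardBounds) {
    uint_fast64_t dimension = objIndexToRewardDimensionMapping[objIndex];
    if (dimension == std::numeric_limits<uint_fast64_t>::max()) {
        return true; // objective carries no reward bound
    }
    return accumulatedRewards[dimension] <= discretizedRewardBounds[objIndex];
}
```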