diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp
index 89ebcfac0..7e000bffa 100644
--- a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp
+++ b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp
@@ -115,62 +115,33 @@ namespace storm {
                 }
             }
             
             template <class SparseMdpModelType>
             void SparseMdpPcaaWeightVectorChecker<SparseMdpModelType>::boundedPhaseWithRewardBounds(std::vector<ValueType> const& weightVector, std::vector<ValueType>& weightedRewardVector) {
-                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Multi-objective model checking with reward bounded objectives is not supported.");
-            }
-            
-            template <class SparseMdpModelType>
-            void SparseMdpPcaaWeightVectorChecker<SparseMdpModelType>::discretizeRewardBounds() {
+                if (!rewardUnfolding) {
+                    rewardUnfolding = std::make_unique<MultiDimensionalRewardUnfolding<ValueType>>(this->model, this->objectives, this->possibleECActions, this->possibleBottomStates);
+                }
+                auto initEpoch = rewardUnfolding->getStartEpoch();
+                auto epochOrder = rewardUnfolding->getEpochComputationOrder(initEpoch);
+                for (auto const& epoch : epochOrder) {
+                    computeEpochSolution(epoch, weightVector);
+                }
                 
-                std::map<std::string, uint_fast64_t> rewardModelNameToDimension;
-                objIndexToRewardDimensionMapping = std::vector<uint_fast64_t>(this->objectives.size(), std::numeric_limits<uint_fast64_t>::max());
+                auto solution = rewardUnfolding->getEpochSolution(initEpoch);
+                this->weightedResult = solution.weightedValues;
+                this->objectiveResults = solution.objectiveValues;
                 
-                // Collect the reward models that occur as reward bound
-                uint_fast64_t dimensionCount = 0;
-                for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) {
-                    auto const& obj = this->objectives[objIndex];
-                    if (obj.timeBoundReference && obj.timeBoundReference->isRewardBound()) {
-                        auto insertionRes = rewardModelNameToDimension.insert(std::make_pair(obj.timeBoundReference->getRewardName(), dimensionCount));
-                        objIndexToRewardDimensionMapping[objIndex] = insertionRes.first->second;
-                        if (insertionRes.second) {
-                            ++dimensionCount;
-                        }
-                    }
-                }
-                
-                // Now discretize each reward
-                discretizedActionRewards.reserve(dimensionCount);
-                discretizedRewardBounds = std::vector<uint_fast64_t>(this->objectives.size(), 0);
-                for (auto const& rewardName : rewardModelNameToDimension) {
-                    STORM_LOG_THROW(this->model.hasRewardModel(rewardName.first), storm::exceptions::IllegalArgumentException, "No reward model with name '" << rewardName.first << "' found.");
-                    auto const& rewardModel = this->model.getRewardModel(rewardName.first);
-                    STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are currently not supported.");
-                    std::vector<typename RewardModelType::ValueType> actionRewards = rewardModel.getTotalRewardVector(this->model.getTransitionMatrix());
-                    auto discretizedRewardsAndFactor = storm::utility::vector::toIntegralVector<typename RewardModelType::ValueType, uint_fast64_t>(actionRewards);
-                    
-                    discretizedActionRewards.push_back(std::move(discretizedRewardsAndFactor.first));
-                    // Find all objectives that reffer to this reward model and apply the discretization factor to the reward bounds
-                    for (uint_fast64_t objIndex = 0; objIndex < objIndexToRewardDimensionMapping.size(); ++objIndex) {
-                        if (objIndexToRewardDimensionMapping[objIndex] == rewardName.second) {
-                            auto const& obj = this->objectives[objIndex];
-                            STORM_LOG_THROW(!obj.lowerTimeBound, storm::exceptions::NotSupportedException, "Lower time bounds are not supported.");
-                            STORM_LOG_THROW(obj.upperTimeBound, storm::exceptions::UnexpectedException, "Got formula with a bound reference but no bound.");
-                            STORM_LOG_THROW(!obj.upperTimeBound->getBound().containsVariables(), storm::exceptions::UnexpectedException, "The upper bound of a reward bounded formula still contains variables.");
-                            typename RewardModelType::ValueType discretizedBound = storm::utility::convertNumber<typename RewardModelType::ValueType>(obj.upperTimeBound->getBound().evaluateAsRational());
-                            discretizedBound /= discretizedRewardsAndFactor.second;
-                            if (obj.upperTimeBound->isStrict() && discretizedBound == storm::utility::floor(discretizedBound)) {
-                                discretizedBound = storm::utility::floor(discretizedBound) - storm::utility::one<typename RewardModelType::ValueType>();
-                            } else {
-                                discretizedBound = storm::utility::floor(discretizedBound);
-                            }
-                            discretizedRewardBounds[objIndex] = storm::utility::convertNumber<uint_fast64_t>(discretizedBound);
-                        }
-                    }
-                }
+                for (uint64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) {
+                    // Todo: we currently assume precise results...
+                    this->offsetsToUnderApproximation[objIndex] = storm::utility::zero<ValueType>();
+                    this->offsetsToOverApproximation[objIndex] = storm::utility::zero<ValueType>();
+                }
             }
-            
+            
+            template <class SparseMdpModelType>
+            void SparseMdpPcaaWeightVectorChecker<SparseMdpModelType>::computeEpochSolution(typename MultiDimensionalRewardUnfolding<ValueType>::Epoch const& epoch, std::vector<ValueType> const& weightVector) {
+                // TODO
+            }
+            
             template class SparseMdpPcaaWeightVectorChecker<storm::models::sparse::Mdp<double>>;
diff --git a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h
index 4e6709680..134228a0c 100644
--- a/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h
+++ b/src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h
@@ -4,6 +4,7 @@
 #include <vector>
 
 #include "storm/modelchecker/multiobjective/pcaa/SparsePcaaWeightVectorChecker.h"
+#include "storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.h"
 
 namespace storm {
     namespace modelchecker {
@@ -65,18 +66,9 @@
              */
             void boundedPhaseWithRewardBounds(std::vector<ValueType> const& weightVector, std::vector<ValueType>& weightedRewardVector);
             
+            void computeEpochSolution(typename MultiDimensionalRewardUnfolding<ValueType>::Epoch const& epoch, std::vector<ValueType> const& weightVector);
             
-            /*!
-             * Discretizes the reward models that occurr at some bounded formula
-             */
-            void discretizeRewardBounds();
-            
-            
-            
-            
-            std::vector<uint_fast64_t> objIndexToRewardDimensionMapping;
-            std::vector<std::vector<uint_fast64_t>> discretizedActionRewards;
-            std::vector<uint_fast64_t> discretizedRewardBounds;
+            std::unique_ptr<MultiDimensionalRewardUnfolding<ValueType>> rewardUnfolding;
             
         };
diff --git a/src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.cpp b/src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.cpp
new file mode 100644
index 000000000..04b09c5c1
--- /dev/null
+++ b/src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.cpp
@@ -0,0 +1,108 @@
+#include "storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.h"
+
+
+namespace storm {
+    namespace modelchecker {
+        namespace multiobjective {
+            
+            template<typename ValueType>
+            MultiDimensionalRewardUnfolding<ValueType>::MultiDimensionalRewardUnfolding(storm::models::sparse::Mdp<ValueType> const& model, std::vector<Objective<ValueType>> const& objectives, storm::storage::BitVector const& possibleECActions, storm::storage::BitVector const& allowedBottomStates) : model(model), objectives(objectives), possibleECActions(possibleECActions), allowedBottomStates(allowedBottomStates) {
+                
+                
+                
+            }
+            
+            template<typename ValueType>
+            typename MultiDimensionalRewardUnfolding<ValueType>::Epoch MultiDimensionalRewardUnfolding<ValueType>::getStartEpoch() {
+                return Epoch();
+            }
+            
+            template<typename ValueType>
+            std::vector<typename MultiDimensionalRewardUnfolding<ValueType>::Epoch> MultiDimensionalRewardUnfolding<ValueType>::getEpochComputationOrder(Epoch const& startEpoch) {
+                return std::vector<Epoch>();
+            }
+            
+            template<typename ValueType>
+            typename MultiDimensionalRewardUnfolding<ValueType>::EpochModel const& MultiDimensionalRewardUnfolding<ValueType>::getModelForEpoch(Epoch const& epoch) {
+                return EpochModel();
+            }
+            
+            template<typename ValueType>
+            void MultiDimensionalRewardUnfolding<ValueType>::setEpochSolution(Epoch const& epoch, EpochSolution const& solution) {
+                
+            }
+            
+            template<typename ValueType>
+            void MultiDimensionalRewardUnfolding<ValueType>::setEpochSolution(Epoch const& epoch, EpochSolution&& solution) {
+                
+            }
+            
+            template<typename ValueType>
+            typename MultiDimensionalRewardUnfolding<ValueType>::EpochSolution const& MultiDimensionalRewardUnfolding<ValueType>::getEpochSolution(Epoch const& epoch) {
+                return EpochSolution();
+            }
+            
+            template<typename ValueType>
+            typename MultiDimensionalRewardUnfolding<ValueType>::EpochClass MultiDimensionalRewardUnfolding<ValueType>::getClassOfEpoch(Epoch const& epoch) {
+                return 0;
+            }
+            
+            
+            /*
+            template <class SparseMdpModelType>
+            void SparseMdpPcaaWeightVectorChecker<SparseMdpModelType>::discretizeRewardBounds() {
+                
+                std::map<std::string, uint_fast64_t> rewardModelNameToDimension;
+                objIndexToRewardDimensionMapping = std::vector<uint_fast64_t>(this->objectives.size(), std::numeric_limits<uint_fast64_t>::max());
+                
+                // Collect the reward models that occur as reward bound
+                uint_fast64_t dimensionCount = 0;
+                for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) {
+                    auto const& obj = this->objectives[objIndex];
+                    if (obj.timeBoundReference && obj.timeBoundReference->isRewardBound()) {
+                        auto insertionRes = rewardModelNameToDimension.insert(std::make_pair(obj.timeBoundReference->getRewardName(), dimensionCount));
+                        objIndexToRewardDimensionMapping[objIndex] = insertionRes.first->second;
+                        if (insertionRes.second) {
+                            ++dimensionCount;
+                        }
+                    }
+                }
+                
+                // Now discretize each reward
+                discretizedActionRewards.reserve(dimensionCount);
+                discretizedRewardBounds = std::vector<uint_fast64_t>(this->objectives.size(), 0);
+                for (auto const& rewardName : rewardModelNameToDimension) {
+                    STORM_LOG_THROW(this->model.hasRewardModel(rewardName.first), storm::exceptions::IllegalArgumentException, "No reward model with name '" << rewardName.first << "' found.");
+                    auto const& rewardModel = this->model.getRewardModel(rewardName.first);
+                    STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are currently not supported.");
+                    std::vector<typename RewardModelType::ValueType> actionRewards = rewardModel.getTotalRewardVector(this->model.getTransitionMatrix());
+                    auto discretizedRewardsAndFactor = storm::utility::vector::toIntegralVector<typename RewardModelType::ValueType, uint_fast64_t>(actionRewards);
+                    
+                    discretizedActionRewards.push_back(std::move(discretizedRewardsAndFactor.first));
+                    // Find all objectives that refer to this reward model and apply the discretization factor to the reward bounds
+                    for (uint_fast64_t objIndex = 0; objIndex < objIndexToRewardDimensionMapping.size(); ++objIndex) {
+                        if (objIndexToRewardDimensionMapping[objIndex] == rewardName.second) {
+                            auto const& obj = this->objectives[objIndex];
+                            STORM_LOG_THROW(!obj.lowerTimeBound, storm::exceptions::NotSupportedException, "Lower time bounds are not supported.");
+                            STORM_LOG_THROW(obj.upperTimeBound, storm::exceptions::UnexpectedException, "Got formula with a bound reference but no bound.");
+                            STORM_LOG_THROW(!obj.upperTimeBound->getBound().containsVariables(), storm::exceptions::UnexpectedException, "The upper bound of a reward bounded formula still contains variables.");
+                            typename RewardModelType::ValueType discretizedBound = storm::utility::convertNumber<typename RewardModelType::ValueType>(obj.upperTimeBound->getBound().evaluateAsRational());
+                            discretizedBound /= discretizedRewardsAndFactor.second;
+                            if (obj.upperTimeBound->isStrict() && discretizedBound == storm::utility::floor(discretizedBound)) {
+                                discretizedBound = storm::utility::floor(discretizedBound) - storm::utility::one<typename RewardModelType::ValueType>();
+                            } else {
+                                discretizedBound = storm::utility::floor(discretizedBound);
+                            }
+                            discretizedRewardBounds[objIndex] = storm::utility::convertNumber<uint_fast64_t>(discretizedBound);
+                        }
+                    }
+                }
+            }
+            */
+            
+            template class MultiDimensionalRewardUnfolding<double>;
+            template class MultiDimensionalRewardUnfolding<storm::RationalNumber>;
+            
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.h b/src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.h
new file mode 100644
index 000000000..d38663a69
--- /dev/null
+++ b/src/storm/modelchecker/multiobjective/rewardbounded/MultiDimensionalRewardUnfolding.h
@@ -0,0 +1,74 @@
+#pragma once
+
+
+#include "storm/storage/BitVector.h"
+#include "storm/storage/SparseMatrix.h"
+#include "storm/modelchecker/multiobjective/Objective.h"
+#include "storm/models/sparse/Mdp.h"
+#include "storm/utility/vector.h"
+
+namespace storm {
+    namespace modelchecker {
+        namespace multiobjective {
+            
+            
+            template<typename ValueType>
+            class MultiDimensionalRewardUnfolding {
+            public:
+                
+                
+                struct EpochModel {
+                    storm::storage::SparseMatrix<ValueType> rewardTransitions;
+                    storm::storage::SparseMatrix<ValueType> intermediateTransitions;
+                    std::vector<std::vector<ValueType>> objectiveRewards;
+                };
+                
+                struct EpochSolution {
+                    std::vector<ValueType> weightedValues;
+                    std::vector<std::vector<ValueType>> objectiveValues;
+                };
+                
+                typedef std::vector<int64_t> Epoch; // The number of reward steps that are "left" for each dimension
+                typedef uint64_t EpochClass; // Collection of epochs that consider the same epoch model
+                
+                /*
+                 *
+                 * @param model The (preprocessed) model
+                 * @param objectives The (preprocessed) objectives
+                 * @param possibleECActions Overapproximation of the actions that are part of an EC
+                 * @param allowedBottomStates The states which are allowed to become a bottom state under a considered scheduler
+                 *
+                 */
+                MultiDimensionalRewardUnfolding(storm::models::sparse::Mdp<ValueType> const& model,
+                                                std::vector<Objective<ValueType>> const& objectives,
+                                                storm::storage::BitVector const& possibleECActions,
+                                                storm::storage::BitVector const& allowedBottomStates);
+                
+                
+                Epoch getStartEpoch();
+                std::vector<Epoch> getEpochComputationOrder(Epoch const& startEpoch);
+                EpochModel const& getModelForEpoch(Epoch const& epoch);
+                
+                void setEpochSolution(Epoch const& epoch, EpochSolution const& solution);
+                void setEpochSolution(Epoch const& epoch, EpochSolution&& solution);
+                EpochSolution const& getEpochSolution(Epoch const& epoch);
+                
+            private:
+                EpochClass getClassOfEpoch(Epoch const& epoch);
+                
+                
+                storm::models::sparse::Mdp<ValueType> const& model;
+                std::vector<Objective<ValueType>> const& objectives;
+                storm::storage::BitVector possibleECActions;
+                storm::storage::BitVector allowedBottomStates;
+                
+                std::vector<std::pair<std::shared_ptr<storm::logic::Formula const>, uint64_t>> subObjectives;
+                std::vector<std::vector<uint64_t>> scaledRewards;
+                std::vector<ValueType> scalingFactors;
+                
+                std::map<EpochClass, EpochModel> epochModels;
+                std::map<Epoch, EpochSolution> epochSolutions;
+            };
+        }
+    }
+}
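
A note on the rounding rule in the removed (and, in the new file, commented-out) discretizeRewardBounds: the rewards of each bounded dimension are rescaled to integers, the bound is divided by the same scaling factor, and a strict bound that lands exactly on an integer is lowered by one step. A minimal standalone sketch of that rule, using plain doubles and a made-up function name instead of the storm::utility types:

#include <cmath>
#include <cstdint>

// Hypothetical sketch: round a scaled upper reward bound down to the largest
// integral reward amount that still satisfies the (possibly strict) bound.
uint64_t discretizeUpperBoundSketch(double bound, double scalingFactor, bool isStrict) {
    double scaled = bound / scalingFactor;
    double floored = std::floor(scaled);
    if (isStrict && scaled == floored) {
        // A strict bound "< b" excludes the integral value b itself.
        floored -= 1.0;
    }
    return static_cast<uint64_t>(floored);
}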
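The epoch-based replacement works by backward induction over epochs: getEpochComputationOrder has to list every epoch after all epochs that can be reached from it by collecting reward, so that computeEpochSolution always finds the solutions of successor epochs already available. The stub above still returns an empty vector; the following hypothetical sketch (epochComputationOrderSketch is not Storm API) illustrates the contract for a single reward dimension:

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical sketch: with one reward dimension, an epoch {e} only depends
// on epochs with fewer steps left, so counting upwards from 0 to the start
// epoch yields a valid computation order for the backward induction.
std::vector<std::vector<int64_t>> epochComputationOrderSketch(int64_t startEpoch) {
    std::vector<std::vector<int64_t>> order;
    order.reserve(static_cast<std::size_t>(startEpoch + 1));
    for (int64_t e = 0; e <= startEpoch; ++e) {
        order.push_back({e}); // epoch with e reward steps still "left"
    }
    return order;
}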
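Finally, the EpochClass typedef groups epochs that share the same epoch model, and getClassOfEpoch is still stubbed to return 0. One plausible encoding, shown purely as an assumption about where the design is headed, marks in a bit mask which dimensions have run out of reward steps, since exactly those dimensions determine which rewards and bounds are still relevant:

#include <cstdint>
#include <vector>

// Hypothetical sketch: treat an epoch entry below zero as an exhausted reward
// bound and encode the set of exhausted dimensions as a bit mask (this
// assumes at most 64 reward dimensions).
uint64_t getClassOfEpochSketch(std::vector<int64_t> const& epoch) {
    uint64_t epochClass = 0;
    for (uint64_t dim = 0; dim < epoch.size(); ++dim) {
        if (epoch[dim] < 0) {
            // Dimension dim has no reward steps left; record it in the class.
            epochClass |= (1ull << dim);
        }
    }
    return epochClass;
}

Under such an encoding, all epochs with the same set of exhausted dimensions would map to the same class and could share one entry in the epochModels map.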