
initialization of some data for reward bounded objectives

tempestpy_adaptions
TimQu, 8 years ago
parent commit 92f2f23603
2 changed files:
  1. src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp (+51)
  2. src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h (+14)

src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.cpp (+51)

@@ -124,6 +124,57 @@ namespace storm {
                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Multi-objective model checking with reward bounded objectives is not supported.");
            }
            
            template<class SparseModelType>
            void SparseMdpPcaaWeightVectorChecker<SparseModelType>::discretizeRewardBounds() {
                std::map<std::string, uint_fast64_t> rewardModelNameToDimension;
                // Initialize with max() as a marker for objectives that have no reward bound.
                objIndexToRewardDimensionMapping = std::vector<uint_fast64_t>(this->objectives.size(), std::numeric_limits<uint_fast64_t>::max());
                
                // Collect the reward models that occur as a reward bound and assign each one a dimension.
                uint_fast64_t dimensionCount = 0;
                for (uint_fast64_t objIndex = 0; objIndex < this->objectives.size(); ++objIndex) {
                    auto const& obj = this->objectives[objIndex];
                    if (obj.timeBoundReference && obj.timeBoundReference->isRewardBound()) {
                        // A new dimension is allocated only the first time a reward model occurs.
                        auto insertionRes = rewardModelNameToDimension.insert(std::make_pair(obj.timeBoundReference->getRewardName(), dimensionCount));
                        objIndexToRewardDimensionMapping[objIndex] = insertionRes.first->second;
                        if (insertionRes.second) {
                            ++dimensionCount;
                        }
                    }
                }
                
                // Now discretize each reward.
                discretizedActionRewards.reserve(dimensionCount);
                discretizedRewardBounds = std::vector<uint_fast64_t>(this->objectives.size(), 0);
                for (auto const& rewardName : rewardModelNameToDimension) {
                    STORM_LOG_THROW(this->model.hasRewardModel(rewardName.first), storm::exceptions::IllegalArgumentException, "No reward model with name '" << rewardName.first << "' found.");
                    auto const& rewardModel = this->model.getRewardModel(rewardName.first);
                    STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Transition rewards are currently not supported.");
                    std::vector<typename RewardModelType::ValueType> actionRewards = rewardModel.getTotalRewardVector(this->model.getTransitionMatrix());
                    auto discretizedRewardsAndFactor = storm::utility::vector::toIntegralVector<typename RewardModelType::ValueType, uint_fast64_t>(actionRewards);
                    discretizedActionRewards.push_back(std::move(discretizedRewardsAndFactor.first));
                    
                    // Find all objectives that refer to this reward model and apply the discretization factor to their reward bounds.
                    for (uint_fast64_t objIndex = 0; objIndex < objIndexToRewardDimensionMapping.size(); ++objIndex) {
                        if (objIndexToRewardDimensionMapping[objIndex] == rewardName.second) {
                            auto const& obj = this->objectives[objIndex];
                            STORM_LOG_THROW(!obj.lowerTimeBound, storm::exceptions::NotSupportedException, "Lower time bounds are not supported.");
                            STORM_LOG_THROW(obj.upperTimeBound, storm::exceptions::UnexpectedException, "Got a formula with a bound reference but no bound.");
                            STORM_LOG_THROW(!obj.upperTimeBound->getBound().containsVariables(), storm::exceptions::UnexpectedException, "The upper bound of a reward bounded formula still contains variables.");
                            typename RewardModelType::ValueType discretizedBound = storm::utility::convertNumber<typename RewardModelType::ValueType>(obj.upperTimeBound->getBound().evaluateAsRational());
                            discretizedBound /= discretizedRewardsAndFactor.second;
                            if (obj.upperTimeBound->isStrict() && discretizedBound == storm::utility::floor(discretizedBound)) {
                                // A strict bound that is an exact multiple of the discretization factor excludes the last step.
                                discretizedBound = storm::utility::floor(discretizedBound) - storm::utility::one<typename RewardModelType::ValueType>();
                            } else {
                                discretizedBound = storm::utility::floor(discretizedBound);
                            }
                            discretizedRewardBounds[objIndex] = storm::utility::convertNumber<uint_fast64_t>(discretizedBound);
                        }
                    }
                }
            }
            
            template class SparseMdpPcaaWeightVectorChecker<storm::models::sparse::Mdp<double>>;
#ifdef STORM_HAVE_CARL
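
Aside: toIntegralVector (above) rescales the action rewards to integers and returns the applied scaling factor; each upper bound is then divided by that factor and floored, and a strict bound that lands exactly on a multiple of the factor loses one further step so that the strict inequality is preserved. A minimal standalone sketch of that arithmetic, using double in place of Storm's exact number types and a hypothetical discretizeUpperBound helper:

#include <cmath>
#include <cstdint>
#include <iostream>

// Hypothetical helper mirroring the flooring logic in discretizeRewardBounds:
// scale the bound by the discretization factor and floor it; a strict bound
// that hits an exact multiple of the factor is decreased by one more step.
uint64_t discretizeUpperBound(double bound, double factor, bool isStrict) {
    double scaled = bound / factor;
    double floored = std::floor(scaled);
    if (isStrict && scaled == floored) {
        floored -= 1.0;
    }
    return static_cast<uint64_t>(floored);
}

int main() {
    // Rewards {0.5, 1.5, 2.0} would discretize with factor 0.5 to {1, 3, 4}.
    std::cout << discretizeUpperBound(3.2, 0.5, false) << '\n'; // 6
    std::cout << discretizeUpperBound(3.0, 0.5, false) << '\n'; // 6
    std::cout << discretizeUpperBound(3.0, 0.5, true) << '\n';  // 5
}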

src/storm/modelchecker/multiobjective/pcaa/SparseMdpPcaaWeightVectorChecker.h (+14)

@@ -65,7 +65,19 @@ namespace storm {
                 */
                void boundedPhaseWithRewardBounds(std::vector<ValueType> const& weightVector, std::vector<ValueType>& weightedRewardVector);
                
                /*!
                 * Discretizes the reward models that occur in some bounded formula.
                 */
                void discretizeRewardBounds();
                
                // Maps each objective index to the dimension of its reward bound; an entry of
                // std::numeric_limits<uint_fast64_t>::max() marks an objective without a reward bound.
                std::vector<uint_fast64_t> objIndexToRewardDimensionMapping;
                // The discretized (integral) action rewards, one vector per reward dimension.
                std::vector<std::vector<uint_fast64_t>> discretizedActionRewards;
                // The discretized upper reward bound for each objective.
                std::vector<uint_fast64_t> discretizedRewardBounds;
            };
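
For orientation, a hypothetical consumer of the three new members (not part of this commit) could check whether taking a choice keeps an objective within its discretized bound:

#include <cstdint>
#include <limits>
#include <vector>

// Illustrative sketch only; the name and signature are assumptions.
bool withinBound(uint_fast64_t objIndex, uint_fast64_t choiceIndex, uint_fast64_t accumulatedReward,
                 std::vector<uint_fast64_t> const& objIndexToRewardDimensionMapping,
                 std::vector<std::vector<uint_fast64_t>> const& discretizedActionRewards,
                 std::vector<uint_fast64_t> const& discretizedRewardBounds) {
    uint_fast64_t dim = objIndexToRewardDimensionMapping[objIndex];
    if (dim == std::numeric_limits<uint_fast64_t>::max()) {
        return true; // the objective has no reward bound
    }
    return accumulatedReward + discretizedActionRewards[dim][choiceIndex] <= discretizedRewardBounds[objIndex];
}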
