
moved application of the permissive scheduler to its own transformer

Branch: tempestpy_adaptions
Author: sjunges, 7 years ago
Commit: 6e506e5a66
Changed files:
  1. src/storm/models/sparse/Mdp.cpp (16 changed lines)
  2. src/storm/models/sparse/Mdp.h (7 changed lines)
  3. src/storm/permissivesched/PermissiveSchedulers.h (4 changed lines)
  4. src/storm/transformer/ChoiceSelector.cpp (26 changed lines)
  5. src/storm/transformer/ChoiceSelector.h (30 changed lines)

src/storm/models/sparse/Mdp.cpp (16 changed lines)

@@ -37,22 +37,6 @@ namespace storm {
                 // Intentionally left empty
             }
 
-            template <typename ValueType, typename RewardModelType>
-            Mdp<ValueType, RewardModelType> Mdp<ValueType, RewardModelType>::restrictChoices(storm::storage::BitVector const& enabledChoices) const {
-                storm::storage::sparse::ModelComponents<ValueType, RewardModelType> newComponents(this->getTransitionMatrix().restrictRows(enabledChoices));
-                newComponents.stateLabeling = this->getStateLabeling();
-                for (auto const& rewardModel : this->getRewardModels()) {
-                    newComponents.rewardModels.emplace(rewardModel.first, rewardModel.second.restrictActions(enabledChoices));
-                }
-                if (this->hasChoiceLabeling()) {
-                    newComponents.choiceLabeling = this->getChoiceLabeling().getSubLabeling(enabledChoices);
-                }
-                newComponents.stateValuations = this->getOptionalStateValuations();
-                if (this->hasChoiceOrigins()) {
-                    newComponents.choiceOrigins = this->getChoiceOrigins()->selectChoices(enabledChoices);
-                }
-                return Mdp<ValueType, RewardModelType>(std::move(newComponents));
-            }
 
             template<typename ValueType, typename RewardModelType>
             uint_least64_t Mdp<ValueType, RewardModelType>::getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const {
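
Note: the removed function is not dropped; it moves, essentially unchanged, into the new storm::transformer::ChoiceSelector introduced below. A minimal migration sketch for code that previously called Mdp::restrictChoices; the wrapper name restrictViaChoiceSelector and the double value type are illustrative, while the storm calls are the ones this commit introduces:

#include "storm/models/sparse/Mdp.h"
#include "storm/storage/BitVector.h"
#include "storm/transformer/ChoiceSelector.h"

// Sketch: obtain the restricted MDP through the new transformer instead of the
// removed Mdp::restrictChoices member.
storm::models::sparse::Mdp<double> restrictViaChoiceSelector(
        storm::models::sparse::Mdp<double> const& mdp,
        storm::storage::BitVector const& enabledChoices) {
    storm::transformer::ChoiceSelector<double> selector(mdp);
    // transform() returns a shared_ptr to a NondeterministicModel; cast back to an Mdp.
    return *(selector.transform(enabledChoices)->as<storm::models::sparse::Mdp<double>>());
}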

src/storm/models/sparse/Mdp.h (7 changed lines)

@@ -51,13 +51,6 @@ namespace storm {
             Mdp(Mdp<ValueType, RewardModelType>&& other) = default;
             Mdp& operator=(Mdp<ValueType, RewardModelType>&& other) = default;
 
-            /*!
-             * Constructs an MDP by copying the current MDP and restricting the choices of each state to the ones given by the bitvector.
-             *
-             * @param enabledActions A BitVector of lenght numberOfChoices(), which is one iff the action should be kept.
-             * @return A subMDP.
-             */
-            Mdp<ValueType, RewardModelType> restrictChoices(storm::storage::BitVector const& enabledActions) const;
 
             /*!
              * For a state/action pair, get the choice index referring to the state-action pair.

src/storm/permissivesched/PermissiveSchedulers.h (4 changed lines)

@@ -2,6 +2,7 @@
 #ifndef PERMISSIVESCHEDULERS_H
 #define PERMISSIVESCHEDULERS_H
 
+#include <storm/transformer/ChoiceSelector.h>
 #include "../logic/ProbabilityOperatorFormula.h"
 #include "../models/sparse/Mdp.h"
 #include "../models/sparse/StandardRewardModel.h"
@@ -38,7 +39,8 @@ namespace storm {
             storm::models::sparse::Mdp<double, RM> apply() const {
-                return mdp.restrictChoices(enabledChoices);
+                storm::transformer::ChoiceSelector<double, RM> cs(mdp);
+                return *(cs.transform(enabledChoices)->template as<storm::models::sparse::Mdp<double, RM>>());
             }
 
             template<typename T>

src/storm/transformer/ChoiceSelector.cpp (new file, 26 lines)

@@ -0,0 +1,26 @@
+#include "storm/transformer/ChoiceSelector.h"
+#include "storm/models/sparse/Mdp.h"
+
+namespace storm {
+    namespace transformer {
+        template <typename ValueType, typename RewardModelType>
+        std::shared_ptr<storm::models::sparse::NondeterministicModel<ValueType, RewardModelType>> ChoiceSelector<ValueType, RewardModelType>::transform(storm::storage::BitVector const& enabledActions) const
+        {
+            storm::storage::sparse::ModelComponents<ValueType, RewardModelType> newComponents(inputModel.getTransitionMatrix().restrictRows(enabledActions));
+            newComponents.stateLabeling = inputModel.getStateLabeling();
+            for (auto const& rewardModel : inputModel.getRewardModels()) {
+                newComponents.rewardModels.emplace(rewardModel.first, rewardModel.second.restrictActions(enabledActions));
+            }
+            if (inputModel.hasChoiceLabeling()) {
+                newComponents.choiceLabeling = inputModel.getChoiceLabeling().getSubLabeling(enabledActions);
+            }
+            newComponents.stateValuations = inputModel.getOptionalStateValuations();
+            if (inputModel.hasChoiceOrigins()) {
+                newComponents.choiceOrigins = inputModel.getChoiceOrigins()->selectChoices(enabledActions);
+            }
+            return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(newComponents));
+        }
+
+        template class ChoiceSelector<double>;
+    }
+}
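
The BitVector given to transform() has one bit per row (choice) of the transition matrix, and every state should keep at least one enabled row so that the restricted model is not left with an action-less state. A hedged sketch of building such a mask, here keeping only the first action of every state (the helper name and the keep-first policy are illustrative):

#include <cstdint>

#include "storm/models/sparse/Mdp.h"
#include "storm/storage/BitVector.h"

// Sketch: enable exactly one row (the first action) per state.
storm::storage::BitVector keepFirstActionPerState(storm::models::sparse::Mdp<double> const& mdp) {
    storm::storage::BitVector enabledActions(mdp.getNumberOfChoices(), false);
    auto const& rowGroupIndices = mdp.getTransitionMatrix().getRowGroupIndices();
    for (uint64_t state = 0; state < mdp.getNumberOfStates(); ++state) {
        // The first row of a state's row group is its first action.
        enabledActions.set(rowGroupIndices[state], true);
    }
    return enabledActions;
}

Feeding this mask into ChoiceSelector<double>(mdp).transform(...) then yields a model with a single choice per state.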

src/storm/transformer/ChoiceSelector.h (new file, 30 lines)

@@ -0,0 +1,30 @@
+#pragma once
+
+#include "storm/models/sparse/StandardRewardModel.h"
+#include "storm/models/sparse/NondeterministicModel.h"
+
+namespace storm {
+    namespace transformer {
+
+        template<typename ValueType, typename RewardModelType = storm::models::sparse::StandardRewardModel<ValueType>>
+        class ChoiceSelector {
+        public:
+            ChoiceSelector(storm::models::sparse::NondeterministicModel<ValueType, RewardModelType> const& inputModel) : inputModel(inputModel) {
+            }
+
+            /*!
+             * Constructs an MDP by copying the input model and restricting the choices of each state to the ones given by the bitvector.
+             *
+             * @param enabledActions A BitVector of length numberOfChoices(), which is one iff the action should be kept.
+             * @return A subMDP.
+             */
+            std::shared_ptr<storm::models::sparse::NondeterministicModel<ValueType, RewardModelType>> transform(storm::storage::BitVector const& enabledActions) const;
+
+        private:
+            storm::models::sparse::NondeterministicModel<ValueType, RewardModelType> const& inputModel;
+        };
+    }
+}
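
A short usage sketch of the class declared above (variable and function names are illustrative). ChoiceSelector only stores a const reference to the input model, so the model has to outlive the selector:

#include <iostream>
#include <memory>

#include "storm/models/sparse/Mdp.h"
#include "storm/models/sparse/NondeterministicModel.h"
#include "storm/storage/BitVector.h"
#include "storm/transformer/ChoiceSelector.h"

void selectChoicesExample(storm::models::sparse::Mdp<double> const& mdp,
                          storm::storage::BitVector const& enabledActions) {
    // The selector keeps only a reference, so 'mdp' must stay alive while it is used.
    storm::transformer::ChoiceSelector<double> selector(mdp);
    std::shared_ptr<storm::models::sparse::NondeterministicModel<double>> restricted =
        selector.transform(enabledActions);
    // Generic model functionality is available through the base-class pointer;
    // downcast with ->as<Mdp<double>>() when MDP-specific methods are needed.
    restricted->printModelInformationToStream(std::cout);
}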