
Merge branch 'master' into pomdp_datastructures

sjunges committed 8 years ago
commit 5931e19a09
  1. src/storm/models/sparse/Mdp.cpp (20 changes)
  2. src/storm/models/sparse/Mdp.h (15 changes)
  3. src/storm/models/sparse/NondeterministicModel.cpp (5 changes)
  4. src/storm/models/sparse/NondeterministicModel.h (6 changes)
  5. src/storm/permissivesched/PermissiveSchedulers.h (4 changes)
  6. src/storm/transformer/ChoiceSelector.cpp (26 changes)
  7. src/storm/transformer/ChoiceSelector.h (30 changes)

src/storm/models/sparse/Mdp.cpp (20 changes)

@@ -39,27 +39,7 @@ namespace storm {
    // Intentionally left empty
}

template<typename ValueType, typename RewardModelType>
Mdp<ValueType, RewardModelType> Mdp<ValueType, RewardModelType>::restrictChoices(storm::storage::BitVector const& enabledChoices) const {
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> newComponents(this->getTransitionMatrix().restrictRows(enabledChoices));
    newComponents.stateLabeling = this->getStateLabeling();
    for (auto const& rewardModel : this->getRewardModels()) {
        newComponents.rewardModels.emplace(rewardModel.first, rewardModel.second.restrictActions(enabledChoices));
    }
    if (this->hasChoiceLabeling()) {
        newComponents.choiceLabeling = this->getChoiceLabeling().getSubLabeling(enabledChoices);
    }
    newComponents.stateValuations = this->getOptionalStateValuations();
    if (this->hasChoiceOrigins()) {
        newComponents.choiceOrigins = this->getChoiceOrigins()->selectChoices(enabledChoices);
    }
    return Mdp<ValueType, RewardModelType>(std::move(newComponents));
}

template<typename ValueType, typename RewardModelType>
uint_fast64_t Mdp<ValueType, RewardModelType>::getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const {
    return this->getNondeterministicChoiceIndices()[stateactPair.getState()] + stateactPair.getAction();
}

template class Mdp<double>;
template class Mdp<storm::RationalNumber>;
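Neither removed function is lost: restrictChoices moves essentially unchanged into ChoiceSelector::transform, and getChoiceIndex into NondeterministicModel (both below). A minimal migration sketch for former callers, assuming an Mdp<double> named mdp and a BitVector named enabledChoices:

    storm::transformer::ChoiceSelector<double> selector(mdp);
    // transform() returns a shared_ptr to the NondeterministicModel base;
    // as<>() downcasts it to the concrete Mdp type.
    auto subMdp = selector.transform(enabledChoices)->as<storm::models::sparse::Mdp<double>>();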

src/storm/models/sparse/Mdp.h (15 changes)

@@ -1,9 +1,7 @@
#ifndef STORM_MODELS_SPARSE_MDP_H_
#define STORM_MODELS_SPARSE_MDP_H_

#include "storm/storage/StateActionPair.h"
#include "storm/models/sparse/NondeterministicModel.h"
#include "storm/utility/OsDetection.h"

namespace storm {
namespace models {
@@ -50,19 +48,6 @@ namespace storm {
    Mdp(Mdp<ValueType, RewardModelType>&& other) = default;
    Mdp& operator=(Mdp<ValueType, RewardModelType>&& other) = default;

    /*!
     * Constructs an MDP by copying the current MDP and restricting the choices of each state to the ones given by the bit vector.
     *
     * @param enabledActions A BitVector of length numberOfChoices(); the i-th bit is set iff the i-th choice is kept.
     * @return A sub-MDP.
     */
    Mdp<ValueType, RewardModelType> restrictChoices(storm::storage::BitVector const& enabledActions) const;

    /*!
     * Retrieves the global choice index corresponding to the given state-action pair.
     */
    uint_fast64_t getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const;
};
} // namespace sparse

src/storm/models/sparse/NondeterministicModel.cpp (5 changes)

@@ -186,6 +186,11 @@ namespace storm {
    }
}

template<typename ValueType, typename RewardModelType>
uint_fast64_t NondeterministicModel<ValueType, RewardModelType>::getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const {
    return this->getNondeterministicChoiceIndices()[stateactPair.getState()] + stateactPair.getAction();
}

template class NondeterministicModel<double>;
#ifdef STORM_HAVE_CARL
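getChoiceIndex exploits the row-group layout of the sparse transition matrix: getNondeterministicChoiceIndices() maps each state to the first global row of its group, so adding the state-local action offset yields the global choice (row) index. A small worked sketch with assumed row groups:

    // Assumed layout: state 0 owns rows 0..1, state 1 owns rows 2..4,
    // state 2 owns row 5, so the offset vector is {0, 2, 5, 6}.
    std::vector<uint_fast64_t> nondeterministicChoiceIndices = {0, 2, 5, 6};
    // State 1, action 2: the third choice of state 1 is global row 2 + 2 = 4.
    uint_fast64_t choice = nondeterministicChoiceIndices[1] + 2;  // == 4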

src/storm/models/sparse/NondeterministicModel.h (6 changes)

@@ -2,7 +2,7 @@
#define STORM_MODELS_SPARSE_NONDETERMINISTICMODEL_H_

#include "storm/models/sparse/Model.h"
#include "storm/utility/OsDetection.h"
#include "storm/storage/StateActionPair.h"

namespace storm {
@@ -55,6 +55,10 @@ namespace storm {
    virtual void reduceToStateBasedRewards() override;

    /*!
     * Retrieves the global choice index corresponding to the given state-action pair.
     */
    uint_fast64_t getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const;

    /*!
     * Applies the given scheduler to this model.
     * @param scheduler the considered scheduler.
src/storm/permissivesched/PermissiveSchedulers.h (4 changes)

@@ -2,6 +2,7 @@
#ifndef PERMISSIVESCHEDULERS_H
#define PERMISSIVESCHEDULERS_H

#include <storm/transformer/ChoiceSelector.h>
#include "../logic/ProbabilityOperatorFormula.h"
#include "../models/sparse/Mdp.h"
#include "../models/sparse/StandardRewardModel.h"
@@ -38,7 +39,8 @@ namespace storm {
    storm::models::sparse::Mdp<double, RM> apply() const {
        // Previously: return mdp.restrictChoices(enabledChoices);
        storm::transformer::ChoiceSelector<double, RM> cs(mdp);
        return *(cs.transform(enabledChoices)->template as<storm::models::sparse::Mdp<double, RM>>());
    }

    template<typename T>
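Two details of the new apply() body are easy to miss: transform() hands back a std::shared_ptr, so the result is dereferenced and copied into the by-value return, and the template keyword before as is mandatory because as is a member template invoked on an expression whose type depends on the template parameter RM:

    // Inside a template (RM is a parameter), the dependent call needs the disambiguator:
    //     ptr->template as<storm::models::sparse::Mdp<double, RM>>()
    // With RM fixed to a concrete type, plain ptr->as<...>() suffices.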

src/storm/transformer/ChoiceSelector.cpp (26 changes)

@@ -0,0 +1,26 @@
#include "storm/transformer/ChoiceSelector.h"
#include "storm/models/sparse/Mdp.h"

namespace storm {
namespace transformer {

template<typename ValueType, typename RewardModelType>
std::shared_ptr<storm::models::sparse::NondeterministicModel<ValueType, RewardModelType>> ChoiceSelector<ValueType, RewardModelType>::transform(storm::storage::BitVector const& enabledActions) const {
    // Keep only the enabled rows (choices) of the transition matrix.
    storm::storage::sparse::ModelComponents<ValueType, RewardModelType> newComponents(inputModel.getTransitionMatrix().restrictRows(enabledActions));
    newComponents.stateLabeling = inputModel.getStateLabeling();
    // Restrict every reward model to the enabled actions.
    for (auto const& rewardModel : inputModel.getRewardModels()) {
        newComponents.rewardModels.emplace(rewardModel.first, rewardModel.second.restrictActions(enabledActions));
    }
    if (inputModel.hasChoiceLabeling()) {
        newComponents.choiceLabeling = inputModel.getChoiceLabeling().getSubLabeling(enabledActions);
    }
    newComponents.stateValuations = inputModel.getOptionalStateValuations();
    if (inputModel.hasChoiceOrigins()) {
        newComponents.choiceOrigins = inputModel.getChoiceOrigins()->selectChoices(enabledActions);
    }
    return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(newComponents));
}

template class ChoiceSelector<double>;
}
}

src/storm/transformer/ChoiceSelector.h (30 changes)

@@ -0,0 +1,30 @@
#pragma once

#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/models/sparse/NondeterministicModel.h"

namespace storm {
namespace transformer {

template<typename ValueType, typename RewardModelType = storm::models::sparse::StandardRewardModel<ValueType>>
class ChoiceSelector {
public:
    ChoiceSelector(storm::models::sparse::NondeterministicModel<ValueType, RewardModelType> const& inputModel) : inputModel(inputModel) {
        // Intentionally left empty
    }

    /*!
     * Constructs a submodel by copying the input model and restricting the choices of each state to the ones given by the bit vector.
     *
     * @param enabledActions A BitVector of length numberOfChoices(); the i-th bit is set iff the i-th choice is kept.
     * @return A sub-MDP.
     */
    std::shared_ptr<storm::models::sparse::NondeterministicModel<ValueType, RewardModelType>> transform(storm::storage::BitVector const& enabledActions) const;

private:
    storm::models::sparse::NondeterministicModel<ValueType, RewardModelType> const& inputModel;
};
}
}
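A usage sketch for the new transformer (the model setup and the choice index to drop are assumed; BitVector's length-with-default constructor and set() are existing storm APIs):

    // Keep every choice except one, picked here purely for illustration.
    storm::storage::BitVector enabledActions(mdp.getNumberOfChoices(), true);
    enabledActions.set(3, false);
    storm::transformer::ChoiceSelector<double> selector(mdp);
    auto restricted = selector.transform(enabledActions);  // shared_ptr to the restricted model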