From 916706d06dc5febde384b90f80b1db9f5c8d4095 Mon Sep 17 00:00:00 2001
From: Sebastian Junges
Date: Sun, 13 Aug 2017 16:12:23 +0200
Subject: [PATCH] parametric reward model

---
 src/storage/model.cpp | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/storage/model.cpp b/src/storage/model.cpp
index 510c936..6415e8a 100644
--- a/src/storage/model.cpp
+++ b/src/storage/model.cpp
@@ -174,7 +174,16 @@ void define_model(py::module& m) {
 
     py::class_<SparseMarkovAutomaton<storm::RationalFunction>, std::shared_ptr<SparseMarkovAutomaton<storm::RationalFunction>>>(m, "SparseParametricMA", "pMA in sparse representation", modelRatFunc)
     ;
 
-    py::class_<RewardModel<storm::RationalFunction>>(m, "SparseParametricRewardModel", "Reward structure for parametric sparse models");
+    py::class_<RewardModel<storm::RationalFunction>>(m, "SparseParametricRewardModel", "Reward structure for parametric sparse models")
+        .def_property_readonly("has_state_rewards", &RewardModel<storm::RationalFunction>::hasStateRewards)
+        .def_property_readonly("has_state_action_rewards", &RewardModel<storm::RationalFunction>::hasStateActionRewards)
+        .def_property_readonly("has_transition_rewards", &RewardModel<storm::RationalFunction>::hasTransitionRewards)
+        .def_property_readonly("transition_rewards", [](RewardModel<storm::RationalFunction>& rewardModel) {return rewardModel.getTransitionRewardMatrix();})
+        .def_property_readonly("state_rewards", [](RewardModel<storm::RationalFunction>& rewardModel) {return rewardModel.getStateRewardVector();})
+        .def_property_readonly("state_action_rewards", [](RewardModel<storm::RationalFunction>& rewardModel) {return rewardModel.getStateActionRewardVector();})
+        .def("reduce_to_state_based_rewards", [](RewardModel<storm::RationalFunction>& rewardModel, SparseMatrix<storm::RationalFunction> const& transitions, bool onlyStateRewards){return rewardModel.reduceToStateBasedRewards(transitions, onlyStateRewards);}, py::arg("transition_matrix"), py::arg("only_state_rewards"), "Reduce to state-based rewards")
+
+        ;
 
 }