Expose reward-structure accessors on the Python class "SparseParametricRewardModel":
adds has_state_rewards / has_state_action_rewards / has_transition_rewards flags,
state_rewards / state_action_rewards / transition_rewards getters, and a
reduce_to_state_based_rewards(transition_matrix, only_state_rewards) method to the
previously empty py::class_ registration (hunk grows the new file side 7 -> 16 lines).

NOTE(review): the template-argument lists inside the C++ payload appear stripped by
text extraction (e.g. "py::class_>", "SparseMatrix const&") -- verify this patch
against its original before applying; it will not apply or compile as copied here.

NOTE(review): the added property lambdas return the reward vector / transition-reward
matrix by value, so each Python attribute access copies the container; consider a
reference return with py::return_value_policy::reference_internal in a follow-up.

diff --git a/src/storage/model.cpp b/src/storage/model.cpp index 510c936..6415e8a 100644 --- a/src/storage/model.cpp +++ b/src/storage/model.cpp @@ -174,7 +174,16 @@ void define_model(py::module& m) { py::class_, std::shared_ptr>>(m, "SparseParametricMA", "pMA in sparse representation", modelRatFunc) ; - py::class_>(m, "SparseParametricRewardModel", "Reward structure for parametric sparse models"); + py::class_>(m, "SparseParametricRewardModel", "Reward structure for parametric sparse models") + .def_property_readonly("has_state_rewards", &RewardModel::hasStateRewards) + .def_property_readonly("has_state_action_rewards", &RewardModel::hasStateActionRewards) + .def_property_readonly("has_transition_rewards", &RewardModel::hasTransitionRewards) + .def_property_readonly("transition_rewards", [](RewardModel& rewardModel) {return rewardModel.getTransitionRewardMatrix();}) + .def_property_readonly("state_rewards", [](RewardModel& rewardModel) {return rewardModel.getStateRewardVector();}) + .def_property_readonly("state_action_rewards", [](RewardModel& rewardModel) {return rewardModel.getStateActionRewardVector();}) + .def("reduce_to_state_based_rewards", [](RewardModel& rewardModel, SparseMatrix const& transitions, bool onlyStateRewards){return rewardModel.reduceToStateBasedRewards(transitions, onlyStateRewards);}, py::arg("transition_matrix"), py::arg("only_state_rewards"), "Reduce to state-based rewards") + + ; }