From 9e31a5b83a1c0a77e58df9aa008fb30290c21790 Mon Sep 17 00:00:00 2001
From: Matthias Volk
Date: Tue, 21 Jan 2020 22:50:42 +0100
Subject: [PATCH] Fixed __str__ of models

---
 src/storage/model.cpp | 111 ++++++++++++++++++------------------------
 1 file changed, 47 insertions(+), 64 deletions(-)

diff --git a/src/storage/model.cpp b/src/storage/model.cpp
index da572db..5c1ef5a 100644
--- a/src/storage/model.cpp
+++ b/src/storage/model.cpp
@@ -71,27 +71,10 @@ std::set allVariables(SparseModel
 getParameters(SymbolicModel const& model) {
-    return model->getParameters();
-}*/
-
-template
-std::function const&)> getModelInfoPrinter(std::string name = "Model") {
-    // look, C++ has lambdas and stuff!
-    return [name](storm::models::Model const& model) {
-        std::stringstream ss;
-        model.printModelInformationToStream(ss);
-
-        // attempting a slightly readable output
-        std::string text = name + " (";
-        std::string line;
-        for (int i = 0; std::getline(ss, line); i++) {
-            if (line != "-------------------------------------------------------------- ")
-                text += line + " ";
-        }
-        return text + ")";
-    };
+std::string getModelInfoPrinter(ModelBase const& model) {
+    std::stringstream ss;
+    model.printModelInformationToStream(ss);
+    return ss.str();
 }
 
 template
@@ -198,32 +181,32 @@ void define_sparse_model(py::module& m) {
         .def("has_state_valuations", [](SparseModel const& model) {return model.hasStateValuations();}, "has state valuation?")
         .def_property_readonly("state_valuations", [](SparseModel const& model) {return model.getStateValuations();}, "state valuations")
         .def("reduce_to_state_based_rewards", &SparseModel::reduceToStateBasedRewards)
-        .def("__str__", getModelInfoPrinter())
+        .def("__str__", &getModelInfoPrinter)
         .def("to_dot", [](SparseModel& model) { std::stringstream ss; model.writeDotToStream(ss); return ss.str(); }, "Write dot to a string")
     ;
 
     py::class_, std::shared_ptr>>(m, "SparseDtmc", "DTMC in sparse representation", model)
-        .def(py::init>(), py::arg("other_model"))
-        .def("__str__", getModelInfoPrinter("DTMC"))
+        .def(py::init>(), py::arg("other_model"))
+        .def("__str__", &getModelInfoPrinter)
     ;
 
     py::class_, std::shared_ptr>>(m, "SparseMdp", "MDP in sparse representation", model)
         .def(py::init>(), py::arg("other_model"))
         .def_property_readonly("nondeterministic_choice_indices", [](SparseMdp const& mdp) { return mdp.getNondeterministicChoiceIndices(); })
         .def("apply_scheduler", [](SparseMdp const& mdp, storm::storage::Scheduler const& scheduler, bool dropUnreachableStates) { return mdp.applyScheduler(scheduler, dropUnreachableStates); } , "apply scheduler", "scheduler"_a, "drop_unreachable_states"_a = true)
-        .def("__str__", getModelInfoPrinter("MDP"))
+        .def("__str__", &getModelInfoPrinter)
     ;
 
     py::class_, std::shared_ptr>>(m, "SparsePomdp", "POMDP in sparse representation", model)
-        .def(py::init>(), py::arg("other_model"))
-        .def("__str__", getModelInfoPrinter("POMDP"))
-        .def_property_readonly("observations", &SparsePomdp::getObservations)
-        .def_property_readonly("nr_observations", &SparsePomdp::getNrObservations)
-    ;
+        .def(py::init>(), py::arg("other_model"))
+        .def("__str__", &getModelInfoPrinter)
+        .def_property_readonly("observations", &SparsePomdp::getObservations)
+        .def_property_readonly("nr_observations", &SparsePomdp::getNrObservations)
+    ;
 
     py::class_, std::shared_ptr>>(m, "SparseCtmc", "CTMC in sparse representation", model)
-        .def(py::init>(), py::arg("other_model"))
-        .def("__str__", getModelInfoPrinter("CTMC"))
+        .def(py::init>(), py::arg("other_model"))
+        .def("__str__", &getModelInfoPrinter)
     ;
 
     py::class_, std::shared_ptr>>(m, "SparseMA", "MA in sparse representation", model)
-        .def(py::init>(), py::arg("other_model"))
-        .def("__str__", getModelInfoPrinter("MA"))
+        .def(py::init>(), py::arg("other_model"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_>(m, "SparseRewardModel", "Reward structure for sparse models")
@@ -257,36 +240,36 @@ void define_sparse_model(py::module& m) {
         .def("has_state_valuations", [](SparseModel const& model) {return model.hasStateValuations();}, "has state valuation?")
         .def_property_readonly("state_valuations", [](SparseModel const& model) {return model.getStateValuations();}, "state valuations")
         .def("reduce_to_state_based_rewards", &SparseModel::reduceToStateBasedRewards)
-        .def("__str__", getModelInfoPrinter("ParametricModel"))
+        .def("__str__", &getModelInfoPrinter)
         .def("to_dot", [](SparseModel& model) { std::stringstream ss; model.writeDotToStream(ss); return ss.str(); }, "Write dot to a string")
     ;
 
     py::class_, std::shared_ptr>>(m, "SparseParametricDtmc", "pDTMC in sparse representation", modelRatFunc)
-        .def("__str__", getModelInfoPrinter("ParametricDTMC"))
+        .def("__str__", &getModelInfoPrinter)
     ;
 
     py::class_, std::shared_ptr>>(m, "SparseParametricMdp", "pMDP in sparse representation", modelRatFunc)
-        .def_property_readonly("nondeterministic_choice_indices", [](SparseMdp const& mdp) { return mdp.getNondeterministicChoiceIndices(); })
-        .def("apply_scheduler", [](SparseMdp const& mdp, storm::storage::Scheduler const& scheduler, bool dropUnreachableStates) { return mdp.applyScheduler(scheduler, dropUnreachableStates); } , "apply scheduler", "scheduler"_a, "drop_unreachable_states"_a = true)
-        .def("__str__", getModelInfoPrinter("ParametricMDP"))
+        .def_property_readonly("nondeterministic_choice_indices", [](SparseMdp const& mdp) { return mdp.getNondeterministicChoiceIndices(); })
+        .def("apply_scheduler", [](SparseMdp const& mdp, storm::storage::Scheduler const& scheduler, bool dropUnreachableStates) { return mdp.applyScheduler(scheduler, dropUnreachableStates); } , "apply scheduler", "scheduler"_a, "drop_unreachable_states"_a = true)
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, "SparseParametricCtmc", "pCTMC in sparse representation", modelRatFunc)
-        .def("__str__", getModelInfoPrinter("ParametricCTMC"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, "SparseParametricMA", "pMA in sparse representation", modelRatFunc)
-        .def("__str__", getModelInfoPrinter("ParametricMA"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_>(m, "SparseParametricRewardModel", "Reward structure for parametric sparse models")
-        .def_property_readonly("has_state_rewards", &SparseRewardModel::hasStateRewards)
-        .def_property_readonly("has_state_action_rewards", &SparseRewardModel::hasStateActionRewards)
-        .def_property_readonly("has_transition_rewards", &SparseRewardModel::hasTransitionRewards)
-        .def_property_readonly("transition_rewards", [](SparseRewardModel& rewardModel) {return rewardModel.getTransitionRewardMatrix();})
-        .def_property_readonly("state_rewards", [](SparseRewardModel& rewardModel) {return rewardModel.getStateRewardVector();})
-        .def("get_state_reward", [](SparseRewardModel& rewardModel, uint64_t state) {return rewardModel.getStateReward(state);})
-        .def("get_state_action_reward", [](SparseRewardModel& rewardModel, uint64_t action_index) {return rewardModel.getStateActionReward(action_index);})
-
-        .def_property_readonly("state_action_rewards", [](SparseRewardModel& rewardModel) {return rewardModel.getStateActionRewardVector();})
-        .def("reduce_to_state_based_rewards", [](SparseRewardModel& rewardModel, storm::storage::SparseMatrix const& transitions, bool onlyStateRewards){return rewardModel.reduceToStateBasedRewards(transitions, onlyStateRewards);}, py::arg("transition_matrix"), py::arg("only_state_rewards"), "Reduce to state-based rewards")
+        .def_property_readonly("has_state_rewards", &SparseRewardModel::hasStateRewards)
+        .def_property_readonly("has_state_action_rewards", &SparseRewardModel::hasStateActionRewards)
+        .def_property_readonly("has_transition_rewards", &SparseRewardModel::hasTransitionRewards)
+        .def_property_readonly("transition_rewards", [](SparseRewardModel& rewardModel) {return rewardModel.getTransitionRewardMatrix();})
+        .def_property_readonly("state_rewards", [](SparseRewardModel& rewardModel) {return rewardModel.getStateRewardVector();})
+        .def("get_state_reward", [](SparseRewardModel& rewardModel, uint64_t state) {return rewardModel.getStateReward(state);})
+        .def("get_state_action_reward", [](SparseRewardModel& rewardModel, uint64_t action_index) {return rewardModel.getStateActionReward(action_index);})
+
+        .def_property_readonly("state_action_rewards", [](SparseRewardModel& rewardModel) {return rewardModel.getStateActionRewardVector();})
+        .def("reduce_to_state_based_rewards", [](SparseRewardModel& rewardModel, storm::storage::SparseMatrix const& transitions, bool onlyStateRewards){return rewardModel.reduceToStateBasedRewards(transitions, onlyStateRewards);}, py::arg("transition_matrix"), py::arg("only_state_rewards"), "Reduce to state-based rewards")
     ;
 }
@@ -308,19 +291,19 @@ void define_symbolic_model(py::module& m, std::string vt_suffix) {
         .def_property_readonly("initial_states", &SymbolicModel::getInitialStates, "initial states as DD")
         .def("get_states", [](SymbolicModel const& model, storm::expressions::Expression const& expr) {return model.getStates(expr);}, py::arg("expression"), "Get states that are described by the expression")
         .def("reduce_to_state_based_rewards", &SymbolicModel::reduceToStateBasedRewards)
-        .def("__str__", getModelInfoPrinter())
+        .def("__str__", &getModelInfoPrinter)
     ;
 
     py::class_, std::shared_ptr>>(m, (prefixClassName+"Dtmc").c_str(), "DTMC in symbolic representation", model)
-        .def("__str__", getModelInfoPrinter("DTMC"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, (prefixClassName+"Mdp").c_str(), "MDP in symbolic representation", model)
-        .def("__str__", getModelInfoPrinter("MDP"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, (prefixClassName+"Ctmc").c_str(), "CTMC in symbolic representation", model)
-        .def("__str__", getModelInfoPrinter("CTMC"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, (prefixClassName+"MA").c_str(), "MA in symbolic representation", model)
-        .def("__str__", getModelInfoPrinter("MA"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_>(m, (prefixClassName+"RewardModel").c_str(), "Reward structure for symbolic models")
@@ -338,26 +321,26 @@ void define_symbolic_model(py::module& m, std::string vt_suffix) {
         .def_property_readonly("initial_states", &SymbolicModel::getInitialStates, "initial states as DD")
         .def("get_states", [](SymbolicModel const& model, storm::expressions::Expression const& expr) {return model.getStates(expr);}, py::arg("expression"), "Get states that are described by the expression")
         .def("reduce_to_state_based_rewards", &SymbolicModel::reduceToStateBasedRewards)
-        .def("__str__", getModelInfoPrinter("ParametricModel"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, (prefixParametricClassName+"Dtmc").c_str(), "pDTMC in symbolic representation", modelRatFunc)
-        .def("__str__", getModelInfoPrinter("ParametricDTMC"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, (prefixParametricClassName+"Mdp").c_str(), "pMDP in symbolic representation", modelRatFunc)
-        .def("__str__", getModelInfoPrinter("ParametricMDP"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, (prefixParametricClassName+"Ctmc").c_str(), "pCTMC in symbolic representation", modelRatFunc)
-        .def("__str__", getModelInfoPrinter("ParametricCTMC"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_, std::shared_ptr>>(m, (prefixParametricClassName+"MA").c_str(), "pMA in symbolic representation", modelRatFunc)
-        .def("__str__", getModelInfoPrinter("ParametricMA"))
+        .def("__str__", &getModelInfoPrinter)
    ;
 
     py::class_>(m, (prefixParametricClassName+"RewardModel").c_str(), "Reward structure for parametric symbolic models")
-        .def_property_readonly("has_state_rewards", &SymbolicRewardModel::hasStateRewards)
-        .def_property_readonly("has_state_action_rewards", &SymbolicRewardModel::hasStateActionRewards)
-        .def_property_readonly("has_transition_rewards", &SymbolicRewardModel::hasTransitionRewards)
+        .def_property_readonly("has_state_rewards", &SymbolicRewardModel::hasStateRewards)
+        .def_property_readonly("has_state_action_rewards", &SymbolicRewardModel::hasStateActionRewards)
+        .def_property_readonly("has_transition_rewards", &SymbolicRewardModel::hasTransitionRewards)
     ;
 }