#include "model.h" #include "state.h" #include "storm/models/ModelBase.h" #include "storm/models/sparse/Model.h" #include "storm/models/sparse/Dtmc.h" #include "storm/models/sparse/Mdp.h" #include "storm/models/sparse/Smg.h" #include "storm/models/sparse/StochasticTwoPlayerGame.h" #include "storm/models/sparse/Pomdp.h" #include "storm/models/sparse/Ctmc.h" #include "storm/models/sparse/MarkovAutomaton.h" #include "storm/models/sparse/StandardRewardModel.h" #include "storm/models/symbolic/Model.h" #include "storm/models/symbolic/Dtmc.h" #include "storm/models/symbolic/Mdp.h" #include "storm/models/symbolic/Ctmc.h" #include "storm/models/symbolic/MarkovAutomaton.h" #include "storm/models/symbolic/StandardRewardModel.h" #include "storm/utility/dd.h" #include "storm/storage/dd/DdManager.h" #include "storm/storage/Scheduler.h" #include #include #include // Typedefs using RationalFunction = storm::RationalFunction; using ModelBase = storm::models::ModelBase; template using ModelComponents = storm::storage::sparse::ModelComponents; template using SparseModel = storm::models::sparse::Model; template using SparseDtmc = storm::models::sparse::Dtmc; template using SparseMdp = storm::models::sparse::Mdp; template using Smg = storm::models::sparse::Smg; template using StochasticTwoPlayerGame = storm::models::sparse::StochasticTwoPlayerGame; template using SparsePomdp = storm::models::sparse::Pomdp; template using SparseCtmc = storm::models::sparse::Ctmc; template using SparseMarkovAutomaton = storm::models::sparse::MarkovAutomaton; template using SparseRewardModel = storm::models::sparse::StandardRewardModel; template using SymbolicModel = storm::models::symbolic::Model; template using SymbolicDtmc = storm::models::symbolic::Dtmc; template using SymbolicMdp = storm::models::symbolic::Mdp; template using SymbolicCtmc = storm::models::symbolic::Ctmc; template using SymbolicMarkovAutomaton = storm::models::symbolic::MarkovAutomaton; template using SymbolicRewardModel = storm::models::symbolic::StandardRewardModel; // Thin wrappers template std::vector getSparseInitialStates(SparseModel const& model) { std::vector initialStates; for (auto entry : model.getInitialStates()) { initialStates.push_back(entry); } return initialStates; } template storm::storage::SparseMatrix& getTransitionMatrix(SparseModel& model) { return model.getTransitionMatrix(); } // requires pycarl.Variable std::set probabilityVariables(SparseModel const& model) { return storm::models::sparse::getProbabilityParameters(model); } std::set rewardVariables(SparseModel const& model) { return storm::models::sparse::getRewardParameters(model); } std::set allVariables(SparseModel const& model) { return storm::models::sparse::getAllParameters(model); } std::string getModelInfoPrinter(ModelBase const& model) { std::stringstream ss; model.printModelInformationToStream(ss); return ss.str(); } template storm::models::sparse::StateLabeling& getLabeling(SparseModel& model) { return model.getStateLabeling(); } // Bindings for general models void define_model(py::module& m) { // ModelType py::enum_(m, "ModelType", "Type of the model") .value("DTMC", storm::models::ModelType::Dtmc) .value("MDP", storm::models::ModelType::Mdp) .value("POMDP", storm::models::ModelType::Pomdp) .value("CTMC", storm::models::ModelType::Ctmc) .value("MA", storm::models::ModelType::MarkovAutomaton) .value("SMG", storm::models::ModelType::Smg) ; // ModelBase py::class_> modelBase(m, "_ModelBase", "Base class for all models"); modelBase.def_property_readonly("nr_states", 
// Bindings for general models
void define_model(py::module& m) {
    // ModelType
    py::enum_<storm::models::ModelType>(m, "ModelType", "Type of the model")
        .value("DTMC", storm::models::ModelType::Dtmc)
        .value("MDP", storm::models::ModelType::Mdp)
        .value("POMDP", storm::models::ModelType::Pomdp)
        .value("CTMC", storm::models::ModelType::Ctmc)
        .value("MA", storm::models::ModelType::MarkovAutomaton)
        .value("SMG", storm::models::ModelType::Smg)
    ;

    // ModelBase
    py::class_<ModelBase, std::shared_ptr<ModelBase>> modelBase(m, "_ModelBase", "Base class for all models");
    modelBase.def_property_readonly("nr_states", &ModelBase::getNumberOfStates, "Number of states")
        .def_property_readonly("nr_transitions", &ModelBase::getNumberOfTransitions, "Number of transitions")
        .def_property_readonly("nr_choices", &ModelBase::getNumberOfChoices, "Number of choices")
        .def_property_readonly("model_type", &ModelBase::getType, "Model type")
        .def_property_readonly("supports_parameters", &ModelBase::supportsParameters, "Flag whether model supports parameters")
        .def_property_readonly("has_parameters", &ModelBase::hasParameters, "Flag whether model has parameters")
        .def_property_readonly("is_exact", &ModelBase::isExact, "Flag whether model is exact")
        .def_property_readonly("is_partially_observable", &ModelBase::isPartiallyObservable, "Flag whether the model has observation labels")
        .def_property_readonly("is_sparse_model", &ModelBase::isSparseModel, "Flag whether the model is stored as a sparse model")
        .def_property_readonly("is_symbolic_model", &ModelBase::isSymbolicModel, "Flag whether the model is stored using decision diagrams")
        .def_property_readonly("is_discrete_time_model", &ModelBase::isDiscreteTimeModel, "Flag whether the model is a discrete-time model")
        .def_property_readonly("is_nondeterministic_model", &ModelBase::isNondeterministicModel, "Flag whether the model contains nondeterminism")
        .def("_as_sparse_dtmc", [](ModelBase &modelbase) {
                return modelbase.as<SparseDtmc<double>>();
            }, "Get model as sparse DTMC")
        .def("_as_sparse_exact_dtmc", [](ModelBase &modelbase) {
                return modelbase.as<SparseDtmc<storm::RationalNumber>>();
            }, "Get model as sparse exact DTMC")
        .def("_as_sparse_pdtmc", [](ModelBase &modelbase) {
                return modelbase.as<SparseDtmc<RationalFunction>>();
            }, "Get model as sparse pDTMC")
        .def("_as_sparse_mdp", [](ModelBase &modelbase) {
                return modelbase.as<SparseMdp<double>>();
            }, "Get model as sparse MDP")
        .def("_as_sparse_exact_mdp", [](ModelBase &modelbase) {
                return modelbase.as<SparseMdp<storm::RationalNumber>>();
            }, "Get model as sparse exact MDP")
        .def("_as_sparse_pmdp", [](ModelBase &modelbase) {
                return modelbase.as<SparseMdp<RationalFunction>>();
            }, "Get model as sparse pMDP")
        .def("_as_sparse_pomdp", [](ModelBase &modelbase) {
                return modelbase.as<SparsePomdp<double>>();
            }, "Get model as sparse POMDP")
        .def("_as_sparse_ppomdp", [](ModelBase &modelbase) {
                return modelbase.as<SparsePomdp<RationalFunction>>();
            }, "Get model as sparse pPOMDP")
        .def("_as_sparse_ctmc", [](ModelBase &modelbase) {
                return modelbase.as<SparseCtmc<double>>();
            }, "Get model as sparse CTMC")
        .def("_as_sparse_pctmc", [](ModelBase &modelbase) {
                return modelbase.as<SparseCtmc<RationalFunction>>();
            }, "Get model as sparse pCTMC")
        .def("_as_sparse_ma", [](ModelBase &modelbase) {
                return modelbase.as<SparseMarkovAutomaton<double>>();
            }, "Get model as sparse MA")
        .def("_as_sparse_pma", [](ModelBase &modelbase) {
                return modelbase.as<SparseMarkovAutomaton<RationalFunction>>();
            }, "Get model as sparse pMA")
        .def("_as_symbolic_dtmc", [](ModelBase &modelbase) {
                return modelbase.as<SymbolicDtmc<storm::dd::DdType::Sylvan, double>>();
            }, "Get model as symbolic DTMC")
        .def("_as_symbolic_pdtmc", [](ModelBase &modelbase) {
                return modelbase.as<SymbolicDtmc<storm::dd::DdType::Sylvan, RationalFunction>>();
            }, "Get model as symbolic pDTMC")
        .def("_as_symbolic_mdp", [](ModelBase &modelbase) {
                return modelbase.as<SymbolicMdp<storm::dd::DdType::Sylvan, double>>();
            }, "Get model as symbolic MDP")
        .def("_as_symbolic_pmdp", [](ModelBase &modelbase) {
                return modelbase.as<SymbolicMdp<storm::dd::DdType::Sylvan, RationalFunction>>();
            }, "Get model as symbolic pMDP")
        .def("_as_symbolic_ctmc", [](ModelBase &modelbase) {
                return modelbase.as<SymbolicCtmc<storm::dd::DdType::Sylvan, double>>();
            }, "Get model as symbolic CTMC")
        .def("_as_symbolic_pctmc", [](ModelBase &modelbase) {
                return modelbase.as<SymbolicCtmc<storm::dd::DdType::Sylvan, RationalFunction>>();
            }, "Get model as symbolic pCTMC")
        .def("_as_symbolic_ma", [](ModelBase &modelbase) {
                return modelbase.as<SymbolicMarkovAutomaton<storm::dd::DdType::Sylvan, double>>();
            }, "Get model as symbolic MA")
        .def("_as_symbolic_pma", [](ModelBase &modelbase) {
                return modelbase.as<SymbolicMarkovAutomaton<storm::dd::DdType::Sylvan, RationalFunction>>();
            }, "Get model as symbolic pMA")
    ;
}
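// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the bindings): the downcast pattern behind
// the _as_sparse_* and _as_symbolic_* helpers above. ModelBase::as<T>() casts
// the shared model pointer to the requested model class; the guard below uses
// only queries that are also exposed on _ModelBase. The function name is a
// hypothetical example.
// ---------------------------------------------------------------------------
std::shared_ptr<SparseDtmc<double>> exampleAsSparseDtmc(std::shared_ptr<ModelBase> const& modelbase) {
    if (modelbase->isSparseModel() && modelbase->getType() == storm::models::ModelType::Dtmc
            && !modelbase->isExact() && !modelbase->hasParameters()) {
        return modelbase->as<SparseDtmc<double>>();
    }
    return nullptr;
}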
// Bindings for sparse models
template<typename ValueType>
void define_sparse_model(py::module& m, std::string const& vtSuffix) {
    // Models with double numbers
    py::class_<SparseModel<ValueType>, std::shared_ptr<SparseModel<ValueType>>, ModelBase> model(m, ("_Sparse" + vtSuffix + "Model").c_str(), "A probabilistic model where transitions are represented by doubles and saved in a sparse matrix");
    model.def_property_readonly("labeling", &getLabeling<ValueType>, "Labels")
        .def("has_choice_labeling", [](SparseModel<ValueType> const& model) {return model.hasChoiceLabeling();}, "Does the model have an associated choice labelling?")
        .def_property_readonly("choice_labeling", [](SparseModel<ValueType> const& model) {return model.getChoiceLabeling();}, "get choice labelling")
        .def("has_choice_origins", [](SparseModel<ValueType> const& model) {return model.hasChoiceOrigins();}, "has choice origins?")
        .def_property_readonly("choice_origins", [](SparseModel<ValueType> const& model) {return model.getChoiceOrigins();})
        .def("labels_state", &SparseModel<ValueType>::getLabelsOfState, py::arg("state"), "Get labels of state")
        .def_property_readonly("initial_states", &getSparseInitialStates<ValueType>, "Initial states")
        .def_property_readonly("states", [](SparseModel<ValueType>& model) {
                return SparseModelStates<ValueType>(model);
            }, "Get states")
        .def_property_readonly("reward_models", [](SparseModel<ValueType>& model) {return model.getRewardModels(); }, "Reward models")
        .def_property_readonly("transition_matrix", &getTransitionMatrix<ValueType>, py::return_value_policy::reference, py::keep_alive<1, 0>(), "Transition matrix")
        .def_property_readonly("backward_transition_matrix", &SparseModel<ValueType>::getBackwardTransitions, py::return_value_policy::reference, py::keep_alive<1, 0>(), "Backward transition matrix")
        .def("get_reward_model", [](SparseModel<ValueType>& model, std::string const& name) {return model.getRewardModel(name);}, py::return_value_policy::reference, py::keep_alive<1, 0>(), "Reward model")
        .def("has_state_valuations", [](SparseModel<ValueType> const& model) {return model.hasStateValuations();}, "has state valuation?")
        .def_property_readonly("state_valuations", [](SparseModel<ValueType> const& model) {return model.getStateValuations();}, "state valuations")
        .def("reduce_to_state_based_rewards", &SparseModel<ValueType>::reduceToStateBasedRewards)
        .def("is_sink_state", &SparseModel<ValueType>::isSinkState, py::arg("state"))
        .def("__str__", &getModelInfoPrinter)
        .def("to_dot", [](SparseModel<ValueType>& model) { std::stringstream ss; model.writeDotToStream(ss); return ss.str(); }, "Write dot to a string")
    ;

    py::class_<SparseDtmc<ValueType>, std::shared_ptr<SparseDtmc<ValueType>>>(m, ("Sparse" + vtSuffix + "Dtmc").c_str(), "DTMC in sparse representation", model)
        .def(py::init<SparseDtmc<ValueType>>(), py::arg("other_model"))
        .def(py::init<ModelComponents<ValueType> const&>(), py::arg("components"))
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SparseMdp<ValueType>, std::shared_ptr<SparseMdp<ValueType>>> mdp(m, ("Sparse" + vtSuffix + "Mdp").c_str(), "MDP in sparse representation", model);
    mdp.def(py::init<SparseMdp<ValueType>>(), py::arg("other_model"))
        .def(py::init<ModelComponents<ValueType> const&, storm::models::ModelType>(), py::arg("components"), py::arg("type")=storm::models::ModelType::Mdp)
        .def_property_readonly("nondeterministic_choice_indices", [](SparseMdp<ValueType> const& mdp) { return mdp.getNondeterministicChoiceIndices(); })
        .def("get_nr_available_actions", [](SparseMdp<ValueType> const& mdp, uint64_t stateIndex) { return mdp.getNondeterministicChoiceIndices()[stateIndex+1] - mdp.getNondeterministicChoiceIndices()[stateIndex]; }, py::arg("state"))
        .def("get_choice_index", [](SparseMdp<ValueType> const& mdp, uint64_t state, uint64_t actOff) { return mdp.getNondeterministicChoiceIndices()[state]+actOff; }, py::arg("state"), py::arg("action_offset"), "Gets the choice index for the offset action from the given state.")
        .def("apply_scheduler", [](SparseMdp<ValueType> const& mdp, storm::storage::Scheduler<ValueType> const& scheduler, bool dropUnreachableStates) { return mdp.applyScheduler(scheduler, dropUnreachableStates); }, "apply scheduler", "scheduler"_a, "drop_unreachable_states"_a = true)
        .def("__str__", &getModelInfoPrinter)
        .def("get_label_of_choice", [](SparseMdp<ValueType> const& mdp, uint64_t state, uint64_t actOff) {return mdp.getChoiceLabeling().getLabelsOfChoice(mdp.getNondeterministicChoiceIndices()[state]+actOff);}, py::arg("state"), py::arg("action_offset"))
    ;
    py::class_<Smg<ValueType>, std::shared_ptr<Smg<ValueType>>> smg(m, ("Sparse" + vtSuffix + "Smg").c_str(), "SMG in sparse representation", model);
    smg.def(py::init<Smg<ValueType>>(), py::arg("other_model"))
        .def(py::init<ModelComponents<ValueType> const&>(), py::arg("components"))
        .def("get_player_of_state", &Smg<ValueType>::getPlayerOfState, py::arg("state_index"))
        .def("get_player_index", &Smg<ValueType>::getPlayerIndex, py::arg("player_name"))
        .def("get_choice_index", [](Smg<ValueType> const& smg, uint64_t state, uint64_t actOff) { return smg.getNondeterministicChoiceIndices()[state]+actOff; }, py::arg("state"), py::arg("action_offset"), "Gets the choice index for the offset action from the given state.")
        .def("get_label_of_choice", [](Smg<ValueType> const& smg, uint64_t state, uint64_t actOff) {return smg.getChoiceLabeling().getLabelsOfChoice(smg.getNondeterministicChoiceIndices()[state]+actOff);}, py::arg("state"), py::arg("action_offset"))
    ;

    py::class_<StochasticTwoPlayerGame<ValueType>, std::shared_ptr<StochasticTwoPlayerGame<ValueType>>> stg(m, ("Sparse" + vtSuffix + "Stg").c_str(), "STG in sparse representation", model);
    stg.def(py::init<StochasticTwoPlayerGame<ValueType>>(), py::arg("other_model"))
        .def(py::init<ModelComponents<ValueType> const&>(), py::arg("components"))
        .def_property_readonly("player_1_matrix", &StochasticTwoPlayerGame<ValueType>::getPlayer1Matrix)
        .def_property_readonly("player_2_matrix", &StochasticTwoPlayerGame<ValueType>::getPlayer2Matrix)
        .def_property_readonly("has_player_2_choice_labeling", &StochasticTwoPlayerGame<ValueType>::hasPlayer2ChoiceLabeling)
        .def_property_readonly("player_2_choice_labeling", &StochasticTwoPlayerGame<ValueType>::getPlayer2ChoiceLabeling)
    ;

    py::class_<SparsePomdp<ValueType>, std::shared_ptr<SparsePomdp<ValueType>>>(m, ("Sparse" + vtSuffix + "Pomdp").c_str(), "POMDP in sparse representation", mdp)
        .def(py::init<SparsePomdp<ValueType>>(), py::arg("other_model"))
        .def(py::init<ModelComponents<ValueType> const&, bool>(), py::arg("components"), py::arg("canonic_flag")=false)
        .def("__str__", &getModelInfoPrinter)
        .def("get_observation", &SparsePomdp<ValueType>::getObservation, py::arg("state"))
        .def_property_readonly("observations", &SparsePomdp<ValueType>::getObservations)
        .def_property_readonly("nr_observations", &SparsePomdp<ValueType>::getNrObservations)
        .def("has_observation_valuations", &SparsePomdp<ValueType>::hasObservationValuations)
        .def_property_readonly("observation_valuations", &SparsePomdp<ValueType>::getObservationValuations)
    ;

    py::class_<SparseCtmc<ValueType>, std::shared_ptr<SparseCtmc<ValueType>>>(m, ("Sparse" + vtSuffix + "Ctmc").c_str(), "CTMC in sparse representation", model)
        .def(py::init<SparseCtmc<ValueType>>(), py::arg("other_model"))
        .def(py::init<ModelComponents<ValueType> const&>(), py::arg("components"))
        .def_property_readonly("exit_rates", [](SparseCtmc<ValueType> const& ctmc) { return ctmc.getExitRateVector(); })
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SparseMarkovAutomaton<ValueType>, std::shared_ptr<SparseMarkovAutomaton<ValueType>>>(m, ("Sparse" + vtSuffix + "MA").c_str(), "MA in sparse representation", model)
        .def(py::init<SparseMarkovAutomaton<ValueType>>(), py::arg("other_model"))
        .def(py::init<ModelComponents<ValueType> const&>(), py::arg("components"))
        .def_property_readonly("exit_rates", [](SparseMarkovAutomaton<ValueType> const& ma) { return ma.getExitRates(); })
        .def_property_readonly("markovian_states", [](SparseMarkovAutomaton<ValueType> const& ma) { return ma.getMarkovianStates(); })
        .def_property_readonly("nondeterministic_choice_indices", [](SparseMarkovAutomaton<ValueType> const& ma) { return ma.getNondeterministicChoiceIndices(); })
        .def("apply_scheduler", [](SparseMarkovAutomaton<ValueType> const& ma, storm::storage::Scheduler<ValueType> const& scheduler, bool dropUnreachableStates) { return ma.applyScheduler(scheduler, dropUnreachableStates); }, "apply scheduler", "scheduler"_a, "drop_unreachable_states"_a = true)
        .def("__str__", &getModelInfoPrinter)
        .def_property_readonly("convertible_to_ctmc", &SparseMarkovAutomaton<ValueType>::isConvertibleToCtmc, "Check whether the MA can be converted into a CTMC.")
        .def("convert_to_ctmc", &SparseMarkovAutomaton<ValueType>::convertToCtmc, "Convert the MA into a CTMC.")
    ;
    py::class_<SparseRewardModel<ValueType>>(m, ("Sparse" + vtSuffix + "RewardModel").c_str(), "Reward structure for sparse models")
        .def(py::init<boost::optional<std::vector<ValueType>> const&, boost::optional<std::vector<ValueType>> const&, boost::optional<storm::storage::SparseMatrix<ValueType>> const&>(), py::arg("optional_state_reward_vector") = boost::none, py::arg("optional_state_action_reward_vector") = boost::none, py::arg("optional_transition_reward_matrix") = boost::none)
        .def_property_readonly("has_state_rewards", &SparseRewardModel<ValueType>::hasStateRewards)
        .def_property_readonly("has_state_action_rewards", &SparseRewardModel<ValueType>::hasStateActionRewards)
        .def_property_readonly("has_transition_rewards", &SparseRewardModel<ValueType>::hasTransitionRewards)
        .def_property_readonly("transition_rewards", [](SparseRewardModel<ValueType>& rewardModel) {return rewardModel.getTransitionRewardMatrix();})
        .def_property_readonly("state_rewards", [](SparseRewardModel<ValueType>& rewardModel) {return rewardModel.getStateRewardVector();})
        .def("get_state_reward", [](SparseRewardModel<ValueType>& rewardModel, uint64_t state) {return rewardModel.getStateReward(state);})
        .def("get_zero_reward_states", &SparseRewardModel<ValueType>::template getStatesWithZeroReward<ValueType>, "get states where all rewards are zero", py::arg("transition_matrix"))
        .def("get_state_action_reward", [](SparseRewardModel<ValueType>& rewardModel, uint64_t action_index) {return rewardModel.getStateActionReward(action_index);})
        .def_property_readonly("state_action_rewards", [](SparseRewardModel<ValueType>& rewardModel) {return rewardModel.getStateActionRewardVector();})
        .def("reduce_to_state_based_rewards", [](SparseRewardModel<ValueType>& rewardModel, storm::storage::SparseMatrix<ValueType> const& transitions, bool onlyStateRewards){return rewardModel.reduceToStateBasedRewards(transitions, onlyStateRewards);}, py::arg("transition_matrix"), py::arg("only_state_rewards"), "Reduce to state-based rewards")
    ;
}
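// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the bindings): constructing the reward
// structure bound above from C++. It assumes the boost::optional-based
// constructor of storm's StandardRewardModel that the binding exposes; the
// function name and the reward values are illustrative only.
// ---------------------------------------------------------------------------
SparseRewardModel<double> exampleStateRewardModel() {
    // State-reward-only structure for a two-state model.
    std::vector<double> stateRewards{0.0, 1.0};
    return SparseRewardModel<double>(boost::optional<std::vector<double>>(std::move(stateRewards)));
}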
void define_sparse_parametric_model(py::module& m) {
    // Parametric models
    py::class_<SparseModel<RationalFunction>, std::shared_ptr<SparseModel<RationalFunction>>, ModelBase> modelRatFunc(m, "_SparseParametricModel", "A probabilistic model where transitions are represented by rational functions and saved in a sparse matrix");
    modelRatFunc.def("collect_probability_parameters", &probabilityVariables, "Collect parameters")
        .def("collect_reward_parameters", &rewardVariables, "Collect reward parameters")
        .def("collect_all_parameters", &allVariables, "Collect all parameters")
        .def_property_readonly("labeling", &getLabeling<RationalFunction>, "Labels")
        .def("labels_state", &SparseModel<RationalFunction>::getLabelsOfState, py::arg("state"), "Get labels of state")
        .def_property_readonly("initial_states", &getSparseInitialStates<RationalFunction>, "Initial states")
        .def_property_readonly("states", [](SparseModel<RationalFunction>& model) {
                return SparseModelStates<RationalFunction>(model);
            }, "Get states")
        .def_property_readonly("reward_models", [](SparseModel<RationalFunction> const& model) {return model.getRewardModels(); }, "Reward models")
        .def_property_readonly("transition_matrix", &getTransitionMatrix<RationalFunction>, py::return_value_policy::reference, py::keep_alive<1, 0>(), "Transition matrix")
        .def_property_readonly("backward_transition_matrix", &SparseModel<RationalFunction>::getBackwardTransitions, py::return_value_policy::reference, py::keep_alive<1, 0>(), "Backward transition matrix")
        .def("has_state_valuations", [](SparseModel<RationalFunction> const& model) {return model.hasStateValuations();}, "has state valuation?")
        .def_property_readonly("state_valuations", [](SparseModel<RationalFunction> const& model) {return model.getStateValuations();}, "state valuations")
        .def("reduce_to_state_based_rewards", &SparseModel<RationalFunction>::reduceToStateBasedRewards)
        .def("is_sink_state", &SparseModel<RationalFunction>::isSinkState, py::arg("state"))
        .def("__str__", &getModelInfoPrinter)
        .def("to_dot", [](SparseModel<RationalFunction>& model) { std::stringstream ss; model.writeDotToStream(ss); return ss.str(); }, "Write dot to a string")
    ;

    py::class_<SparseDtmc<RationalFunction>, std::shared_ptr<SparseDtmc<RationalFunction>>>(m, "SparseParametricDtmc", "pDTMC in sparse representation", modelRatFunc)
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SparseMdp<RationalFunction>, std::shared_ptr<SparseMdp<RationalFunction>>> pmdp(m, "SparseParametricMdp", "pMDP in sparse representation", modelRatFunc);
    pmdp.def_property_readonly("nondeterministic_choice_indices", [](SparseMdp<RationalFunction> const& mdp) { return mdp.getNondeterministicChoiceIndices(); })
        .def("apply_scheduler", [](SparseMdp<RationalFunction> const& mdp, storm::storage::Scheduler<RationalFunction> const& scheduler, bool dropUnreachableStates) { return mdp.applyScheduler(scheduler, dropUnreachableStates); }, "apply scheduler", "scheduler"_a, "drop_unreachable_states"_a = true)
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SparsePomdp<RationalFunction>, std::shared_ptr<SparsePomdp<RationalFunction>>>(m, "SparseParametricPomdp", "pPOMDP in sparse representation", pmdp)
        .def(py::init<SparsePomdp<RationalFunction>>(), py::arg("other_model"))
        .def("__str__", &getModelInfoPrinter)
        .def("get_observation", &SparsePomdp<RationalFunction>::getObservation, py::arg("state"))
        .def_property_readonly("observations", &SparsePomdp<RationalFunction>::getObservations)
        .def_property_readonly("nr_observations", &SparsePomdp<RationalFunction>::getNrObservations)
    ;

    py::class_<SparseCtmc<RationalFunction>, std::shared_ptr<SparseCtmc<RationalFunction>>>(m, "SparseParametricCtmc", "pCTMC in sparse representation", modelRatFunc)
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SparseMarkovAutomaton<RationalFunction>, std::shared_ptr<SparseMarkovAutomaton<RationalFunction>>>(m, "SparseParametricMA", "pMA in sparse representation", modelRatFunc)
        .def_property_readonly("nondeterministic_choice_indices", [](SparseMarkovAutomaton<RationalFunction> const& ma) { return ma.getNondeterministicChoiceIndices(); })
        .def("apply_scheduler", [](SparseMarkovAutomaton<RationalFunction> const& ma, storm::storage::Scheduler<RationalFunction> const& scheduler, bool dropUnreachableStates) { return ma.applyScheduler(scheduler, dropUnreachableStates); }, "apply scheduler", "scheduler"_a, "drop_unreachable_states"_a = true)
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SparseRewardModel<RationalFunction>>(m, "SparseParametricRewardModel", "Reward structure for parametric sparse models")
        .def_property_readonly("has_state_rewards", &SparseRewardModel<RationalFunction>::hasStateRewards)
        .def_property_readonly("has_state_action_rewards", &SparseRewardModel<RationalFunction>::hasStateActionRewards)
        .def_property_readonly("has_transition_rewards", &SparseRewardModel<RationalFunction>::hasTransitionRewards)
        .def_property_readonly("transition_rewards", [](SparseRewardModel<RationalFunction>& rewardModel) {return rewardModel.getTransitionRewardMatrix();})
        .def_property_readonly("state_rewards", [](SparseRewardModel<RationalFunction>& rewardModel) {return rewardModel.getStateRewardVector();})
        .def("get_state_reward", [](SparseRewardModel<RationalFunction>& rewardModel, uint64_t state) {return rewardModel.getStateReward(state);})
        .def("get_state_action_reward", [](SparseRewardModel<RationalFunction>& rewardModel, uint64_t action_index) {return rewardModel.getStateActionReward(action_index);})
        .def_property_readonly("state_action_rewards", [](SparseRewardModel<RationalFunction>& rewardModel) {return rewardModel.getStateActionRewardVector();})
        .def("reduce_to_state_based_rewards", [](SparseRewardModel<RationalFunction>& rewardModel, storm::storage::SparseMatrix<RationalFunction> const& transitions, bool onlyStateRewards){return rewardModel.reduceToStateBasedRewards(transitions, onlyStateRewards);}, py::arg("transition_matrix"), py::arg("only_state_rewards"), "Reduce to state-based rewards")
    ;
}
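// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the bindings): the parameter-collection
// helpers exposed above as collect_probability_parameters and friends simply
// forward to storm. Counting the distinct probability parameters of a
// parametric model looks like this; the function name is a hypothetical example.
// ---------------------------------------------------------------------------
std::size_t exampleCountProbabilityParameters(SparseModel<RationalFunction> const& model) {
    return probabilityVariables(model).size();
}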
py::arg("transition_matrix"), py::arg("only_state_rewards"), "Reduce to state-based rewards") ; } // Bindings for symbolic models template void define_symbolic_model(py::module& m, std::string vt_suffix) { // Set class names std::string prefixClassName = "Symbolic" + vt_suffix; std::string prefixParametricClassName = "Symbolic" + vt_suffix + "Parametric"; // Models with double numbers py::class_, std::shared_ptr>, ModelBase> model(m, ("_"+prefixClassName+"Model").c_str(), "A probabilistic model where transitions are represented by doubles and saved in a symbolic representation"); model.def_property_readonly("reward_models", [](SymbolicModel& model) {return model.getRewardModels(); }, "Reward models") .def_property_readonly("dd_manager", &SymbolicModel::getManager, "dd manager") .def_property_readonly("reachable_states", &SymbolicModel::getReachableStates, "reachable states as DD") .def_property_readonly("initial_states", &SymbolicModel::getInitialStates, "initial states as DD") .def("get_states", [](SymbolicModel const& model, storm::expressions::Expression const& expr) {return model.getStates(expr);}, py::arg("expression"), "Get states that are described by the expression") .def("compute_depth", [](SymbolicModel const& model) {return storm::utility::dd::computeReachableStates(model.getInitialStates(), model.getQualitativeTransitionMatrix(false), model.getRowVariables(), model.getColumnVariables()).second;}, "Computes the depth of the model, i.e., the distance to the node with the largest minimal distance from the initial states") .def("reduce_to_state_based_rewards", &SymbolicModel::reduceToStateBasedRewards) .def("__str__", &getModelInfoPrinter) ; py::class_, std::shared_ptr>>(m, (prefixClassName+"Dtmc").c_str(), "DTMC in symbolic representation", model) .def("__str__", &getModelInfoPrinter) ; py::class_, std::shared_ptr>>(m, (prefixClassName+"Mdp").c_str(), "MDP in symbolic representation", model) .def("__str__", &getModelInfoPrinter) ; py::class_, std::shared_ptr>>(m, (prefixClassName+"Ctmc").c_str(), "CTMC in symbolic representation", model) .def("__str__", &getModelInfoPrinter) ; py::class_, std::shared_ptr>>(m, (prefixClassName+"MA").c_str(), "MA in symbolic representation", model) .def("__str__", &getModelInfoPrinter) ; py::class_>(m, (prefixClassName+"RewardModel").c_str(), "Reward structure for symbolic models") .def_property_readonly("has_state_rewards", &SymbolicRewardModel::hasStateRewards) .def_property_readonly("has_state_action_rewards", &SymbolicRewardModel::hasStateActionRewards) .def_property_readonly("has_transition_rewards", &SymbolicRewardModel::hasTransitionRewards) ; // Parametric models py::class_, std::shared_ptr>, ModelBase> modelRatFunc(m, ("_"+prefixParametricClassName+"Model").c_str(), "A probabilistic model where transitions are represented by rational functions and saved in a symbolic representation"); modelRatFunc.def("get_parameters", &SymbolicModel::getParameters, "Get parameters") .def_property_readonly("reward_models", [](SymbolicModel const& model) {return model.getRewardModels(); }, "Reward models") .def_property_readonly("reachable_states", &SymbolicModel::getReachableStates, "reachable states as DD") .def_property_readonly("initial_states", &SymbolicModel::getInitialStates, "initial states as DD") .def("get_states", [](SymbolicModel const& model, storm::expressions::Expression const& expr) {return model.getStates(expr);}, py::arg("expression"), "Get states that are described by the expression") .def("reduce_to_state_based_rewards", 
    py::class_<SymbolicDtmc<DdType, RationalFunction>, std::shared_ptr<SymbolicDtmc<DdType, RationalFunction>>>(m, (prefixParametricClassName+"Dtmc").c_str(), "pDTMC in symbolic representation", modelRatFunc)
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SymbolicMdp<DdType, RationalFunction>, std::shared_ptr<SymbolicMdp<DdType, RationalFunction>>>(m, (prefixParametricClassName+"Mdp").c_str(), "pMDP in symbolic representation", modelRatFunc)
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SymbolicCtmc<DdType, RationalFunction>, std::shared_ptr<SymbolicCtmc<DdType, RationalFunction>>>(m, (prefixParametricClassName+"Ctmc").c_str(), "pCTMC in symbolic representation", modelRatFunc)
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SymbolicMarkovAutomaton<DdType, RationalFunction>, std::shared_ptr<SymbolicMarkovAutomaton<DdType, RationalFunction>>>(m, (prefixParametricClassName+"MA").c_str(), "pMA in symbolic representation", modelRatFunc)
        .def("__str__", &getModelInfoPrinter)
    ;

    py::class_<SymbolicRewardModel<DdType, RationalFunction>>(m, (prefixParametricClassName+"RewardModel").c_str(), "Reward structure for parametric symbolic models")
        .def_property_readonly("has_state_rewards", &SymbolicRewardModel<DdType, RationalFunction>::hasStateRewards)
        .def_property_readonly("has_state_action_rewards", &SymbolicRewardModel<DdType, RationalFunction>::hasStateActionRewards)
        .def_property_readonly("has_transition_rewards", &SymbolicRewardModel<DdType, RationalFunction>::hasTransitionRewards)
    ;
}

template void define_symbolic_model<storm::dd::DdType::Sylvan>(py::module& m, std::string vt_suffix);

template void define_sparse_model<double>(py::module& m, std::string const& vt_suffix);
template void define_sparse_model<storm::RationalNumber>(py::module& m, std::string const& vt_suffix);
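// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the bindings): how the define_* functions
// in this file are typically wired into the Python module. In stormpy the
// actual registration lives in the module setup code; the function name and
// the suffix strings ("", "Exact", "Sylvan") used here are assumptions for
// illustration only.
// ---------------------------------------------------------------------------
void exampleRegisterModelBindings(py::module& m) {
    define_model(m);
    define_sparse_model<double>(m, "");
    define_sparse_model<storm::RationalNumber>(m, "Exact");
    define_sparse_parametric_model(m);
    define_symbolic_model<storm::dd::DdType::Sylvan>(m, "Sylvan");
}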