From cc1cd0dce78102e94b08f7423bed0f2afa81ca60 Mon Sep 17 00:00:00 2001 From: Matthias Volk Date: Thu, 23 Jul 2020 17:35:52 +0200 Subject: [PATCH] Minor changes in documentation for model building --- doc/source/advanced_topics.rst | 8 +++--- doc/source/conf.py | 11 ++++++-- .../doc/{ => models}/building_ctmcs.rst | 6 ++-- .../doc/{ => models}/building_dtmcs.rst | 19 +++++++------ doc/source/doc/{ => models}/building_mas.rst | 13 +++++---- doc/source/doc/{ => models}/building_mdps.rst | 28 +++++++++++-------- 6 files changed, 48 insertions(+), 37 deletions(-) rename doc/source/doc/{ => models}/building_ctmcs.rst (89%) rename doc/source/doc/{ => models}/building_dtmcs.rst (88%) rename doc/source/doc/{ => models}/building_mas.rst (80%) rename doc/source/doc/{ => models}/building_mdps.rst (80%) diff --git a/doc/source/advanced_topics.rst b/doc/source/advanced_topics.rst index 6604bbe..3c6c8c4 100644 --- a/doc/source/advanced_topics.rst +++ b/doc/source/advanced_topics.rst @@ -10,10 +10,10 @@ This guide is a collection of examples meant to bridge the gap between the getti doc/analysis doc/building_models - doc/building_dtmcs - doc/building_mdps - doc/building_ctmcs - doc/building_mas + doc/models/building_dtmcs + doc/models/building_mdps + doc/models/building_ctmcs + doc/models/building_mas doc/engines doc/exploration doc/reward_models diff --git a/doc/source/conf.py b/doc/source/conf.py index 0c71a5e..fec21e1 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -36,11 +36,16 @@ import sphinx_bootstrap_theme # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
-extensions = ['sphinx.ext.autodoc', +extensions = [ + 'sphinx.ext.autodoc', 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', + #'sphinx.ext.intersphinx', 'sphinx.ext.coverage', - 'sphinx.ext.githubpages'] + 'sphinx.ext.githubpages', + 'sphinx.ext.autosectionlabel' +] +autosectionlabel_prefix_document = True +#autosectionlabel_maxdepth = 10 # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] diff --git a/doc/source/doc/building_ctmcs.rst b/doc/source/doc/models/building_ctmcs.rst similarity index 89% rename from doc/source/doc/building_ctmcs.rst rename to doc/source/doc/models/building_ctmcs.rst index ee20264..0ddf6dd 100644 --- a/doc/source/doc/building_ctmcs.rst +++ b/doc/source/doc/models/building_ctmcs.rst @@ -9,7 +9,7 @@ Background In this section, we explain how Stormpy can be used to build a simple CTMC. Building CTMCs works similar to building DTMCs as in :doc:`building_dtmcs`, but additionally every state is equipped with an exit rate. -.. seealso:: `01-building-ctmcs.py ` +.. seealso:: `01-building-ctmcs.py `_ First, we import Stormpy:: @@ -45,7 +45,7 @@ The following function call returns a sparse matrix with default row groups:: Labeling ================ -The state labeling is created analogously to the previous example in :doc:`building_dtmcs`:: +The state labeling is created analogously to the previous example in :ref:`building DTMCs`. >>> state_labeling = stormpy.storage.StateLabeling(4) >>> state_labels = {'empty', 'init', 'deadlock', 'full'} @@ -65,7 +65,7 @@ Building the Model ==================== Now, we can collect all components, including the choice labeling and the exit rates. 
-To let the transition values be interpreted as rates we set rate_transitions to True:: +To let the transition values be interpreted as rates we set `rate_transitions` to `True`:: components = stormpy.SparseModelComponents(transition_matrix=transition_matrix, state_labeling=state_labeling, rate_transitions=True) components.exit_rates = exit_rates diff --git a/doc/source/doc/building_dtmcs.rst b/doc/source/doc/models/building_dtmcs.rst similarity index 88% rename from doc/source/doc/building_dtmcs.rst rename to doc/source/doc/models/building_dtmcs.rst index c39350a..8a9de44 100644 --- a/doc/source/doc/building_dtmcs.rst +++ b/doc/source/doc/models/building_dtmcs.rst @@ -5,13 +5,13 @@ Discrete-time Markov chains (DTMCs) Background ===================== -As described in :doc:`../getting_started`, +As described in :doc:`../../getting_started`, Storm can be used to translate a model description e.g. in form of a prism file into a Markov chain. -Here, we use Stormpy to create the single components, to build a DTMC without parsing a model description. -We consider the previous example of the dice. +Here, we use Stormpy to create the components for a model and build a DTMC directly from these components without parsing a model description. +We consider the previous example of the Knuth-Yao die. -.. seealso:: `01-building-dtmcs.py ` +.. seealso:: `01-building-dtmcs.py `_ In the following we create the transition matrix, the state labeling and the reward models of a DTMC. First, we import stormpy:: @@ -62,12 +62,13 @@ Finally, we can build the matrix:: It should be noted that entries can only be inserted in ascending order, i.e. row by row and column by column. Stormpy provides the possibility to build a sparse matrix using the numpy library (https://numpy.org/ ) -Instead of using the SparseMatrixBuilder, a sparse matrix can be build from a numpy array via the method stormpy.build_sparse_matrix. 
+Instead of using the SparseMatrixBuilder, a sparse matrix can be built from a numpy array via the method `stormpy.build_sparse_matrix`.
+An example is given in :ref:`building CTMCs `.

Labeling
====================

-States can be labeled with sets of propositions, for example state 0 can be labeled with 'init'.
+States can be labeled with sets of propositions, for example state 0 can be labeled with "init".
In order to specify the state labeling we create an empty labeling for the given number of states and add the labels to the labeling::

    >>> state_labeling = stormpy.storage.StateLabeling(13)
@@ -77,7 +78,7 @@ In order to specify the state labeling we create an empty labeling for the given
    ...     state_labeling.add_label(label)


-Labels can be asociated with states. As an example, we label the state 0 with 'init'::
+Labels can be associated with states. As an example, we label the state 0 with "init"::

    >>> state_labeling.add_label_to_state('init', 0)
    >>> print(state_labeling.get_states('init'))
@@ -112,9 +113,9 @@ Defining a choice labeling is possible in a similar way.
Reward Models
====================
Stormpy supports multiple reward models such as state rewards, state-action rewards and as transition rewards.
-In this example, the actions of states which satisfy s<7 acquire a reward of 1.0.
+In this example, the actions of states which satisfy `s<7` acquire a reward of 1.0.
-The state-action rewards are represented by a vector, which is associated to a reward model named 'coin_flips'::
+The state-action rewards are represented by a vector, which is associated to a reward model named "coin_flips"::

    >>> reward_models = {}
    >>> action_reward = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
diff --git a/doc/source/doc/building_mas.rst b/doc/source/doc/models/building_mas.rst
similarity index 80%
rename from doc/source/doc/building_mas.rst
rename to doc/source/doc/models/building_mas.rst
index 8906559..852b00b 100644
--- a/doc/source/doc/building_mas.rst
+++ b/doc/source/doc/models/building_mas.rst
@@ -6,14 +6,15 @@ Markov automata (MAs)
Background
=====================

-We already saw the process of building CTMCs with MDPs via Stormpy in :doc:`building_ctmcs` and :doc:`building_mdps`.
-Markov automata use states that are probabilistic i.e. like the states of an MDP or markovian that are like the states of a CTMC.
+We already saw the process of building :doc:`CTMCs ` and :doc:`MDPs ` via Stormpy.

-In this section, we build a small MA with five states from which the first four are markovian.
+Markov automata use states that are probabilistic, i.e. like the states of an MDP, or Markovian, i.e. like the states of a CTMC.
+
+In this section, we build a small MA with five states from which the first four are Markovian.
Since we covered labeling and exit rates already in the previous examples we omit the description of these components.
The full example can be found here:

-.. seealso:: `01-building-mas.py `
+.. seealso:: `01-building-mas.py `_

First, we import Stormpy::

    >>> import stormpy

Transition Matrix
==================
-In :doc:`building_mdps`, we used the SparseMatrixBuilder to create a matrix with a custom row grouping.
+For :ref:`building MDPs `, we used the `SparseMatrixBuilder` to create a matrix with a custom row grouping.
In this example, we use the numpy library.
In the beginning, we create a numpy array that will be used to build the transition matrix of our model.:: @@ -58,7 +59,7 @@ When building the matrix we define a custom row grouping by passing a list conta Markovian States ================== In order to define which states have only one probability distribution over the successor states, -we build a BitVector that contains the respective markovian states:: +we build a BitVector that contains the respective Markovian states:: >>> markovian_states = stormpy.BitVector(5, [1, 2, 3, 4]) diff --git a/doc/source/doc/building_mdps.rst b/doc/source/doc/models/building_mdps.rst similarity index 80% rename from doc/source/doc/building_mdps.rst rename to doc/source/doc/models/building_mdps.rst index 3f07e6b..a588df3 100644 --- a/doc/source/doc/building_mdps.rst +++ b/doc/source/doc/models/building_mdps.rst @@ -1,14 +1,14 @@ *********************************************** -Discrete-time Markov decision processes (MDPs) +Markov decision processes (MDPs) *********************************************** Background ===================== -In :doc:`building_dtmcs` we modelled Knuth's model of a fair die by the means of a DTMC. -In the following we extend this model with nondeterministic choice by building a Markov Decision process. +In :doc:`building_dtmcs` we modelled Knuth-Yao's model of a fair die by the means of a DTMC. +In the following we extend this model with nondeterministic choice by building a Markov decision process. -.. seealso:: `01-building-mdps.py ` +.. seealso:: `01-building-mdps.py `_ First, we import Stormpy:: @@ -30,7 +30,11 @@ Note that the row group needs to be added before any entries are added to the gr >>> builder.add_next_value(1, 1, 0.2) >>> builder.add_next_value(1, 2, 0.8) -For the remaining states, we need to specify the starting rows of row groups:: +In this example, we have two nondeterministic choices in state 0. 
+With choice `0` we have probability 0.5 to go to state 1 and probability 0.5 to go to state 2.
+With choice `1` we go to state 1 with probability 0.2 and go to state 2 with probability 0.8.
+
+For the remaining states, we need to specify the starting rows of each row group::

    >>> builder.new_row_group(2)
    >>> builder.add_next_value(2, 3, 0.5)
@@ -55,7 +59,7 @@ For the remaining states, we need to specify the starting rows of row groups::
    ...     builder.new_row_group(s)
    ...     builder.add_next_value(s, s - 1, 1)

-Build::
+Finally, we build the transition matrix::

    >>> transition_matrix = builder.build()

@@ -63,7 +67,7 @@ Labeling
================

We have seen the construction of a state labeling in previous examples. Therefore we omit the description here.
-Instead we focus on the choices.
+Instead, we focus on the choices.
Since in state 0 a nondeterministic choice over two actions is available, the number of choices is 14.
To distinguish those we can define a choice labeling::

@@ -87,7 +91,7 @@ Recall that those actions where defined in row one and two of the transition mat

Reward models
==================
-In this reward models the length of vector coincides with number of choices::
+In this reward model the length of the action rewards coincides with the number of choices::

    >>> reward_models = {}
    >>> action_reward = [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
@@ -95,12 +99,12 @@ Building the Model
====================

-Collect components::
+We collect the components::

    >>> components = stormpy.SparseModelComponents(transition_matrix=transition_matrix, state_labeling=state_labeling, reward_models=reward_models, rate_transitions=False)
    >>> components.choice_labeling = choice_labeling

-Build the model::
+We build the model::

    >>> mdp = stormpy.storage.SparseMdp(components)
    >>> print(mdp)
@@ -126,5 +130,5 @@ Build the model::

Partially observable Markov decision process 
(POMDPs) ======================================================== -To build a partially observable Markov decision process, -components.observations can be set to a list of numbers that defines the status of the observables in each state. +To build a partially observable Markov decision process (POMDP), +`components.observations` can be set to a list of numbers that defines the status of the observables in each state.