diff --git a/.gitignore b/.gitignore
index 6c315c3..6250ed7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,14 @@
*.so
-__pycache__
+*.py[cod]
+lib/**/_config.py
+.eggs/
+*.egg-info/
build/
-*.pye
-.idea
+dist/
+.idea/
+__pycache__/
+_build/
+.pytest_cache/
+
.DS_Store
-_config.py
diff --git a/.travis.yml b/.travis.yml
index 4c79c06..d4133ea 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,28 +25,34 @@ notifications:
#
jobs:
include:
- # docker storm:latest
+ # Docker Storm master
- os: linux
compiler: gcc
env: TASK=Test CONFIG=Release DOCKER=storm:travis PYTHON=python3
- install:
- travis/install_linux.sh
script:
travis/build.sh
- # docker storm-debug:latest
+ # Docker Storm master in debug mode
- os: linux
compiler: gcc
env: TASK=Test CONFIG=Debug DOCKER=storm:travis-debug PYTHON=python3
- install:
- travis/install_linux.sh
+ script:
+ travis/build.sh
+ # Docker Storm stable
+ - os: linux
+ compiler: gcc
+ env: TASK=Test CONFIG=Release DOCKER=storm:1.3.0 PYTHON=python3
+ script:
+ travis/build.sh
+ # Docker Storm stable in debug mode
+ - os: linux
+ compiler: gcc
+ env: TASK=Test CONFIG=Debug DOCKER=storm:1.3.0-debug PYTHON=python3
script:
travis/build.sh
# Documentation
- os: linux
compiler: gcc
env: TASK=Documentation CONFIG=Release DOCKER=storm:travis PYTHON=python3
- install:
- travis/install_linux.sh
script:
travis/build.sh
before_deploy:
@@ -59,3 +65,9 @@ jobs:
on:
branch: master
+  # Allow failures for stable versions as stormpy might already rely on newly added Storm features
+ allow_failures:
+ - os: linux
+ env: TASK=Test CONFIG=Release DOCKER=storm:1.3.0 PYTHON=python3
+ - os: linux
+ env: TASK=Test CONFIG=Debug DOCKER=storm:1.3.0-debug PYTHON=python3
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8479b19..347b676 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,24 @@
Changelog
=============
+Version 1.3.x
+-------------
+
+### Version 1.3.0 (2019/01)
+Requires storm version >= 1.3.0 and pycarl version >= 2.0.3
+- Adaptations to changes in Storm
+- Bindings for symbolic models:
+ * building symbolic models
+ * bisimulation
+ * transforming symbolic to sparse models
+- Extraction of schedulers and queries on schedulers
+- High-level counterexamples connected
+- Drastically extended JANI bindings
+- Extended bindings for expressions
+- Extended PLA bindings
+- Extended DFT bindings
+- Extended documentation
+- Improved and extended setup
Version 1.2.x
-------------
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c400ebd..cbe6851 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,57 +1,43 @@
cmake_minimum_required(VERSION 3.0.0)
project(pystorm)
-option(STORMPY_DISABLE_SIGNATURE_DOC "disables the signature in the documentation" ON)
-
find_package(storm REQUIRED)
add_subdirectory(resources/pybind11)
+option(STORMPY_DISABLE_SIGNATURE_DOC "disables the signature in the documentation" ON)
+
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/src/config.h.in ${CMAKE_CURRENT_BINARY_DIR}/src/generated/config.h)
message(STATUS "STORM-DIR: ${storm_DIR}")
-set(STORMPY_LIB_DIR "${CMAKE_SOURCE_DIR}/lib/stormpy" CACHE STRING "Sets the storm library dir")
-
+message(STATUS "STORM-INCLUDE-DIR: ${storm_INCLUDE_DIR}")
function(stormpy_module NAME)
- # second, optional argument is LIBRARY_OUTPUT_DIRECTORY,
- # defaults to subdir with module name
- # third, optional argument are ADDITIONAL_LIBRARIES
- # fourth, optional argument are ADDITIONAL_INCLUDES
- if(ARGC GREATER 1)
- set(LIB_PATH "${ARGV1}")
- else()
- set(LIB_PATH "${STORMPY_LIB_DIR}/${NAME}")
- endif()
+ # second, optional argument is ADDITIONAL_LIBRARIES
+ # third, optional argument is ADDITIONAL_INCLUDES
file(GLOB_RECURSE "STORM_${NAME}_SOURCES" "${CMAKE_CURRENT_SOURCE_DIR}/src/${NAME}/*.cpp")
pybind11_add_module(${NAME} "${CMAKE_CURRENT_SOURCE_DIR}/src/mod_${NAME}.cpp" ${STORM_${NAME}_SOURCES})
- if(ARGC GREATER 2)
+ if(ARGC GREATER 1)
# Additional libraries
- target_include_directories(${NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${storm_INCLUDE_DIR} ${ARGV3} ${CMAKE_CURRENT_BINARY_DIR}/src/generated)
- target_link_libraries(${NAME} PRIVATE storm ${ARGV2})
+ target_include_directories(${NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${storm_INCLUDE_DIR} ${storm-parsers_INCLUDE_DIR} ${storm-counterexamples_INCLUDE_DIR} ${ARGV2} ${CMAKE_CURRENT_BINARY_DIR}/src/generated)
+ target_link_libraries(${NAME} PRIVATE storm storm-parsers storm-counterexamples ${ARGV1})
else()
- target_include_directories(${NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${storm_INCLUDE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/src/generated)
- target_link_libraries(${NAME} PRIVATE storm)
+ target_include_directories(${NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${storm_INCLUDE_DIR} ${storm-parsers_INCLUDE_DIR} ${storm-counterexamples_INCLUDE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/src/generated)
+ target_link_libraries(${NAME} PRIVATE storm storm-parsers storm-counterexamples)
endif()
-
- # setup.py will override this (because pip may want a different install
- # path), but also specifying it here has the advantage that invoking cmake
- # manually uses the correct path if the default is used (i.e. the
- # STORMPY_LIB_DIR hardcoded above)
- set_target_properties(${NAME} PROPERTIES LIBRARY_OUTPUT_DIRECTORY "${LIB_PATH}")
endfunction(stormpy_module)
-stormpy_module(core "${STORMPY_LIB_DIR}")
+stormpy_module(core)
stormpy_module(info)
stormpy_module(logic)
stormpy_module(storage)
stormpy_module(utility)
if(HAVE_STORM_PARS)
- stormpy_module(pars "${STORMPY_LIB_DIR}/pars" storm-pars "${storm-pars_INCLUDE_DIR}")
+ stormpy_module(pars storm-pars "${storm-pars_INCLUDE_DIR}")
endif()
if(HAVE_STORM_DFT)
- stormpy_module(dft "${STORMPY_LIB_DIR}/dft" storm-dft "${storm-dft_INCLUDE_DIR}")
+ stormpy_module(dft storm-dft "${storm-dft_INCLUDE_DIR}")
endif()
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..bbfd747
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,6 @@
+include CMakeLists.txt
+recursive-include setup/ *.py
+recursive-include cmake/ *
+recursive-include src/ *
+recursive-include resources/ *
+recursive-include lib/stormpy/examples/files/ *
diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index ef03051..62123d9 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -2,28 +2,28 @@ cmake_minimum_required(VERSION 3.0.0)
project(storm-version)
find_package(storm REQUIRED)
+
+# Set configuration
+set(STORM_DIR ${storm_DIR})
+set(STORM_VERSION ${storm_VERSION})
+set(STORM_LIBS ${storm_LIBRARIES})
+
# Check for storm-pars
-if(EXISTS "${storm_DIR}/lib/libstorm-pars.dylib")
- set(HAVE_STORM_PARS TRUE)
-elseif(EXISTS "${storm_DIR}/lib/libstorm-pars.so")
+find_library(STORM_PARS NAMES storm-pars HINTS "${storm_DIR}/lib/")
+if(STORM_PARS)
set(HAVE_STORM_PARS TRUE)
else()
set(HAVE_STORM_PARS FALSE)
endif()
# Check for storm-dft
-if(EXISTS "${storm_DIR}/lib/libstorm-dft.dylib")
- set(HAVE_STORM_DFT TRUE)
-elseif(EXISTS "${storm_DIR}/lib/libstorm-dft.so")
+find_library(STORM_DFT NAMES storm-dft HINTS "${storm_DIR}/lib/")
+if(STORM_DFT)
set(HAVE_STORM_DFT TRUE)
else()
set(HAVE_STORM_DFT FALSE)
endif()
-# Set configuration
-set(STORM_DIR ${storm_DIR})
-set(STORM_VERSION ${storm_VERSION})
-
if(HAVE_STORM_PARS)
set(HAVE_STORM_PARS_BOOL "True")
else()
@@ -48,7 +48,4 @@ else()
set(STORM_CLN_RF_BOOL "False")
endif()
-
-
-
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/config.py.in ${CMAKE_CURRENT_BINARY_DIR}/generated/config.py @ONLY)
diff --git a/doc/source/advanced_topics.rst b/doc/source/advanced_topics.rst
index 124c702..62142be 100644
--- a/doc/source/advanced_topics.rst
+++ b/doc/source/advanced_topics.rst
@@ -8,6 +8,11 @@ This guide is a collection of examples meant to bridge the gap between the getti
:maxdepth: 2
:caption: Contents:
- building_models
- reward_models
- shortest_paths
\ No newline at end of file
+ doc/building_models
+ doc/engines
+ doc/exploration
+ doc/reward_models
+ doc/schedulers
+ doc/shortest_paths
+ doc/parametric_models
+ doc/dfts
\ No newline at end of file
diff --git a/doc/source/api.rst b/doc/source/api.rst
new file mode 100644
index 0000000..f9144c4
--- /dev/null
+++ b/doc/source/api.rst
@@ -0,0 +1,17 @@
+Stormpy API Reference
+====================================
+Work in progress!
+
+.. toctree::
+ :maxdepth: 2
+ :caption: Modules:
+
+ api/core
+ api/info
+ api/exceptions
+ api/logic
+ api/storage
+ api/utility
+
+ api/dft
+ api/pars
diff --git a/doc/source/code_stormpy_core.rst b/doc/source/api/core.rst
similarity index 53%
rename from doc/source/code_stormpy_core.rst
rename to doc/source/api/core.rst
index 4f26fa9..b9a41a1 100644
--- a/doc/source/code_stormpy_core.rst
+++ b/doc/source/api/core.rst
@@ -5,11 +5,3 @@ Stormpy.core
:members:
:undoc-members:
:imported-members:
-
-Core members
-=========================
-
-
-.. automodule:: stormpy.core
- :members:
- :undoc-members:
diff --git a/doc/source/api/dft.rst b/doc/source/api/dft.rst
new file mode 100644
index 0000000..331f60c
--- /dev/null
+++ b/doc/source/api/dft.rst
@@ -0,0 +1,7 @@
+Stormpy.dft
+**************************
+
+.. automodule:: stormpy.dft
+ :members:
+ :undoc-members:
+ :imported-members:
diff --git a/doc/source/api/exceptions.rst b/doc/source/api/exceptions.rst
new file mode 100644
index 0000000..8a3e2b2
--- /dev/null
+++ b/doc/source/api/exceptions.rst
@@ -0,0 +1,7 @@
+Stormpy.exceptions
+**************************
+
+.. automodule:: stormpy.exceptions
+ :members:
+ :undoc-members:
+ :imported-members:
diff --git a/doc/source/api/info.rst b/doc/source/api/info.rst
new file mode 100644
index 0000000..8421b2b
--- /dev/null
+++ b/doc/source/api/info.rst
@@ -0,0 +1,7 @@
+Stormpy.info
+**************************
+
+.. automodule:: stormpy.info
+ :members:
+ :undoc-members:
+ :imported-members:
diff --git a/doc/source/api/logic.rst b/doc/source/api/logic.rst
new file mode 100644
index 0000000..84d34a9
--- /dev/null
+++ b/doc/source/api/logic.rst
@@ -0,0 +1,7 @@
+Stormpy.logic
+**************************
+
+.. automodule:: stormpy.logic
+ :members:
+ :undoc-members:
+ :imported-members:
diff --git a/doc/source/api/pars.rst b/doc/source/api/pars.rst
new file mode 100644
index 0000000..2c66395
--- /dev/null
+++ b/doc/source/api/pars.rst
@@ -0,0 +1,7 @@
+Stormpy.pars
+**************************
+
+.. automodule:: stormpy.pars
+ :members:
+ :undoc-members:
+ :imported-members:
diff --git a/doc/source/api/storage.rst b/doc/source/api/storage.rst
new file mode 100644
index 0000000..5281904
--- /dev/null
+++ b/doc/source/api/storage.rst
@@ -0,0 +1,7 @@
+Stormpy.storage
+**************************
+
+.. automodule:: stormpy.storage
+ :members:
+ :undoc-members:
+ :imported-members:
diff --git a/doc/source/api/utility.rst b/doc/source/api/utility.rst
new file mode 100644
index 0000000..6bfe305
--- /dev/null
+++ b/doc/source/api/utility.rst
@@ -0,0 +1,7 @@
+Stormpy.utility
+**************************
+
+.. automodule:: stormpy.utility
+ :members:
+ :undoc-members:
+ :imported-members:
diff --git a/doc/source/code_stormpy_logic.rst b/doc/source/code_stormpy_logic.rst
deleted file mode 100644
index 51c4daf..0000000
--- a/doc/source/code_stormpy_logic.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-Stormpy.logic
-**************************
-
-.. automodule:: stormpy
-
-
-Members
-=========================
-
-
-.. automodule:: stormpy.logic.logic
- :members:
diff --git a/doc/source/code_stormpy_storage.rst b/doc/source/code_stormpy_storage.rst
deleted file mode 100644
index 666f940..0000000
--- a/doc/source/code_stormpy_storage.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-Stormpy.storage
-**************************
-
-.. automodule:: stormpy
-
-Members
-==============================
-
-.. automodule:: stormpy.storage.storage
- :members:
-
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2fbcfa8..0c71a5e 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -56,7 +56,7 @@ master_doc = 'index'
# General information about the project.
project = 'stormpy'
-copyright = '2016--2017 Moves RWTH Aachen'
+copyright = '2016-2019 Moves RWTH Aachen'
author = 'Sebastian Junges, Matthias Volk'
# The version info for the project you're documenting, acts as replacement for
@@ -64,9 +64,9 @@ author = 'Sebastian Junges, Matthias Volk'
# built documents.
#
# The short X.Y version.
-version = ''
+version = stormpy.__version__
# The full version, including alpha/beta/rc tags.
-release = ''
+release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/doc/source/contributors.rst b/doc/source/contributors.rst
index 464d72b..0961349 100644
--- a/doc/source/contributors.rst
+++ b/doc/source/contributors.rst
@@ -2,7 +2,8 @@
Contributors
**************
-Stormpy is an extension to `Storm `_. As a consequence, developers of Storm contributed significantly to the functionality offered by these Python bindings.
+Stormpy is an extension to `Storm `_.
+As a consequence, developers of Storm contributed significantly to the functionality offered by these Python bindings.
The bindings themselves have been developed by (lexicographically ordered):
* Sebastian Junges
diff --git a/doc/source/building_models.rst b/doc/source/doc/building_models.rst
similarity index 97%
rename from doc/source/building_models.rst
rename to doc/source/doc/building_models.rst
index 690e491..02229b3 100644
--- a/doc/source/building_models.rst
+++ b/doc/source/doc/building_models.rst
@@ -17,7 +17,7 @@ We use some standard examples::
>>> import stormpy.examples
>>> import stormpy.examples.files
-Storm supports the DRN format.
+Storm supports the explicit DRN format.
From this, models can be built directly::
>>> path = stormpy.examples.files.drn_ctmc_dft
diff --git a/doc/source/doc/dfts.rst b/doc/source/doc/dfts.rst
new file mode 100644
index 0000000..30d894a
--- /dev/null
+++ b/doc/source/doc/dfts.rst
@@ -0,0 +1,51 @@
+*******************
+Dynamic Fault Trees
+*******************
+
+
+Building DFTs
+=============
+.. seealso:: `01-dfts.py `_
+
+Dynamic fault trees can be loaded either from the Galileo format or from a custom JSON format.
+A file containing the DFT in the Galileo format can be loaded via ``load_dft_galileo_file(path)``.
+The custom JSON format can be loaded from a file via ``load_dft_json_file(path)`` or directly from a string via ``load_dft_json_string(json_string)``; an example of the latter is shown below.
+We start by loading a simple DFT containing an AND gate from JSON::
+
+ >>> import stormpy
+ >>> import stormpy.dft
+ >>> import stormpy.examples
+ >>> import stormpy.examples.files
+ >>> path_json = stormpy.examples.files.dft_json_and
+ >>> dft_small = stormpy.dft.load_dft_json_file(path_json)
+ >>> print(dft_small)
+ Top level index: 2, Nr BEs2
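+
+The same DFT could, for instance, also be loaded directly from a string containing the JSON description::
+
+ >>> json_string = open(path_json).read()
+ >>> dft_from_string = stormpy.dft.load_dft_json_string(json_string)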
+
+Next we load a more complex DFT from the Galileo format::
+
+ >>> path_galileo = stormpy.examples.files.dft_galileo_hecs
+ >>> dft = stormpy.dft.load_dft_galileo_file(path_galileo)
+
+After loading the DFT, we can display some common statistics about the model::
+
+ >>> print("DFT with {} elements.".format(dft.nr_elements()))
+ DFT with 23 elements.
+ >>> print("DFT has {} BEs and {} dynamic elements.".format(dft.nr_be(), dft.nr_dynamic()))
+ DFT has 13 BEs and 2 dynamic elements.
+
+
+Analyzing DFTs
+==============
+.. seealso:: `01-dfts.py `_
+
+The next step is to analyze the DFT via ``analyze_dft(dft, formula)``.
+Here we can use all standard properties as described in :ref:`getting-started-building-properties`.
+In our example we compute the `Mean-time-to-failure (MTTF)`::
+
+ >>> formula_str = "T=? [ F \"failed\" ]"
+ >>> formulas = stormpy.parse_properties(formula_str)
+ >>> results = stormpy.dft.analyze_dft(dft, [formulas[0].raw_formula])
+ >>> result = results[0]
+ >>> print("MTTF: {:.2f}".format(result))
+ MTTF: 363.89
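+
+Other standard queries, for example the probability that the DFT fails within a given time bound, can be posed in the same way; the following sketch only issues the query and omits the model-specific result::
+
+ >>> properties_rel = stormpy.parse_properties("P=? [ F<=1.0 \"failed\" ]")
+ >>> results_rel = stormpy.dft.analyze_dft(dft, [properties_rel[0].raw_formula])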
+
diff --git a/doc/source/doc/engines.rst b/doc/source/doc/engines.rst
new file mode 100644
index 0000000..f145e5b
--- /dev/null
+++ b/doc/source/doc/engines.rst
@@ -0,0 +1,82 @@
+***************
+Engines
+***************
+
+Background
+=====================
+
+Storm supports different engines for building and checking a model. A detailed comparison of the different engines provided in Storm can be found on the `Storm website `_.
+
+
+Sparse engine
+===============================
+
+In all of the examples so far we used the default sparse engine:
+
+ >>> import stormpy.examples
+ >>> import stormpy.examples.files
+ >>> prism_program = stormpy.parse_prism_program(stormpy.examples.files.prism_dtmc_die)
+ >>> properties = stormpy.parse_properties_for_prism_program('P=? [F "one"]', prism_program)
+ >>> sparse_model = stormpy.build_sparse_model(prism_program, properties)
+ >>> print(type(sparse_model))
+
+ >>> print("Number of states: {}".format(sparse_model.nr_states))
+ Number of states: 13
+ >>> print("Number of transitions: {}".format(sparse_model.nr_transitions))
+ Number of transitions: 20
+
+The model checking was also done in the sparse engine:
+
+ >>> sparse_result = stormpy.check_model_sparse(sparse_model, properties[0])
+ >>> initial_state = sparse_model.initial_states[0]
+ >>> print(sparse_result.at(initial_state))
+ 0.16666666666666666
+
+
+Symbolic engine
+===============================
+
+Instead of using the sparse engine, one can also use a symbolic representation in terms of `binary decision diagrams (BDDs)`.
+To use the symbolic (dd) engine, we use the symbolic versions for the building and model checking:
+
+ >>> symbolic_model = stormpy.build_symbolic_model(prism_program, properties)
+ >>> print(type(symbolic_model))
+
+ >>> print("Number of states: {}".format(symbolic_model.nr_states))
+ Number of states: 13
+ >>> print("Number of transitions: {}".format(symbolic_model.nr_transitions))
+ Number of transitions: 20
+ >>> symbolic_result = stormpy.check_model_dd(symbolic_model, properties[0])
+ >>> print(symbolic_result)
+ [0, 1] (range)
+
+We can also filter the computed results and only consider the initial states:
+
+ >>> filter = stormpy.create_filter_initial_states_symbolic(symbolic_model)
+ >>> symbolic_result.filter(filter)
+ >>> print(symbolic_result.min)
+ 0.16666650772094727
+
+It is also possible to first build the model symbolically and then transform it into a sparse model:
+
+ >>> print(type(symbolic_model))
+
+ >>> transformed_model = stormpy.transform_to_sparse_model(symbolic_model)
+ >>> print(type(transformed_model))
+
+
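+As a brief sketch, the transformed model can then be handed to the sparse model checking routine shown above::
+
+ >>> transformed_result = stormpy.check_model_sparse(transformed_model, properties[0])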
+
+Hybrid engine
+===============================
+
+A third possibility is to use the hybrid engine, a combination of sparse and dd engines.
+It first builds the model symbolically.
+The actual model checking is then performed with the engine which is deemed most suitable for the given task.
+
+ >>> print(type(symbolic_model))
+
+ >>> hybrid_result = stormpy.check_model_hybrid(symbolic_model, properties[0])
+ >>> filter = stormpy.create_filter_initial_states_symbolic(symbolic_model)
+ >>> hybrid_result.filter(filter)
+ >>> print(hybrid_result)
+ 0.166667
diff --git a/doc/source/doc/exploration.rst b/doc/source/doc/exploration.rst
new file mode 100644
index 0000000..383d4f2
--- /dev/null
+++ b/doc/source/doc/exploration.rst
@@ -0,0 +1,105 @@
+****************
+Exploring Models
+****************
+
+Background
+=====================
+
+Often, stormpy is used as a testbed for new algorithms.
+An essential step is to transfer the (low-level) description of an MDP or another state-based model into
+one's own algorithm. In this section, we discuss some of this functionality.
+
+Reading MDPs
+=====================
+
+.. seealso:: `01-exploration.py `_
+
+In :doc:`../getting_started`, we briefly iterated over a DTMC. In this section, we explore an MDP::
+
+ >>> import doctest
+ >>> doctest.ELLIPSIS_MARKER = '-etc-' # doctest:+ELLIPSIS
+ >>> import stormpy
+ >>> import stormpy.examples
+ >>> import stormpy.examples.files
+ >>> program = stormpy.parse_prism_program(stormpy.examples.files.prism_mdp_maze)
+ >>> prop = "R=? [F \"goal\"]"
+
+ >>> properties = stormpy.parse_properties_for_prism_program(prop, program, None)
+ >>> model = stormpy.build_model(program, properties)
+
+The iteration over the model is as before, but now, for every action, we can have several transitions::
+
+ >>> for state in model.states:
+ ... if state.id in model.initial_states:
+ ... print("State {} is initial".format(state.id))
+ ... for action in state.actions:
+ ... for transition in action.transitions:
+ ... print("From state {} by action {}, with probability {}, go to state {}".format(state, action, transition.value(), transition.column))
+ -etc-
+
+The output (shortened for brevity) contains lines like::
+
+ From state 1 by action 0, with probability 1.0, go to state 2
+ From state 1 by action 1, with probability 1.0, go to state 1
+
+
+
+Internally, Storm can hold hints to the origin of the actions, which may be helpful for interpretation and debugging.
+As the availability and the encoding of this data depend on the input model, we discuss these features in :doc:`highlevel_models`.
+
+
+Storm currently supports deterministic rewards on states or actions. More information can be found in :doc:`reward_models`.
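+
+For instance, the names of the defined reward models can be collected as follows (a small sketch, assuming the dict-like ``reward_models`` attribute of sparse models)::
+
+ >>> reward_model_names = [name for name in model.reward_models]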
+
+
+Reading POMDPs
+======================
+.. seealso:: `02-exploration.py `_
+
+
+Internally, POMDPs extend MDPs. Thus, iterating over the MDP is done as before.
+
+ >>> import stormpy
+ >>> import stormpy.examples
+ >>> import stormpy.examples.files
+ >>> program = stormpy.parse_prism_program(stormpy.examples.files.prism_pomdp_maze)
+ >>> prop = "R=? [F \"goal\"]"
+ >>> properties = stormpy.parse_properties_for_prism_program(prop, program, None)
+ >>> model = stormpy.build_model(program, properties)
+
+Indeed, all that changed in the code above is the example we use.
+As a consequence, the model type is now a POMDP::
+
+ >>> print(model.model_type)
+ ModelType.POMDP
+
+Additionally, POMDPs have a set of observations, which are internally just numbered by an integer from 0 to the number of observations -1 ::
+
+ >>> print(model.nr_observations)
+ 8
+ >>> for state in model.states:
+ ... print("State {} has observation id {}".format(state.id, model.observations[state.id]))
+ State 0 has observation id 6
+ State 1 has observation id 1
+ State 2 has observation id 4
+ State 3 has observation id 7
+ State 4 has observation id 4
+ State 5 has observation id 3
+ State 6 has observation id 0
+ State 7 has observation id 0
+ State 8 has observation id 0
+ State 9 has observation id 0
+ State 10 has observation id 0
+ State 11 has observation id 0
+ State 12 has observation id 2
+ State 13 has observation id 2
+ State 14 has observation id 4
+ State 15 has observation id 5
+
+
+
+
+
+Reading MAs
+======================
+
+To be continued...
\ No newline at end of file
diff --git a/doc/source/doc/parametric_models.rst b/doc/source/doc/parametric_models.rst
new file mode 100644
index 0000000..d16b597
--- /dev/null
+++ b/doc/source/doc/parametric_models.rst
@@ -0,0 +1,60 @@
+*****************
+Parametric Models
+*****************
+
+
+
+Instantiating parametric models
+===============================
+.. seealso:: `01-parametric-models.py `_
+
+Input formats such as prism allow specifying programs with open constants. We refer to these open constants as parameters.
+If the constants only influence the probabilities or rates, but not the topology of the underlying model, we can build these models as parametric models::
+
+ >>> import stormpy
+ >>> import stormpy.examples
+ >>> import stormpy.examples.files
+ >>> path = stormpy.examples.files.prism_dtmc_die
+ >>> prism_program = stormpy.parse_prism_program(path)
+ >>> formula_str = "P=? [F s=7 & d=2]"
+ >>> properties = stormpy.parse_properties_for_prism_program(formula_str, prism_program)
+ >>> model = stormpy.build_parametric_model(prism_program, properties)
+ >>> parameters = model.collect_probability_parameters()
+ >>> for x in parameters:
+ ... print(x)
+
+In order to obtain a standard DTMC, MDP or other Markov model, we need to instantiate these models by means of a model instantiator::
+
+ >>> import stormpy.pars
+ >>> instantiator = stormpy.pars.PDtmcInstantiator(model)
+
+Before we obtain an instantiated model, we need to map parameters to values: We build such a dictionary as follows::
+
+ >>> point = dict()
+ >>> for x in parameters:
+ ... print(x.name)
+ ... point[x] = 0.4
+ >>> instantiated_model = instantiator.instantiate(point)
+ >>> result = stormpy.model_checking(instantiated_model, properties[0])
+
+Initial states and labels are set as for the parameter-free case.
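+For instance, the value of the result at the initial state can be accessed as usual (a short sketch)::
+
+ >>> initial_state = instantiated_model.initial_states[0]
+ >>> value_at_initial_state = result.at(initial_state)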
+
+
+Checking parametric models
+==========================
+.. seealso:: `02-parametric-models.py `_
+
+It is also possible to check the parametric model directly, just as before in :ref:`getting-started-checking-properties`::
+
+ >>> result = stormpy.model_checking(model, properties[0])
+ >>> initial_state = model.initial_states[0]
+ >>> func = result.at(initial_state)
+
+We collect the constraints ensuring that the underlying model is well-formed and that the graph structure does not change::
+
+ >>> collector = stormpy.ConstraintCollector(model)
+ >>> for formula in collector.wellformed_constraints:
+ ... print(formula)
+ >>> for formula in collector.graph_preserving_constraints:
+ ... print(formula)
+
diff --git a/doc/source/reward_models.rst b/doc/source/doc/reward_models.rst
similarity index 92%
rename from doc/source/reward_models.rst
rename to doc/source/doc/reward_models.rst
index 8f12b20..e2b1109 100644
--- a/doc/source/reward_models.rst
+++ b/doc/source/doc/reward_models.rst
@@ -2,7 +2,7 @@
Reward Models
**************
-In :doc:`getting_started`, we mainly looked at probabilities in the Markov models and properties that refer to these probabilities.
+In :doc:`../getting_started`, we mainly looked at probabilities in the Markov models and properties that refer to these probabilities.
In this section, we discuss reward models.
Exploring reward models
@@ -29,7 +29,7 @@ We can do model checking analogous to probabilities::
>>> initial_state = model.initial_states[0]
>>> result = stormpy.model_checking(model, properties[0])
>>> print("Result: {}".format(result.at(initial_state)))
- Result: 3.6666666666666665
+ Result: 3.666666666666667
The reward model has a name which we can obtain as follows::
diff --git a/doc/source/doc/schedulers.rst b/doc/source/doc/schedulers.rst
new file mode 100644
index 0000000..00b03bb
--- /dev/null
+++ b/doc/source/doc/schedulers.rst
@@ -0,0 +1,99 @@
+***********************
+Working with Schedulers
+***********************
+
+In non-deterministic models the notion of a scheduler (or policy) is important.
+The scheduler determines which action to take at each state.
+
+For a given reachability property, Storm can return the scheduler realizing the resulting probability.
+
+Examining Schedulers for MDPs
+=============================
+
+.. seealso:: `01-schedulers.py `_
+
+As in :doc:`../getting_started`, we import some required modules and build a model from the example files::
+
+ >>> import stormpy
+ >>> import stormpy.core
+ >>> import stormpy.examples
+ >>> import stormpy.examples.files
+
+ >>> path = stormpy.examples.files.prism_mdp_coin_2_2
+ >>> formula_str = "Pmin=? [F \"finished\" & \"all_coins_equal_1\"]"
+ >>> program = stormpy.parse_prism_program(path)
+ >>> formulas = stormpy.parse_properties_for_prism_program(formula_str, program)
+ >>> model = stormpy.build_model(program, formulas)
+
+Next we check the model and make sure to extract the scheduler:
+
+ >>> result = stormpy.model_checking(model, formulas[0], extract_scheduler=True)
+
+The result then contains the scheduler we want:
+
+ >>> assert result.has_scheduler
+ >>> scheduler = result.scheduler
+ >>> assert scheduler.memoryless
+ >>> assert scheduler.deterministic
+ >>> print(scheduler)
+ ___________________________________________________________________
+ Fully defined memoryless deterministic scheduler:
+ model state: choice(s)
+ 0 0
+ 1 0
+ 2 1
+ 3 0
+ -etc-
+
+To find out which action the scheduler chooses in which state, we can simply iterate over the states:
+
+ >>> for state in model.states:
+ ... choice = scheduler.get_choice(state)
+ ... action = choice.get_deterministic_choice()
+ ... print("In state {} choose action {}".format(state, action))
+ In state 0 choose action 0
+ In state 1 choose action 0
+ In state 2 choose action 1
+ In state 3 choose action 0
+ In state 4 choose action 0
+ In state 5 choose action 0
+ -etc-
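+
+A single state can also be queried directly, for instance the initial state (a small sketch)::
+
+ >>> initial_state = model.initial_states[0]
+ >>> print(scheduler.get_choice(initial_state).get_deterministic_choice())
+ 0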
+
+
+Examining Schedulers for Markov automata
+========================================
+
+.. seealso:: `02-schedulers.py `_
+
+Currently, there is no support for scheduler extraction on MAs.
+However, if the timing information is not relevant for the property, we can work around this limitation by first transforming the MA into an MDP.
+
+We build the model as before:
+
+ >>> path = stormpy.examples.files.prism_ma_simple
+ >>> formula_str = "Tmin=? [ F s=4 ]"
+
+ >>> program = stormpy.parse_prism_program(path, False, True)
+ >>> formulas = stormpy.parse_properties_for_prism_program(formula_str, program)
+ >>> ma = stormpy.build_model(program, formulas)
+
+Next we transform the continuous-time model into a discrete-time model.
+Note that all timing information is lost at this point.
+
+ >>> mdp, mdp_formulas = stormpy.transform_to_discrete_time_model(ma, formulas)
+ >>> assert mdp.model_type == stormpy.ModelType.MDP
+
+After the transformation we have obtained an MDP where we can extract the scheduler as shown before:
+
+ >>> result = stormpy.model_checking(mdp, mdp_formulas[0], extract_scheduler=True)
+ >>> scheduler = result.scheduler
+ >>> print(scheduler)
+ ___________________________________________________________________
+ Fully defined memoryless deterministic scheduler:
+ model state: choice(s)
+ 0 1
+ 1 0
+ 2 0
+ 3 0
+ 4 0
+ -etc-
diff --git a/doc/source/shortest_paths.rst b/doc/source/doc/shortest_paths.rst
similarity index 92%
rename from doc/source/shortest_paths.rst
rename to doc/source/doc/shortest_paths.rst
index 95e687e..6001a5d 100644
--- a/doc/source/shortest_paths.rst
+++ b/doc/source/doc/shortest_paths.rst
@@ -18,9 +18,9 @@ It is crucial to note that *any* path is eligible, including those that (repeate
Examining Shortest Paths
========================
-.. seealso:: `07-shortest-paths.py `_
+.. seealso:: `01-shortest-paths.py `_
-As in :doc:`getting_started`, we import some required modules and build a model from the example files::
+As in :doc:`../getting_started`, we import some required modules and build a model from the example files::
>>> import stormpy.examples
>>> import stormpy.examples.files
@@ -29,7 +29,7 @@ As in :doc:`getting_started`, we import some required modules and build a model
>>> model = stormpy.build_model(prism_program)
-We also import the `ShortestPathsGenerator`::
+We also import the ``ShortestPathsGenerator``::
>>> from stormpy.utility import ShortestPathsGenerator
@@ -40,7 +40,7 @@ and choose a target state (by its ID) to which we want to compute the shortest p
It is also possible to specify a set of target states (as a list, e.g., ``[8, 10, 11]``) or a label in the model if applicable (e.g., ``"observe0Greater1"``).
For simplicity, we will stick to using a single state for now.
-We initialize a `ShortestPathsGenerator` instance::
+We initialize a ``ShortestPathsGenerator`` instance::
>>> spg = ShortestPathsGenerator(model, state_id)
diff --git a/doc/source/getting_started.rst b/doc/source/getting_started.rst
index b577e11..e35acc1 100644
--- a/doc/source/getting_started.rst
+++ b/doc/source/getting_started.rst
@@ -11,15 +11,17 @@ This guide is intended for people which have a basic understanding of probabilis
`Storm website `_.
While we assume some very basic programming concepts, we refrain from using more advanced concepts of python throughout the guide.
-We start with a selection of high-level constructs in stormpy, and go into more details afterwards. More in-depth examples can be found in the :doc:`advanced_examples`.
+We start with a selection of high-level constructs in stormpy, and go into more detail afterwards. More in-depth examples can be found in :doc:`advanced_topics`.
.. seealso:: The code examples are also given in the `examples/ `_ folder. These boxes throughout the text will tell you which example contains the code discussed.
-In order to do this, we import stormpy::
+We start by launching the python 3 interpreter::
+
+ $ python3
+
+First we import stormpy::
>>> import stormpy
- >>> import stormpy.core
-
Building models
------------------------------------------------
@@ -39,7 +41,7 @@ With this, we can now import the path of our prism file::
>>> path = stormpy.examples.files.prism_dtmc_die
>>> prism_program = stormpy.parse_prism_program(path)
-The `prism_program` can be translated into Markov chains::
+The ``prism_program`` can be translated into a Markov chain::
>>> model = stormpy.build_model(prism_program)
>>> print("Number of states: {}".format(model.nr_states))
@@ -54,15 +56,16 @@ Moreover, initial states and deadlocks are indicated with a labelling function.
>>> print("Labels: {}".format(model.labeling.get_labels()))
Labels: ...
-We will investigate ways to examine the model in more detail in :ref:`getting-started-investigating-the-model`.
+We will investigate ways to examine the model in more detail later in :ref:`getting-started-investigating-the-model`.
+.. _getting-started-building-properties:
Building properties
--------------------------
.. seealso:: `02-getting-started.py `_
Storm takes properties in the prism-property format.
-To express that one is interested in the reachability of any state where the prism program variable s is 2, one would formulate::
+To express that one is interested in the reachability of any state where the prism program variable ``s`` is 2, one would formulate::
P=? [F s=2]
@@ -73,7 +76,7 @@ Stormpy can be used to parse this. As the variables in the property refer to a p
Notice that properties is now a list of properties containing a single element.
-However, if we build the model as before, then the appropriate information that the variable s=2 in some states is not present.
+However, if we build the model as before, then the information about which states satisfy ``s=2`` is not present.
In order to label the states accordingly, we should notify Storm upon building the model that we would like to preserve given properties.
Storm will then add the labels accordingly::
@@ -82,7 +85,7 @@ Storm will then add the labels accordingly::
Labels in the model: ['(s = 2)', 'deadlock', 'init']
Model building however now behaves slightly different: Only the properties passed are preserved, which means that model building might skip parts of the model.
-In particular, to check the probability of eventually reaching a state x where s=2, successor states of x are not relevant::
+In particular, to check the probability of eventually reaching a state ``x`` where ``s=2``, successor states of ``x`` are not relevant::
>>> print("Number of states: {}".format(model.nr_states))
Number of states: 8
@@ -91,7 +94,7 @@ If we consider another property, however, such as::
P=? [F s=7 & d=2]
-then Storm is only skipping exploration of successors of the particular state y where s=7 and d=2. In this model, state y has a self-loop, so effectively, the whole model is explored.
+then Storm is only skipping exploration of successors of the particular state ``y`` where ``s=7`` and ``d=2``. In this model, state ``y`` has a self-loop, so effectively, the whole model is explored.
.. _getting-started-checking-properties:
@@ -124,58 +127,11 @@ A good way to get the result for the initial states is as follows::
>>> print(result.at(initial_state))
0.5
-Instantiating parametric models
-------------------------------------
-.. seealso:: `04-getting-started.py `_
-
-Input formats such as prism allow to specify programs with open constants. We refer to these open constants as parameters.
-If the constants only influence the probabilities or rates, but not the topology of the underlying model, we can build these models as parametric models::
-
- >>> model = stormpy.build_parametric_model(prism_program, properties)
- >>> parameters = model.collect_probability_parameters()
- >>> for x in parameters:
- ... print(x)
-
-In order to obtain a standard DTMC, MDP or other Markov model, we need to instantiate these models by means of a model instantiator::
-
- >>> import stormpy.pars
- >>> instantiator = stormpy.pars.PDtmcInstantiator(model)
-
-Before we obtain an instantiated model, we need to map parameters to values: We build such a dictionary as follows::
-
- >>> point = dict()
- >>> for x in parameters:
- ... print(x.name)
- ... point[x] = 0.4
- >>> instantiated_model = instantiator.instantiate(point)
- >>> result = stormpy.model_checking(instantiated_model, properties[0])
-
-Initial states and labels are set as for the parameter-free case.
-
-
-Checking parametric models
-------------------------------------
-.. seealso:: `05-getting-started.py `_
-
-It is also possible to check the parametric model directly, similar as before in :ref:`getting-started-checking-properties`::
-
- >>> result = stormpy.model_checking(model, properties[0])
- >>> initial_state = model.initial_states[0]
- >>> func = result.at(initial_state)
-
-We collect the constraints ensuring that underlying model is well-formed and the graph structure does not change.
-
- >>> collector = stormpy.ConstraintCollector(model)
- >>> for formula in collector.wellformed_constraints:
- ... print(formula)
- >>> for formula in collector.graph_preserving_constraints:
- ... print(formula)
-
.. _getting-started-investigating-the-model:
Investigating the model
-------------------------------------
-.. seealso:: `06-getting-started.py `_
+.. seealso:: `04-getting-started.py `_
One powerful part of the Storm model checker is to quickly create the Markov chain from higher-order descriptions, as seen above::
@@ -224,7 +180,7 @@ Thus::
... assert len(state.actions) <= 1
-We can also check if a state is indeed an initial state. Notice that model.initial_states contains state ids, not states.::
+We can also check if a state is indeed an initial state. Notice that ``model.initial_states`` contains state ids, not states::
>>> for state in model.states:
... if state.id in model.initial_states:
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 76040e8..74a137d 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -19,16 +19,8 @@ Stormpy is a set of python bindings for the probabilistic model checker `Storm <
advanced_topics
contributors
-
-Stormpy API Reference
-====================================
-.. toctree::
- :maxdepth: 2
- :caption: Modules:
-
- code_stormpy_core
- code_stormpy_logic
- code_stormpy_storage
+
+.. include:: api.rst
Indices and tables
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
index 69d2915..80ce6c7 100644
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -7,28 +7,46 @@ Requirements
Before installing stormpy, make sure
-- `pycarl `_
-- `Storm `_
+- Python 3 is available on your system. Stormpy does not work with Python 2.
+- `pycarl `_ is available.
+- `Storm `_ is available on your system.
-are both available on your system. To avoid issues, we suggest that both use the same version of `carl `_.
+To avoid issues, we suggest that both pycarl and Storm use the same version of `carl `_.
The simplest way of ensuring this is to first install carl as explained in the `Storm installation guide `_.
You can then install Storm and pycarl independently.
-.. topic:: Virtual Environments
-
- Virtual environments create isolated environments for your projects. This helps to keep your system clean, work with different versions of packages and different versions of python. While it is not required, we recommend the use of
- such virtual environments. To get you started, we recommend `this guide `_ or `this primer `_.
-
Installation Steps
====================
+Virtual Environments
+--------------------
+
+Virtual environments create isolated environments for your projects.
+This helps to keep your system clean, work with different versions of packages and different versions of Python.
+While it is not required, we recommend the use of such virtual environments. To get you started, we recommend
+`this guide `_ or
+`this primer `_.
+
+In short, you can create a virtual environment ``env`` with::
+
+ $ pip install virtualenv
+ $ virtualenv -p python3 env
+ $ source env/bin/activate
+
+The last step activates the virtual environment.
+Whenever the environment is active, the console prompt is prefixed with ``(env)``.
+
+
+Building stormpy
+----------------
+
Clone stormpy into any suitable location::
$ git clone https://github.com/moves-rwth/stormpy.git
$ cd stormpy
-Here, build stormpy in develop mode using your favourite python distribution way of installing: e.g.::
+Build stormpy in develop mode using your favourite way of installing python packages, e.g.::
$ python3 setup.py develop
@@ -37,17 +55,45 @@ or::
$ pip install -ve .
-.. topic:: Specifying which Storm library to use
+Optional build arguments
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+The build step ``build_ext`` also takes optional arguments for a more advanced configuration of stormpy.
+
+* *Specifying which Storm library to use*
+
+ If you have multiple versions of Storm or cmake is not able to find your Storm version,
+ you can specify the ``--storm-dir YOUR-PATH-TO-STORM`` flag::
- If you have multiple versions of Storm or cmake is not able to find your Storm version,
- you can specify the `--storm-dir YOUR-PATH-TO-STORM` flag in the build_ext step::
-
$ python3 setup.py build_ext --storm-dir YOUR-PATH-TO-STORM develop
-
+
+* *Disabling functionality*
+
+  If you want to disable certain functionality of stormpy in the build, you can use the following flags (see the example below):
+
+ * ``--disable-dft`` to disable support for dynamic fault trees (DFTs)
+ * ``--disable-pars`` to disable support for parametric models
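+
+  For example, a build without support for dynamic fault trees could be configured as follows::
+
+  $ python3 setup.py build_ext --disable-dft develop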
+
+* *Building stormpy in debug mode*
+
+ If you want to build stormpy in debug mode you can add the ``--debug`` flag::
+
+ $ python3 setup.py build_ext --debug develop
+
+* *Setting number of build threads*
+
+  By default, the build of stormpy uses all available cores.
+  If you want to configure the number of build threads manually, you can specify the ``--jobs`` (or ``-j``) flag::
+
+ $ python3 setup.py build_ext --jobs 2 develop
+
+
+Testing stormpy installation
+----------------------------
+
After building, you can run the test files by::
py.test tests/
-If tests pass, you can continue with our :doc:`getting_started`.
-
-
+If the tests pass, you can now use stormpy.
+To get started, continue with our :doc:`getting_started`, consult the test files in ``tests/`` or the :doc:`api` (work in progress).
diff --git a/examples/04-getting-started.py b/examples/04-getting-started.py
index a5c0d60..e1fcd66 100644
--- a/examples/04-getting-started.py
+++ b/examples/04-getting-started.py
@@ -1,40 +1,27 @@
import stormpy
import stormpy.core
-import pycarl
-import pycarl.core
-
import stormpy.examples
import stormpy.examples.files
-import stormpy._config as config
-
def example_getting_started_04():
- # Check support for parameters
- if not config.storm_with_pars:
- print("Support parameters is missing. Try building storm-pars.")
- return
-
- import stormpy.pars
- path = stormpy.examples.files.prism_pdtmc_die
+ path = stormpy.examples.files.prism_dtmc_die
prism_program = stormpy.parse_prism_program(path)
formula_str = "P=? [F s=7 & d=2]"
properties = stormpy.parse_properties_for_prism_program(formula_str, prism_program)
- model = stormpy.build_parametric_model(prism_program, properties)
- print("Model supports parameters: {}".format(model.supports_parameters))
- parameters = model.collect_probability_parameters()
- assert len(parameters) == 2
-
- instantiator = stormpy.pars.PDtmcInstantiator(model)
- point = dict()
- for x in parameters:
- print(x.name)
- point[x] = stormpy.RationalRF(0.4)
- instantiated_model = instantiator.instantiate(point)
- result = stormpy.model_checking(instantiated_model, properties[0])
- print(result)
+ model = stormpy.build_model(prism_program, properties)
+
+ print(model.model_type)
+
+ for state in model.states:
+ if state.id in model.initial_states:
+ print(state)
+ for action in state.actions:
+ for transition in action.transitions:
+ print("From state {}, with probability {}, go to state {}".format(state, transition.value(),
+ transition.column))
if __name__ == '__main__':
diff --git a/examples/06-getting-started.py b/examples/06-getting-started.py
deleted file mode 100644
index e8d1378..0000000
--- a/examples/06-getting-started.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import stormpy
-import stormpy.core
-
-import stormpy.examples
-import stormpy.examples.files
-
-def example_getting_started_06():
- path = stormpy.examples.files.prism_dtmc_die
- prism_program = stormpy.parse_prism_program(path)
-
- formula_str = "P=? [F s=7 & d=2]"
- properties = stormpy.parse_properties_for_prism_program(formula_str, prism_program)
- model = stormpy.build_model(prism_program, properties)
-
- print(model.model_type)
-
- for state in model.states:
- if state.id in model.initial_states:
- print(state)
- for action in state.actions:
- for transition in action.transitions:
- print("From state {}, with probability {}, go to state {}".format(state, transition.value(), transition.column))
-
-
-if __name__ == '__main__':
- example_getting_started_06()
\ No newline at end of file
diff --git a/examples/dfts/01-dfts.py b/examples/dfts/01-dfts.py
new file mode 100644
index 0000000..160c975
--- /dev/null
+++ b/examples/dfts/01-dfts.py
@@ -0,0 +1,29 @@
+import stormpy
+import stormpy.dft
+
+import stormpy.examples
+import stormpy.examples.files
+
+
+def example_dfts_01():
+ # Load from JSON
+ path_json = stormpy.examples.files.dft_json_and
+ dft_small = stormpy.dft.load_dft_json_file(path_json)
+ print(dft_small)
+
+ # Load from Galileo
+ path = stormpy.examples.files.dft_galileo_hecs
+ dft = stormpy.dft.load_dft_galileo_file(path)
+ print("DFT with {} elements.".format(dft.nr_elements()))
+ print("DFT has {} BEs and {} dynamic elements.".format(dft.nr_be(), dft.nr_dynamic()))
+
+ # Analyze
+ formula_str = "T=? [ F \"failed\" ]"
+ formulas = stormpy.parse_properties(formula_str)
+ results = stormpy.dft.analyze_dft(dft, [formulas[0].raw_formula])
+ result = results[0]
+ print(result)
+
+
+if __name__ == '__main__':
+ example_dfts_01()
diff --git a/examples/exploration/01-exploration.py b/examples/exploration/01-exploration.py
new file mode 100644
index 0000000..fd22d2e
--- /dev/null
+++ b/examples/exploration/01-exploration.py
@@ -0,0 +1,30 @@
+import stormpy
+import stormpy.core
+
+import stormpy.examples
+import stormpy.examples.files
+
+
+def example_exploration_01():
+ """
+ Example for the exploration of MDPs.
+ :return:
+ """
+ program = stormpy.parse_prism_program(stormpy.examples.files.prism_mdp_maze)
+ prop = "R=? [F \"goal\"]"
+ properties = stormpy.parse_properties_for_prism_program(prop, program, None)
+ model = stormpy.build_model(program, properties)
+ print(model.model_type)
+
+
+ for state in model.states:
+ if state.id in model.initial_states:
+ print(state)
+ for action in state.actions:
+ for transition in action.transitions:
+ print("From state {} by action {}, with probability {}, go to state {}".format(state, action, transition.value(),
+ transition.column))
+
+
+if __name__ == '__main__':
+ example_exploration_01()
\ No newline at end of file
diff --git a/examples/exploration/02-exploration.py b/examples/exploration/02-exploration.py
new file mode 100644
index 0000000..951a29a
--- /dev/null
+++ b/examples/exploration/02-exploration.py
@@ -0,0 +1,38 @@
+import stormpy
+import stormpy.core
+
+import stormpy.examples
+import stormpy.examples.files
+
+
+def example_exploration_02():
+ """
+ Example for the exploration of POMDPs.
+ :return:
+ """
+ program = stormpy.parse_prism_program(stormpy.examples.files.prism_pomdp_maze)
+ prop = "R=? [F \"goal\"]"
+ properties = stormpy.parse_properties_for_prism_program(prop, program, None)
+ model = stormpy.build_model(program, properties)
+ print(model.model_type)
+ # Internally, POMDPs are just MDPs with additional observation information.
+ # Thus, data structure exploration for MDPs can be applied as before.
+ initial_state = model.initial_states[0]
+
+ for state in model.states:
+ if state.id in model.initial_states:
+ print(state)
+ for action in state.actions:
+ for transition in action.transitions:
+ print("From state {} by action {}, with probability {}, go to state {}".format(state, action, transition.value(),
+ transition.column))
+
+ print(model.nr_observations)
+
+ for state in model.states:
+ print("State {} has observation id {}".format(state.id, model.observations[state.id]))
+
+
+
+if __name__ == '__main__':
+ example_exploration_02()
\ No newline at end of file
diff --git a/examples/highlevel_models/01-highlevel-models.py b/examples/highlevel_models/01-highlevel-models.py
new file mode 100644
index 0000000..46e2f41
--- /dev/null
+++ b/examples/highlevel_models/01-highlevel-models.py
@@ -0,0 +1,17 @@
+import stormpy
+import stormpy.core
+
+import stormpy.examples
+import stormpy.examples.files
+
+
+def example_highlevel_models():
+ path = stormpy.examples.files.prism_pdtmc_die
+ prism_program = stormpy.parse_prism_program(path)
+ for c in prism_program.constants:
+ print("constant {} with type {} is {} defined".format(c.name, c.type, "" if c.defined else "not"))
+
+
+
+if __name__ == '__main__':
+ example_highlevel_models()
\ No newline at end of file
diff --git a/examples/parametric_models/01-parametric-models.py b/examples/parametric_models/01-parametric-models.py
new file mode 100644
index 0000000..30c2d4e
--- /dev/null
+++ b/examples/parametric_models/01-parametric-models.py
@@ -0,0 +1,41 @@
+import stormpy
+import stormpy.core
+
+import pycarl
+import pycarl.core
+
+import stormpy.examples
+import stormpy.examples.files
+
+import stormpy._config as config
+
+
+def example_parametric_models_01():
+ # Check support for parameters
+ if not config.storm_with_pars:
+ print("Support parameters is missing. Try building storm-pars.")
+ return
+
+ import stormpy.pars
+ path = stormpy.examples.files.prism_pdtmc_die
+ prism_program = stormpy.parse_prism_program(path)
+
+ formula_str = "P=? [F s=7 & d=2]"
+ properties = stormpy.parse_properties_for_prism_program(formula_str, prism_program)
+ model = stormpy.build_parametric_model(prism_program, properties)
+ print("Model supports parameters: {}".format(model.supports_parameters))
+ parameters = model.collect_probability_parameters()
+ assert len(parameters) == 2
+
+ instantiator = stormpy.pars.PDtmcInstantiator(model)
+ point = dict()
+ for x in parameters:
+ print(x.name)
+ point[x] = stormpy.RationalRF(0.4)
+ instantiated_model = instantiator.instantiate(point)
+ result = stormpy.model_checking(instantiated_model, properties[0])
+ print(result)
+
+
+if __name__ == '__main__':
+ example_parametric_models_01()
diff --git a/examples/05-getting-started.py b/examples/parametric_models/02-parametric-models.py
similarity index 95%
rename from examples/05-getting-started.py
rename to examples/parametric_models/02-parametric-models.py
index 29403f6..d273c0b 100644
--- a/examples/05-getting-started.py
+++ b/examples/parametric_models/02-parametric-models.py
@@ -11,7 +11,7 @@ import stormpy.examples.files
import stormpy._config as config
-def example_getting_started_05():
+def example_parametric_models_02():
# Check support for parameters
if not config.storm_with_pars:
print("Support parameters is missing. Try building storm-pars.")
@@ -45,4 +45,4 @@ def example_getting_started_05():
if __name__ == '__main__':
- example_getting_started_05()
+ example_parametric_models_02()
diff --git a/examples/reward_models.py b/examples/reward_models.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/schedulers/01-schedulers.py b/examples/schedulers/01-schedulers.py
index 6a0b2bd..3cc40bc 100644
--- a/examples/schedulers/01-schedulers.py
+++ b/examples/schedulers/01-schedulers.py
@@ -7,7 +7,6 @@ import stormpy.examples.files
def example_schedulers_01():
path = stormpy.examples.files.prism_mdp_coin_2_2
-
formula_str = "Pmin=? [F \"finished\" & \"all_coins_equal_1\"]"
program = stormpy.parse_prism_program(path)
@@ -15,15 +14,17 @@ def example_schedulers_01():
model = stormpy.build_model(program, formulas)
initial_state = model.initial_states[0]
assert initial_state == 0
- result = stormpy.model_checking(model, formulas[0], extract_scheduler = True)
+ result = stormpy.model_checking(model, formulas[0], extract_scheduler=True)
assert result.has_scheduler
- print(result.scheduler)
- assert result.scheduler.memoryless
- assert result.scheduler.deterministic
-
- for i in range(0,model.nr_states):
- print("In state {} choose action {}".format(i,result.scheduler.get_choice(i).get_deterministic_choice()))
-
+ scheduler = result.scheduler
+ print(scheduler)
+ assert scheduler.memoryless
+ assert scheduler.deterministic
+
+ for state in model.states:
+ choice = scheduler.get_choice(state)
+ action = choice.get_deterministic_choice()
+ print("In state {} choose action {}".format(state, action))
if __name__ == '__main__':
diff --git a/examples/schedulers/02-schedulers.py b/examples/schedulers/02-schedulers.py
new file mode 100644
index 0000000..9ab79a7
--- /dev/null
+++ b/examples/schedulers/02-schedulers.py
@@ -0,0 +1,37 @@
+import stormpy
+import stormpy.core
+
+import stormpy.examples
+import stormpy.examples.files
+
+
+def example_schedulers_02():
+ path = stormpy.examples.files.prism_ma_simple
+ formula_str = "Tmin=? [ F s=4 ]"
+
+ program = stormpy.parse_prism_program(path, False, True)
+ formulas = stormpy.parse_properties_for_prism_program(formula_str, program)
+ ma = stormpy.build_model(program, formulas)
+ assert ma.model_type == stormpy.ModelType.MA
+
+ # Convert MA to MDP
+ mdp, mdp_formulas = stormpy.transform_to_discrete_time_model(ma, formulas)
+ assert mdp.model_type == stormpy.ModelType.MDP
+ initial_state = mdp.initial_states[0]
+ assert initial_state == 0
+
+ result = stormpy.model_checking(mdp, mdp_formulas[0], extract_scheduler=True)
+ assert result.has_scheduler
+ scheduler = result.scheduler
+ print(scheduler)
+ assert scheduler.memoryless
+ assert scheduler.deterministic
+
+ for state in mdp.states:
+ choice = scheduler.get_choice(state)
+ action = choice.get_deterministic_choice()
+ print("In state {} choose action {}".format(state, action))
+
+
+if __name__ == '__main__':
+ example_schedulers_02()
diff --git a/lib/.gitignore b/lib/.gitignore
deleted file mode 100644
index a34a658..0000000
--- a/lib/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.so
-__pycache__/
-stormpy.egg-info/
-**/_config.py
diff --git a/lib/stormpy/__init__.py b/lib/stormpy/__init__.py
index f1ee516..368fbcb 100644
--- a/lib/stormpy/__init__.py
+++ b/lib/stormpy/__init__.py
@@ -1,3 +1,8 @@
+import sys
+
+if sys.version_info[0] == 2:
+ raise ImportError('Python 2.x is not supported for stormpy.')
+
from . import core
from .core import *
from . import storage
@@ -10,7 +15,7 @@ from pycarl import Variable # needed for building parametric models
__version__ = "unknown"
try:
- from _version import __version__
+ from ._version import __version__
except ImportError:
# We're running in a tree that doesn't have a _version.py, so we don't know what our version is.
pass
@@ -18,108 +23,192 @@ except ImportError:
core._set_up("")
+def _convert_sparse_model(model, parametric=False):
+ """
+ Convert (parametric) model in sparse representation into model corresponding to exact model type.
+ :param model: Sparse model.
+ :param parametric: Flag indicating if the model is parametric.
+ :return: Model corresponding to exact model type.
+ """
+ if parametric:
+ assert model.supports_parameters
+ if model.model_type == ModelType.DTMC:
+ return model._as_sparse_pdtmc()
+ elif model.model_type == ModelType.MDP:
+ return model._as_sparse_pmdp()
+ elif model.model_type == ModelType.CTMC:
+ return model._as_sparse_pctmc()
+ elif model.model_type == ModelType.MA:
+ return model._as_sparse_pma()
+ else:
+ raise StormError("Not supported parametric model constructed")
+ else:
+ assert not model.supports_parameters
+ if model.model_type == ModelType.DTMC:
+ return model._as_sparse_dtmc()
+ elif model.model_type == ModelType.MDP:
+ return model._as_sparse_mdp()
+ elif model.model_type == ModelType.POMDP:
+ return model._as_sparse_pomdp()
+ elif model.model_type == ModelType.CTMC:
+ return model._as_sparse_ctmc()
+ elif model.model_type == ModelType.MA:
+ return model._as_sparse_ma()
+ else:
+ raise StormError("Not supported non-parametric model constructed")
+
+
+def _convert_symbolic_model(model, parametric=False):
+ """
+    Convert a (parametric) model in symbolic representation into a model of its exact model type (DTMC, MDP, etc.).
+    :param model: Symbolic model.
+    :param parametric: Flag indicating if the model is parametric.
+    :return: Model of the exact model type.
+ """
+ if parametric:
+ assert model.supports_parameters
+ if model.model_type == ModelType.DTMC:
+ return model._as_symbolic_pdtmc()
+ elif model.model_type == ModelType.MDP:
+ return model._as_symbolic_pmdp()
+ elif model.model_type == ModelType.CTMC:
+ return model._as_symbolic_pctmc()
+ elif model.model_type == ModelType.MA:
+ return model._as_symbolic_pma()
+ else:
+ raise StormError("Not supported parametric model constructed")
+ else:
+ assert not model.supports_parameters
+ if model.model_type == ModelType.DTMC:
+ return model._as_symbolic_dtmc()
+ elif model.model_type == ModelType.MDP:
+ return model._as_symbolic_mdp()
+ elif model.model_type == ModelType.CTMC:
+ return model._as_symbolic_ctmc()
+ elif model.model_type == ModelType.MA:
+ return model._as_symbolic_ma()
+ else:
+ raise StormError("Not supported non-parametric model constructed")
+
+
def build_model(symbolic_description, properties=None):
"""
- Build a model from a symbolic description.
+ Build a model in sparse representation from a symbolic description.
+
+ :param symbolic_description: Symbolic model description to translate into a model.
+ :param List[Property] properties: List of properties that should be preserved during the translation. If None, then all properties are preserved.
+ :return: Model in sparse representation.
+ """
+ return build_sparse_model(symbolic_description, properties=properties)
+
+
+def build_parametric_model(symbolic_description, properties=None):
+ """
+ Build a parametric model in sparse representation from a symbolic description.
+
+ :param symbolic_description: Symbolic model description to translate into a model.
+ :param List[Property] properties: List of properties that should be preserved during the translation. If None, then all properties are preserved.
+ :return: Parametric model in sparse representation.
+ """
+ return build_sparse_parametric_model(symbolic_description, properties=properties)
+
+
+def build_sparse_model(symbolic_description, properties=None):
+ """
+ Build a model in sparse representation from a symbolic description.
:param symbolic_description: Symbolic model description to translate into a model.
:param List[Property] properties: List of properties that should be preserved during the translation. If None, then all properties are preserved.
:return: Model in sparse representation.
- :rtype: SparseDtmc or SparseMdp
"""
if not symbolic_description.undefined_constants_are_graph_preserving:
raise StormError("Program still contains undefined constants")
if properties:
- formulae = [prop.raw_formula for prop in properties]
- intermediate = core._build_sparse_model_from_prism_program(symbolic_description, formulae)
+ formulae = [(prop.raw_formula if isinstance(prop, Property) else prop) for prop in properties]
+ intermediate = core._build_sparse_model_from_symbolic_description(symbolic_description, formulae)
else:
- intermediate = core._build_sparse_model_from_prism_program(symbolic_description)
- assert not intermediate.supports_parameters
- if intermediate.model_type == ModelType.DTMC:
- return intermediate._as_dtmc()
- elif intermediate.model_type == ModelType.MDP:
- return intermediate._as_mdp()
- elif intermediate.model_type == ModelType.CTMC:
- return intermediate._as_ctmc()
- elif intermediate.model_type == ModelType.MA:
- return intermediate._as_ma()
- else:
- raise StormError("Not supported non-parametric model constructed")
+ intermediate = core._build_sparse_model_from_symbolic_description(symbolic_description)
+ return _convert_sparse_model(intermediate, parametric=False)
-def build_parametric_model(symbolic_description, properties=None):
+def build_sparse_parametric_model(symbolic_description, properties=None):
"""
- Build a parametric model from a symbolic description.
+ Build a parametric model in sparse representation from a symbolic description.
:param symbolic_description: Symbolic model description to translate into a model.
:param List[Property] properties: List of properties that should be preserved during the translation. If None, then all properties are preserved.
:return: Parametric model in sparse representation.
- :rtype: SparseParametricDtmc or SparseParametricMdp
"""
if not symbolic_description.undefined_constants_are_graph_preserving:
raise StormError("Program still contains undefined constants")
if properties:
- formulae = [prop.raw_formula for prop in properties]
+ formulae = [(prop.raw_formula if isinstance(prop, Property) else prop) for prop in properties]
+ intermediate = core._build_sparse_parametric_model_from_symbolic_description(symbolic_description, formulae)
+ else:
+ intermediate = core._build_sparse_parametric_model_from_symbolic_description(symbolic_description)
+ return _convert_sparse_model(intermediate, parametric=True)
+
+
+def build_symbolic_model(symbolic_description, properties=None):
+ """
+ Build a model in symbolic representation from a symbolic description.
+
+ :param symbolic_description: Symbolic model description to translate into a model.
+ :param List[Property] properties: List of properties that should be preserved during the translation. If None, then all properties are preserved.
+ :return: Model in symbolic representation.
+ """
+ if not symbolic_description.undefined_constants_are_graph_preserving:
+ raise StormError("Program still contains undefined constants")
+
+ if properties:
+ formulae = [(prop.raw_formula if isinstance(prop, Property) else prop) for prop in properties]
+ intermediate = core._build_symbolic_model_from_symbolic_description(symbolic_description, formulae)
else:
- formulae = []
- intermediate = core._build_sparse_parametric_model_from_prism_program(symbolic_description, formulae)
- assert intermediate.supports_parameters
- if intermediate.model_type == ModelType.DTMC:
- return intermediate._as_pdtmc()
- elif intermediate.model_type == ModelType.MDP:
- return intermediate._as_pmdp()
- elif intermediate.model_type == ModelType.CTMC:
- return intermediate._as_pctmc()
- elif intermediate.model_type == ModelType.MA:
- return intermediate._as_pma()
+ intermediate = core._build_symbolic_model_from_symbolic_description(symbolic_description)
+ return _convert_symbolic_model(intermediate, parametric=False)
+
+
+def build_symbolic_parametric_model(symbolic_description, properties=None):
+ """
+ Build a parametric model in symbolic representation from a symbolic description.
+
+ :param symbolic_description: Symbolic model description to translate into a model.
+ :param List[Property] properties: List of properties that should be preserved during the translation. If None, then all properties are preserved.
+ :return: Parametric model in symbolic representation.
+ """
+ if not symbolic_description.undefined_constants_are_graph_preserving:
+ raise StormError("Program still contains undefined constants")
+
+ if properties:
+ formulae = [(prop.raw_formula if isinstance(prop, Property) else prop) for prop in properties]
+ intermediate = core._build_symbolic_parametric_model_from_symbolic_description(symbolic_description, formulae)
else:
- raise StormError("Not supported parametric model constructed")
+ intermediate = core._build_symbolic_parametric_model_from_symbolic_description(symbolic_description)
+ return _convert_symbolic_model(intermediate, parametric=True)
def build_model_from_drn(file):
"""
- Build a model from the explicit DRN representation.
+ Build a model in sparse representation from the explicit DRN representation.
:param String file: DRN file containing the model.
:return: Model in sparse representation.
- :rtype: SparseDtmc or SparseMdp or SparseCTMC or SparseMA
"""
intermediate = core._build_sparse_model_from_drn(file)
- assert not intermediate.supports_parameters
- if intermediate.model_type == ModelType.DTMC:
- return intermediate._as_dtmc()
- elif intermediate.model_type == ModelType.MDP:
- return intermediate._as_mdp()
- elif intermediate.model_type == ModelType.CTMC:
- return intermediate._as_ctmc()
- elif intermediate.model_type == ModelType.MA:
- return intermediate._as_ma()
- else:
- raise StormError("Not supported non-parametric model constructed")
+ return _convert_sparse_model(intermediate, parametric=False)
def build_parametric_model_from_drn(file):
"""
- Build a parametric model from the explicit DRN representation.
+ Build a parametric model in sparse representation from the explicit DRN representation.
:param String file: DRN file containing the model.
:return: Parametric model in sparse representation.
- :rtype: SparseParametricDtmc or SparseParametricMdp or SparseParametricCTMC or SparseParametricMA
"""
intermediate = core._build_sparse_parametric_model_from_drn(file)
- assert intermediate.supports_parameters
- if intermediate.model_type == ModelType.DTMC:
- return intermediate._as_pdtmc()
- elif intermediate.model_type == ModelType.MDP:
- return intermediate._as_pmdp()
- elif intermediate.model_type == ModelType.CTMC:
- return intermediate._as_pctmc()
- elif intermediate.model_type == ModelType.MA:
- return intermediate._as_pma()
- else:
- raise StormError("Not supported parametric model constructed")
+ return _convert_sparse_model(intermediate, parametric=True)
def perform_bisimulation(model, properties, bisimulation_type):
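A minimal usage sketch of the new build entry points above, assuming the bundled brp-16-2 example and the `P=? [ F s=5 ]` property that also appears in the brp JANI file further down; `nr_states` on the symbolic model is an assumption:

```python
import stormpy
import stormpy.examples
import stormpy.examples.files

# Parse the bundled BRP example and a reachability property.
path = stormpy.examples.files.prism_dtmc_brp
program = stormpy.parse_prism_program(path)
properties = stormpy.parse_properties_for_prism_program("P=? [ F s=5 ]", program)

# Sparse (explicit) representation -- same behaviour as build_model.
sparse_model = stormpy.build_sparse_model(program, properties)
print(sparse_model.model_type, sparse_model.nr_states)

# Symbolic (decision-diagram) representation, new in this release.
symbolic_model = stormpy.build_symbolic_model(program, properties)
print(symbolic_model.model_type, symbolic_model.nr_states)
```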
@@ -130,20 +219,59 @@ def perform_bisimulation(model, properties, bisimulation_type):
:param bisimulation_type: Type of bisimulation (weak or strong).
:return: Model after bisimulation.
"""
- formulae = [prop.raw_formula for prop in properties]
+ return perform_sparse_bisimulation(model, properties, bisimulation_type)
+
+
+def perform_sparse_bisimulation(model, properties, bisimulation_type):
+ """
+ Perform bisimulation on model in sparse representation.
+ :param model: Model.
+ :param properties: Properties to preserve during bisimulation.
+ :param bisimulation_type: Type of bisimulation (weak or strong).
+ :return: Model after bisimulation.
+ """
+ formulae = [(prop.raw_formula if isinstance(prop, Property) else prop) for prop in properties]
if model.supports_parameters:
return core._perform_parametric_bisimulation(model, formulae, bisimulation_type)
else:
return core._perform_bisimulation(model, formulae, bisimulation_type)
-def model_checking(model, property, only_initial_states=False, extract_scheduler=False):
+def perform_symbolic_bisimulation(model, properties):
+ """
+ Perform bisimulation on model in symbolic representation.
+ :param model: Model.
+ :param properties: Properties to preserve during bisimulation.
+ :return: Model after bisimulation.
+ """
+ formulae = [(prop.raw_formula if isinstance(prop, Property) else prop) for prop in properties]
+ bisimulation_type = BisimulationType.STRONG
+ if model.supports_parameters:
+ return core._perform_symbolic_parametric_bisimulation(model, formulae, bisimulation_type)
+ else:
+ return core._perform_symbolic_bisimulation(model, formulae, bisimulation_type)
+
+
+def model_checking(model, property, only_initial_states=False, extract_scheduler=False, environment=Environment()):
+ """
+ Perform model checking on model for property.
+ :param model: Model.
+ :param property: Property to check for.
+ :param only_initial_states: If True, only results for initial states are computed, otherwise for all states.
+    :param extract_scheduler: If True, try to extract a scheduler.
+    :param environment: Environment to use during model checking.
+ :return: Model checking result.
+ :rtype: CheckResult
+ """
+ return check_model_sparse(model, property, only_initial_states=only_initial_states,
+ extract_scheduler=extract_scheduler, environment=environment)
+
+
+def check_model_sparse(model, property, only_initial_states=False, extract_scheduler=False, environment=Environment()):
"""
Perform model checking on model for property.
:param model: Model.
:param property: Property to check for.
- :param only_initial_states: If True, only results for initial states are computed.
- If False, results for all states are computed.
+ :param only_initial_states: If True, only results for initial states are computed, otherwise for all states.
:param extract_scheduler: If True, try to extract a scheduler
:return: Model checking result.
:rtype: CheckResult
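A rough sketch of the new symbolic bisimulation entry point, under the same assumptions as the sketch above; `nr_states` on the symbolic models is again an assumption:

```python
import stormpy
import stormpy.examples
import stormpy.examples.files

program = stormpy.parse_prism_program(stormpy.examples.files.prism_dtmc_brp)
properties = stormpy.parse_properties_for_prism_program("P=? [ F s=5 ]", program)

symbolic_model = stormpy.build_symbolic_model(program, properties)
print("States before bisimulation:", symbolic_model.nr_states)

# Symbolic bisimulation always applies strong bisimulation (see above).
quotient = stormpy.perform_symbolic_bisimulation(symbolic_model, properties)
print("States after bisimulation:", quotient.nr_states)
```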
@@ -156,11 +284,81 @@ def model_checking(model, property, only_initial_states=False, extract_scheduler
if model.supports_parameters:
task = core.ParametricCheckTask(formula, only_initial_states)
task.set_produce_schedulers(extract_scheduler)
- return core._parametric_model_checking_sparse_engine(model, task)
+ return core._parametric_model_checking_sparse_engine(model, task, environment=environment)
else:
task = core.CheckTask(formula, only_initial_states)
task.set_produce_schedulers(extract_scheduler)
- return core._model_checking_sparse_engine(model, task)
+ return core._model_checking_sparse_engine(model, task, environment=environment)
+
+
+def check_model_dd(model, property, only_initial_states=False, environment=Environment()):
+ """
+ Perform model checking using dd engine.
+ :param model: Model.
+ :param property: Property to check for.
+    :param only_initial_states: If True, only results for initial states are computed, otherwise for all states.
+    :param environment: Environment to use during model checking.
+ :return: Model checking result.
+ :rtype: CheckResult
+ """
+ if isinstance(property, Property):
+ formula = property.raw_formula
+ else:
+ formula = property
+
+ if model.supports_parameters:
+ task = core.ParametricCheckTask(formula, only_initial_states)
+ return core._parametric_model_checking_dd_engine(model, task, environment=environment)
+ else:
+ task = core.CheckTask(formula, only_initial_states)
+ return core._model_checking_dd_engine(model, task, environment=environment)
+
+
+def check_model_hybrid(model, property, only_initial_states=False, environment=Environment()):
+ """
+ Perform model checking using hybrid engine.
+ :param model: Model.
+ :param property: Property to check for.
+    :param only_initial_states: If True, only results for initial states are computed, otherwise for all states.
+    :param environment: Environment to use during model checking.
+ :return: Model checking result.
+ :rtype: CheckResult
+ """
+ if isinstance(property, Property):
+ formula = property.raw_formula
+ else:
+ formula = property
+
+ if model.supports_parameters:
+ task = core.ParametricCheckTask(formula, only_initial_states)
+ return core._parametric_model_checking_hybrid_engine(model, task, environment=environment)
+ else:
+ task = core.CheckTask(formula, only_initial_states)
+ return core._model_checking_hybrid_engine(model, task, environment=environment)
+
+
+def transform_to_sparse_model(model):
+ """
+ Transform model in symbolic representation into model in sparse representation.
+ :param model: Symbolic model.
+ :return: Sparse model.
+ """
+ if model.supports_parameters:
+ return core._transform_to_sparse_parametric_model(model)
+ else:
+ return core._transform_to_sparse_model(model)
+
+
+def transform_to_discrete_time_model(model, properties):
+ """
+    Transform a continuous-time model into a discrete-time model (e.g. an MA into an MDP).
+ :param model: Continuous-time model.
+ :param properties: List of properties to transform as well.
+ :return: Tuple (Discrete-time model, converted properties).
+ """
+ formulae = [(prop.raw_formula if isinstance(prop, Property) else prop) for prop in properties]
+ if model.supports_parameters:
+ return core._transform_to_discrete_time_parametric_model(model, formulae)
+ else:
+ return core._transform_to_discrete_time_model(model, formulae)
def prob01min_states(model, eventually_formula):
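A sketch combining the new engines with the symbolic-to-sparse transformation, under the same assumptions as above; `at` on the sparse result and `initial_states` on the sparse model are taken from the existing sparse API:

```python
import stormpy
import stormpy.examples
import stormpy.examples.files

program = stormpy.parse_prism_program(stormpy.examples.files.prism_dtmc_brp)
properties = stormpy.parse_properties_for_prism_program("P=? [ F s=5 ]", program)
symbolic_model = stormpy.build_symbolic_model(program, properties)

# Check directly on the decision-diagram representation ...
result_hybrid = stormpy.check_model_hybrid(symbolic_model, properties[0])

# ... or transform to a sparse model first and use the sparse engine.
sparse_model = stormpy.transform_to_sparse_model(symbolic_model)
result_sparse = stormpy.check_model_sparse(sparse_model, properties[0])
print(result_sparse.at(sparse_model.initial_states[0]))
```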
@@ -214,3 +412,34 @@ def compute_prob01max_states(model, phi_states, psi_states):
return core._compute_prob01states_max_rationalfunc(model, phi_states, psi_states)
else:
return core._compute_prob01states_max_double(model, phi_states, psi_states)
+
+
+def topological_sort(model, forward=True, initial=[]):
+ """
+
+ :param model:
+ :param forward:
+ :return:
+ """
+ matrix = model.transition_matrix if forward else model.backward_transition_matrix
+ if isinstance(model, storage._SparseParametricModel):
+ return storage._topological_sort_rf(matrix, initial)
+ elif isinstance(model, storage._SparseModel):
+ return storage._topological_sort_double(matrix, initial)
+ else:
+ raise StormError("Unknown kind of model.")
+
+
+def construct_submodel(model, states, actions, keep_unreachable_states=True, options=SubsystemBuilderOptions()):
+ """
+
+ :param model: The model
+ :param states: Which states should be preserved
+ :param actions: Which actions should be preserved
+ :param keep_unreachable_states: If False, run a reachability analysis.
+ :return: A model with fewer states/actions
+ """
+ if isinstance(model, storage._SparseModel):
+ return core._construct_subsystem_double(model, states, actions, keep_unreachable_states, options)
+ else:
+ raise NotImplementedError()
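A hypothetical sketch of construct_submodel; the BitVector constructor taking a length plus a list of set positions and the `nr_choices` attribute are assumptions, and keeping every state and choice is purely illustrative:

```python
import stormpy
import stormpy.examples
import stormpy.examples.files

program = stormpy.parse_prism_program(stormpy.examples.files.prism_mdp_maze)
model = stormpy.build_model(program)

# Keep every state and every choice; a real application would drop some of them.
states = stormpy.storage.BitVector(model.nr_states, list(range(model.nr_states)))
actions = stormpy.storage.BitVector(model.nr_choices, list(range(model.nr_choices)))

submodel = stormpy.construct_submodel(model, states, actions)
print(type(submodel))
```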
diff --git a/lib/stormpy/_version.py b/lib/stormpy/_version.py
index c68196d..67bc602 100644
--- a/lib/stormpy/_version.py
+++ b/lib/stormpy/_version.py
@@ -1 +1 @@
-__version__ = "1.2.0"
+__version__ = "1.3.0"
diff --git a/lib/stormpy/examples/files.py b/lib/stormpy/examples/files.py
index 61d9fdb..48a51e5 100644
--- a/lib/stormpy/examples/files.py
+++ b/lib/stormpy/examples/files.py
@@ -19,6 +19,8 @@ prism_pdtmc_die = _path("pdtmc", "parametric_die.pm")
"""Knuth Yao Die -- 2 unfair coins Example"""
prism_dtmc_brp = _path("dtmc", "brp-16-2.pm")
"""Bounded Retransmission Protocol"""
+prism_ma_simple = _path("ma", "simple.ma")
+"""Prism file for a simple Markov automaton"""
drn_ctmc_dft = _path("ctmc", "dft.drn")
"""DRN format for a CTMC from a DFT"""
drn_pdtmc_die = _path("pdtmc", "die.drn")
@@ -26,4 +28,14 @@ drn_pdtmc_die = _path("pdtmc", "die.drn")
jani_dtmc_die = _path("dtmc", "die.jani")
"""Jani Version of Knuth Yao Die Example"""
prism_mdp_coin_2_2 = _path("mdp", "coin2-2.nm")
-"""Prism example for coin MDP"""
\ No newline at end of file
+"""Prism example for coin MDP"""
+prism_pmdp_coin_two_dice = _path("pmdp", "two_dice.nm")
+"""Prism example for parametric two dice"""
+prism_mdp_maze = _path("mdp", "maze_2.nm")
+"""Prism example for the maze MDP"""
+prism_pomdp_maze = _path("pomdp", "maze_2.prism")
+"""Prism example for the maze POMDP"""
+dft_galileo_hecs = _path("dft", "hecs.dft")
+"""DFT example for HECS (Galileo format)"""
+dft_json_and = _path("dft", "and.json")
+"""DFT example for AND gate (JSON format)"""
diff --git a/lib/stormpy/examples/files/ctmc/dft.drn b/lib/stormpy/examples/files/ctmc/dft.drn
index 1f1f317..91245df 100644
--- a/lib/stormpy/examples/files/ctmc/dft.drn
+++ b/lib/stormpy/examples/files/ctmc/dft.drn
@@ -3,71 +3,73 @@
@type: CTMC
@parameters
+@reward_models
+
@nr_states
16
@model
-state 0 init
+state 0 !1 failed
action 0
- 1 : 0.5
- 2 : 0.5
- 3 : 0.5
- 4 : 0.5
-state 1
- action 0
- 5 : 0.5
- 9 : 0.5
- 11 : 0.5
-state 2
- action 0
- 5 : 0.5
- 14 : 0.5
- 15 : 0.5
-state 3
+ 0 : 1
+state 1 !2 init
action 0
+ 2 : 0.5
9 : 0.5
- 12 : 0.5
+ 13 : 0.5
15 : 0.5
-state 4
- action 0
- 11 : 0.5
- 12 : 0.5
- 14 : 0.5
-state 5
+state 2 !1.5
action 0
+ 3 : 0.5
6 : 0.5
8 : 0.5
-state 6
+state 3 !1
action 0
- 7 : 0.5
-state 7 failed
+ 4 : 0.5
+ 5 : 0.5
+state 4 !0.5
+ action 0
+ 0 : 0.5
+state 5 !0.5
action 0
- 7 : 1
-state 8
+ 0 : 0.5
+state 6 !1
action 0
+ 4 : 0.5
7 : 0.5
-state 9
+state 7 !0.5
action 0
- 8 : 0.5
- 10 : 0.5
-state 10
+ 0 : 0.5
+state 8 !1
action 0
+ 5 : 0.5
7 : 0.5
-state 11
+state 9 !1.5
action 0
- 6 : 0.5
+ 3 : 0.5
10 : 0.5
-state 12
+ 12 : 0.5
+state 10 !1
action 0
- 10 : 0.5
- 13 : 0.5
-state 13
+ 4 : 0.5
+ 11 : 0.5
+state 11 !0.5
action 0
- 7 : 0.5
-state 14
+ 0 : 0.5
+state 12 !1
action 0
- 6 : 0.5
- 13 : 0.5
-state 15
+ 5 : 0.5
+ 11 : 0.5
+state 13 !1.5
action 0
8 : 0.5
- 13 : 0.5
+ 12 : 0.5
+ 14 : 0.5
+state 14 !1
+ action 0
+ 7 : 0.5
+ 11 : 0.5
+state 15 !1.5
+ action 0
+ 6 : 0.5
+ 10 : 0.5
+ 14 : 0.5
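The updated DRN file can be loaded with the DRN builder from `__init__.py` above; a minimal sketch:

```python
import stormpy
import stormpy.examples
import stormpy.examples.files

model = stormpy.build_model_from_drn(stormpy.examples.files.drn_ctmc_dft)
print(model.model_type, model.nr_states)  # a CTMC with 16 states
```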
diff --git a/lib/stormpy/examples/files/dft/hecs.dft b/lib/stormpy/examples/files/dft/hecs.dft
new file mode 100644
index 0000000..b470ff9
--- /dev/null
+++ b/lib/stormpy/examples/files/dft/hecs.dft
@@ -0,0 +1,24 @@
+toplevel "n0";
+"n0" or "n116" "n137" "n120" "n21";
+"n137" wsp "n139" "n9";
+"n120" wsp "n118" "n134" "n10";
+"n21" 3of5 "n111" "n109" "n117" "n110" "n112";
+"n110" or "n18" "n7";
+"n117" or "n17" "n7";
+"n109" or "n15" "n23";
+"n112" or "n19" "n13";
+"n13" and "n23" "n7";
+"n111" or "n14" "n23";
+"n116" lambda=0.0018 dorm=0.0;
+"n139" lambda=2.0E-5 dorm=0.0;
+"n10" lambda=0.001 dorm=0.0;
+"n118" lambda=0.002 dorm=0.0;
+"n18" lambda=6.0E-4 dorm=0.0;
+"n17" lambda=6.0E-4 dorm=0.0;
+"n15" lambda=6.0E-4 dorm=0.0;
+"n19" lambda=6.0E-4 dorm=0.0;
+"n7" lambda=5.0E-4 dorm=0.0;
+"n23" lambda=5.0E-4 dorm=0.0;
+"n14" lambda=6.0E-4 dorm=0.0;
+"n134" lambda=0.002 dorm=0.0;
+"n9" lambda=1.0E-5 dorm=0.0;
diff --git a/lib/stormpy/examples/files/dtmc/brp.jani b/lib/stormpy/examples/files/dtmc/brp.jani
index 8420dc2..22847fd 100644
--- a/lib/stormpy/examples/files/dtmc/brp.jani
+++ b/lib/stormpy/examples/files/dtmc/brp.jani
@@ -1,2033 +1,1401 @@
-
{
- "jani-version":1,
- "name":"brp",
- "type":"dtmc",
- "features":[
- "derived-operators"
- ],
- "actions":[
- {
- "name":"NewFile"
- },
- {
- "name":"aF"
- },
+ "actions": [
{
- "name":"aB"
+ "name": "NewFile"
},
{
- "name":"TO_Msg"
+ "name": "SyncWait"
},
{
- "name":"TO_Ack"
+ "name": "TO_Ack"
},
{
-         "name":"τ"
+ "name": "TO_Msg"
},
{
- "name":"SyncWait"
+ "name": "aA"
},
{
- "name":"aG"
+ "name": "aB"
},
{
- "name":"aA"
- }
- ],
- "constants":[
- {
- "name":"N",
- "type":"int"
+ "name": "aF"
},
{
- "name":"MAX",
- "type":"int"
+ "name": "aG"
}
],
- "variables":[
- {
- "name":"s",
- "type":{
- "kind":"bounded",
- "base":"int",
- "lower-bound":0,
- "upper-bound":6
- }
- },
- {
- "name":"srep",
- "type":{
- "kind":"bounded",
- "base":"int",
- "lower-bound":0,
- "upper-bound":3
- }
- },
- {
- "name":"nrtr",
- "type":{
- "kind":"bounded",
- "base":"int",
- "lower-bound":0,
- "upper-bound":"MAX"
- }
- },
- {
- "name":"i",
- "type":{
- "kind":"bounded",
- "base":"int",
- "lower-bound":0,
- "upper-bound":"N"
- }
- },
- {
- "name":"bs",
- "type":"bool"
- },
- {
- "name":"s_ab",
- "type":"bool"
- },
- {
- "name":"fs",
- "type":"bool"
- },
- {
- "name":"ls",
- "type":"bool"
- },
- {
- "name":"r",
- "type":{
- "kind":"bounded",
- "base":"int",
- "lower-bound":0,
- "upper-bound":5
- }
- },
- {
- "name":"rrep",
- "type":{
- "kind":"bounded",
- "base":"int",
- "lower-bound":0,
- "upper-bound":4
- }
- },
- {
- "name":"fr",
- "type":"bool"
- },
- {
- "name":"lr",
- "type":"bool"
- },
- {
- "name":"br",
- "type":"bool"
- },
- {
- "name":"r_ab",
- "type":"bool"
- },
- {
- "name":"recv",
- "type":"bool"
- },
- {
- "name":"T",
- "type":"bool"
- },
- {
- "name":"k",
- "type":{
- "kind":"bounded",
- "base":"int",
- "lower-bound":0,
- "upper-bound":2
- }
- },
+ "automata": [
{
- "name":"l",
- "type":{
- "kind":"bounded",
- "base":"int",
- "lower-bound":0,
- "upper-bound":2
- }
- },
- {
- "name":"reward_",
- "type":"real",
- "initial-value":0.0,
- "transient":true
- }
- ],
- "restrict-initial":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":0
- },
- "right":{
- "op":"=",
- "left":"srep",
- "right":0
- }
- },
- "right":{
- "op":"=",
- "left":"nrtr",
- "right":0
- }
- },
- "right":{
- "op":"=",
- "left":"i",
- "right":0
- }
- },
- "right":{
- "op":"=",
- "left":"bs",
- "right":false
- }
- },
- "right":{
- "op":"=",
- "left":"s_ab",
- "right":false
- }
- },
- "right":{
- "op":"=",
- "left":"fs",
- "right":false
- }
- },
- "right":{
- "op":"=",
- "left":"ls",
- "right":false
- }
- },
- "right":{
- "op":"=",
- "left":"r",
- "right":0
- }
- },
- "right":{
- "op":"=",
- "left":"rrep",
- "right":0
- }
- },
- "right":{
- "op":"=",
- "left":"fr",
- "right":false
- }
- },
- "right":{
- "op":"=",
- "left":"lr",
- "right":false
- }
+ "edges": [
+ {
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "s",
+ "value": 5
},
- "right":{
- "op":"=",
- "left":"br",
- "right":false
+ {
+ "ref": "srep",
+ "value": 1
}
- },
- "right":{
- "op":"=",
- "left":"r_ab",
- "right":false
- }
- },
- "right":{
- "op":"=",
- "left":"recv",
- "right":false
- }
- },
- "right":{
- "op":"=",
- "left":"T",
- "right":false
- }
- },
- "right":{
- "op":"=",
- "left":"k",
- "right":0
- }
- },
- "right":{
- "op":"=",
- "left":"l",
- "right":0
- }
- }
- },
- "properties":[
- {
- "name":"Property_brp_0",
- "expression":{
- "op":"filter",
- "fun":"min",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"srep",
- "right":1
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "(((s = 3) & (nrtr = MAX)) & (i < N))",
+ "exp": {
+ "left": {
+ "left": {
+ "left": "s",
+ "op": "=",
+ "right": 3
},
- "right":{
- "op":"=",
- "left":"rrep",
- "right":3
+ "op": "∧",
+ "right": {
+ "left": "nrtr",
+ "op": "=",
+ "right": "MAX"
}
},
- "right":"recv"
- }
- }
- },
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F srep=1 & rrep=3 & recv ]"
- },
- {
- "name":"Property_brp_1",
- "expression":{
- "op":"filter",
- "fun":"max",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"srep",
- "right":1
- },
- "right":{
- "op":"=",
- "left":"rrep",
- "right":3
- }
- },
- "right":"recv"
+ "op": "∧",
+ "right": {
+ "left": "i",
+ "op": "<",
+ "right": "N"
+ }
}
- }
+ },
+ "location": "l"
},
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F srep=1 & rrep=3 & recv ]"
- },
- {
- "name":"Property_brp_2",
- "expression":{
- "op":"filter",
- "fun":"min",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"srep",
- "right":3
+ {
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "s",
+ "value": 5
},
- "right":{
- "op":"¬",
- "exp":{
- "op":"=",
- "left":"rrep",
- "right":3
- }
+ {
+ "ref": "srep",
+ "value": 2
}
- },
- "right":"recv"
- }
- }
- },
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F srep=3 & !(rrep=3) & recv ]"
- },
- {
- "name":"Property_brp_3",
- "expression":{
- "op":"filter",
- "fun":"max",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"srep",
- "right":3
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "(((s = 3) & (nrtr = MAX)) & (i = N))",
+ "exp": {
+ "left": {
+ "left": {
+ "left": "s",
+ "op": "=",
+ "right": 3
},
- "right":{
- "op":"¬",
- "exp":{
- "op":"=",
- "left":"rrep",
- "right":3
- }
+ "op": "∧",
+ "right": {
+ "left": "nrtr",
+ "op": "=",
+ "right": "MAX"
}
},
- "right":"recv"
- }
- }
- },
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F srep=3 & !(rrep=3) & recv ]"
- },
- {
- "name":"Property_brp_4",
- "expression":{
- "op":"filter",
- "fun":"min",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"=",
- "left":"s",
- "right":5
- }
- }
- },
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F s=5 ]"
- },
- {
- "name":"Property_brp_5",
- "expression":{
- "op":"filter",
- "fun":"max",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"=",
- "left":"s",
- "right":5
- }
- }
- },
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F s=5 ]"
- },
- {
- "name":"Property_brp_6",
- "expression":{
- "op":"filter",
- "fun":"min",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":5
- },
- "right":{
- "op":"=",
- "left":"srep",
- "right":2
+ "op": "∧",
+ "right": {
+ "left": "i",
+ "op": "=",
+ "right": "N"
}
}
- }
- },
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F s=5 & srep=2 ]"
- },
- {
- "name":"Property_brp_7",
- "expression":{
- "op":"filter",
- "fun":"max",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":5
- },
- "right":{
- "op":"=",
- "left":"srep",
- "right":2
- }
- }
- }
+ },
+ "location": "l"
},
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F s=5 & srep=2 ]"
- },
- {
- "name":"Property_brp_8",
- "expression":{
- "op":"filter",
- "fun":"min",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":5
+ {
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "s",
+ "value": 1
},
- "right":{
- "op":"=",
- "left":"srep",
- "right":1
+ {
+ "ref": "i",
+ "value": {
+ "left": "i",
+ "op": "+",
+ "right": 1
+ }
}
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((s = 4) & (i < N))",
+ "exp": {
+ "left": {
+ "left": "s",
+ "op": "=",
+ "right": 4
},
- "right":{
- "op":">",
- "left":"i",
- "right":8
+ "op": "∧",
+ "right": {
+ "left": "i",
+ "op": "<",
+ "right": "N"
}
}
- }
+ },
+ "location": "l"
},
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F s=5 & srep=1 & i>8 ]"
- },
- {
- "name":"Property_brp_9",
- "expression":{
- "op":"filter",
- "fun":"max",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":5
+ {
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "s",
+ "value": 0
},
- "right":{
- "op":"=",
- "left":"srep",
- "right":1
- }
- },
- "right":{
- "op":">",
- "left":"i",
- "right":8
- }
- }
- }
- },
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F s=5 & srep=1 & i>8 ]"
- },
- {
- "name":"Property_brp_10",
- "expression":{
- "op":"filter",
- "fun":"min",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"¬",
- "exp":{
- "op":"=",
- "left":"srep",
- "right":0
- }
- },
- "right":{
- "op":"¬",
- "exp":"recv"
- }
- }
- }
- },
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F !(srep=0) & !recv ]"
- },
- {
- "name":"Property_brp_11",
- "expression":{
- "op":"filter",
- "fun":"max",
- "values":{
- "op":"Pmax",
- "exp":{
- "op":"U",
- "left":true,
- "right":{
- "op":"∧",
- "left":{
- "op":"¬",
- "exp":{
- "op":"=",
- "left":"srep",
- "right":0
+ {
+ "ref": "srep",
+ "value": 3
}
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((s = 4) & (i = N))",
+ "exp": {
+ "left": {
+ "left": "s",
+ "op": "=",
+ "right": 4
},
- "right":{
- "op":"¬",
- "exp":"recv"
+ "op": "∧",
+ "right": {
+ "left": "i",
+ "op": "=",
+ "right": "N"
}
}
- }
+ },
+ "location": "l"
},
- "states":{
- "op":"initial"
- }
- },
- "comment":"P=?[ F !(srep=0) & !recv ]"
- }
- ],
- "automata":[
- {
- "name":"sender",
- "locations":[
- {
- "name":"location"
- }
- ],
- "initial-locations":[
- "location"
- ],
- "edges":[
{
- "location":"location",
- "action":"NewFile",
- "guard":{
- "exp":{
- "op":"=",
- "left":"s",
- "right":0
- }
- },
- "destinations":[
+ "action": "NewFile",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"s",
- "value":1
+ "ref": "s",
+ "value": 1
},
{
- "ref":"i",
- "value":1
+ "ref": "srep",
+ "value": 0
},
{
- "ref":"srep",
- "value":0
+ "ref": "i",
+ "value": 1
}
- ]
+ ],
+ "location": "l"
}
- ]
- },
- {
- "location":"location",
- "action":"aF",
- "guard":{
- "exp":{
- "op":"=",
- "left":"s",
- "right":1
+ ],
+ "guard": {
+ "comment": "(s = 0)",
+ "exp": {
+ "left": "s",
+ "op": "=",
+ "right": 0
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "SyncWait",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
- {
- "ref":"s",
- "value":2
- },
- {
- "ref":"fs",
- "value":{
- "op":"=",
- "left":"i",
- "right":1
- }
- },
- {
- "ref":"ls",
- "value":{
- "op":"=",
- "left":"i",
- "right":"N"
- }
- },
- {
- "ref":"bs",
- "value":"s_ab"
- },
- {
- "ref":"nrtr",
- "value":0
- },
+ "assignments": [
{
- "ref":"reward_",
- "value":{
- "op":"ite",
- "if":{
- "op":"=",
- "left":"i",
- "right":1
- },
- "then":1,
- "else":0
- }
+ "ref": "s",
+ "value": 6
}
- ]
+ ],
+ "location": "l"
}
- ]
- },
- {
- "location":"location",
- "action":"aB",
- "guard":{
- "exp":{
- "op":"=",
- "left":"s",
- "right":2
+ ],
+ "guard": {
+ "comment": "(s = 5)",
+ "exp": {
+ "left": "s",
+ "op": "=",
+ "right": 5
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "SyncWait",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"s",
- "value":4
+ "ref": "s_ab",
+ "value": false
},
{
- "ref":"s_ab",
- "value":{
- "op":"¬",
- "exp":"s_ab"
- }
+ "ref": "s",
+ "value": 0
}
- ]
+ ],
+ "location": "l"
}
- ]
- },
- {
- "location":"location",
- "action":"TO_Msg",
- "guard":{
- "exp":{
- "op":"=",
- "left":"s",
- "right":2
+ ],
+ "guard": {
+ "comment": "(s = 6)",
+ "exp": {
+ "left": "s",
+ "op": "=",
+ "right": 6
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "TO_Ack",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"s",
- "value":3
+ "ref": "s",
+ "value": 3
}
- ]
+ ],
+ "location": "l"
}
- ]
- },
- {
- "location":"location",
- "action":"TO_Ack",
- "guard":{
- "exp":{
- "op":"=",
- "left":"s",
- "right":2
+ ],
+ "guard": {
+ "comment": "(s = 2)",
+ "exp": {
+ "left": "s",
+ "op": "=",
+ "right": 2
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "TO_Msg",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"s",
- "value":3
+ "ref": "s",
+ "value": 3
}
- ]
+ ],
+ "location": "l"
}
- ]
+ ],
+ "guard": {
+ "comment": "(s = 2)",
+ "exp": {
+ "left": "s",
+ "op": "=",
+ "right": 2
+ }
+ },
+ "location": "l"
},
{
- "location":"location",
- "action":"aF",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":3
- },
- "right":{
- "op":"<",
- "left":"nrtr",
- "right":"MAX"
- }
+ "action": "aB",
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "s_ab",
+ "value": {
+ "exp": "s_ab",
+ "op": "¬"
+ }
+ },
+ {
+ "ref": "s",
+ "value": 4
+ }
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "(s = 2)",
+ "exp": {
+ "left": "s",
+ "op": "=",
+ "right": 2
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "aF",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"s",
- "value":2
+ "ref": "bs",
+ "value": "s_ab"
},
{
- "ref":"fs",
- "value":{
- "op":"=",
- "left":"i",
- "right":1
+ "ref": "fs",
+ "value": {
+ "left": "i",
+ "op": "=",
+ "right": 1
}
},
{
- "ref":"ls",
- "value":{
- "op":"=",
- "left":"i",
- "right":"N"
+ "ref": "ls",
+ "value": {
+ "left": "i",
+ "op": "=",
+ "right": "N"
}
},
{
- "ref":"bs",
- "value":"s_ab"
- },
- {
- "ref":"nrtr",
- "value":{
- "op":"+",
- "left":"nrtr",
- "right":1
- }
+ "ref": "s",
+ "value": 2
},
{
- "ref":"reward_",
- "value":{
- "op":"ite",
- "if":{
- "op":"=",
- "left":"i",
- "right":1
- },
- "then":1,
- "else":0
- }
+ "ref": "nrtr",
+ "value": 0
}
- ]
+ ],
+ "location": "l"
}
- ]
- },
- {
- "location":"location",
-            "action":"τ",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":3
- },
- "right":{
- "op":"=",
- "left":"nrtr",
- "right":"MAX"
- }
- },
- "right":{
- "op":"<",
- "left":"i",
- "right":"N"
- }
+ ],
+ "guard": {
+ "comment": "(s = 1)",
+ "exp": {
+ "left": "s",
+ "op": "=",
+ "right": 1
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "aF",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"s",
- "value":5
+ "ref": "bs",
+ "value": "s_ab"
},
{
- "ref":"srep",
- "value":1
- }
- ]
- }
- ]
- },
- {
- "location":"location",
-            "action":"τ",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":3
+ "ref": "fs",
+ "value": {
+ "left": "i",
+ "op": "=",
+ "right": 1
+ }
},
- "right":{
- "op":"=",
- "left":"nrtr",
- "right":"MAX"
- }
- },
- "right":{
- "op":"=",
- "left":"i",
- "right":"N"
- }
- }
- },
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
{
- "ref":"s",
- "value":5
+ "ref": "ls",
+ "value": {
+ "left": "i",
+ "op": "=",
+ "right": "N"
+ }
},
{
- "ref":"srep",
- "value":2
- }
- ]
- }
- ]
- },
- {
- "location":"location",
-            "action":"τ",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":4
- },
- "right":{
- "op":"<",
- "left":"i",
- "right":"N"
- }
- }
- },
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
- {
- "ref":"s",
- "value":1
+ "ref": "s",
+ "value": 2
},
{
- "ref":"i",
- "value":{
- "op":"+",
- "left":"i",
- "right":1
+ "ref": "nrtr",
+ "value": {
+ "left": "nrtr",
+ "op": "+",
+ "right": 1
}
}
- ]
- }
- ]
- },
- {
- "location":"location",
-            "action":"τ",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"s",
- "right":4
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((s = 3) & (nrtr < MAX))",
+ "exp": {
+ "left": {
+ "left": "s",
+ "op": "=",
+ "right": 3
},
- "right":{
- "op":"=",
- "left":"i",
- "right":"N"
+ "op": "∧",
+ "right": {
+ "left": "nrtr",
+ "op": "<",
+ "right": "MAX"
}
}
},
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
- {
- "ref":"s",
- "value":0
- },
- {
- "ref":"srep",
- "value":3
- }
- ]
- }
- ]
- },
+ "location": "l"
+ }
+ ],
+ "initial-locations": [
+ "l"
+ ],
+ "locations": [
{
- "location":"location",
- "action":"SyncWait",
- "guard":{
- "exp":{
- "op":"=",
- "left":"s",
- "right":5
- }
- },
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
- {
- "ref":"s",
- "value":6
- }
- ]
- }
- ]
- },
+ "name": "l"
+ }
+ ],
+ "name": "sender",
+ "variables": []
+ },
+ {
+ "edges": [
{
- "location":"location",
- "action":"SyncWait",
- "guard":{
- "exp":{
- "op":"=",
- "left":"s",
- "right":6
- }
- },
- "destinations":[
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"s",
- "value":0
+ "ref": "r_ab",
+ "value": "br"
},
{
- "ref":"s_ab",
- "value":false
+ "ref": "r",
+ "value": 2
}
- ]
+ ],
+ "location": "l"
}
- ]
- }
- ]
- },
- {
- "name":"receiver",
- "locations":[
- {
- "name":"location"
- }
- ],
- "initial-locations":[
- "location"
- ],
- "edges":[
- {
- "location":"location",
- "action":"SyncWait",
- "guard":{
- "exp":{
- "op":"=",
- "left":"r",
- "right":0
+ ],
+ "guard": {
+ "comment": "(r = 1)",
+ "exp": {
+ "left": "r",
+ "op": "=",
+ "right": 1
}
},
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
- {
- "ref":"r",
- "value":0
- }
- ]
- }
- ]
+ "location": "l"
},
{
- "location":"location",
- "action":"aG",
- "guard":{
- "exp":{
- "op":"=",
- "left":"r",
- "right":0
- }
- },
- "destinations":[
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"r",
- "value":1
+ "ref": "r",
+ "value": 3
},
{
- "ref":"fr",
- "value":"fs"
- },
- {
- "ref":"lr",
- "value":"ls"
- },
- {
- "ref":"br",
- "value":"bs"
+ "ref": "rrep",
+ "value": 1
+ }
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((((r = 2) & (r_ab = br)) & (fr = true)) & (lr = false))",
+ "exp": {
+ "left": {
+ "left": {
+ "left": {
+ "left": "r",
+ "op": "=",
+ "right": 2
+ },
+ "op": "∧",
+ "right": {
+ "left": "r_ab",
+ "op": "=",
+ "right": "br"
+ }
},
- {
- "ref":"recv",
- "value":"T"
+ "op": "∧",
+ "right": {
+ "left": "fr",
+ "op": "=",
+ "right": true
}
- ]
+ },
+ "op": "∧",
+ "right": {
+ "left": "lr",
+ "op": "=",
+ "right": false
+ }
}
- ]
+ },
+ "location": "l"
},
{
- "location":"location",
-            "action":"τ",
- "guard":{
- "exp":{
- "op":"=",
- "left":"r",
- "right":1
- }
- },
- "destinations":[
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"r",
- "value":2
+ "ref": "r",
+ "value": 3
},
{
- "ref":"r_ab",
- "value":"br"
+ "ref": "rrep",
+ "value": 2
}
- ]
- }
- ]
- },
- {
- "location":"location",
-            "action":"τ",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"r",
- "right":2
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((((r = 2) & (r_ab = br)) & (fr = false)) & (lr = false))",
+ "exp": {
+ "left": {
+ "left": {
+ "left": {
+ "left": "r",
+ "op": "=",
+ "right": 2
},
- "right":{
- "op":"=",
- "left":"r_ab",
- "right":"br"
+ "op": "∧",
+ "right": {
+ "left": "r_ab",
+ "op": "=",
+ "right": "br"
}
},
- "right":{
- "op":"=",
- "left":"fr",
- "right":true
+ "op": "∧",
+ "right": {
+ "left": "fr",
+ "op": "=",
+ "right": false
}
},
- "right":{
- "op":"=",
- "left":"lr",
- "right":false
+ "op": "∧",
+ "right": {
+ "left": "lr",
+ "op": "=",
+ "right": false
}
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"r",
- "value":3
+ "ref": "r",
+ "value": 3
},
{
- "ref":"rrep",
- "value":1
+ "ref": "rrep",
+ "value": 3
}
- ]
- }
- ]
- },
- {
- "location":"location",
-            "action":"τ",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"r",
- "right":2
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((((r = 2) & (r_ab = br)) & (fr = false)) & (lr = true))",
+ "exp": {
+ "left": {
+ "left": {
+ "left": {
+ "left": "r",
+ "op": "=",
+ "right": 2
},
- "right":{
- "op":"=",
- "left":"r_ab",
- "right":"br"
+ "op": "∧",
+ "right": {
+ "left": "r_ab",
+ "op": "=",
+ "right": "br"
}
},
- "right":{
- "op":"=",
- "left":"fr",
- "right":false
+ "op": "∧",
+ "right": {
+ "left": "fr",
+ "op": "=",
+ "right": false
}
},
- "right":{
- "op":"=",
- "left":"lr",
- "right":false
+ "op": "∧",
+ "right": {
+ "left": "lr",
+ "op": "=",
+ "right": true
}
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "SyncWait",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"r",
- "value":3
- },
- {
- "ref":"rrep",
- "value":2
+ "ref": "r",
+ "value": 0
}
- ]
+ ],
+ "location": "l"
}
- ]
+ ],
+ "guard": {
+ "comment": "(r = 0)",
+ "exp": {
+ "left": "r",
+ "op": "=",
+ "right": 0
+ }
+ },
+ "location": "l"
},
{
- "location":"location",
-            "action":"τ",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"r",
- "right":2
- },
- "right":{
- "op":"=",
- "left":"r_ab",
- "right":"br"
- }
- },
- "right":{
- "op":"=",
- "left":"fr",
- "right":false
+ "action": "SyncWait",
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "r",
+ "value": 5
}
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((r = 4) & (ls = true))",
+ "exp": {
+ "left": {
+ "left": "r",
+ "op": "=",
+ "right": 4
},
- "right":{
- "op":"=",
- "left":"lr",
- "right":true
+ "op": "∧",
+ "right": {
+ "left": "ls",
+ "op": "=",
+ "right": true
}
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "SyncWait",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"r",
- "value":3
+ "ref": "r",
+ "value": 5
},
{
- "ref":"rrep",
- "value":3
+ "ref": "rrep",
+ "value": 4
}
- ]
- }
- ]
- },
- {
- "location":"location",
- "action":"aA",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"r",
- "right":2
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((r = 4) & (ls = false))",
+ "exp": {
+ "left": {
+ "left": "r",
+ "op": "=",
+ "right": 4
},
- "right":{
- "op":"¬",
- "exp":{
- "op":"=",
- "left":"r_ab",
- "right":"br"
- }
+ "op": "∧",
+ "right": {
+ "left": "ls",
+ "op": "=",
+ "right": false
}
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "SyncWait",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
+ {
+ "ref": "r",
+ "value": 0
+ },
{
- "ref":"r",
- "value":4
+ "ref": "rrep",
+ "value": 0
}
- ]
+ ],
+ "location": "l"
}
- ]
- },
- {
- "location":"location",
- "action":"aA",
- "guard":{
- "exp":{
- "op":"=",
- "left":"r",
- "right":3
+ ],
+ "guard": {
+ "comment": "(r = 5)",
+ "exp": {
+ "left": "r",
+ "op": "=",
+ "right": 5
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "aA",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"r",
- "value":4
+ "ref": "r_ab",
+ "value": {
+ "exp": "r_ab",
+ "op": "¬"
+ }
},
{
- "ref":"r_ab",
- "value":{
- "op":"¬",
- "exp":"r_ab"
- }
+ "ref": "r",
+ "value": 4
}
- ]
+ ],
+ "location": "l"
}
- ]
+ ],
+ "guard": {
+ "comment": "(r = 3)",
+ "exp": {
+ "left": "r",
+ "op": "=",
+ "right": 3
+ }
+ },
+ "location": "l"
},
{
- "location":"location",
- "action":"aG",
- "guard":{
- "exp":{
- "op":"=",
- "left":"r",
- "right":4
+ "action": "aA",
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "r",
+ "value": 4
+ }
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "((r = 2) & !((r_ab = br)))",
+ "exp": {
+ "left": {
+ "left": "r",
+ "op": "=",
+ "right": 2
+ },
+ "op": "∧",
+ "right": {
+ "exp": {
+ "left": "r_ab",
+ "op": "=",
+ "right": "br"
+ },
+ "op": "¬"
+ }
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "aG",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"r",
- "value":2
+ "ref": "fr",
+ "value": "fs"
},
{
- "ref":"fr",
- "value":"fs"
+ "ref": "lr",
+ "value": "ls"
},
{
- "ref":"lr",
- "value":"ls"
+ "ref": "br",
+ "value": "bs"
},
{
- "ref":"br",
- "value":"bs"
+ "ref": "recv",
+ "value": "T"
},
{
- "ref":"recv",
- "value":"T"
+ "ref": "r",
+ "value": 1
}
- ]
+ ],
+ "location": "l"
}
- ]
- },
- {
- "location":"location",
- "action":"SyncWait",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"r",
- "right":4
- },
- "right":{
- "op":"=",
- "left":"ls",
- "right":true
- }
+ ],
+ "guard": {
+ "comment": "(r = 0)",
+ "exp": {
+ "left": "r",
+ "op": "=",
+ "right": 0
}
},
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
- {
- "ref":"r",
- "value":5
- }
- ]
- }
- ]
+ "location": "l"
},
{
- "location":"location",
- "action":"SyncWait",
- "guard":{
- "exp":{
- "op":"∧",
- "left":{
- "op":"=",
- "left":"r",
- "right":4
- },
- "right":{
- "op":"=",
- "left":"ls",
- "right":false
- }
- }
- },
- "destinations":[
+ "action": "aG",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"r",
- "value":5
+ "ref": "fr",
+ "value": "fs"
},
{
- "ref":"rrep",
- "value":4
- }
- ]
- }
- ]
- },
- {
- "location":"location",
- "action":"SyncWait",
- "guard":{
- "exp":{
- "op":"=",
- "left":"r",
- "right":5
- }
- },
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "ref": "lr",
+ "value": "ls"
+ },
+ {
+ "ref": "br",
+ "value": "bs"
+ },
{
- "ref":"r",
- "value":0
+ "ref": "recv",
+ "value": "T"
},
{
- "ref":"rrep",
- "value":0
+ "ref": "r",
+ "value": 2
}
- ]
+ ],
+ "location": "l"
}
- ]
+ ],
+ "guard": {
+ "comment": "(r = 4)",
+ "exp": {
+ "left": "r",
+ "op": "=",
+ "right": 4
+ }
+ },
+ "location": "l"
}
- ]
- },
- {
- "name":"checker",
- "locations":[
+ ],
+ "initial-locations": [
+ "l"
+ ],
+ "locations": [
{
- "name":"location"
+ "name": "l"
}
],
- "initial-locations":[
- "location"
- ],
- "edges":[
+ "name": "receiver",
+ "variables": []
+ },
+ {
+ "edges": [
{
- "location":"location",
- "action":"NewFile",
- "guard":{
- "exp":{
- "op":"=",
- "left":"T",
- "right":false
- }
- },
- "destinations":[
+ "action": "NewFile",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"T",
- "value":true
+ "ref": "T",
+ "value": true
}
- ]
+ ],
+ "location": "l"
}
- ]
+ ],
+ "guard": {
+ "comment": "(T = false)",
+ "exp": {
+ "left": "T",
+ "op": "=",
+ "right": false
+ }
+ },
+ "location": "l"
}
- ]
- },
- {
- "name":"channelK",
- "locations":[
+ ],
+ "initial-locations": [
+ "l"
+ ],
+ "locations": [
{
- "name":"location"
+ "name": "l"
}
],
- "initial-locations":[
- "location"
- ],
- "edges":[
+ "name": "checker",
+ "variables": []
+ },
+ {
+ "edges": [
{
- "location":"location",
- "action":"aF",
- "guard":{
- "exp":{
- "op":"=",
- "left":"k",
- "right":0
+ "action": "TO_Msg",
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "k",
+ "value": 0
+ }
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "(k = 2)",
+ "exp": {
+ "left": "k",
+ "op": "=",
+ "right": 2
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "aF",
+ "destinations": [
{
- "probability":{
- "exp":0.9800000
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"k",
- "value":1
+ "ref": "k",
+ "value": 1
}
- ]
+ ],
+ "location": "l",
+ "probability": {
+ "comment": "49/50",
+ "exp": 0.98
+ }
},
{
- "probability":{
- "exp":0.0200000
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"k",
- "value":2
+ "ref": "k",
+ "value": 2
}
- ]
+ ],
+ "location": "l",
+ "probability": {
+ "comment": "1/50",
+ "exp": 0.02
+ }
}
- ]
- },
- {
- "location":"location",
- "action":"aG",
- "guard":{
- "exp":{
- "op":"=",
- "left":"k",
- "right":1
+ ],
+ "guard": {
+ "comment": "(k = 0)",
+ "exp": {
+ "left": "k",
+ "op": "=",
+ "right": 0
}
},
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
- {
- "ref":"k",
- "value":0
- }
- ]
- }
- ]
+ "location": "l"
},
{
- "location":"location",
- "action":"TO_Msg",
- "guard":{
- "exp":{
- "op":"=",
- "left":"k",
- "right":2
- }
- },
- "destinations":[
+ "action": "aG",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"k",
- "value":0
+ "ref": "k",
+ "value": 0
}
- ]
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "(k = 1)",
+ "exp": {
+ "left": "k",
+ "op": "=",
+ "right": 1
}
- ]
+ },
+ "location": "l"
}
- ]
- },
- {
- "name":"channelL",
- "locations":[
+ ],
+ "initial-locations": [
+ "l"
+ ],
+ "locations": [
{
- "name":"location"
+ "name": "l"
}
],
- "initial-locations":[
- "location"
- ],
- "edges":[
+ "name": "channelK",
+ "variables": []
+ },
+ {
+ "edges": [
{
- "location":"location",
- "action":"aA",
- "guard":{
- "exp":{
- "op":"=",
- "left":"l",
- "right":0
+ "action": "TO_Ack",
+ "destinations": [
+ {
+ "assignments": [
+ {
+ "ref": "l",
+ "value": 0
+ }
+ ],
+ "location": "l"
+ }
+ ],
+ "guard": {
+ "comment": "(l = 2)",
+ "exp": {
+ "left": "l",
+ "op": "=",
+ "right": 2
}
},
- "destinations":[
+ "location": "l"
+ },
+ {
+ "action": "aA",
+ "destinations": [
{
- "probability":{
- "exp":0.9900000
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"l",
- "value":1
+ "ref": "l",
+ "value": 1
}
- ]
+ ],
+ "location": "l",
+ "probability": {
+ "comment": "99/100",
+ "exp": 0.99
+ }
},
{
- "probability":{
- "exp":0.0100000
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"l",
- "value":2
+ "ref": "l",
+ "value": 2
}
- ]
+ ],
+ "location": "l",
+ "probability": {
+ "comment": "1/100",
+ "exp": 0.01
+ }
}
- ]
- },
- {
- "location":"location",
- "action":"aB",
- "guard":{
- "exp":{
- "op":"=",
- "left":"l",
- "right":1
+ ],
+ "guard": {
+ "comment": "(l = 0)",
+ "exp": {
+ "left": "l",
+ "op": "=",
+ "right": 0
}
},
- "destinations":[
- {
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
- {
- "ref":"l",
- "value":0
- }
- ]
- }
- ]
+ "location": "l"
},
{
- "location":"location",
- "action":"TO_Ack",
- "guard":{
- "exp":{
- "op":"=",
- "left":"l",
- "right":2
- }
- },
- "destinations":[
+ "action": "aB",
+ "destinations": [
{
- "probability":{
- "exp":1
- },
- "location":"location",
- "assignments":[
+ "assignments": [
{
- "ref":"l",
- "value":0
+ "ref": "l",
+ "value": 0
}
- ]
+ ],
+ "location": "l"
}
- ]
+ ],
+ "guard": {
+ "comment": "(l = 1)",
+ "exp": {
+ "left": "l",
+ "op": "=",
+ "right": 1
+ }
+ },
+ "location": "l"
}
- ]
+ ],
+ "initial-locations": [
+ "l"
+ ],
+ "locations": [
+ {
+ "name": "l"
+ }
+ ],
+ "name": "channelL",
+ "variables": []
+ }
+ ],
+ "constants": [
+ {
+ "name": "N",
+ "type": "int"
+ },
+ {
+ "name": "MAX",
+ "type": "int"
}
],
- "system":{
- "elements":[
+ "features": [
+ "derived-operators"
+ ],
+ "jani-version": 1,
+ "name": "jani_from_prism",
+ "properties": [],
+ "restrict-initial": {
+ "exp": true
+ },
+ "system": {
+ "elements": [
{
- "automaton":"sender"
+ "automaton": "sender"
},
{
- "automaton":"receiver"
+ "automaton": "receiver"
},
{
- "automaton":"checker"
+ "automaton": "checker"
},
{
- "automaton":"channelK"
+ "automaton": "channelK"
},
{
- "automaton":"channelL"
+ "automaton": "channelL"
}
],
- "syncs":[
+ "syncs": [
{
- "synchronise":[
- "aB",
- null,
+ "result": "NewFile",
+ "synchronise": [
+ "NewFile",
null,
+ "NewFile",
null,
- "aB"
- ],
- "result":"aB"
+ null
+ ]
},
{
- "synchronise":[
- "TO_Ack",
- null,
+ "result": "SyncWait",
+ "synchronise": [
+ "SyncWait",
+ "SyncWait",
null,
null,
- "TO_Ack"
- ],
- "result":"TO_Ack"
+ null
+ ]
},
{
- "synchronise":[
- null,
- "aA",
- null,
+ "result": "TO_Ack",
+ "synchronise": [
+ "TO_Ack",
null,
- "aA"
- ],
- "result":"aA"
- },
- {
- "synchronise":[
- "aF",
null,
null,
- "aF",
- null
- ],
- "result":"aF"
+ "TO_Ack"
+ ]
},
{
- "synchronise":[
+ "result": "TO_Msg",
+ "synchronise": [
"TO_Msg",
null,
null,
"TO_Msg",
null
- ],
- "result":"TO_Msg"
+ ]
},
{
- "synchronise":[
+ "result": "aA",
+ "synchronise": [
null,
- "aG",
- null,
- "aG",
- null
- ],
- "result":"aG"
- },
- {
- "synchronise":[
- "NewFile",
+ "aA",
null,
- "NewFile",
null,
- null
- ],
- "result":"NewFile"
+ "aA"
+ ]
},
{
- "synchronise":[
- "SyncWait",
- "SyncWait",
+ "result": "aB",
+ "synchronise": [
+ "aB",
null,
null,
- null
- ],
- "result":"SyncWait"
+ null,
+ "aB"
+ ]
},
{
- "synchronise":[
-                "τ",
- null,
+ "result": "aF",
+ "synchronise": [
+ "aF",
null,
null,
+ "aF",
null
- ],
-            "result":"τ"
+ ]
},
{
- "synchronise":[
- null,
-                "τ",
+ "result": "aG",
+ "synchronise": [
null,
+ "aG",
null,
+ "aG",
null
- ],
-            "result":"τ"
+ ]
}
]
- }
+ },
+ "type": "dtmc",
+ "variables": [
+ {
+ "initial-value": 0,
+ "name": "s",
+ "type": {
+ "base": "int",
+ "kind": "bounded",
+ "lower-bound": 0,
+ "upper-bound": 6
+ }
+ },
+ {
+ "initial-value": 0,
+ "name": "srep",
+ "type": {
+ "base": "int",
+ "kind": "bounded",
+ "lower-bound": 0,
+ "upper-bound": 3
+ }
+ },
+ {
+ "initial-value": 0,
+ "name": "nrtr",
+ "type": {
+ "base": "int",
+ "kind": "bounded",
+ "lower-bound": 0,
+ "upper-bound": "MAX"
+ }
+ },
+ {
+ "initial-value": 0,
+ "name": "i",
+ "type": {
+ "base": "int",
+ "kind": "bounded",
+ "lower-bound": 0,
+ "upper-bound": "N"
+ }
+ },
+ {
+ "initial-value": false,
+ "name": "bs",
+ "type": "bool"
+ },
+ {
+ "initial-value": false,
+ "name": "s_ab",
+ "type": "bool"
+ },
+ {
+ "initial-value": false,
+ "name": "fs",
+ "type": "bool"
+ },
+ {
+ "initial-value": false,
+ "name": "ls",
+ "type": "bool"
+ },
+ {
+ "initial-value": 0,
+ "name": "r",
+ "type": {
+ "base": "int",
+ "kind": "bounded",
+ "lower-bound": 0,
+ "upper-bound": 5
+ }
+ },
+ {
+ "initial-value": 0,
+ "name": "rrep",
+ "type": {
+ "base": "int",
+ "kind": "bounded",
+ "lower-bound": 0,
+ "upper-bound": 4
+ }
+ },
+ {
+ "initial-value": false,
+ "name": "fr",
+ "type": "bool"
+ },
+ {
+ "initial-value": false,
+ "name": "lr",
+ "type": "bool"
+ },
+ {
+ "initial-value": false,
+ "name": "br",
+ "type": "bool"
+ },
+ {
+ "initial-value": false,
+ "name": "r_ab",
+ "type": "bool"
+ },
+ {
+ "initial-value": false,
+ "name": "recv",
+ "type": "bool"
+ },
+ {
+ "initial-value": false,
+ "name": "T",
+ "type": "bool"
+ },
+ {
+ "initial-value": 0,
+ "name": "k",
+ "type": {
+ "base": "int",
+ "kind": "bounded",
+ "lower-bound": 0,
+ "upper-bound": 2
+ }
+ },
+ {
+ "initial-value": 0,
+ "name": "l",
+ "type": {
+ "base": "int",
+ "kind": "bounded",
+ "lower-bound": 0,
+ "upper-bound": 2
+ }
+ }
+ ]
}
diff --git a/lib/stormpy/examples/files/mdp/maze_2.nm b/lib/stormpy/examples/files/mdp/maze_2.nm
new file mode 100644
index 0000000..396c48b
--- /dev/null
+++ b/lib/stormpy/examples/files/mdp/maze_2.nm
@@ -0,0 +1,126 @@
+
+
+// maze example (POMDP)
+// slightly extends that presented in
+// Littman, Cassandra and Kaelbling
+// Learning policies for partially observable environments: Scaling up
+// Technical Report CS, Brown University
+// gxn 29/01/16
+// Made into an MDP for the stormpy documentation.
+
+// state space (value of variable "s")
+
+// 0 1 2 3 4
+// 5 6 7
+// 8 9 10
+// 11 13 12
+
+// 13 is the target
+
+mdp
+
+
+module maze
+
+ s : [-1..13];
+
+ // initialisation
+ [] s=-1 -> 1/13 : (s'=0)
+ + 1/13 : (s'=1)
+ + 1/13 : (s'=2)
+ + 1/13 : (s'=3)
+ + 1/13 : (s'=4)
+ + 1/13 : (s'=5)
+ + 1/13 : (s'=6)
+ + 1/13 : (s'=7)
+ + 1/13 : (s'=8)
+ + 1/13 : (s'=9)
+ + 1/13 : (s'=10)
+ + 1/13 : (s'=11)
+ + 1/13 : (s'=12);
+
+ // moving around the maze
+
+ [east] s=0 -> (s'=1);
+ [west] s=0 -> (s'=0);
+ [north] s=0 -> (s'=0);
+ [south] s=0 -> (s'=5);
+
+ [east] s=1 -> (s'=2);
+ [west] s=1 -> (s'=0);
+ [north] s=1 -> (s'=1);
+ [south] s=1 -> (s'=1);
+
+ [east] s=2 -> (s'=3);
+ [west] s=2 -> (s'=1);
+ [north] s=2 -> (s'=2);
+ [south] s=2 -> (s'=6);
+
+ [east] s=3 -> (s'=4);
+ [west] s=3 -> (s'=2);
+ [north] s=3 -> (s'=3);
+ [south] s=3 -> (s'=3);
+
+ [east] s=4 -> (s'=4);
+ [west] s=4 -> (s'=3);
+ [north] s=4 -> (s'=4);
+ [south] s=4 -> (s'=7);
+
+ [east] s=5 -> (s'=5);
+ [west] s=5 -> (s'=5);
+ [north] s=5 -> (s'=0);
+ [south] s=5 -> (s'=8);
+
+ [east] s=6 -> (s'=6);
+ [west] s=6 -> (s'=6);
+ [north] s=6 -> (s'=2);
+ [south] s=6 -> (s'=9);
+
+ [east] s=7 -> (s'=7);
+ [west] s=7 -> (s'=7);
+ [north] s=7 -> (s'=4);
+ [south] s=7 -> (s'=10);
+
+ [east] s=8 -> (s'=8);
+ [west] s=8 -> (s'=8);
+ [north] s=8 -> (s'=5);
+ [south] s=8 -> (s'=11);
+
+ [east] s=9 -> (s'=9);
+ [west] s=9 -> (s'=9);
+ [north] s=9 -> (s'=6);
+ [south] s=9 -> (s'=13);
+
+ [east] s=10 -> (s'=9);
+ [west] s=10 -> (s'=9);
+ [north] s=10 -> (s'=7);
+ [south] s=10 -> (s'=12);
+ [east] s=11 -> (s'=11);
+ [west] s=11 -> (s'=11);
+ [north] s=11 -> (s'=8);
+ [south] s=11 -> (s'=11);
+
+ [east] s=12 -> (s'=12);
+ [west] s=12 -> (s'=12);
+ [north] s=12 -> (s'=10);
+ [south] s=12 -> (s'=12);
+
+ // loop when we reach the target
+ [done] s=13 -> true;
+
+endmodule
+
+// reward structure (number of steps to reach the target)
+rewards
+
+ [east] true : 1;
+ [west] true : 1;
+ [north] true : 1;
+ [south] true : 1;
+
+endrewards
+
+// target label
+label "goal" = s=13;
+
+
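For orientation, a minimal Python sketch of how this new MDP example can be driven through the stormpy bindings; the file path matches the file added above, while the property string and the use of the default (unnamed) reward structure are illustrative assumptions, not part of the patch:

    import stormpy

    # parse the example file shipped by this patch and a reachability-reward query
    program = stormpy.parse_prism_program("lib/stormpy/examples/files/mdp/maze_2.nm")
    properties = stormpy.parse_properties('Rmin=? [ F "goal" ]', program)

    # build only what the property needs and check it
    model = stormpy.build_model(program, properties)
    result = stormpy.model_checking(model, properties[0])
    print(result.at(model.initial_states[0]))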
diff --git a/lib/stormpy/examples/files/pomdp/maze_2.prism b/lib/stormpy/examples/files/pomdp/maze_2.prism
new file mode 100644
index 0000000..f89d179
--- /dev/null
+++ b/lib/stormpy/examples/files/pomdp/maze_2.prism
@@ -0,0 +1,139 @@
+
+
+// maze example (POMDP)
+// slightly extends that presented in
+// Littman, Cassandra and Kaelbling
+// Learning policies for partially observable environments: Scaling up
+// Technical Report CS, Brown University
+// gxn 29/01/16
+
+// state space (value of variable "s")
+
+// 0 1 2 3 4
+// 5 6 7
+// 8 9 10
+// 11 13 12
+
+// 13 is the target
+
+pomdp
+
+// can observe the walls and target
+observables
+ o
+endobservables
+// o=0 - observation in initial state
+// o=1 - west and north walls (s0)
+// o=2 - north and south walls (s1 and s3)
+// o=3 - north wall (s2)
+// o=4 - east and north walls (s4)
+// o=5 - east and west walls (s5, s6, s7, s8, s9 and s10)
+// o=6 - east, west and south walls (s11 and s12)
+// o=7 - the target (s13)
+
+module maze
+
+ s : [-1..13];
+ o : [0..7];
+
+ // initialisation
+ [] s=-1 -> 1/13 : (s'=0) & (o'=1)
+ + 1/13 : (s'=1) & (o'=2)
+ + 1/13 : (s'=2) & (o'=3)
+ + 1/13 : (s'=3) & (o'=2)
+ + 1/13 : (s'=4) & (o'=4)
+ + 1/13 : (s'=5) & (o'=5)
+ + 1/13 : (s'=6) & (o'=5)
+ + 1/13 : (s'=7) & (o'=5)
+ + 1/13 : (s'=8) & (o'=5)
+ + 1/13 : (s'=9) & (o'=5)
+ + 1/13 : (s'=10) & (o'=5)
+ + 1/13 : (s'=11) & (o'=6)
+ + 1/13 : (s'=12) & (o'=6);
+
+ // moving around the maze
+
+ [east] s=0 -> (s'=1) & (o'=2);
+ [west] s=0 -> (s'=0);
+ [north] s=0 -> (s'=0);
+ [south] s=0 -> (s'=5) & (o'=5);
+
+ [east] s=1 -> (s'=2) & (o'=3);
+ [west] s=1 -> (s'=0) & (o'=1);
+ [north] s=1 -> (s'=1);
+ [south] s=1 -> (s'=1);
+
+ [east] s=2 -> (s'=3) & (o'=2);
+ [west] s=2 -> (s'=1) & (o'=2);
+ [north] s=2 -> (s'=2);
+ [south] s=2 -> (s'=6) & (o'=5);
+
+ [east] s=3 -> (s'=4) & (o'=4);
+ [west] s=3 -> (s'=2) & (o'=2);
+ [north] s=3 -> (s'=3);
+ [south] s=3 -> (s'=3);
+
+ [east] s=4 -> (s'=4);
+ [west] s=4 -> (s'=3) & (o'=2);
+ [north] s=4 -> (s'=4);
+ [south] s=4 -> (s'=7) & (o'=5);
+
+ [east] s=5 -> (s'=5);
+ [west] s=5 -> (s'=5);
+ [north] s=5 -> (s'=0) & (o'=1);
+ [south] s=5 -> (s'=8);
+
+ [east] s=6 -> (s'=6);
+ [west] s=6 -> (s'=6);
+ [north] s=6 -> (s'=2) & (o'=3);
+ [south] s=6 -> (s'=9);
+
+ [east] s=7 -> (s'=7);
+ [west] s=7 -> (s'=7);
+ [north] s=7 -> (s'=4) & (o'=4);
+ [south] s=7 -> (s'=10);
+
+ [east] s=8 -> (s'=8);
+ [west] s=8 -> (s'=8);
+ [north] s=8 -> (s'=5);
+ [south] s=8 -> (s'=11) & (o'=6);
+
+ [east] s=9 -> (s'=9);
+ [west] s=9 -> (s'=9);
+ [north] s=9 -> (s'=6);
+ [south] s=9 -> (s'=13) & (o'=7);
+
+ [east] s=10 -> (s'=9);
+ [west] s=10 -> (s'=9);
+ [north] s=10 -> (s'=7);
+ [south] s=10 -> (s'=12) & (o'=6);
+
+ [east] s=11 -> (s'=11);
+ [west] s=11 -> (s'=11);
+ [north] s=11 -> (s'=8) & (o'=5);
+ [south] s=11 -> (s'=11);
+
+ [east] s=12 -> (s'=12);
+ [west] s=12 -> (s'=12);
+ [north] s=12 -> (s'=10) & (o'=5);
+ [south] s=12 -> (s'=12);
+
+ // loop when we reach the target
+ [done] s=13 -> true;
+
+endmodule
+
+// reward structure (number of steps to reach the target)
+rewards
+
+ [east] true : 1;
+ [west] true : 1;
+ [north] true : 1;
+ [south] true : 1;
+
+endrewards
+
+// target observation
+label "goal" = o=7;
+
+
diff --git a/setup.py b/setup.py
index 6aba669..de75c00 100755
--- a/setup.py
+++ b/setup.py
@@ -3,9 +3,8 @@ import sys
import subprocess
import datetime
-from setuptools import setup, Extension
+from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
-from setuptools.command.test import test
from distutils.version import StrictVersion
import setup.helper as setup_helper
@@ -15,7 +14,11 @@ if sys.version_info[0] == 2:
sys.exit('Sorry, Python 2.x is not supported')
# Minimal storm version required
-storm_min_version = "1.2.0"
+storm_min_version = "1.3.1"
+
+# Get the long description from the README file
+with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md'), encoding='utf-8') as f:
+ long_description = f.read()
class CMakeExtension(Extension):
@@ -31,7 +34,7 @@ class CMakeBuild(build_ext):
('disable-dft', None, 'Disable support for DFTs'),
('disable-pars', None, 'Disable support for parametric models'),
('debug', None, 'Build in Debug mode'),
- ('jobs=', 'j', 'Number of jobs to use for compiling'),
+ ('jobs=', 'j', 'Number of jobs to use for compiling')
]
config = SetupConfig()
@@ -47,15 +50,16 @@ class CMakeBuild(build_ext):
", ".join(e.name for e in self.extensions))
# Build cmake version info
+ print("Stormpy - Building into {}".format(self.build_temp))
build_temp_version = self.build_temp + "-version"
setup_helper.ensure_dir_exists(build_temp_version)
# Write config
- setup_helper.ensure_dir_exists("build")
- self.config.write_config("build/build_config.cfg")
+ setup_helper.ensure_dir_exists(self.build_temp)
+ self.config.write_config(os.path.join(self.build_temp, "build_config.cfg"))
cmake_args = []
- storm_dir = self.config.get_as_string("storm_dir")
+ storm_dir = os.path.expanduser(self.config.get_as_string("storm_dir"))
if storm_dir:
cmake_args += ['-Dstorm_DIR=' + storm_dir]
_ = subprocess.check_output(['cmake', os.path.abspath("cmake")] + cmake_args, cwd=build_temp_version)
@@ -187,20 +191,12 @@ class CMakeBuild(build_ext):
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
setup_helper.ensure_dir_exists(self.build_temp)
- print("Pycarl - CMake args={}".format(cmake_args))
+ print("Stormpy - CMake args={}".format(cmake_args))
# Call cmake
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.', '--target', ext.name] + build_args, cwd=self.build_temp)
-class PyTest(test):
- def run_tests(self):
- # import here, cause outside the eggs aren't loaded
- import pytest
- errno = pytest.main(['tests'])
- sys.exit(errno)
-
-
setup(
name="stormpy",
version=setup_helper.obtain_version(),
@@ -208,12 +204,25 @@ setup(
author_email="matthias.volk@cs.rwth-aachen.de",
maintainer="S. Junges",
maintainer_email="sebastian.junges@cs.rwth-aachen.de",
- url="http://moves.rwth-aachen.de",
+ url="https://github.com/moves-rwth/stormpy/",
description="stormpy - Python Bindings for Storm",
- long_description='',
- packages=['stormpy', 'stormpy.info', 'stormpy.logic', 'stormpy.storage', 'stormpy.utility',
- 'stormpy.pars', 'stormpy.dft'],
+ long_description=long_description,
+ long_description_content_type='text/markdown',
+ project_urls={
+ 'Documentation': 'https://moves-rwth.github.io/stormpy/',
+ 'Source': 'https://github.com/moves-rwth/stormpy/',
+ 'Bug reports': 'https://github.com/moves-rwth/stormpy/issues',
+ },
+ classifiers=[
+ 'Intended Audience :: Science/Research',
+ 'Topic :: Scientific/Engineering',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ ],
+
+ packages=find_packages('lib'),
package_dir={'': 'lib'},
+ include_package_data=True,
+ package_data={'stormpy.examples': ['examples/files/*']},
ext_package='stormpy',
ext_modules=[CMakeExtension('core', subdir=''),
CMakeExtension('info', subdir='info'),
@@ -221,11 +230,11 @@ setup(
CMakeExtension('storage', subdir='storage'),
CMakeExtension('utility', subdir='utility'),
CMakeExtension('dft', subdir='dft'),
- CMakeExtension('pars', subdir='pars'),
- ],
- cmdclass={'build_ext': CMakeBuild, 'test': PyTest},
+ CMakeExtension('pars', subdir='pars')],
+
+ cmdclass={'build_ext': CMakeBuild},
zip_safe=False,
- install_requires=['pycarl>=2.0.2'],
+ install_requires=['pycarl>=2.0.3'],
setup_requires=['pytest-runner'],
tests_require=['pytest'],
python_requires='>=3',
diff --git a/src/core/bisimulation.cpp b/src/core/bisimulation.cpp
index 7feddd0..6926791 100644
--- a/src/core/bisimulation.cpp
+++ b/src/core/bisimulation.cpp
@@ -1,4 +1,11 @@
#include "bisimulation.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+
+template
+std::shared_ptr> performBisimulationMinimization(std::shared_ptr> const& model, std::vector> const& formulas, storm::storage::BisimulationType const& bisimulationType = storm::storage::BisimulationType::Strong) {
+ return storm::api::performBisimulationMinimization(model, formulas, bisimulationType, storm::dd::bisimulation::SignatureMode::Eager);
+}
// Define python bindings
void define_bisimulation(py::module& m) {
@@ -6,6 +13,8 @@ void define_bisimulation(py::module& m) {
// Bisimulation
m.def("_perform_bisimulation", &storm::api::performBisimulationMinimization, "Perform bisimulation", py::arg("model"), py::arg("formulas"), py::arg("bisimulation_type"));
m.def("_perform_parametric_bisimulation", &storm::api::performBisimulationMinimization, "Perform bisimulation on parametric model", py::arg("model"), py::arg("formulas"), py::arg("bisimulation_type"));
+ m.def("_perform_symbolic_bisimulation", &performBisimulationMinimization, "Perform bisimulation", py::arg("model"), py::arg("formulas"), py::arg("bisimulation_type"));
+ m.def("_perform_symbolic_parametric_bisimulation", &performBisimulationMinimization, "Perform bisimulation on parametric model", py::arg("model"), py::arg("formulas"), py::arg("bisimulation_type"));
// BisimulationType
py::enum_(m, "BisimulationType", "Types of bisimulation")
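A minimal sketch of how bisimulation minimisation is invoked from the Python side; the sparse call below uses the existing high-level wrapper, and the new _perform_symbolic_bisimulation binding is expected to be reachable through an analogous wrapper for models built with build_symbolic_model (that wrapper name is an assumption):

    # program, properties: as in the maze_2.nm sketch above
    model = stormpy.build_model(program, properties)
    # strong bisimulation on the sparse model; the patch additionally exposes the
    # symbolic counterpart via _perform_symbolic_bisimulation
    minimized = stormpy.perform_bisimulation(model, properties, stormpy.BisimulationType.STRONG)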
diff --git a/src/core/core.cpp b/src/core/core.cpp
index b4b5de6..b5021ae 100644
--- a/src/core/core.cpp
+++ b/src/core/core.cpp
@@ -2,15 +2,30 @@
#include "storm/utility/initialize.h"
#include "storm/utility/DirectEncodingExporter.h"
#include "storm/storage/ModelFormulasPair.h"
+#include "storm/storage/dd/DdType.h"
+#include "storm/storage/jani/Property.h"
#include "storm/solver/OptimizationDirection.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+#include "storm-parsers/api/storm-parsers.h"
+#include "storm-counterexamples/settings/modules/CounterexampleGeneratorSettings.h"
void define_core(py::module& m) {
// Init
m.def("_set_up", [](std::string const& args) {
storm::utility::setUp();
storm::settings::initializeAll("StoRM-Py", "stormpy");
+ storm::settings::addModule();
storm::settings::SettingsManager::manager().setFromString(args);
}, "Initialize Storm", py::arg("arguments"));
+
+ m.def("_set_loglevel_debug", []() {
+ storm::utility::setLogLevel(l3pp::LogLevel::DEBUG);
+ }, "set loglevel for storm to debug");
+ m.def("_set_loglevel_trace", []() {
+ storm::utility::setLogLevel(l3pp::LogLevel::TRACE);
+ });
+
+
}
void define_parse(py::module& m) {
@@ -35,6 +50,8 @@ void define_parse(py::module& m) {
:return: A list of properties
)dox", py::arg("formula_string"), py::arg("prism_program"), py::arg("property_filter") = boost::none);
+ m.def("parse_properties_for_jani_model", &storm::api::parsePropertiesForJaniModel, py::arg("formula_string"), py::arg("jani_model"), py::arg("property_filter") = boost::none);
+
// Pair
py::class_(m, "ModelFormulasPair", "Pair of model and formulas")
.def_property_readonly("model", [](storm::storage::ModelFormulasPair const& pair) {
@@ -46,9 +63,9 @@ void define_parse(py::module& m) {
;
}
-// Thin wrapper for model building using one formula as argument
+// Thin wrapper for model building using sparse representation
template
-std::shared_ptr buildSparseModel(storm::storage::SymbolicModelDescription const& modelDescription, std::vector> const& formulas, bool jit = false, bool doctor = false) {
+std::shared_ptr> buildSparseModel(storm::storage::SymbolicModelDescription const& modelDescription, std::vector> const& formulas, bool jit = false, bool doctor = false) {
if (formulas.empty()) {
// Build all labels and rewards
storm::builder::BuilderOptions options(true, true);
@@ -59,27 +76,55 @@ std::shared_ptr buildSparseModel(storm::storage::Symbo
}
}
+template
+std::shared_ptr buildSparseModelWithOptions(storm::storage::SymbolicModelDescription const& modelDescription, storm::builder::BuilderOptions const& options, bool jit = false, bool doctor = false) {
+ return storm::api::buildSparseModel(modelDescription, options, jit, doctor);
+}
+
+// Thin wrapper for model building using symbolic representation
+template
+std::shared_ptr> buildSymbolicModel(storm::storage::SymbolicModelDescription const& modelDescription, std::vector> const& formulas) {
+ if (formulas.empty()) {
+ // Build full model
+ return storm::api::buildSymbolicModel(modelDescription, formulas, true);
+ } else {
+ // Only build labels necessary for formulas
+ return storm::api::buildSymbolicModel(modelDescription, formulas, false);
+ }
+}
+
void define_build(py::module& m) {
// Build model
- m.def("_build_sparse_model_from_prism_program", &buildSparseModel, "Build the model", py::arg("model_description"), py::arg("formulas") = std::vector>(), py::arg("use_jit") = false, py::arg("doctor") = false);
- m.def("_build_sparse_parametric_model_from_prism_program", &buildSparseModel, "Build the parametric model", py::arg("model_description"), py::arg("formulas") = std::vector>(), py::arg("use_jit") = false, py::arg("doctor") = false);
+ m.def("_build_sparse_model_from_symbolic_description", &buildSparseModel, "Build the model in sparse representation", py::arg("model_description"), py::arg("formulas") = std::vector>(), py::arg("use_jit") = false, py::arg("doctor") = false);
+ m.def("_build_sparse_parametric_model_from_symbolic_description", &buildSparseModel, "Build the parametric model in sparse representation", py::arg("model_description"), py::arg("formulas") = std::vector>(), py::arg("use_jit") = false, py::arg("doctor") = false);
+ m.def("build_sparse_model_with_options", &buildSparseModelWithOptions, "Build the model in sparse representation", py::arg("model_description"), py::arg("options"), py::arg("use_jit") = false, py::arg("doctor") = false);
+ m.def("_build_symbolic_model_from_symbolic_description", &buildSymbolicModel, "Build the model in symbolic representation", py::arg("model_description"), py::arg("formulas") = std::vector>());
+ m.def("_build_symbolic_parametric_model_from_symbolic_description", &buildSymbolicModel, "Build the parametric model in symbolic representation", py::arg("model_description"), py::arg("formulas") = std::vector>());
m.def("_build_sparse_model_from_drn", &storm::api::buildExplicitDRNModel, "Build the model from DRN", py::arg("file"));
m.def("_build_sparse_parametric_model_from_drn", &storm::api::buildExplicitDRNModel, "Build the parametric model from DRN", py::arg("file"));
m.def("build_sparse_model_from_explicit", &storm::api::buildExplicitModel, "Build the model model from explicit input", py::arg("transition_file"), py::arg("labeling_file"), py::arg("state_reward_file") = "", py::arg("transition_reward_file") = "", py::arg("choice_labeling_file") = "");
+
+ py::class_(m, "BuilderOptions", "Options for building process")
+ .def(py::init> const&>(), "Initialise with formulae to preserve", py::arg("formulae"))
+ .def(py::init(), "Initialise without formulae", py::arg("build_all_reward_models"), py::arg("build_all_labels"))
+ .def_property_readonly("preserved_label_names", &storm::builder::BuilderOptions::getLabelNames, "Labels preserved")
+ .def("set_build_with_choice_origins", &storm::builder::BuilderOptions::setBuildChoiceOrigins, "Build choice origins", py::arg("new_value")=true)
+ .def("set_add_out_of_bounds_state", &storm::builder::BuilderOptions::setAddOutOfBoundsState, "Build with out of bounds state", py::arg("new_value")=true)
+ .def("set_add_overlapping_guards_label", &storm::builder::BuilderOptions::setAddOverlappingGuardsLabel, "Build with overlapping guards state labeled", py::arg("new_value")=true);
}
void define_optimality_type(py::module& m) {
py::enum_(m, "OptimizationDirection")
- .value("Minimize", storm::solver::OptimizationDirection::Minimize)
- .value("Maximize", storm::solver::OptimizationDirection::Maximize)
- ;
+ .value("Minimize", storm::solver::OptimizationDirection::Minimize)
+ .value("Maximize", storm::solver::OptimizationDirection::Maximize)
+ ;
}
// Thin wrapper for exporting model
template
void exportDRN(std::shared_ptr> model, std::string const& file) {
- std::ofstream stream;
+ std::ofstream stream;
storm::utility::openFile(file, stream);
storm::exporter::explicitExportSparseModel(stream, model, {});
storm::utility::closeFile(stream);
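The BuilderOptions class bound above is meant to be used from Python roughly as follows; this is a sketch under the assumption that a parsed Prism program converts implicitly to a SymbolicModelDescription:

    # program, properties: as in the maze_2.nm sketch above
    options = stormpy.BuilderOptions([p.raw_formula for p in properties])
    options.set_build_with_choice_origins(True)      # keep the origin of each choice
    options.set_add_overlapping_guards_label(True)   # label states with overlapping guards
    model = stormpy.build_sparse_model_with_options(program, options)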
diff --git a/src/core/core.h b/src/core/core.h
index 51bc696..cf5a0f9 100644
--- a/src/core/core.h
+++ b/src/core/core.h
@@ -1,5 +1,4 @@
-#ifndef PYTHON_CORE_CORE_H_
-#define PYTHON_CORE_CORE_H_
+#pragma once
#include "common.h"
@@ -7,6 +6,4 @@ void define_core(py::module& m);
void define_parse(py::module& m);
void define_build(py::module& m);
void define_export(py::module& m);
-void define_optimality_type(py::module& m);
-
-#endif /* PYTHON_CORE_CORE_H_ */
+void define_optimality_type(py::module& m);
\ No newline at end of file
diff --git a/src/core/counterexample.cpp b/src/core/counterexample.cpp
new file mode 100644
index 0000000..b4cb1e2
--- /dev/null
+++ b/src/core/counterexample.cpp
@@ -0,0 +1,63 @@
+#include
+
+#include "counterexample.h"
+#include "storm/environment/Environment.h"
+#include "storm-counterexamples/api/counterexamples.h"
+
+
+using namespace storm::counterexamples;
+
+// Define python bindings
+void define_counterexamples(py::module& m) {
+
+
+ py::class_>(m, "FlatSet", "Container to pass to program")
+ .def(py::init<>())
+ .def(py::init>(), "other"_a)
+ .def("insert", [](boost::container::flat_set& flatset, uint64_t value) {flatset.insert(value);})
+ .def("is_subset_of", [](boost::container::flat_set const& left, boost::container::flat_set const& right) {return std::includes(right.begin(), right.end(), left.begin(), left.end()); })
+ .def("insert_set", [](boost::container::flat_set& left, boost::container::flat_set const& additional) { for(auto const& i : additional) {left.insert(i);}})
+ .def("__str__", [](boost::container::flat_set const& set) { std::stringstream str; str << "["; for(auto const& i : set) { str << i << ", ";} str << "]"; return str.str(); })
+ .def("__len__", [](boost::container::flat_set const& set) { return set.size();})
+ .def("__iter__", [](boost::container::flat_set &v) {
+ return py::make_iterator(v.begin(), v.end());
+ }, py::keep_alive<0, 1>()) /* Keep vector alive while iterator is used */
+ ;
+
+ using CexGeneratorStats = SMTMinimalLabelSetGenerator::GeneratorStats;
+
+ py::class_(m, "SMTCounterExampleGeneratorStats", "Stats for highlevel counterexample generation")
+ .def(py::init<>())
+ .def_readonly("analysis_time", &CexGeneratorStats::analysisTime)
+ .def_readonly("setup_time", &CexGeneratorStats::setupTime)
+ .def_readonly("model_checking_time", &CexGeneratorStats::modelCheckingTime)
+ .def_readonly("solver_time", &CexGeneratorStats::solverTime)
+ .def_readonly("cut_time", &CexGeneratorStats::cutTime)
+ .def_readonly("iterations", &CexGeneratorStats::iterations);
+
+ using CexGeneratorOptions = SMTMinimalLabelSetGenerator::Options;
+ py::class_(m, "SMTCounterExampleGeneratorOptions", "Options for highlevel counterexample generation")
+ .def(py::init<>())
+ .def_readwrite("check_threshold_feasible", &CexGeneratorOptions::checkThresholdFeasible)
+ .def_readwrite("encode_reachability", &CexGeneratorOptions::encodeReachability)
+ .def_readwrite("silent", &CexGeneratorOptions::silent)
+ .def_readwrite("add_backward_implication_cuts", &CexGeneratorOptions::addBackwardImplicationCuts)
+ .def_readwrite("use_dynamic_constraints", &CexGeneratorOptions::useDynamicConstraints)
+ .def_readwrite("maximum_counterexamples", &CexGeneratorOptions::maximumCounterexamples)
+ .def_readwrite("continue_after_first_counterexample", &CexGeneratorOptions::continueAfterFirstCounterexampleUntil)
+ .def_readwrite("maximum_iterations_after_counterexample", &CexGeneratorOptions::maximumExtraIterations)
+ ;
+ py::class_>(m, "SMTCounterExampleGenerator", "Highlevel Counterexample Generator with SMT as backend").
+ def_static("precompute", &SMTMinimalLabelSetGenerator::precompute, "Precompute input for counterexample generation", py::arg("env"), py::arg("symbolic_model"), py::arg("model"), py::arg("formula")).
+ def_static("build", &SMTMinimalLabelSetGenerator::computeCounterexampleLabelSet, "Compute counterexample", py::arg("env"), py::arg("stats"), py::arg("symbolic_model"), py::arg("model"), py::arg("cex_input"), py::arg("dontcare"), py::arg("options"))
+
+
+ ;
+
+ using CexInput = SMTMinimalLabelSetGenerator::CexInput;
+ py::class_(m, "SMTCounterExampleInput", "Precomputed input for counterexample generation")
+ .def("add_reward_and_threshold", &CexInput::addRewardThresholdCombination, "add another reward structure and threshold", py::arg("reward_name"), py::arg("threshold"));
+
+
+
+}
diff --git a/src/core/counterexample.h b/src/core/counterexample.h
new file mode 100644
index 0000000..aa82a95
--- /dev/null
+++ b/src/core/counterexample.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#include "common.h"
+
+void define_counterexamples(py::module& m);
diff --git a/src/core/environment.cpp b/src/core/environment.cpp
index 23ad447..f9c1596 100644
--- a/src/core/environment.cpp
+++ b/src/core/environment.cpp
@@ -1,10 +1,52 @@
#include "environment.h"
#include "src/helpers.h"
#include "storm/environment/Environment.h"
+#include "storm/environment/solver/SolverEnvironment.h"
+#include "storm/environment/solver/EigenSolverEnvironment.h"
+#include "storm/environment/solver/GmmxxSolverEnvironment.h"
+#include "storm/environment/solver/GameSolverEnvironment.h"
+#include "storm/environment/solver/NativeSolverEnvironment.h"
+#include "storm/environment/solver/TopologicalSolverEnvironment.h"
+#include "storm/environment/solver/MultiplierEnvironment.h"
+#include "storm/environment/solver/MinMaxSolverEnvironment.h"
void define_environment(py::module& m) {
+ py::enum_(m, "EquationSolverType", "Solver type for equation systems")
+ .value("native", storm::solver::EquationSolverType::Native)
+ .value("eigen", storm::solver::EquationSolverType::Eigen)
+ .value("elimination", storm::solver::EquationSolverType::Elimination)
+ .value("gmmxx", storm::solver::EquationSolverType::Gmmxx)
+ .value("topological", storm::solver::EquationSolverType::Topological)
+ ;
+
+ py::enum_(m, "MinMaxMethod", "Method for min-max equation systems")
+ .value("policy_iteration", storm::solver::MinMaxMethod::PolicyIteration)
+ .value("value_iteration", storm::solver::MinMaxMethod::ValueIteration)
+ .value("linear_programming", storm::solver::MinMaxMethod::LinearProgramming)
+ .value("topological", storm::solver::MinMaxMethod::Topological)
+ .value("rational_search", storm::solver::MinMaxMethod::RationalSearch)
+ .value("interval_iteration", storm::solver::MinMaxMethod::IntervalIteration)
+ .value("sound_value_iteration", storm::solver::MinMaxMethod::SoundValueIteration)
+ .value("topological_cuda", storm::solver::MinMaxMethod::TopologicalCuda)
+ ;
+
+
py::class_(m, "Environment", "Environment")
.def(py::init<>(), "Construct default environment")
+ .def_property_readonly("solver_environment", [](storm::Environment& env) -> auto& {return env.solver();}, "solver part of environment")
;
+
+ py::class_(m, "SolverEnvironment", "Environment for solvers")
+ .def("set_force_sound", &storm::SolverEnvironment::setForceSoundness, "force soundness", py::arg("new_value") = true)
+ .def("set_linear_equation_solver_type", &storm::SolverEnvironment::setLinearEquationSolverType, "set solver type to use", py::arg("new_value"), py::arg("set_from_default") = false)
+ .def_property_readonly("minmax_solver_environment", [](storm::SolverEnvironment& senv) -> auto& { return senv.minMax(); })
+ ;
+
+ py::class_(m, "MinMaxSolverEnvironment", "Environment for Min-Max-Solvers")
+ .def_property("method", &storm::MinMaxSolverEnvironment::getMethod, [](storm::MinMaxSolverEnvironment& mmenv, storm::solver::MinMaxMethod const& m) { mmenv.setMethod(m, false); } )
+ .def_property("precision", &storm::MinMaxSolverEnvironment::getPrecision, &storm::MinMaxSolverEnvironment::setPrecision);
+
+
+
}
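A short sketch of configuring the solver environment exposed here; all names are taken directly from the bindings above:

    import stormpy

    env = stormpy.Environment()
    env.solver_environment.set_linear_equation_solver_type(stormpy.EquationSolverType.native)
    env.solver_environment.minmax_solver_environment.method = stormpy.MinMaxMethod.policy_iteration
    # the configured environment can then be passed to the model-checking bindings
    # added in src/core/modelchecking.cpp below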
diff --git a/src/core/input.cpp b/src/core/input.cpp
index 1ce6a5a..3debf33 100644
--- a/src/core/input.cpp
+++ b/src/core/input.cpp
@@ -1,9 +1,12 @@
#include "input.h"
#include "src/helpers.h"
+#include "storm-parsers/api/storm-parsers.h"
+#include "storm/storage/jani/Property.h"
void define_property(py::module& m) {
py::class_(m, "Property", "Property")
- .def(py::init const&, std::string const&>(), "Construct property from formula", py::arg("name"), py::arg("formula"), py::arg("comment") = "")
+ .def(py::init const&, std::set const&, std::string const&>(), "Construct property from formula", py::arg("name"), py::arg("formula"), py::arg("undefined_constants") = std::set(), py::arg("comment") = "")
+ .def(py::init())
.def_property_readonly("name", &storm::jani::Property::getName, "Obtain the name of the property")
.def_property_readonly("raw_formula", &storm::jani::Property::getRawFormula, "Obtain the formula directly")
.def("__str__", &streamToString)
@@ -15,9 +18,11 @@ void define_property(py::module& m) {
void define_input(py::module& m) {
// Parse Prism program
- m.def("parse_prism_program", &storm::api::parseProgram, "Parse Prism program", py::arg("path"), py::arg("prism_compat") = false);
+ m.def("parse_prism_program", &storm::api::parseProgram, "Parse Prism program", py::arg("path"), py::arg("prism_compat") = false, py::arg("simplify") = true);
// Parse Jani model
- m.def("parse_jani_model", &storm::api::parseJaniModel, "Parse Jani model", py::arg("path"));
+ m.def("parse_jani_model", [](std::string const& path){
+ return storm::api::parseJaniModel(path);
+ }, "Parse Jani model", py::arg("path"));
// JaniType
py::enum_(m, "JaniModelType", "Type of the Jani model")
@@ -36,14 +41,6 @@ void define_input(py::module& m) {
.value("UNDEFINED", storm::jani::ModelType::UNDEFINED)
;
- // Jani Model
- py::class_(m, "JaniModel", "Jani Model")
- .def_property_readonly("name", &storm::jani::Model::getName, "Name of the jani model")
- .def_property_readonly("model_type", &storm::jani::Model::getModelType, "Model type")
- .def_property_readonly("has_undefined_constants", &storm::jani::Model::hasUndefinedConstants, "Flag if Jani model has undefined constants")
- .def_property_readonly("undefined_constants_are_graph_preserving", &storm::jani::Model::undefinedConstantsAreGraphPreserving, "Flag if the undefined constants do not change the graph structure")
- ;
-
// SymbolicModelDescription
py::class_(m, "SymbolicModelDescription", "Symbolic description of model")
.def(py::init(), "Construct from Prism program", py::arg("prism_program"))
diff --git a/src/core/modelchecking.cpp b/src/core/modelchecking.cpp
index 555a0fd..af25a26 100644
--- a/src/core/modelchecking.cpp
+++ b/src/core/modelchecking.cpp
@@ -1,15 +1,29 @@
#include "modelchecking.h"
-#include "storm/api/storm.h"
-#include "storm/environment/Environment.h"
+#include "result.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+#include "storm/modelchecker/results/CheckResult.h"
#include "storm/modelchecker/csl/helper/SparseCtmcCslHelper.h"
+#include "storm/environment/Environment.h"
template
using CheckTask = storm::modelchecker::CheckTask;
-// Thin wrapper for model checking
+// Thin wrapper for model checking using sparse engine
template
-std::shared_ptr modelCheckingSparseEngine(std::shared_ptr> model, CheckTask const& task) {
- return storm::api::verifyWithSparseEngine(model, task);
+std::shared_ptr modelCheckingSparseEngine(std::shared_ptr> model, CheckTask const& task, storm::Environment const& env) {
+ return storm::api::verifyWithSparseEngine(env, model, task);
+}
+
+// Thin wrapper for model checking using dd engine
+template
+std::shared_ptr modelCheckingDdEngine(std::shared_ptr> model, CheckTask const& task, storm::Environment const& env) {
+ return storm::api::verifyWithDdEngine(env, model, task);
+}
+
+// Thin wrapper for model checking using hybrid engine
+template
+std::shared_ptr modelCheckingHybridEngine(std::shared_ptr> model, CheckTask const& task, storm::Environment const& env) {
+ return storm::api::verifyWithHybridEngine(env, model, task);
}
std::vector computeAllUntilProbabilities(storm::Environment const& env, CheckTask const& task, std::shared_ptr> ctmc, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates) {
@@ -54,8 +68,12 @@ void define_modelchecking(py::module& m) {
;
// Model checking
- m.def("_model_checking_sparse_engine", &modelCheckingSparseEngine, "Perform model checking", py::arg("model"), py::arg("task"));
- m.def("_parametric_model_checking_sparse_engine", &modelCheckingSparseEngine, "Perform parametric model checking", py::arg("model"), py::arg("task"));
+ m.def("_model_checking_sparse_engine", &modelCheckingSparseEngine, "Perform model checking using the sparse engine", py::arg("model"), py::arg("task"), py::arg("environment") = storm::Environment());
+ m.def("_parametric_model_checking_sparse_engine", &modelCheckingSparseEngine, "Perform parametric model checking using the sparse engine", py::arg("model"), py::arg("task"), py::arg("environment") = storm::Environment());
+ m.def("_model_checking_dd_engine", &modelCheckingDdEngine, "Perform model checking using the dd engine", py::arg("model"), py::arg("task"), py::arg("environment") = storm::Environment());
+ m.def("_parametric_model_checking_dd_engine", &modelCheckingDdEngine, "Perform parametric model checking using the dd engine", py::arg("model"), py::arg("task"), py::arg("environment") = storm::Environment());
+ m.def("_model_checking_hybrid_engine", &modelCheckingHybridEngine, "Perform model checking using the hybrid engine", py::arg("model"), py::arg("task"), py::arg("environment") = storm::Environment());
+ m.def("_parametric_model_checking_hybrid_engine", &modelCheckingHybridEngine, "Perform parametric model checking using the hybrid engine", py::arg("model"), py::arg("task"), py::arg("environment") = storm::Environment());
m.def("compute_all_until_probabilities", &computeAllUntilProbabilities, "Compute forward until probabilities");
m.def("compute_transient_probabilities", &computeTransientProbabilities, "Compute transient probabilities");
m.def("_compute_prob01states_double", &computeProb01, "Compute prob-0-1 states", py::arg("model"), py::arg("phi_states"), py::arg("psi_states"));
diff --git a/src/core/result.cpp b/src/core/result.cpp
index b564e3c..9827950 100644
--- a/src/core/result.cpp
+++ b/src/core/result.cpp
@@ -1,12 +1,24 @@
#include "result.h"
#include "storm/analysis/GraphConditions.h"
+#include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
+#include "storm/modelchecker/results/SymbolicQuantitativeCheckResult.h"
+#include "storm/modelchecker/results/HybridQuantitativeCheckResult.h"
+#include "storm/modelchecker/results/ExplicitQuantitativeCheckResult.h"
+#include "storm/modelchecker/results/ExplicitQualitativeCheckResult.h"
+
+#include "storm/models/symbolic/StandardRewardModel.h"
-// Thin wrapper
template
-std::vector getValues(storm::modelchecker::ExplicitQuantitativeCheckResult const& result) {
- return result.getValueVector();
+std::shared_ptr createFilterInitialStatesSparse(std::shared_ptr> model) {
+ return std::make_unique(model->getInitialStates());
+}
+
+template
+std::shared_ptr createFilterInitialStatesSymbolic(std::shared_ptr> model) {
+ return std::make_unique>(model->getReachableStates(), model->getInitialStates());
}
+
// Define python bindings
void define_result(py::module& m) {
@@ -34,6 +46,7 @@ void define_result(py::module& m) {
.def("as_explicit_parametric_quantitative", [](storm::modelchecker::CheckResult const& result) {
return result.asExplicitQuantitativeCheckResult();
}, "Convert into explicit quantitative result")
+ .def("filter", &storm::modelchecker::CheckResult::filter, py::arg("filter"), "Filter the result")
.def("__str__", [](storm::modelchecker::CheckResult const& result) {
std::stringstream stream;
result.writeToStream(stream);
@@ -44,27 +57,49 @@ void define_result(py::module& m) {
// QualitativeCheckResult
py::class_> qualitativeCheckResult(m, "_QualitativeCheckResult", "Abstract class for qualitative model checking results", checkResult);
py::class_>(m, "ExplicitQualitativeCheckResult", "Explicit qualitative model checking result", qualitativeCheckResult)
- .def("at", [](storm::modelchecker::ExplicitQualitativeCheckResult const& result, storm::storage::sparse::state_type state) {
+ .def("at", [](storm::modelchecker::ExplicitQualitativeCheckResult const& result, storm::storage::sparse::state_type state) {
return result[state];
}, py::arg("state"), "Get result for given state")
.def("get_truth_values", &storm::modelchecker::ExplicitQualitativeCheckResult::getTruthValuesVector, "Get BitVector representing the truth values")
;
+ py::class_, std::shared_ptr>>(m, "SymbolicQualitativeCheckResult", "Symbolic qualitative model checking result", qualitativeCheckResult)
+ ;
// QuantitativeCheckResult
py::class_, std::shared_ptr>> quantitativeCheckResult(m, "_QuantitativeCheckResult", "Abstract class for quantitative model checking results", checkResult);
+ quantitativeCheckResult.def_property_readonly("min", &storm::modelchecker::QuantitativeCheckResult::getMin, "Minimal value")
+ .def_property_readonly("max", &storm::modelchecker::QuantitativeCheckResult::getMax, "Maximal value")
+ ;
+
py::class_, std::shared_ptr>>(m, "ExplicitQuantitativeCheckResult", "Explicit quantitative model checking result", quantitativeCheckResult)
.def("at", [](storm::modelchecker::ExplicitQuantitativeCheckResult const& result, storm::storage::sparse::state_type state) {
return result[state];
}, py::arg("state"), "Get result for given state")
- .def("get_values", &getValues, "Get model checking result values for all states")
+ .def("get_values", [](storm::modelchecker::ExplicitQuantitativeCheckResult const& res) {return res.getValueVector();}, "Get model checking result values for all states")
.def_property_readonly("scheduler", [](storm::modelchecker::ExplicitQuantitativeCheckResult const& res) {return res.getScheduler();}, "get scheduler")
;
+ py::class_, std::shared_ptr>>(m, "SymbolicQuantitativeCheckResult", "Symbolic quantitative model checking result", quantitativeCheckResult)
+ ;
+ py::class_, std::shared_ptr>>(m, "HybridQuantitativeCheckResult", "Hybrid quantitative model checking result", quantitativeCheckResult)
+ .def("get_values", &storm::modelchecker::HybridQuantitativeCheckResult::getExplicitValueVector, "Get model checking result values for all states")
+ ;
+
py::class_, std::shared_ptr>> parametricQuantitativeCheckResult(m, "_ParametricQuantitativeCheckResult", "Abstract class for parametric quantitative model checking results", checkResult);
py::class_, std::shared_ptr>>(m, "ExplicitParametricQuantitativeCheckResult", "Explicit parametric quantitative model checking result", parametricQuantitativeCheckResult)
.def("at", [](storm::modelchecker::ExplicitQuantitativeCheckResult const& result, storm::storage::sparse::state_type state) {
return result[state];
}, py::arg("state"), "Get result for given state")
- .def("get_values", &getValues, "Get model checking result values for all states")
+ .def("get_values", [](storm::modelchecker::ExplicitQuantitativeCheckResult const& res) { return res.getValueVector();}, "Get model checking result values for all states")
+ ;
+ py::class_, std::shared_ptr>>(m, "SymbolicParametricQuantitativeCheckResult", "Symbolic parametric quantitative model checking result", quantitativeCheckResult)
+ ;
+ py::class_, std::shared_ptr>>(m, "HybridParametricQuantitativeCheckResult", "Symbolic parametric hybrid quantitative model checking result", quantitativeCheckResult)
+ .def("get_values", &storm::modelchecker::HybridQuantitativeCheckResult::getExplicitValueVector, "Get model checking result values for all states")
;
+
+
+ m.def("create_filter_initial_states_sparse", &createFilterInitialStatesSparse, "Create a filter for the initial states on a sparse model", py::arg("model"));
+ m.def("create_filter_initial_states_symbolic", &createFilterInitialStatesSymbolic, "Create a filter for the initial states on a symbolic model", py::arg("model"));
+
}
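Restricting a result to the initial states, as enabled by the new create_filter_* bindings; a sketch that continues the hybrid-engine result from the sketch above:

    # symbolic_model, result_hybrid: as in the engine sketch above
    initial_state_filter = stormpy.create_filter_initial_states_symbolic(symbolic_model)
    result_hybrid.filter(initial_state_filter)
    print(result_hybrid.min)   # after filtering, min/max range over the initial states only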
diff --git a/src/core/transformation.cpp b/src/core/transformation.cpp
new file mode 100644
index 0000000..a852563
--- /dev/null
+++ b/src/core/transformation.cpp
@@ -0,0 +1,43 @@
+#include "transformation.h"
+#include "storm/api/transformation.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+#include "storm/transformer/SubsystemBuilder.h"
+
+// Thin wrappers.
+template
+storm::transformer::SubsystemBuilderReturnType constructSubsystem(storm::models::sparse::Model const& originalModel, storm::storage::BitVector const& subsystemStates, storm::storage::BitVector const& subsystemActions, bool keepUnreachableStates, storm::transformer::SubsystemBuilderOptions options) {
+ return storm::transformer::buildSubsystem(originalModel, subsystemStates, subsystemActions, keepUnreachableStates, options);
+}
+
+template
+std::pair>, std::vector>> transformContinuousToDiscreteTimeSparseModel(std::shared_ptr> const& model, std::vector> const& formulas) {
+ return storm::api::transformContinuousToDiscreteTimeSparseModel(model, formulas);
+}
+
+void define_transformation(py::module& m) {
+ // Transform model
+ m.def("_transform_to_sparse_model", &storm::api::transformSymbolicToSparseModel, "Transform symbolic model into sparse model", py::arg("model"), py::arg("formulae") = std::vector>());
+ m.def("_transform_to_sparse_parametric_model", &storm::api::transformSymbolicToSparseModel, "Transform symbolic parametric model into sparse parametric model", py::arg("model"), py::arg("formulae") = std::vector>());
+
+ m.def("_transform_to_discrete_time_model", &transformContinuousToDiscreteTimeSparseModel, "Transform continuous time model to discrete time model", py::arg("model"), py::arg("formulae") = std::vector>());
+ m.def("_transform_to_discrete_time_parametric_model", &transformContinuousToDiscreteTimeSparseModel, "Transform continuous time model to discrete time model", py::arg("model"), py::arg("formulae") = std::vector>());
+
+ py::class_>(m, "SubsystemBuilderReturnTypeDouble", "Result of the construction of a subsystem")
+ .def_readonly("model", &storm::transformer::SubsystemBuilderReturnType::model, "the submodel")
+ .def_readonly("new_to_old_state_mapping", &storm::transformer::SubsystemBuilderReturnType::newToOldStateIndexMapping, "for each state in result, the state index in the original model")
+ .def_readonly("new_to_old_action_mapping", &storm::transformer::SubsystemBuilderReturnType::newToOldActionIndexMapping, "for each action in result, the action index in the original model")
+ .def_readonly("kept_actions", &storm::transformer::SubsystemBuilderReturnType::keptActions, "Actions of the subsystem available in the original system")
+ ;
+
+ py::class_(m, "SubsystemBuilderOptions", "Options for constructing the subsystem")
+ .def(py::init<>())
+ .def_readwrite("check_transitions_outside", &storm::transformer::SubsystemBuilderOptions::checkTransitionsOutside)
+ .def_readwrite("build_state_mapping", &storm::transformer::SubsystemBuilderOptions::buildStateMapping)
+ .def_readwrite("build_action_mapping", &storm::transformer::SubsystemBuilderOptions::buildActionMapping)
+ .def_readwrite("build_kept_actions", &storm::transformer::SubsystemBuilderOptions::buildKeptActions)
+ ;
+
+
+ m.def("_construct_subsystem_double", &constructSubsystem, "build a subsystem of a sparse model");
+
+}
\ No newline at end of file
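A one-line sketch of the symbolic-to-sparse transformation, assuming the Python-side wrapper around _transform_to_sparse_model keeps the obvious name:

    # symbolic_model: as in the engine sketch above
    sparse_from_symbolic = stormpy.transform_to_sparse_model(symbolic_model)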
diff --git a/src/core/transformation.h b/src/core/transformation.h
new file mode 100644
index 0000000..d098346
--- /dev/null
+++ b/src/core/transformation.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#include "common.h"
+
+void define_transformation(py::module& m);
diff --git a/src/dft/analysis.cpp b/src/dft/analysis.cpp
new file mode 100644
index 0000000..fd15ea7
--- /dev/null
+++ b/src/dft/analysis.cpp
@@ -0,0 +1,27 @@
+#include "analysis.h"
+
+#include "storm-dft/parser/DFTJsonParser.h"
+#include "storm-dft/builder/ExplicitDFTModelBuilder.h"
+#include "storm-dft/storage/dft/DFTIsomorphism.h"
+
+
+// Thin wrapper for DFT analysis
+template
+//std::vector analyzeDFT(storm::storage::DFT const& dft, std::vector> const& properties, bool symred, bool allowModularisation, std::set const& relevantEvents = {}, double approximationError = 0.0, storm::builder::ApproximationHeuristic approximationHeuristic = storm::builder::ApproximationHeuristic::DEPTH) {
+std::vector analyzeDFT(storm::storage::DFT const& dft, std::vector> const& properties, bool symred, bool allowModularisation, std::set const& relevantEvents, bool allowDCForRelevant) {
+ typename storm::modelchecker::DFTModelChecker::dft_results dftResults = storm::api::analyzeDFT(dft, properties, symred, allowModularisation, relevantEvents, allowDCForRelevant, 0.0, storm::builder::ApproximationHeuristic::DEPTH, false);
+
+ std::vector results;
+ for (auto result : dftResults) {
+ results.push_back(boost::get(result));
+ }
+ return results;
+}
+
+
+// Define python bindings
+void define_analysis(py::module& m) {
+
+ m.def("analyze_dft", &analyzeDFT, "Analyze the DFT", py::arg("dft"), py::arg("properties"), py::arg("symred")=true, py::arg("allow_modularisation")=false, py::arg("relevant_events")=std::set(), py::arg("dc_for_relevant")=true);
+
+}
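A sketch of DFT analysis through this binding, together with the Galileo loader bound in src/dft/io.cpp below; the file path is a placeholder and the formula asks for the expected time to failure:

    import stormpy
    import stormpy.dft

    dft = stormpy.dft.load_dft_galileo_file("tree.dft")
    properties = stormpy.parse_properties('T=? [ F "failed" ]')
    results = stormpy.dft.analyze_dft(dft, [p.raw_formula for p in properties])
    print(results[0])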
diff --git a/src/dft/analysis.h b/src/dft/analysis.h
new file mode 100644
index 0000000..102b3b9
--- /dev/null
+++ b/src/dft/analysis.h
@@ -0,0 +1,5 @@
+#pragma once
+
+#include "common.h"
+
+void define_analysis(py::module& m);
diff --git a/src/dft/common.h b/src/dft/common.h
index c9b9b0c..2de6bc0 100644
--- a/src/dft/common.h
+++ b/src/dft/common.h
@@ -1 +1,2 @@
#include "src/common.h"
+#include "storm-dft/api/storm-dft.h"
\ No newline at end of file
diff --git a/src/dft/dft.cpp b/src/dft/dft.cpp
index ccd2465..7af27d4 100644
--- a/src/dft/dft.cpp
+++ b/src/dft/dft.cpp
@@ -1,34 +1,35 @@
#include "dft.h"
-#include "storm-dft/parser/DFTJsonParser.h"
-#include "storm-dft/builder/ExplicitDFTModelBuilder.h"
+#include "src/helpers.h"
+#include "storm-dft/storage/dft/DFT.h"
#include "storm/settings/SettingsManager.h"
#include "storm-dft/settings/modules/FaultTreeSettings.h"
-#include "storm-dft/storage/dft/DFTIsomorphism.h"
-
-// Thin wrapper for model building using one formula as argument
-template
-std::shared_ptr> buildModelFromJsonDft(std::string const& jsonDft, bool symred) {
- // Build DFT
- storm::parser::DFTJsonParser parser;
- storm::storage::DFT dft = parser.parseJson(jsonDft);
- // Build model
- std::map>> emptySymmetry;
- storm::storage::DFTIndependentSymmetries symmetries(emptySymmetry);
- if (symred) {
- auto colouring = dft.colourDFT();
- symmetries = dft.findSymmetries(colouring);
- }
- storm::builder::ExplicitDFTModelBuilder builder(dft, symmetries, true);
- typename storm::builder::ExplicitDFTModelBuilder::LabelOptions labeloptions({}, true);
- builder.buildModel(labeloptions, 0, 0.0);
- return builder.getModel();
-}
+
+
+template using DFT = storm::storage::DFT;
+
void define_dft(py::module& m) {
+
m.def("_set_up", []() {
storm::settings::addModule();
}, "Initialize Storm-dft");
- // Build model
- m.def("build_sparse_model_from_json_dft", &buildModelFromJsonDft, "Build the model", py::arg("jsonDft"), py::arg("symred") = false);
- m.def("build_sparse_parametric_model_from_json_dft", &buildModelFromJsonDft, "Build the parametric model", py::arg("jsonDft"), py::arg("symred") = false);
+
+
+ // DFT class
+ py::class_, std::shared_ptr>>(m, "DFT", "DFT")
+ .def("nr_elements", &DFT::nrElements, "Total number of elements")
+ .def("nr_be", &DFT::nrBasicElements, "Number of basic elements")
+ .def("nr_dynamic", &DFT::nrDynamicElements, "Number of dynamic elements")
+ .def("can_have_nondeterminism", &DFT::canHaveNondeterminism, "Whether the model can contain non-deterministic choices")
+ .def("__str__", &DFT::getInfoString)
+ ;
+
+ py::class_, std::shared_ptr>>(m, "ParametricDFT", "Parametric DFT")
+ .def("nr_elements", &DFT::nrElements, "Total number of elements")
+ .def("nr_be", &DFT::nrBasicElements, "Number of basic elements")
+ .def("nr_dynamic", &DFT::nrDynamicElements, "Number of dynamic elements")
+ .def("can_have_nondeterminism", &DFT::canHaveNondeterminism, "Whether the model can contain non-deterministic choices")
+ .def("__str__", &DFT::getInfoString)
+ ;
+
}
diff --git a/src/dft/dft.h b/src/dft/dft.h
index 2dc129a..147b75a 100644
--- a/src/dft/dft.h
+++ b/src/dft/dft.h
@@ -1,8 +1,5 @@
-#ifndef PYTHON_DFT_DFT_H_
-#define PYTHON_DFT_DFT_H_
+#pragma once
#include "common.h"
void define_dft(py::module& m);
-
-#endif /* PYTHON_DFT_DFT_H_ */
diff --git a/src/dft/io.cpp b/src/dft/io.cpp
new file mode 100644
index 0000000..38526d9
--- /dev/null
+++ b/src/dft/io.cpp
@@ -0,0 +1,20 @@
+#include "io.h"
+#include "src/helpers.h"
+
+
+// Define python bindings
+void define_input(py::module& m) {
+
+ // Load DFT input
+ m.def("load_dft_galileo_file", &storm::api::loadDFTGalileoFile, "Load DFT from Galileo file", py::arg("path"));
+    // Load DFT from JSON file
+ m.def("load_dft_json_file", &storm::api::loadDFTJsonFile