
update to version 1.4.0 of sylvan

Branch: main
dehnert, 8 years ago
parent commit d0cf2ef57b
  1. resources/3rdparty/sylvan/.travis.yml (6)
  2. resources/3rdparty/sylvan/CHANGELOG.md (48)
  3. resources/3rdparty/sylvan/CMakeLists.txt (13)
  4. resources/3rdparty/sylvan/cmake/FindGMP.cmake (7)
  5. resources/3rdparty/sylvan/docs/index.rst (268)
  6. resources/3rdparty/sylvan/examples/ldd2bdd.c (328)
  7. resources/3rdparty/sylvan/examples/lddmc.c (834)
  8. resources/3rdparty/sylvan/examples/mc.c (558)
  9. resources/3rdparty/sylvan/models/anderson.4.bdd (BIN)
  10. resources/3rdparty/sylvan/models/anderson.4.ldd (BIN)
  11. resources/3rdparty/sylvan/models/anderson.6.ldd (BIN)
  12. resources/3rdparty/sylvan/models/anderson.8.ldd (BIN)
  13. resources/3rdparty/sylvan/models/at.5.8-rgs.bdd (BIN)
  14. resources/3rdparty/sylvan/models/at.6.8-rgs.bdd (BIN)
  15. resources/3rdparty/sylvan/models/at.7.8-rgs.bdd (BIN)
  16. resources/3rdparty/sylvan/models/bakery.4.bdd (BIN)
  17. resources/3rdparty/sylvan/models/bakery.4.ldd (BIN)
  18. resources/3rdparty/sylvan/models/bakery.5.ldd (BIN)
  19. resources/3rdparty/sylvan/models/bakery.6.ldd (BIN)
  20. resources/3rdparty/sylvan/models/bakery.7.ldd (BIN)
  21. resources/3rdparty/sylvan/models/blocks.2.ldd (BIN)
  22. resources/3rdparty/sylvan/models/blocks.3.ldd (BIN)
  23. resources/3rdparty/sylvan/models/blocks.4.ldd (BIN)
  24. resources/3rdparty/sylvan/models/collision.4.9-rgs.bdd (BIN)
  25. resources/3rdparty/sylvan/models/collision.4.bdd (BIN)
  26. resources/3rdparty/sylvan/models/collision.4.ldd (BIN)
  27. resources/3rdparty/sylvan/models/collision.5.9-rgs.bdd (BIN)
  28. resources/3rdparty/sylvan/models/collision.5.bdd (BIN)
  29. resources/3rdparty/sylvan/models/collision.5.ldd (BIN)
  30. resources/3rdparty/sylvan/models/collision.6.bdd (BIN)
  31. resources/3rdparty/sylvan/models/collision.6.ldd (BIN)
  32. resources/3rdparty/sylvan/models/lifts.6.bdd (BIN)
  33. resources/3rdparty/sylvan/models/lifts.6.ldd (BIN)
  34. resources/3rdparty/sylvan/models/lifts.7.bdd (BIN)
  35. resources/3rdparty/sylvan/models/lifts.7.ldd (BIN)
  36. resources/3rdparty/sylvan/models/schedule_world.2.8-rgs.bdd (BIN)
  37. resources/3rdparty/sylvan/models/schedule_world.2.bdd (BIN)
  38. resources/3rdparty/sylvan/models/schedule_world.2.ldd (BIN)
  39. resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd (BIN)
  40. resources/3rdparty/sylvan/models/schedule_world.3.bdd (BIN)
  41. resources/3rdparty/sylvan/models/schedule_world.3.ldd (BIN)
  42. resources/3rdparty/sylvan/src/CMakeLists.txt (8)
  43. resources/3rdparty/sylvan/src/avl.h (2)
  44. resources/3rdparty/sylvan/src/lace.c (406)
  45. resources/3rdparty/sylvan/src/lace.h (424)
  46. resources/3rdparty/sylvan/src/sylvan.h (37)
  47. resources/3rdparty/sylvan/src/sylvan_bdd.c (22)
  48. resources/3rdparty/sylvan/src/sylvan_bdd.h (41)
  49. resources/3rdparty/sylvan/src/sylvan_bdd_storm.h (8)
  50. resources/3rdparty/sylvan/src/sylvan_cache.c (13)
  51. resources/3rdparty/sylvan/src/sylvan_cache.h (14)
  52. resources/3rdparty/sylvan/src/sylvan_common.c (2)
  53. resources/3rdparty/sylvan/src/sylvan_common.h (4)
  54. resources/3rdparty/sylvan/src/sylvan_gmp.c (7)
  55. resources/3rdparty/sylvan/src/sylvan_gmp.h (10)
  56. resources/3rdparty/sylvan/src/sylvan_int.h (134)
  57. resources/3rdparty/sylvan/src/sylvan_ldd.c (248)
  58. resources/3rdparty/sylvan/src/sylvan_ldd.h (140)
  59. resources/3rdparty/sylvan/src/sylvan_ldd_int.h (27)
  60. resources/3rdparty/sylvan/src/sylvan_mt.c (7)
  61. resources/3rdparty/sylvan/src/sylvan_mt.h (6)
  62. resources/3rdparty/sylvan/src/sylvan_mtbdd.c (394)
  63. resources/3rdparty/sylvan/src/sylvan_mtbdd.h (425)
  64. resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h (49)
  65. resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c (10)
  66. resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h (8)
  67. resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp (1)
  68. resources/3rdparty/sylvan/src/sylvan_refs.c (11)
  69. resources/3rdparty/sylvan/src/sylvan_refs.h (5)
  70. resources/3rdparty/sylvan/src/sylvan_sl.c (8)
  71. resources/3rdparty/sylvan/src/sylvan_sl.h (2)
  72. resources/3rdparty/sylvan/src/sylvan_stats.c (17)
  73. resources/3rdparty/sylvan/src/sylvan_stats.h (10)
  74. resources/3rdparty/sylvan/src/sylvan_table.c (201)
  75. resources/3rdparty/sylvan/src/sylvan_table.h (31)
  76. resources/3rdparty/sylvan/src/sylvan_tls.h (3)
  77. src/storm/storage/dd/sylvan/InternalSylvanBdd.cpp (2)
  78. src/storm/utility/sylvan.h (1)

6
resources/3rdparty/sylvan/.travis.yml

@ -58,12 +58,12 @@ install:
script:
- ${CC} --version
- ${CXX} --version
- cmake . -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSYLVAN_STATS=${SYLVAN_STATS} -DWITH_COVERAGE=${COVERAGE} -DSYLVAN_BUILD_DOCS=${SYLVAN_BUILD_DOCS}
- cmake . -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSYLVAN_STATS=${SYLVAN_STATS} -DWITH_COVERAGE=${COVERAGE} -DSYLVAN_BUILD_DOCS=${SYLVAN_BUILD_DOCS} -DSYLVAN_BUILD_EXAMPLES=ON
- make -j 2
- make test
- examples/simple
- examples/mc models/schedule_world.2.8-rgs.bdd -w 2 | tee /dev/fd/2 | grep -q "1,570,340"
- examples/lddmc models/blocks.2.ldd -w 2 | tee /dev/fd/2 | grep -q "7057 states"
- examples/mc models/schedule_world.2.bdd -w 2 | tee /dev/fd/2 | grep -q "1,570,340"
- examples/lddmc models/blocks.2.ldd -w 2 | tee /dev/fd/2 | grep -q "7,057 states"
notifications:
email: false

48
resources/3rdparty/sylvan/CHANGELOG.md

@ -2,12 +2,60 @@
All notable changes to Sylvan will be documented in this file.
## [Unreleased]
### Changed
- We now implement twisted tabulation as the hash function for the nodes table. The old hash function is still available and the default behavior can be changed in `sylvan_table.h`.
## [1.4.0] - 2017-07-12
### Added
- Function `mtbdd_cmpl` that computes the complement for MTBDDs. (0 becomes 1, non-0 becomes 0)
### Changed
- Changed file formats used by the examples to match the changes in LTSmin.
- Function `mtbdd_satcount` no longer counts assignments that lead to 0. Perhaps this will be made configurable in the future (like in CuDD).
- Slightly improved C++ support by wrapping header files in the namespace sylvan.
### Fixed
- There was a bug where Lace tasks were overwritten during SYNC, which caused problems during garbage collection. Lace reusing the bucket during SYNC is by design and is difficult to change. We fix the issue by checking during garbage collection whether the stored task is still the same function, which in the worst case marks more nodes than needed.
- Band-aid patch for hashing; very similar nodes were hashing to similar positions and strides, causing early garbage collections and full tables. The patch works for now, but we need a more robust solution.
### Removed
- Removed support for HWLOC (pinning on NUMA machines). Planning to bring this back as an option, but in its current form it prevents multiple Sylvan programs from running simultaneously on the same machine.
## [1.3.3] - 2017-06-03
### Changed
- Changed file format for .bdd files in the MC example.
### Fixed
- A major bug in `lddmc_match_sat_par` has been fixed.
- A bug in the saturation algorithm in the model checking example has been fixed.
- A major bug in the hash table rehashing implementation has been fixed.
## [1.3.2] - 2017-05-23
### Added
- Now implements `lddmc_protect` and `lddmc_unprotect` for external pointer references.
- Now implements `lddmc_refs_pushptr` and `lddmc_refs_popptr` for internal pointer references
### Changed
- New version of Lace has slightly different API for manually created threads.
## [1.3.1] - 2017-05-22
### Fixed
- A bug in `mtbdd_refs_ptrs_up` caused a segfault. This has been fixed.
## [1.3.0] - 2017-05-16
### Added
- The embedded work-stealing framework now explicitly checks for stack overflows and aborts with an appropriate error message written to stderr.
- New functions `sylvan_project` and `sylvan_and_project` for BDDs, a dual of existential quantification, where instead of the variables to remove, the given set of variables are the variables to keep.
- New functions `mtbdd_refs_pushptr` and `mtbdd_refs_popptr` allow thread-specific referencing of pointers.
### Changed
- Rewritten initialization of Sylvan. Before the call to `sylvan_init_package`, table sizes must be initialized either using `sylvan_set_sizes` or with the new function `sylvan_set_limits`. This new function allows the user to set a maximum number of bytes allocated for the nodes table and for the operation cache.
- Rewritten MTBDD referencing system.
- Rewritten MTBDD map and set functions (no API change except renaming `mtbdd_map_addall` to `mtbdd_map_update` with backward compatibility)
- The lock-free unique table now uses double hashing instead of rehashing. This can improve the performance for custom leaves and improves the hash spread.
### Fixed
- A bug in `llmsset_lookup` affecting custom leaves has been fixed.
## [1.2.0] - 2017-02-03
### Added

13
resources/3rdparty/sylvan/CMakeLists.txt

@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.1)
project(sylvan LANGUAGES C CXX VERSION 1.2.0)
project(sylvan LANGUAGES C CXX VERSION 1.4.0)
set(PROJECT_DESCRIPTION "Sylvan, a parallel decision diagram library")
set(PROJECT_URL "https://github.com/trolando/sylvan")
@ -25,8 +25,8 @@ set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/")
option(SYLVAN_PORTABLE "If set, the created library will be portable." OFF)
option(USE_CARL "Sets whether carl should be included." ON)
set(CMAKE_C_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing -fPIC ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing -Wno-deprecated -fPIC ${CMAKE_CXX_FLAGS}")
set(CMAKE_C_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing -Wno-deprecated ${CMAKE_CXX_FLAGS}")
if (NOT SYLVAN_PORTABLE)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
@ -73,18 +73,17 @@ include_directories("${PROJECT_BINARY_DIR}/../../../include")
include_directories(src)
add_subdirectory(src)
option(SYLVAN_BUILD_TESTS "Build example tools" ON)
option(SYLVAN_BUILD_TESTS "Build example tests" ON)
if(SYLVAN_BUILD_TESTS)
add_subdirectory(test)
endif()
option(SYLVAN_BUILD_EXAMPLES "Build example tools" ON)
option(SYLVAN_BUILD_EXAMPLES "Build example tools" OFF)
if(SYLVAN_BUILD_EXAMPLES)
add_subdirectory(examples)
endif()
option(SYLVAN_BUILD_DOCS "Build documentation" ON)
option(SYLVAN_BUILD_DOCS "Build documentation" OFF)
if(SYLVAN_BUILD_DOCS)
configure_file("docs/conf.py.in" "docs/conf.py" @ONLY)
find_package(Sphinx REQUIRED)

7
resources/3rdparty/sylvan/cmake/FindGMP.cmake

@ -8,16 +8,13 @@
find_package(PkgConfig)
pkg_check_modules(PC_GMP QUIET gmp)
set(GMP_INCLUDE "" CACHE PATH "include dir")
set(GMP_LOCATION "" CACHE PATH "location dir")
set(GMP_DEFINITIONS ${PC_GMP_CFLAGS_OTHER})
find_path(GMP_INCLUDE_DIR gmp.h
HINTS ${GMP_INCLUDE} ${PC_GMP_INCLUDEDIR} ${PC_GMP_INCLUDE_DIRS})
HINTS ${PC_GMP_INCLUDEDIR} ${PC_GMP_INCLUDE_DIRS})
find_library(GMP_LIBRARIES NAMES gmp libgmp
HINTS ${GMP_LOCATION} ${PC_GMP_LIBDIR} ${PC_GMP_LIBRARY_DIRS} NO_CMAKE_PATH NO_CMAKE_ENVIRONMENT_PATH)
HINTS ${PC_GMP_LIBDIR} ${PC_GMP_LIBRARY_DIRS})
include(FindPackageHandleStandardArgs)
# handle the QUIETLY and REQUIRED arguments and set GMP_FOUND to TRUE

268
resources/3rdparty/sylvan/docs/index.rst

@ -31,15 +31,19 @@ Bindings for other languages than C/C++ also exist:
Dependencies
------------
Sylvan has the following required dependencies:
Sylvan has the following dependencies:
- **CMake** for compiling.
- **gmp** (``libgmp-dev``) for the GMP leaves in MTBDDs.
- **hwloc** (``libhwloc-dev``) for pinning worker threads to processors.
- **Sphinx** if you want to build the documentation.
Sylvan depends on the `work-stealing framework
Lace <http://fmt.ewi.utwente.nl/tools/lace>`__ for its implementation.
Lace is embedded in the Sylvan distribution.
Lace requires one additional library:
- **hwloc** (``libhwloc-dev``) for pinning worker threads to processors.
Building
--------
@ -71,14 +75,12 @@ To use Sylvan, the library and its dependency Lace must be initialized:
lace_init(n_workers, 0);
lace_startup(0, NULL, NULL);
size_t nodes_minsize = 1LL<<22;
size_t nodes_maxsize = 1LL<<26;
size_t cache_minsize = 1LL<<23;
size_t cache_maxsize = 1LL<<27;
sylvan_init_package(nodes_minsize, nodes_maxsize, cache_minsize, cache_maxsize);
// use at most 512 MB, nodes:cache ratio 2:1, initial size 1/32 of maximum
sylvan_set_limits(512*1024*1024, 1, 5);
sylvan_init_package();
sylvan_init_mtbdd();
...
/* ... do stuff ... */
sylvan_stats_report(stdout);
sylvan_quit();
@ -90,19 +92,20 @@ for work-stealing. The parameter ``n_workers`` can be set to 0 for auto-detectio
function ``lace_startup`` then creates all other worker threads. The worker threads run
until ``lace_exit`` is called. Lace must be started before Sylvan can be initialized.
Sylvan is initialized with a call to ``sylvan_init_package``. Here we choose the initial
and maximum sizes of the nodes table and the operation cache. In the example, we choose a maximum
nodes table size of 2^26 and a maximum cache size of 2^27. The initial sizes are
set to 2^22 and 2^23, respectively. The sizes must be powers of 2.
Sylvan allocates memory for the maximum sizes *in virtual memory* but only uses the space
needed for the initial sizes. The sizes are doubled during garbage collection, until the maximum
size has been reached.
Sylvan is initialized with a call to ``sylvan_init_package``. Before this call, Sylvan needs to know
how much memory to allocate for the nodes table and the operation cache. In this example, we use the
``sylvan_set_limits`` function to tell Sylvan that it may allocate at most 512 MB for these tables.
The second parameter indicates the ratio of the nodes table and the operation cache, with each
higher number doubling the size of the nodes table. Negative numbers double the size of the operation
cache instead. In the example, we want the nodes table to be twice as big as the operation cache.
The third parameter controls how many times garbage collection may double the table sizes before
their maximum size is reached. The value 5 means that the initial tables are 32 times smaller than the maximum size.
By default, every execution of garbage collection doubles the table sizes.
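As a sketch of the two ways to size the tables (both calls appear elsewhere in this commit; the concrete numbers are placeholders, and the argument order of ``sylvan_set_sizes`` follows the call that ``examples/ldd2bdd.c`` used before this change):
.. code:: c

    // Option 1: give Sylvan a total memory budget and let it derive the sizes:
    // at most 1 GB, nodes table twice as large as the cache (ratio 1),
    // initial tables 2^10 times smaller than their maximum.
    sylvan_set_limits(1LL<<30, 1, 10);

    // Option 2: set the sizes explicitly, as powers of 2
    // (nodes table min/max, then operation cache min/max):
    // sylvan_set_sizes(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);

    sylvan_init_package();
Exactly one of the two calls is needed, and it must precede ``sylvan_init_package``.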
After ``sylvan_init_package``, the subpackages ``mtbdd`` and ``ldd`` can be initialized with
``sylvan_init_mtbdd`` and ``sylvan_init_ldd``. This mainly allocates auxiliary datastructures for
garbage collection.
After ``sylvan_init_package``, subpackages like ``mtbdd`` and ``ldd`` can be initialized with
``sylvan_init_mtbdd`` and ``sylvan_init_ldd``. This allocates auxiliary datastructures.
If you enable statistics generation (via CMake) then you can use ``sylvan_stats_report`` to report
If you enabled statistics generation (via CMake), then you can use ``sylvan_stats_report`` to report
the obtained statistics to a given ``FILE*``.
The Lace framework
@ -110,7 +113,7 @@ The Lace framework
Sylvan uses the Lace framework to offer 'automatic' parallelization of decision diagram operations.
Many functions in Sylvan are Lace tasks. To call a Lace task, the variables
``__lace_worker`` and ``__lace_dq_head`` must be initialized **locally**.
``__lace_worker`` and ``__lace_dq_head`` must be initialized as **local** variables of the current function.
Use the macro ``LACE_ME`` to initialize the variables in every function that calls Sylvan functions
and is not itself a Lace task.
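A minimal sketch of the ``LACE_ME`` pattern (the function ``my_union`` is only a placeholder):
.. code:: c

    BDD my_union(BDD a, BDD b)
    {
        // This function is not a Lace task, so initialize the local
        // Lace variables before calling any Sylvan operation.
        LACE_ME;
        return sylvan_or(a, b);
    }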
@ -121,98 +124,207 @@ Like all decision diagram implementations, Sylvan performs garbage collection.
Garbage collection is triggered when trying to insert a new node and no
empty space can be found in the table within a reasonable upper bound.
Garbage collection can be disabled with ``sylvan_gc_disable`` and enabled again with ``sylvan_gc_enable``.
Call ``sylvan_gc`` to manually trigger garbage collection.
To ensure that no decision diagram nodes are overwritten, you must ensure that
Sylvan knows which decision diagrams you care about.
The easiest way to do this is with ``sylvan_protect`` and ``sylvan_unprotect`` to protect
a given pointer.
These functions protect the decision diagram referenced to by that pointer at the time
that garbage collection is performed.
Unlike some other implementations of decision diagrams,
you can modify the variable between the calls to ``sylvan_protect`` and ``sylvan_unprotect``
without explicitly changing the reference.
To manually trigger garbage collection, call ``sylvan_gc``.
You can use ``sylvan_gc_disable`` and ``sylvan_gc_enable`` to disable garbage collection or
enable it again. If garbage collection is disabled, the program will abort when the nodes table
is full.
Each subpackage implements mechanisms to store references to decision diagrams that must be kept.
For example, the *mtbdd* subpackage implements ``mtbdd_protect`` and ``mtbdd_unprotect`` to store pointers to
MTBDD variables.
.. code:: c
MTBDD* allocate_var() {
MTBDD* my_var = (MTBDD*)calloc(sizeof(MTBDD), 1);
mtbdd_protect(my_var);
return my_var;
}
void free_var(MTBDD* my_var) {
mtbdd_unprotect(my_var);
free(my_var);
}
If you use ``mtbdd_protect`` you do not need to update the reference every time the value changes.
The *mtbdd* subpackage also implements thread-local stacks to temporarily store pointers and results of tasks:
.. code:: c
MTBDD some_thing = ...;
mtbdd_refs_pushptr(&some_thing);
MTBDD result_param1 = mtbdd_false, result_param2 = mtbdd_false;
mtbdd_refs_pushptr(&result_param1);
mtbdd_refs_pushptr(&result_param2);
while (some_condition) {
mtbdd_refs_spawn(SPAWN(an_operation, some_thing, param1));
result_param2 = CALL(an_operation, some_thing, param2);
result_param1 = mtbdd_refs_sync(SYNC(an_operation));
some_thing = CALL(another_operation, result_param1, result_param2);
}
mtbdd_refs_popptr(3);
return some_thing;
It is recommended to use the thread-local stacks for local variables, and to use the ``protect`` and ``unprotect``
functions for other variables. Every SPAWN and SYNC of a Lace task that returns an MTBDD must be decorated with
``mtbdd_refs_spawn`` and ``mtbdd_refs_sync``, as in the above example.
References to decision diagrams must be added before a worker may cooperate on garbage collection.
Workers can cooperate on garbage collection during ``SYNC``, when functions create nodes, or when they call ``sylvan_gc_test`` to check whether to assist in garbage collection.
Functions for adding or removing references never perform garbage collection.
Furthermore, only the ``mtbdd_makenode`` function (and other node making primitives) implicitly reference their parameters; all other functions do not reference their parameters.
Nesting Sylvan functions (including ``sylvan_ithvar``) is bad practice and should be avoided.
**Warning**: Sylvan is a multi-threaded library and all workers must cooperate for garbage collection. If you use locking mechanisms in your code, beware of deadlocks!
You can explicitly cooperate on garbage collection with ``sylvan_gc_test()``.
Basic BDD/MTBDD functionality
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Basic BDD functionality
~~~~~~~~~~~~~~~~~~~~~~~
In Sylvan, BDDs are special cases of MTBDDs.
Several functions are specific for BDDs and they start with ``sylvan_``, whereas generic MTBDD functions start
with ``mtbdd_``.
To create new BDDs, you can use:
- ``sylvan_true``: representation of constant ``true``.
- ``sylvan_false``: representation of constant ``false``.
- ``mtbdd_true``: representation of constant ``true``.
- ``mtbdd_false``: representation of constant ``false``.
- ``sylvan_ithvar(var)``: representation of literal <var> (negated: ``sylvan_nithvar(var)``)
To follow the BDD edges and obtain the variable at the root of a BDD,
you can use (only for internal nodes, not for leaves ``sylvan_true`` and ``sylvan_false``):
you can use (only for internal nodes, not for leaves ``mtbdd_true`` and ``mtbdd_false``):
- ``sylvan_var(bdd)``: obtain the variable of the root node of <bdd>.
- ``sylvan_high(bdd)``: follow the high edge of <bdd>.
- ``sylvan_low(bdd)``: follow the low edge of <bdd>.
- ``mtbdd_getvar(bdd)``: obtain the variable of the root node of <bdd>.
- ``mtbdd_gethigh(bdd)``: follow the high edge of <bdd>.
- ``mtbdd_getlow(bdd)``: follow the low edge of <bdd>.
You need to manually reference BDDs that you want to keep during garbage
collection:
collection (see the above explanation):
- ``sylvan_protect(bddptr)``: add a pointer reference to <bddptr>.
- ``sylvan_unprotect(bddptr)``: remove a pointer reference to <bddptr>.
- ``sylvan_ref(bdd)``: add a reference to <bdd>.
- ``sylvan_deref(bdd)``: remove a reference to <bdd>.
- ``mtbdd_protect(bddptr)``: add a pointer reference to <bddptr>.
- ``mtbdd_unprotect(bddptr)``: remove a pointer reference to <bddptr>.
- ``mtbdd_refs_pushptr(bddptr)``: add a local pointer reference to <bddptr>.
- ``mtbdd_refs_popptr(amount)``: remove the last <amount> local pointer references.
- ``mtbdd_refs_spawn(SPAWN(...))``: spawn a task that returns a BDD/MTBDD.
- ``mtbdd_refs_sync(SYNC(...))``: sync a task that returns a BDD/MTBDD.
It is recommended to use ``sylvan_protect`` and ``sylvan_unprotect``.
It is recommended to use ``mtbdd_protect`` and ``mtbdd_unprotect``.
The C++ objects (defined in ``sylvan_obj.hpp``) handle this automatically.
For local variables, we recommend ``mtbdd_refs_pushptr`` and ``mtbdd_refs_popptr``.
The following basic operations are implemented:
The following basic BDD operations are implemented:
- ``sylvan_not(bdd)``: compute the negation of <bdd>.
- ``sylvan_ite(a,b,c)``: compute 'if <a> then <b> else <c>'.
- ``sylvan_and(a, b)``: compute '<a> and <b>'
- ``sylvan_or(a, b)``: compute '<a> or <b>'
- ``sylvan_nand(a, b)``: compute 'not (<a> and <b>)'
- ``sylvan_nor(a, b)``: compute 'not (<a> or <b>)'
- ``sylvan_imp(a, b)``: compute '<a> then <b>'
- ``sylvan_invimp(a, b)``: compute '<b> then <a>'
- ``sylvan_xor(a, b)``: compute '<a> xor <b>'
- ``sylvan_equiv(a, b)``: compute '<a> = <b>'
- ``sylvan_diff(a, b)``: compute '<a> and not <b>'
- ``sylvan_less(a, b)``: compute '<b> and not <a>'
- ``sylvan_and(a, b)``: compute '<a> and <b>'.
- ``sylvan_or(a, b)``: compute '<a> or <b>'.
- ``sylvan_nand(a, b)``: compute 'not (<a> and <b>)'.
- ``sylvan_nor(a, b)``: compute 'not (<a> or <b>)'.
- ``sylvan_imp(a, b)``: compute '<a> then <b>'.
- ``sylvan_invimp(a, b)``: compute '<b> then <a>'.
- ``sylvan_xor(a, b)``: compute '<a> xor <b>'.
- ``sylvan_equiv(a, b)``: compute '<a> = <b>'.
- ``sylvan_diff(a, b)``: compute '<a> and not <b>'.
- ``sylvan_less(a, b)``: compute '<b> and not <a>'.
- ``sylvan_exists(bdd, vars)``: existential quantification of <bdd> with respect to variables <vars>.
- ``sylvan_forall(bdd, vars)``: universal quantification of <bdd> with respect to variables <vars>.
- ``sylvan_project(bdd, vars)``: the dual of ``sylvan_exists``, projects the <bdd> to the variable domain <vars>.
A set of variables (like <vars> above) is a BDD representing the conjunction of the variables.
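As a small illustrative sketch (the variable numbers are arbitrary, and ``LACE_ME`` is assumed to be in scope as described above):
.. code:: c

    BDD x0 = mtbdd_false, nx2 = mtbdd_false, f = mtbdd_false;
    mtbdd_refs_pushptr(&x0);
    mtbdd_refs_pushptr(&nx2);
    mtbdd_refs_pushptr(&f);

    x0  = sylvan_ithvar(0);      // literal x0
    nx2 = sylvan_nithvar(2);     // negated literal !x2
    f   = sylvan_or(x0, nx2);    // x0 or !x2
    f   = sylvan_not(f);         // not (x0 or !x2)

    /* ... use f ... */
    mtbdd_refs_popptr(3);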
Other BDD operations
~~~~~~~~~~~~~~~~~~~~
See ``src/sylvan_bdd.h`` for other operations on BDDs, especially operations
that are relevant for model checking.
Basic MTBDD functionality
~~~~~~~~~~~~~~~~~~~~~~~~~
See ``src/sylvan_mtbdd.h`` for operations on multi-terminal BDDs.
Basic LDD functionality
~~~~~~~~~~~~~~~~~~~~~~~
See ``src/sylvan_ldd.h`` for operations on List DDs.
A number of convenience functions are defined to manipulate sets of variables:
- ``mtbdd_set_empty()``: obtain an empty set.
- ``mtbdd_set_isempty(set)``: compute whether the set is empty.
- ``mtbdd_set_first(set)``: obtain the first variable of the set.
- ``mtbdd_set_next(set)``: obtain the subset without the first variable.
- ``mtbdd_set_from_array(arr, len)``: create a set from a given array.
- ``mtbdd_set_to_array(set, arr)``: write the set to the given array.
- ``mtbdd_set_add(set, var)``: compute the set plus the variable.
- ``mtbdd_set_union(set1, set2)``: compute the union of two sets.
- ``mtbdd_set_remove(set, var)``: compute the set minus the variable.
- ``mtbdd_set_minus(set1, set2)``: compute the set <set1> minus the variables in <set2>.
- ``mtbdd_set_count(set)``: compute the number of variables in the set.
- ``mtbdd_set_contains(set, var)``: compute whether the set contains the variable.
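For example, a set over the (placeholder) variables 0, 2 and 4 can be built and used for quantification as follows, given some BDD ``f`` (again assuming ``LACE_ME``):
.. code:: c

    uint32_t arr[3] = {0, 2, 4};
    BDDSET vars = mtbdd_set_from_array(arr, 3);   // conjunction of x0, x2, x4
    mtbdd_refs_pushptr(&vars);

    BDD e = mtbdd_false, p = mtbdd_false;
    mtbdd_refs_pushptr(&e);
    mtbdd_refs_pushptr(&p);
    e = sylvan_exists(f, vars);     // quantify x0, x2, x4 away from f
    p = sylvan_project(f, vars);    // dual: keep only x0, x2, x4 in f

    mtbdd_refs_popptr(3);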
Sylvan also implements composition and substitution/variable renaming using a "MTBDD map". An MTBDD map is a special structure
implemented with special MTBDD nodes to store a mapping from variables (uint32_t) to MTBDDs. Like sets of variables and MTBDDs, MTBDD maps must
also be referenced for garbage collection. The following functions are related to MTBDD maps:
- ``mtbdd_compose(dd, map)``: apply the map to the given decision diagram, transforming every node with a variable that is associated with some function F in the map by ``if <F> then <high> else <low>``.
- ``sylvan_compose(dd, map)``: same as ``mtbdd_compose``, but assumes the decision diagram only has Boolean leaves.
- ``mtbdd_map_empty()``: obtain an empty map.
- ``mtbdd_map_isempty(map)``: compute whether the map is empty.
- ``mtbdd_map_key(map)``: obtain the key of the first pair of the map.
- ``mtbdd_map_value(map)``: obtain the value of the first pair of the map.
- ``mtbdd_map_next(map)``: obtain the submap without the first pair.
- ``mtbdd_map_add(map, key, value)``: compute the map plus the given key-value pair.
- ``mtbdd_map_update(map1, map2)``: compute the union of two maps, with priority to map2 if both maps share variables.
- ``mtbdd_map_remove(map, var)``: compute the map minus the variable.
- ``mtbdd_map_removeall(map, set)``: compute the map minus the given variables.
- ``mtbdd_map_count(set)``: compute the number of pairs in the map.
- ``mtbdd_map_contains(map, var)``: compute whether the map contains the variable.
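A sketch of renaming a single variable with such a map, given a Boolean BDD ``f`` (the variable numbers are placeholders):
.. code:: c

    MTBDD x1 = mtbdd_false, map = mtbdd_false, renamed = mtbdd_false;
    mtbdd_refs_pushptr(&x1);
    mtbdd_refs_pushptr(&map);
    mtbdd_refs_pushptr(&renamed);

    x1  = sylvan_ithvar(1);             // the literal x1
    map = mtbdd_map_empty();
    map = mtbdd_map_add(map, 0, x1);    // substitute x1 for variable 0
    renamed = sylvan_compose(f, map);   // rename x0 to x1 in f

    /* ... use renamed ... */
    mtbdd_refs_popptr(3);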
Sylvan implements a number of counting operations:
- ``mtbdd_satcount(bdd, number_of_vars)``: compute the number of minterms (assignments that lead to True) for a function with <number_of_vars> variables; we don't need to know the exact variables that may be in the BDD, just how many there are.
- ``sylvan_pathcount(bdd)``: compute the number of distinct paths to True.
- ``mtbdd_nodecount(bdd)``: compute the number of nodes (and leaves) in the BDD.
- ``mtbdd_nodecount_more(array, length)``: compute the number of nodes (and leaves) in the array of BDDs.
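For example, given a referenced BDD ``f`` over (say) 6 variables:
.. code:: c

    double minterms = mtbdd_satcount(f, 6);   // assignments over 6 variables leading to true
    double paths    = sylvan_pathcount(f);    // distinct paths to true (return type assumed double)
    size_t nodes    = mtbdd_nodecount(f);     // nodes and leaves of f
    printf("%.0f minterms, %.0f paths, %zu nodes\n", minterms, paths, nodes);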
Sylvan implements various advanced operations:
- ``sylvan_and_exists(bdd_a, bdd_b, vars)``: compute ``sylvan_exists(sylvan_and(bdd_a, bdd_b), vars)`` in one step.
- ``sylvan_and_project(bdd_a, bdd_b, vars)``: compute ``sylvan_project(sylvan_and(bdd_a, bdd_b), vars)`` in one step.
- ``sylvan_cube(vars, values)``: compute a cube (to leaf True) of the given variables, where the array values indicates for each variable whether to use it in negative form (value 0) or positive form (value 1) or to skip it (as dont-care, value 2).
- ``sylvan_union_cube(set, vars, values)``: compute ``sylvan_or(set, sylvan_cube(vars, values))`` in one step.
- ``sylvan_constrain(bdd_f, bdd_c)``: compute the generic cofactor of F constrained by C, i.e, set F to False for all assignments not in C.
- ``sylvan_restrict(bdd_f, bdd_c)``: compute Coudert and Madre's restrict algorithm, which tries to minimize bdd_f according to a care set C using sibling substitution; the invariant is ``restrict(f, c) \and c == f \and c``; the result of this algorithm is often but not always smaller than the original.
- ``sylvan_pick_cube(bdd)`` or ``sylvan_sat_one_bdd(bdd)``: extract a single path to True from the BDD (returns the BDD of this path)
- ``sylvan_pick_single_cube(bdd, vars)`` or ``sylvan_sat_single(bdd, vars)`` extracts a single minterm from the BDD (returns the BDD of this assignment)
- ``sylvan_sat_one(bdd, vars, array)``: extract a single minterm from the BDD given the set of variables and write the values of the variables in order to the given array, with 0 when it is negative, 1 when it is positive, and 2 when it is dontcare.
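A sketch of building a cube and extracting a satisfying assignment, over a three-variable set ``vars`` built as in the earlier sketch and some BDD ``f`` (values 0/1/2 mean negative/positive/don't care):
.. code:: c

    uint8_t values[3] = {1, 0, 2};        // x0 positive, x2 negative, x4 don't care
    BDD cube = mtbdd_false, one_path = mtbdd_false;
    mtbdd_refs_pushptr(&cube);
    mtbdd_refs_pushptr(&one_path);

    cube = sylvan_cube(vars, values);     // the cube as a BDD ending in leaf true
    one_path = sylvan_pick_cube(f);       // one path of f to true, as a BDD

    uint8_t assignment[3];
    sylvan_sat_one(f, vars, assignment);  // one minterm of f, written as 0/1/2 per variable

    mtbdd_refs_popptr(2);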
Sylvan implements several operations for transition systems. These operations assume an interleaved variable ordering, such that *source* or *unprimed* variables have even parity (0, 2, 4...) and matching *target* or *primed* variables have odd parity (1, 3, 5...).
The transition relations may be partial transition relations that only manipulate a subset of variables; hence, the operations also require the set of variables.
- ``sylvan_relnext(set, relation, vars)``: apply the (partial) relation on the given variables to the set.
- ``sylvan_relprev(relation, set, vars)``: apply the (partial) relation in reverse to the set; this computes predecessors but can also concatenate relations as follows: ``sylvan_relprev(rel1, rel2, rel1_vars)``.
- ``sylvan_closure(relation)``: compute the transitive closure of the given set recursively (see Matsunaga et al, DAC 1993)
See ``src/sylvan_bdd.h`` and ``src/sylvan_mtbdd.h`` for other operations on BDDs and MTBDDs.
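To illustrate the interleaved convention, a minimal breadth-first reachability sketch over a single (partial) relation; ``initial``, ``rel`` and ``rel_vars`` are assumed to be referenced BDDs (``examples/mc.c`` in this commit does the same on a larger scale):
.. code:: c

    BDD visited = mtbdd_false, frontier = mtbdd_false, successors = mtbdd_false;
    mtbdd_refs_pushptr(&visited);
    mtbdd_refs_pushptr(&frontier);
    mtbdd_refs_pushptr(&successors);

    visited = initial;
    frontier = initial;
    while (frontier != sylvan_false) {
        successors = sylvan_relnext(frontier, rel, rel_vars);  // successors of the frontier
        frontier = sylvan_diff(successors, visited);           // keep only the new states
        visited = sylvan_or(visited, frontier);
    }

    /* ... visited now contains all reachable states ... */
    mtbdd_refs_popptr(3);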
Custom leaves
~~~~~~~~~~~~~
See ``src/sylvan_mt.h`` and the example in ``src/sylvan_gmp.h`` and ``src/sylvan_gmp.c`` for custom leaves in MTBDDs.
Custom decision diagram operations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adding custom decision diagram operations is easy. Include ``sylvan_int.h`` for the internal functions. See ``sylvan_cache.h``
for how to use the operation cache.
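A rough sketch of a cached recursive operation, following the pattern used by ``examples/ldd2bdd.c`` and ``examples/mc.c`` in this commit (the task ``bdd_depth`` and its operation identifier are made up for illustration):
.. code:: c

    #include <sylvan_int.h>   // internal API, including the operation cache

    // Length of the longest path from a BDD to a leaf, memoized in the operation cache.
    TASK_1(uint64_t, bdd_depth, MTBDD, dd)
    {
        if (dd == mtbdd_false || dd == mtbdd_true) return 0;

        uint64_t result;
        if (cache_get3(201LL<<40, dd, 0, 0, &result)) return result;   // arbitrary operation id

        SPAWN(bdd_depth, mtbdd_gethigh(dd));               // compute the high cofactor in parallel
        uint64_t low_depth  = CALL(bdd_depth, mtbdd_getlow(dd));
        uint64_t high_depth = SYNC(bdd_depth);
        result = 1 + (low_depth > high_depth ? low_depth : high_depth);

        cache_put3(201LL<<40, dd, 0, 0, result);
        return result;
    }
Callers invoke the task with ``CALL(bdd_depth, dd)`` from a Lace context (for example after ``LACE_ME``). Tasks that return decision diagrams should additionally wrap SPAWN/SYNC in ``mtbdd_refs_spawn``/``mtbdd_refs_sync`` as described above.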
List decision diagrams
~~~~~~~~~~~~~~~~~~~~~~
See ``src/sylvan_ldd.h`` for operations on list decision diagrams.
File I/O
~~~~~~~~
You can store and load BDDs using a number of methods, which are documented in the header files ``sylvan_mtbdd.h`` and ``sylvan_ldd.h``.
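For example, the binary writer and reader used by the examples in this commit can be called as follows, given a referenced MTBDD ``f`` and a Lace context (the file name is a placeholder):
.. code:: c

    // Write one decision diagram in the binary format.
    FILE *out = fopen("set.bdd", "wb");
    mtbdd_writer_tobinary(out, &f, 1);
    fclose(out);

    // Read it back into a pointer that is already protected against garbage collection.
    MTBDD g = mtbdd_false;
    mtbdd_refs_pushptr(&g);
    FILE *in = fopen("set.bdd", "rb");
    if (mtbdd_reader_frombinary(in, &g, 1) != 0) { /* handle invalid file */ }
    fclose(in);

    /* ... use g ... */
    mtbdd_refs_popptr(1);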
Support for C++
~~~~~~~~~~~~~~~
See ``src/sylvan_obj.hpp`` for the C++ interface.
.. Adding custom decision diagram operations
.. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Table resizing
~~~~~~~~~~~~~~
During garbage collection, it is possible to resize the nodes table and
the cache. Sylvan provides two default implementations: an aggressive
version that resizes every time garbage collection is performed, and a
the cache. By default, Sylvan doubles the table sizes during every garbage
collection until the maximum table size has been reached. There is also a
less aggressive version that only resizes when at least half the table is
full. This can be configured in ``src/sylvan_config.h``. It is not
possible to decrease the size of the nodes table and the cache.

328
resources/3rdparty/sylvan/examples/ldd2bdd.c

@ -13,15 +13,15 @@ static int workers = 0; // autodetect
static int verbose = 0;
static char* model_filename = NULL; // filename of model
static char* bdd_filename = NULL; // filename of output BDD
static char* sizes = "22,27,21,26"; // default sizes
static int check_results = 0;
static int no_reachable = 0;
/* argp configuration */
static struct argp_option options[] =
{
{"workers", 'w', "<workers>", 0, "Number of workers (default=0: autodetect)", 0},
{"table-sizes", 1, "<tablesize>,<tablemax>,<cachesize>,<cachemax>", 0, "Sizes of nodes table and operation cache as powers of 2", 0},
{"check-results", 2, 0, 0, "Check new transition relations ", 0},
{"check-results", 2, 0, 0, "Check new transition relations", 0},
{"no-reachable", 1, 0, 0, "Do not write reachable states", 0},
{"verbose", 'v', 0, 0, "Set verbose", 0},
{0, 0, 0, 0, 0, 0}
};
@ -37,7 +37,7 @@ parse_opt(int key, char *arg, struct argp_state *state)
verbose = 1;
break;
case 1:
sizes = arg;
no_reachable = 1;
break;
case 2:
check_results = 1;
@ -58,67 +58,112 @@ parse_opt(int key, char *arg, struct argp_state *state)
static struct argp argp = { options, parse_opt, "<model> [<output-bdd>]", 0, 0, 0, 0 };
/* Globals */
/**
* Types (set and relation)
*/
typedef struct set
{
MDD mdd;
MDD proj;
MDD dd;
} *set_t;
typedef struct relation
{
MDD mdd;
MDD meta;
MDD dd;
MDD meta; // for relprod
int r_k, w_k, *r_proj, *w_proj;
} *rel_t;
static size_t vector_size; // size of vector
static int vector_size; // size of vector
static int next_count; // number of partitions of the transition relation
static rel_t *next; // each partition of the transition relation
static int actionbits = 0;
static int has_actions = 0;
#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); }
#define Abort(...) { fprintf(stderr, __VA_ARGS__); fprintf(stderr, "Abort at line %d!\n", __LINE__); exit(-1); }
/* Load a set from file */
#define set_load(f) CALL(set_load, f)
TASK_1(set_t, set_load, FILE*, f)
{
lddmc_serialize_fromfile(f);
size_t mdd;
size_t proj;
int size;
set_t set = (set_t)malloc(sizeof(struct set));
if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
if (fread(&proj, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
if (fread(&size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
int k;
if (fread(&k, sizeof(int), 1, f) != 1) Abort("Invalid input file!");
if (k != -1) Abort("Invalid input file!");
set_t set = (set_t)malloc(sizeof(struct set));
set->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd));
set->proj = lddmc_ref(lddmc_serialize_get_reversed(proj));
lddmc_serialize_fromfile(f);
size_t dd;
if (fread(&dd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!");
set->dd = lddmc_serialize_get_reversed(dd);
lddmc_protect(&set->dd);
return set;
}
/* Load a relation from file */
#define rel_load(f) CALL(rel_load, f)
TASK_1(rel_t, rel_load, FILE*, f)
#define rel_load_proj(f) CALL(rel_load_proj, f)
TASK_1(rel_t, rel_load_proj, FILE*, f)
{
lddmc_serialize_fromfile(f);
int r_k, w_k;
if (fread(&r_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
if (fread(&w_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
size_t mdd;
size_t meta;
rel_t rel = (rel_t)malloc(sizeof(struct relation));
rel->r_k = r_k;
rel->w_k = w_k;
rel->r_proj = (int*)malloc(sizeof(int[rel->r_k]));
rel->w_proj = (int*)malloc(sizeof(int[rel->w_k]));
if (fread(rel->r_proj, sizeof(int), rel->r_k, f) != (size_t)rel->r_k) Abort("Invalid file format.");
if (fread(rel->w_proj, sizeof(int), rel->w_k, f) != (size_t)rel->w_k) Abort("Invalid file format.");
int *r_proj = rel->r_proj;
int *w_proj = rel->w_proj;
/* Compute the meta */
uint32_t meta[vector_size*2+2];
memset(meta, 0, sizeof(uint32_t[vector_size*2+2]));
int r_i=0, w_i=0, i=0, j=0;
for (;;) {
int type = 0;
if (r_i < r_k && r_proj[r_i] == i) {
r_i++;
type += 1; // read
}
if (w_i < w_k && w_proj[w_i] == i) {
w_i++;
type += 2; // write
}
if (type == 0) meta[j++] = 0;
else if (type == 1) { meta[j++] = 3; }
else if (type == 2) { meta[j++] = 4; }
else if (type == 3) { meta[j++] = 1; meta[j++] = 2; }
if (r_i == r_k && w_i == w_k) {
meta[j++] = 5; // action label
meta[j++] = (uint32_t)-1;
break;
}
i++;
}
if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
if (fread(&meta, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
rel->meta = lddmc_cube((uint32_t*)meta, j);
rel->dd = lddmc_false;
rel_t rel = (rel_t)malloc(sizeof(struct relation));
rel->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd));
rel->meta = lddmc_ref(lddmc_serialize_get_reversed(meta));
lddmc_protect(&rel->meta);
lddmc_protect(&rel->dd);
return rel;
}
#define rel_load(f, rel) CALL(rel_load, f, rel)
VOID_TASK_2(rel_load, FILE*, f, rel_t, rel)
{
lddmc_serialize_fromfile(f);
size_t dd;
if (fread(&dd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!");
rel->dd = lddmc_serialize_get_reversed(dd);
}
/**
* Compute the highest value for each variable level.
* This method is called for the set of reachable states.
@ -199,7 +244,7 @@ VOID_TASK_3(compute_highest_action, MDD, dd, MDD, meta, uint32_t*, target)
*/
static uint64_t bdd_from_ldd_id;
#define bdd_from_ldd(dd, bits, firstvar) CALL(bdd_from_ldd, dd, bits, firstvar)
TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_dd, uint32_t, firstvar)
{
/* simple for leaves */
if (dd == lddmc_false) return mtbdd_false;
@ -208,16 +253,16 @@ TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
MTBDD result;
/* get from cache */
/* note: some assumptions about the encoding... */
if (cache_get3(bdd_from_ldd_id, dd, bits_mdd, firstvar, &result)) return result;
if (cache_get3(bdd_from_ldd_id, dd, bits_dd, firstvar, &result)) return result;
mddnode_t n = LDD_GETNODE(dd);
mddnode_t nbits = LDD_GETNODE(bits_mdd);
mddnode_t nbits = LDD_GETNODE(bits_dd);
int bits = (int)mddnode_getvalue(nbits);
/* spawn right, same bits_mdd and firstvar */
mtbdd_refs_spawn(SPAWN(bdd_from_ldd, mddnode_getright(n), bits_mdd, firstvar));
/* spawn right, same bits_dd and firstvar */
mtbdd_refs_spawn(SPAWN(bdd_from_ldd, mddnode_getright(n), bits_dd, firstvar));
/* call down, with next bits_mdd and firstvar */
/* call down, with next bits_dd and firstvar */
MTBDD down = CALL(bdd_from_ldd, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits);
/* encode current value */
@ -239,7 +284,7 @@ TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
mtbdd_refs_pop(2);
/* put in cache */
cache_put3(bdd_from_ldd_id, dd, bits_mdd, firstvar, result);
cache_put3(bdd_from_ldd_id, dd, bits_dd, firstvar, result);
return result;
}
@ -249,7 +294,7 @@ TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
*/
static uint64_t bdd_from_ldd_rel_id;
#define bdd_from_ldd_rel(dd, bits, firstvar, meta) CALL(bdd_from_ldd_rel, dd, bits, firstvar, meta)
TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD, meta)
TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_dd, uint32_t, firstvar, MDD, meta)
{
if (dd == lddmc_false) return mtbdd_false;
if (dd == lddmc_true) return mtbdd_true;
@ -266,11 +311,11 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
MTBDD result;
/* note: assumptions */
if (cache_get4(bdd_from_ldd_rel_id, dd, bits_mdd, firstvar, meta, &result)) return result;
if (cache_get4(bdd_from_ldd_rel_id, dd, bits_dd, firstvar, meta, &result)) return result;
const mddnode_t n = LDD_GETNODE(dd);
const mddnode_t nmeta = LDD_GETNODE(meta);
const mddnode_t nbits = LDD_GETNODE(bits_mdd);
const mddnode_t nbits = LDD_GETNODE(bits_dd);
const int bits = (int)mddnode_getvalue(nbits);
const uint32_t vmeta = mddnode_getvalue(nmeta);
@ -285,10 +330,10 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
assert(mddnode_getright(n) != mtbdd_true);
/* spawn right */
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_dd, firstvar, meta));
/* compute down with same bits / firstvar */
MTBDD down = bdd_from_ldd_rel(mddnode_getdown(n), bits_mdd, firstvar, mddnode_getdown(nmeta));
MTBDD down = bdd_from_ldd_rel(mddnode_getdown(n), bits_dd, firstvar, mddnode_getdown(nmeta));
mtbdd_refs_push(down);
/* encode read value */
@ -319,7 +364,7 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
/* spawn right */
assert(mddnode_getright(n) != mtbdd_true);
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_dd, firstvar, meta));
/* get recursive result */
MTBDD down = CALL(bdd_from_ldd_rel, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits, mddnode_getdown(nmeta));
@ -358,7 +403,7 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
assert(!mddnode_getcopy(n)); // do not process read copy nodes
/* spawn right */
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_dd, firstvar, meta));
/* get recursive result */
MTBDD down = CALL(bdd_from_ldd_rel, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits, mddnode_getdown(nmeta));
@ -402,7 +447,7 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
assert(vmeta <= 5);
}
cache_put4(bdd_from_ldd_rel_id, dd, bits_mdd, firstvar, meta, result);
cache_put4(bdd_from_ldd_rel_id, dd, bits_dd, firstvar, meta, result);
return result;
}
@ -411,7 +456,7 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
* Compute the BDD equivalent of the meta variable (to a variables cube)
*/
MTBDD
meta_to_bdd(MDD meta, MDD bits_mdd, uint32_t firstvar)
meta_to_bdd(MDD meta, MDD bits_dd, uint32_t firstvar)
{
if (meta == lddmc_false || meta == lddmc_true) return mtbdd_true;
@ -430,10 +475,10 @@ meta_to_bdd(MDD meta, MDD bits_mdd, uint32_t firstvar)
if (vmeta == 1) {
/* return recursive result, don't go down on bits */
return meta_to_bdd(mddnode_getdown(nmeta), bits_mdd, firstvar);
return meta_to_bdd(mddnode_getdown(nmeta), bits_dd, firstvar);
}
const mddnode_t nbits = LDD_GETNODE(bits_mdd);
const mddnode_t nbits = LDD_GETNODE(bits_dd);
const int bits = (int)mddnode_getvalue(nbits);
/* compute recursive result */
@ -450,16 +495,6 @@ meta_to_bdd(MDD meta, MDD bits_mdd, uint32_t firstvar)
return res;
}
static char*
to_h(double size, char *buf)
{
const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
int i = 0;
for (;size>1024;size/=1024) i++;
sprintf(buf, "%.*f %s", i, size, units[i]);
return buf;
}
VOID_TASK_0(gc_start)
{
printf("Starting garbage collection\n");
@ -475,37 +510,13 @@ main(int argc, char **argv)
{
argp_parse(&argp, argc, argv, 0, 0, 0);
// Parse table sizes
int tablesize, maxtablesize, cachesize, maxcachesize;
if (sscanf(sizes, "%d,%d,%d,%d", &tablesize, &maxtablesize, &cachesize, &maxcachesize) != 4) {
Abort("Invalid string for --table-sizes, try e.g. --table-sizes=23,28,22,27");
}
if (tablesize < 10 || maxtablesize < 10 || cachesize < 10 || maxcachesize < 10 ||
tablesize > 40 || maxtablesize > 40 || cachesize > 40 || maxcachesize > 40) {
Abort("Invalid string for --table-sizes, must be between 10 and 40");
}
if (tablesize > maxtablesize) {
Abort("Invalid string for --table-sizes, tablesize is larger than maxtablesize");
}
if (cachesize > maxcachesize) {
Abort("Invalid string for --table-sizes, cachesize is larger than maxcachesize");
}
// Report table sizes
char buf[32];
to_h((1ULL<<maxtablesize)*24+(1ULL<<maxcachesize)*36, buf);
printf("Sylvan allocates %s virtual memory for nodes table and operation cache.\n", buf);
to_h((1ULL<<tablesize)*24+(1ULL<<cachesize)*36, buf);
printf("Initial nodes table and operation cache requires %s.\n", buf);
// Init Lace
lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue
lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup
LACE_ME;
// Init Sylvan
sylvan_set_sizes(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
sylvan_set_limits(1LL<<30, 1, 10);
sylvan_init_package();
sylvan_init_ldd();
sylvan_init_mtbdd();
@ -523,34 +534,20 @@ main(int argc, char **argv)
if (f == NULL) Abort("Cannot open file '%s'!\n", model_filename);
// Read integers per vector
if (fread(&vector_size, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
if (fread(&vector_size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
// Read initial state
if (verbose) {
printf("Loading initial state... ");
fflush(stdout);
}
if (verbose) printf("Loading initial state.\n");
set_t initial = set_load(f);
if (verbose) printf("done.\n");
// Read number of transitions
if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
next = (rel_t*)malloc(sizeof(rel_t) * next_count);
// Read transitions
if (verbose) {
printf("Loading transition relations... ");
fflush(stdout);
}
int i;
for (i=0; i<next_count; i++) {
next[i] = rel_load(f);
if (verbose) {
printf("%d, ", i);
fflush(stdout);
}
}
if (verbose) printf("done.\n");
if (verbose) printf("Loading transition relations.\n");
for (int i=0; i<next_count; i++) next[i] = rel_load_proj(f);
for (int i=0; i<next_count; i++) rel_load(f, next[i]);
// Read whether reachable states are stored
int has_reachable = 0;
@ -558,16 +555,13 @@ main(int argc, char **argv)
if (has_reachable == 0) Abort("Input file missing reachable states!\n");
// Read reachable states
if (verbose) {
printf("Loading reachable states... ");
fflush(stdout);
}
if (verbose) printf("Loading reachable states.\n");
set_t states = set_load(f);
if (verbose) printf("done.\n");
// Read number of action labels
int action_labels_count = 0;
if (fread(&action_labels_count, sizeof(int), 1, f) != 1) Abort("Input file missing action label count!\n");
if (fread(&action_labels_count, sizeof(int), 1, f) != 1) action_labels_count = 0;
// ignore: Abort("Input file missing action label count!\n");
// Read action labels
char *action_labels[action_labels_count];
@ -587,11 +581,11 @@ main(int argc, char **argv)
// Report statistics
if (verbose) {
printf("%zu integers per state, %d transition groups\n", vector_size, next_count);
printf("%d integers per state, %d transition groups\n", vector_size, next_count);
printf("LDD nodes:\n");
printf("Initial states: %zu LDD nodes\n", lddmc_nodecount(initial->mdd));
for (i=0; i<next_count; i++) {
printf("Transition %d: %zu LDD nodes\n", i, lddmc_nodecount(next[i]->mdd));
printf("Initial states: %zu LDD nodes\n", lddmc_nodecount(initial->dd));
for (int i=0; i<next_count; i++) {
printf("Transition %d: %zu LDD nodes\n", i, lddmc_nodecount(next[i]->dd));
}
}
@ -600,28 +594,18 @@ main(int argc, char **argv)
// Compute highest value at each level (from reachable states)
uint32_t highest[vector_size];
for (size_t i=0; i<vector_size; i++) highest[i] = 0;
compute_highest(states->mdd, highest);
for (int i=0; i<vector_size; i++) highest[i] = 0;
compute_highest(states->dd, highest);
// Compute highest action label value (from transition relations)
uint32_t highest_action = 0;
for (int i=0; i<next_count; i++) {
compute_highest_action(next[i]->mdd, next[i]->meta, &highest_action);
}
// Report highest integers
/*
printf("Highest integer per level: ");
for (size_t i=0; i<vector_size; i++) {
if (i>0) printf(", ");
printf("%u", highest[i]);
compute_highest_action(next[i]->dd, next[i]->meta, &highest_action);
}
printf("\n");
*/
// Compute number of bits for each level
int bits[vector_size];
for (size_t i=0; i<vector_size; i++) {
for (int i=0; i<vector_size; i++) {
bits[i] = 0;
while (highest[i] != 0) {
bits[i]++;
@ -641,7 +625,7 @@ main(int argc, char **argv)
// Report number of bits
if (verbose) {
printf("Bits per level: ");
for (size_t i=0; i<vector_size; i++) {
for (int i=0; i<vector_size; i++) {
if (i>0) printf(", ");
printf("%d", bits[i]);
}
@ -650,15 +634,15 @@ main(int argc, char **argv)
}
// Compute bits MDD
MDD bits_mdd = lddmc_true;
for (size_t i=0; i<vector_size; i++) {
bits_mdd = lddmc_makenode(bits[vector_size-i-1], bits_mdd, lddmc_false);
MDD bits_dd = lddmc_true;
for (int i=0; i<vector_size; i++) {
bits_dd = lddmc_makenode(bits[vector_size-i-1], bits_dd, lddmc_false);
}
lddmc_ref(bits_mdd);
lddmc_ref(bits_dd);
// Compute total number of bits
int totalbits = 0;
for (size_t i=0; i<vector_size; i++) {
for (int i=0; i<vector_size; i++) {
totalbits += bits[i];
}
@ -677,28 +661,23 @@ main(int argc, char **argv)
if (f == NULL) Abort("Cannot open file '%s'!\n", bdd_filename);
// Write domain...
int vector_size = 1;
fwrite(&totalbits, sizeof(int), 1, f); // use number of bits as vector size
fwrite(&vector_size, sizeof(int), 1, f); // set each to 1
fwrite(&vector_size, sizeof(int), 1, f);
fwrite(bits, sizeof(int), vector_size, f);
fwrite(&actionbits, sizeof(int), 1, f);
// Write initial state...
MTBDD new_initial = bdd_from_ldd(initial->mdd, bits_mdd, 0);
assert((size_t)mtbdd_satcount(new_initial, totalbits) == (size_t)lddmc_satcount_cached(initial->mdd));
MTBDD new_initial = bdd_from_ldd(initial->dd, bits_dd, 0);
assert((size_t)mtbdd_satcount(new_initial, totalbits) == (size_t)lddmc_satcount_cached(initial->dd));
mtbdd_refs_push(new_initial);
{
size_t a = sylvan_serialize_add(new_initial);
size_t b = sylvan_serialize_add(state_vars);
size_t s = totalbits;
sylvan_serialize_tofile(f);
fwrite(&a, sizeof(size_t), 1, f);
fwrite(&s, sizeof(size_t), 1, f);
fwrite(&b, sizeof(size_t), 1, f);
int k = -1;
fwrite(&k, sizeof(int), 1, f);
mtbdd_writer_tobinary(f, &new_initial, 1);
}
// Custom operation that converts to BDD given number of bits for each level
MTBDD new_states = bdd_from_ldd(states->mdd, bits_mdd, 0);
assert((size_t)mtbdd_satcount(new_states, totalbits) == (size_t)lddmc_satcount_cached(states->mdd));
MTBDD new_states = bdd_from_ldd(states->dd, bits_dd, 0);
assert((size_t)mtbdd_satcount(new_states, totalbits) == (size_t)lddmc_satcount_cached(states->dd));
mtbdd_refs_push(new_states);
// Report size of BDD
@ -710,51 +689,52 @@ main(int argc, char **argv)
// Write number of transitions
fwrite(&next_count, sizeof(int), 1, f);
// Write transitions
// Write meta for each transition
for (int i=0; i<next_count; i++) {
fwrite(&next[i]->r_k, sizeof(int), 1, f);
fwrite(&next[i]->w_k, sizeof(int), 1, f);
fwrite(next[i]->r_proj, sizeof(int), next[i]->r_k, f);
fwrite(next[i]->w_proj, sizeof(int), next[i]->w_k, f);
}
// Write BDD for each transition
for (int i=0; i<next_count; i++) {
// Compute new transition relation
MTBDD new_rel = bdd_from_ldd_rel(next[i]->mdd, bits_mdd, 0, next[i]->meta);
MTBDD new_rel = bdd_from_ldd_rel(next[i]->dd, bits_dd, 0, next[i]->meta);
mtbdd_refs_push(new_rel);
mtbdd_writer_tobinary(f, &new_rel, 1);
// Compute new <variables> for the current transition relation
MTBDD new_vars = meta_to_bdd(next[i]->meta, bits_mdd, 0);
mtbdd_refs_push(new_vars);
// Report number of nodes
if (verbose) printf("Transition %d: %zu BDD nodes\n", i, mtbdd_nodecount(new_rel));
if (check_results) {
// Compute new <variables> for the current transition relation
MTBDD new_vars = meta_to_bdd(next[i]->meta, bits_dd, 0);
mtbdd_refs_push(new_vars);
// Test if the transition is correctly converted
MTBDD test = sylvan_relnext(new_states, new_rel, new_vars);
mtbdd_refs_push(test);
MDD succ = lddmc_relprod(states->mdd, next[i]->mdd, next[i]->meta);
MDD succ = lddmc_relprod(states->dd, next[i]->dd, next[i]->meta);
lddmc_refs_push(succ);
MTBDD test2 = bdd_from_ldd(succ, bits_mdd, 0);
MTBDD test2 = bdd_from_ldd(succ, bits_dd, 0);
if (test != test2) Abort("Conversion error!\n");
mtbdd_refs_pop(1);
lddmc_refs_pop(1);
mtbdd_refs_pop(2);
}
// Report number of nodes
if (verbose) printf("Transition %d: %zu BDD nodes\n", i, mtbdd_nodecount(new_rel));
size_t a = sylvan_serialize_add(new_rel);
size_t b = sylvan_serialize_add(new_vars);
sylvan_serialize_tofile(f);
fwrite(&a, sizeof(size_t), 1, f);
fwrite(&b, sizeof(size_t), 1, f);
mtbdd_refs_pop(1);
}
// Write reachable states
has_reachable = 1;
if (no_reachable) has_reachable = 0;
fwrite(&has_reachable, sizeof(int), 1, f);
{
size_t a = sylvan_serialize_add(new_states);
size_t b = sylvan_serialize_add(state_vars);
size_t s = totalbits;
sylvan_serialize_tofile(f);
fwrite(&a, sizeof(size_t), 1, f);
fwrite(&s, sizeof(size_t), 1, f);
fwrite(&b, sizeof(size_t), 1, f);
if (has_reachable) {
int k = -1;
fwrite(&k, sizeof(int), 1, f);
mtbdd_writer_tobinary(f, &new_states, 1);
}
mtbdd_refs_pop(1); // new_states
// Write action labels
fwrite(&action_labels_count, sizeof(int), 1, f);

834
resources/3rdparty/sylvan/examples/lddmc.c
File diff suppressed because it is too large

558
resources/3rdparty/sylvan/examples/mc.c

@ -10,15 +10,17 @@
#include <gperftools/profiler.h>
#endif
#include <getrss.h>
#include <sylvan.h>
#include <sylvan_table.h>
#include <sylvan_int.h>
/* Configuration */
/* Configuration (via argp) */
static int report_levels = 0; // report states at end of every level
static int report_table = 0; // report table size at end of every level
static int report_nodes = 0; // report number of nodes of BDDs
static int strategy = 1; // set to 1 = use PAR strategy; set to 0 = use BFS strategy
static int check_deadlocks = 0; // set to 1 to check for deadlocks
static int strategy = 2; // 0 = BFS, 1 = PAR, 2 = SAT, 3 = CHAINING
static int check_deadlocks = 0; // set to 1 to check for deadlocks on-the-fly (only bfs/par)
static int merge_relations = 0; // merge relations to 1 relation
static int print_transition_matrix = 0; // print transition relation matrix
static int workers = 0; // autodetect
@ -31,7 +33,7 @@ static char* profile_filename = NULL; // filename for profiling
static struct argp_option options[] =
{
{"workers", 'w', "<workers>", 0, "Number of workers (default=0: autodetect)", 0},
{"strategy", 's', "<bfs|par|sat>", 0, "Strategy for reachability (default=par)", 0},
{"strategy", 's', "<bfs|par|sat|chaining>", 0, "Strategy for reachability (default=sat)", 0},
#ifdef HAVE_PROFILER
{"profiler", 'p', "<filename>", 0, "Filename for profiling", 0},
#endif
@ -54,6 +56,7 @@ parse_opt(int key, char *arg, struct argp_state *state)
if (strcmp(arg, "bfs")==0) strategy = 0;
else if (strcmp(arg, "par")==0) strategy = 1;
else if (strcmp(arg, "sat")==0) strategy = 2;
else if (strcmp(arg, "chaining")==0) strategy = 3;
else argp_usage(state);
break;
case 4:
@ -93,7 +96,9 @@ parse_opt(int key, char *arg, struct argp_state *state)
}
static struct argp argp = { options, parse_opt, "<model>", 0, 0, 0, 0 };
/* Globals */
/**
* Types (set and relation)
*/
typedef struct set
{
BDD bdd;
@ -104,15 +109,19 @@ typedef struct relation
{
BDD bdd;
BDD variables; // all variables in the relation (used by relprod)
int r_k, w_k, *r_proj, *w_proj;
} *rel_t;
static int vector_size; // size of vector
static int statebits, actionbits; // number of bits for state, number of bits for action
static int bits_per_integer; // number of bits per integer in the vector
static int vectorsize; // size of vector in integers
static int *statebits; // number of bits for each state integer
static int actionbits; // number of bits for action label
static int totalbits; // total number of bits
static int next_count; // number of partitions of the transition relation
static rel_t *next; // each partition of the transition relation
/* Obtain current wallclock time */
/**
* Obtain current wallclock time
*/
static double
wctime()
{
@ -123,66 +132,171 @@ wctime()
static double t_start;
#define INFO(s, ...) fprintf(stdout, "[% 8.2f] " s, wctime()-t_start, ##__VA_ARGS__)
#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); }
#define Abort(...) { fprintf(stderr, __VA_ARGS__); fprintf(stderr, "Abort at line %d!\n", __LINE__); exit(-1); }
/* Load a set from file */
#define set_load(f) CALL(set_load, f)
TASK_1(set_t, set_load, FILE*, f)
static char*
to_h(double size, char *buf)
{
sylvan_serialize_fromfile(f);
const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
int i = 0;
for (;size>1024;size/=1024) i++;
sprintf(buf, "%.*f %s", i, size, units[i]);
return buf;
}
size_t set_bdd, set_vector_size, set_state_vars;
if ((fread(&set_bdd, sizeof(size_t), 1, f) != 1) ||
(fread(&set_vector_size, sizeof(size_t), 1, f) != 1) ||
(fread(&set_state_vars, sizeof(size_t), 1, f) != 1)) {
Abort("Invalid input file!\n");
}
static void
print_memory_usage(void)
{
char buf[32];
to_h(getCurrentRSS(), buf);
INFO("Memory usage: %s\n", buf);
}
/**
* Load a set from file
* The expected binary format:
* - int k : projection size, or -1 for full state
* - int[k] proj : k integers specifying the variables of the projection
* - MTBDD[1] BDD (mtbdd binary format)
*/
#define set_load(f) CALL(set_load, f)
TASK_1(set_t, set_load, FILE*, f)
{
// allocate set
set_t set = (set_t)malloc(sizeof(struct set));
set->bdd = sylvan_serialize_get_reversed(set_bdd);
set->variables = sylvan_support(sylvan_serialize_get_reversed(set_state_vars));
set->bdd = sylvan_false;
set->variables = sylvan_true;
sylvan_protect(&set->bdd);
sylvan_protect(&set->variables);
// read k
int k;
if (fread(&k, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
if (k == -1) {
// create variables for a full state vector
uint32_t vars[totalbits];
for (int i=0; i<totalbits; i++) vars[i] = 2*i;
set->variables = sylvan_set_fromarray(vars, totalbits);
} else {
// read proj
int proj[k];
if (fread(proj, sizeof(int), k, f) != (size_t)k) Abort("Invalid input file!\n");
// create variables for a short/projected state vector
uint32_t vars[totalbits];
uint32_t cv = 0;
int j = 0, n = 0;
for (int i=0; i<vectorsize && j<k; i++) {
if (i == proj[j]) {
for (int x=0; x<statebits[i]; x++) vars[n++] = (cv += 2) - 2;
j++;
} else {
cv += 2 * statebits[i];
}
}
set->variables = sylvan_set_fromarray(vars, n);
}
// read bdd
if (mtbdd_reader_frombinary(f, &set->bdd, 1) != 0) Abort("Invalid input file!\n");
return set;
}
/* Load a relation from file */
#define rel_load(f) CALL(rel_load, f)
TASK_1(rel_t, rel_load, FILE*, f)
/**
* Load a relation from file
* This part just reads the r_k, w_k, r_proj and w_proj variables.
*/
#define rel_load_proj(f) CALL(rel_load_proj, f)
TASK_1(rel_t, rel_load_proj, FILE*, f)
{
sylvan_serialize_fromfile(f);
size_t rel_bdd, rel_vars;
if ((fread(&rel_bdd, sizeof(size_t), 1, f) != 1) ||
(fread(&rel_vars, sizeof(size_t), 1, f) != 1)) {
Abort("Invalid input file!\n");
}
rel_t rel = (rel_t)malloc(sizeof(struct relation));
rel->bdd = sylvan_serialize_get_reversed(rel_bdd);
rel->variables = sylvan_support(sylvan_serialize_get_reversed(rel_vars));
int r_k, w_k;
if (fread(&r_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
if (fread(&w_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
rel->r_k = r_k;
rel->w_k = w_k;
int *r_proj = (int*)malloc(sizeof(int[r_k]));
int *w_proj = (int*)malloc(sizeof(int[w_k]));
if (fread(r_proj, sizeof(int), r_k, f) != (size_t)r_k) Abort("Invalid file format.");
if (fread(w_proj, sizeof(int), w_k, f) != (size_t)w_k) Abort("Invalid file format.");
rel->r_proj = r_proj;
rel->w_proj = w_proj;
rel->bdd = sylvan_false;
sylvan_protect(&rel->bdd);
/* Compute a_proj the union of r_proj and w_proj, and a_k the length of a_proj */
int a_proj[r_k+w_k];
int r_i = 0, w_i = 0, a_i = 0;
for (;r_i < r_k || w_i < w_k;) {
if (r_i < r_k && w_i < w_k) {
if (r_proj[r_i] < w_proj[w_i]) {
a_proj[a_i++] = r_proj[r_i++];
} else if (r_proj[r_i] > w_proj[w_i]) {
a_proj[a_i++] = w_proj[w_i++];
} else /* r_proj[r_i] == w_proj[w_i] */ {
a_proj[a_i++] = w_proj[w_i++];
r_i++;
}
} else if (r_i < r_k) {
a_proj[a_i++] = r_proj[r_i++];
} else if (w_i < w_k) {
a_proj[a_i++] = w_proj[w_i++];
}
}
const int a_k = a_i;
/* Compute all_variables, which are all variables the transition relation is defined on */
uint32_t all_vars[totalbits * 2];
uint32_t curvar = 0; // start with variable 0
int i=0, j=0, n=0;
for (; i<vectorsize && j<a_k; i++) {
if (i == a_proj[j]) {
for (int k=0; k<statebits[i]; k++) {
all_vars[n++] = curvar;
all_vars[n++] = curvar + 1;
curvar += 2;
}
j++;
} else {
curvar += 2 * statebits[i];
}
}
rel->variables = sylvan_set_fromarray(all_vars, n);
sylvan_protect(&rel->variables);
return rel;
}
/**
* Load a relation from file
* This part just reads the bdd of the relation
*/
#define rel_load(rel, f) CALL(rel_load, rel, f)
VOID_TASK_2(rel_load, rel_t, rel, FILE*, f)
{
if (mtbdd_reader_frombinary(f, &rel->bdd, 1) != 0) Abort("Invalid file format!\n");
}
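/*
 * Summary of the model file layout consumed by the loaders above and by main()
 * below (derived from the fread calls in this file):
 *   int vectorsize                               -- number of state integers
 *   int statebits[vectorsize]                    -- bits per state integer
 *   int actionbits                               -- bits for the action label
 *   initial states: int k, int proj[k], one BDD  -- mtbdd binary format
 *   int next_count                               -- number of transition relations
 *   per relation: int r_k, int w_k, int r_proj[r_k], int w_proj[w_k]
 *   per relation: one BDD                        -- mtbdd binary format
 *   (reachable states and action labels follow, but this tool ignores them)
 */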
/**
* Print a single example of a set to stdout
* Assumption: the example is a full vector and variables contains all state variables...
*/
#define print_example(example, variables) CALL(print_example, example, variables)
VOID_TASK_2(print_example, BDD, example, BDDSET, variables)
{
uint8_t str[vector_size * bits_per_integer];
uint8_t str[totalbits];
if (example != sylvan_false) {
sylvan_sat_one(example, variables, str);
int x=0;
printf("[");
for (int i=0; i<vector_size; i++) {
for (int i=0; i<vectorsize; i++) {
uint32_t res = 0;
for (int j=0; j<bits_per_integer; j++) {
if (str[bits_per_integer*i+j] == 1) res++;
res<<=1;
for (int j=0; j<statebits[i]; j++) {
if (str[x++] == 1) res++;
res <<= 1;
}
if (i>0) printf(",");
printf("%" PRIu32, res);
@ -191,7 +305,84 @@ VOID_TASK_2(print_example, BDD, example, BDDSET, variables)
}
}
/* Straight-forward implementation of parallel reduction */
/**
* Implementation of (parallel) saturation
* (assumes relations are ordered on first variable)
*/
TASK_2(BDD, go_sat, BDD, set, int, idx)
{
/* Terminal cases */
if (set == sylvan_false) return sylvan_false;
if (idx == next_count) return set;
/* Consult the cache */
BDD result;
const BDD _set = set;
if (cache_get3(200LL<<40, _set, idx, 0, &result)) return result;
mtbdd_refs_pushptr(&_set);
/**
* Possible improvement: cache more things (like intermediate results?)
* and chain-apply more of the current level before going deeper?
*/
/* Check if the relation should be applied */
const uint32_t var = sylvan_var(next[idx]->variables);
if (set == sylvan_true || var <= sylvan_var(set)) {
/* Count the number of relations starting here */
int count = idx+1;
while (count < next_count && var == sylvan_var(next[count]->variables)) count++;
count -= idx;
/*
* Compute until fixpoint:
* - SAT deeper
* - chain-apply all current level once
*/
BDD prev = sylvan_false;
BDD step = sylvan_false;
mtbdd_refs_pushptr(&set);
mtbdd_refs_pushptr(&prev);
mtbdd_refs_pushptr(&step);
while (prev != set) {
prev = set;
// SAT deeper
set = CALL(go_sat, set, idx+count);
// chain-apply all current level once
for (int i=0;i<count;i++) {
step = sylvan_relnext(set, next[idx+i]->bdd, next[idx+i]->variables);
set = sylvan_or(set, step);
step = sylvan_false; // unset, for gc
}
}
mtbdd_refs_popptr(3);
result = set;
} else {
/* Recursive computation */
mtbdd_refs_spawn(SPAWN(go_sat, sylvan_low(set), idx));
BDD high = mtbdd_refs_push(CALL(go_sat, sylvan_high(set), idx));
BDD low = mtbdd_refs_sync(SYNC(go_sat));
mtbdd_refs_pop(1);
result = sylvan_makenode(sylvan_var(set), low, high);
}
/* Store in cache */
cache_put3(200LL<<40, _set, idx, 0, result);
mtbdd_refs_popptr(1);
return result;
}
/**
* Wrapper for the Saturation strategy
*/
VOID_TASK_1(sat, set_t, set)
{
set->bdd = CALL(go_sat, set->bdd, 0);
}
/**
* Implement parallel strategy (that performs the relnext operations in parallel)
* This function does one level...
*/
TASK_5(BDD, go_par, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, deadlocks)
{
if (len == 1) {
@ -239,7 +430,9 @@ TASK_5(BDD, go_par, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, dea
}
}
/* PAR strategy, parallel strategy (operations called in parallel *and* parallelized by Sylvan) */
/**
* Implementation of the PAR strategy
*/
VOID_TASK_1(par, set_t, set)
{
BDD visited = set->bdd;
@ -301,7 +494,10 @@ VOID_TASK_1(par, set_t, set)
sylvan_unprotect(&deadlocks);
}
/* Sequential version of merge-reduction */
/**
* Implement sequential strategy (that performs the relnext operations one by one)
* This function does one level...
*/
TASK_5(BDD, go_bfs, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, deadlocks)
{
if (len == 1) {
@ -350,7 +546,9 @@ TASK_5(BDD, go_bfs, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, dea
}
}
/* BFS strategy, sequential strategy (but operations are parallelized by Sylvan) */
/**
* Implementation of the BFS strategy
*/
VOID_TASK_1(bfs, set_t, set)
{
BDD visited = set->bdd;
@ -412,26 +610,77 @@ VOID_TASK_1(bfs, set_t, set)
sylvan_unprotect(&deadlocks);
}
/**
* Implementation of the Chaining strategy (does not support deadlock detection)
*/
VOID_TASK_1(chaining, set_t, set)
{
BDD visited = set->bdd;
BDD next_level = visited;
BDD succ = sylvan_false;
bdd_refs_pushptr(&visited);
bdd_refs_pushptr(&next_level);
bdd_refs_pushptr(&succ);
int iteration = 1;
do {
// chain-apply each transition relation in turn
for (int i=0; i<next_count; i++) {
succ = sylvan_relnext(next_level, next[i]->bdd, next[i]->variables);
next_level = sylvan_or(next_level, succ);
succ = sylvan_false; // reset, for gc
}
// new = new - visited
// visited = visited + new
next_level = sylvan_diff(next_level, visited);
visited = sylvan_or(visited, next_level);
if (report_table && report_levels) {
size_t filled, total;
sylvan_table_usage(&filled, &total);
INFO("Level %d done, %'0.0f states explored, table: %0.1f%% full (%'zu nodes)\n",
iteration, sylvan_satcount(visited, set->variables),
100.0*(double)filled/total, filled);
} else if (report_table) {
size_t filled, total;
sylvan_table_usage(&filled, &total);
INFO("Level %d done, table: %0.1f%% full (%'zu nodes)\n",
iteration,
100.0*(double)filled/total, filled);
} else if (report_levels) {
INFO("Level %d done, %'0.0f states explored\n", iteration, sylvan_satcount(visited, set->variables));
} else {
INFO("Level %d done\n", iteration);
}
iteration++;
} while (next_level != sylvan_false);
set->bdd = visited;
bdd_refs_popptr(3);
}
/**
* Extend a transition relation to a larger domain (using s=s')
*/
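/*
 * Worked example (hypothetical): if a relation only mentions the bits of state
 * integer 1, extend_relation conjoins an "s=s'" constraint (unprimed variable 2i
 * equal to primed variable 2i+1) for every state bit i the relation does not
 * mention, so the merged relation leaves all other state bits unchanged.
 */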
#define extend_relation(rel, vars) CALL(extend_relation, rel, vars)
TASK_2(BDD, extend_relation, BDD, relation, BDDSET, variables)
TASK_2(BDD, extend_relation, MTBDD, relation, MTBDD, variables)
{
/* first determine which state BDD variables are in rel */
int has[statebits];
for (int i=0; i<statebits; i++) has[i] = 0;
BDDSET s = variables;
int has[totalbits];
for (int i=0; i<totalbits; i++) has[i] = 0;
MTBDD s = variables;
while (!sylvan_set_isempty(s)) {
BDDVAR v = sylvan_set_first(s);
if (v/2 >= (unsigned)statebits) break; // action labels
uint32_t v = sylvan_set_first(s);
if (v/2 >= (unsigned)totalbits) break; // action labels
has[v/2] = 1;
s = sylvan_set_next(s);
}
/* create "s=s'" for all variables not in rel */
BDD eq = sylvan_true;
for (int i=statebits-1; i>=0; i--) {
for (int i=totalbits-1; i>=0; i--) {
if (has[i]) continue;
BDD low = sylvan_makenode(2*i+1, eq, sylvan_false);
bdd_refs_push(low);
@ -463,148 +712,209 @@ TASK_2(BDD, big_union, int, first, int, count)
return result;
}
/**
* Print one row of the transition matrix (for the given relation)
*/
static void
print_matrix(BDD vars)
print_matrix_row(rel_t rel)
{
for (int i=0; i<vector_size; i++) {
if (sylvan_set_isempty(vars)) {
fprintf(stdout, "-");
} else {
BDDVAR next_s = 2*((i+1)*bits_per_integer);
if (sylvan_set_first(vars) < next_s) {
fprintf(stdout, "+");
for (;;) {
vars = sylvan_set_next(vars);
if (sylvan_set_isempty(vars)) break;
if (sylvan_set_first(vars) >= next_s) break;
}
} else {
fprintf(stdout, "-");
}
int r_i = 0, w_i = 0;
for (int i=0; i<vectorsize; i++) {
int s = 0;
if (r_i < rel->r_k && rel->r_proj[r_i] == i) {
s |= 1;
r_i++;
}
if (w_i < rel->w_k && rel->w_proj[w_i] == i) {
s |= 2;
w_i++;
}
if (s == 0) fprintf(stdout, "-");
else if (s == 1) fprintf(stdout, "r");
else if (s == 2) fprintf(stdout, "w");
else if (s == 3) fprintf(stdout, "+");
}
}
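/*
 * Worked example (hypothetical values): with vectorsize = 4 and a relation where
 * r_proj = {0, 2} and w_proj = {2}, this prints "r-+-": integer 0 is only read,
 * integer 2 is both read and written, integers 1 and 3 are untouched.
 */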
VOID_TASK_0(gc_start)
{
INFO("(GC) Starting garbage collection...\n");
char buf[32];
to_h(getCurrentRSS(), buf);
INFO("(GC) Starting garbage collection... (rss: %s)\n", buf);
}
VOID_TASK_0(gc_end)
{
INFO("(GC) Garbage collection done.\n");
char buf[32];
to_h(getCurrentRSS(), buf);
INFO("(GC) Garbage collection done. (rss: %s)\n", buf);
}
int
main(int argc, char **argv)
{
/**
* Parse command line, set locale, set startup time for INFO messages.
*/
argp_parse(&argp, argc, argv, 0, 0, 0);
setlocale(LC_NUMERIC, "en_US.utf-8");
t_start = wctime();
FILE *f = fopen(model_filename, "r");
if (f == NULL) {
fprintf(stderr, "Cannot open file '%s'!\n", model_filename);
return -1;
}
// Init Lace
lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue
lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup
/**
* Initialize Lace.
*
* First: setup with given number of workers (0 for autodetect) and some large size task queue.
* Second: start all worker threads with default settings.
* Third: setup local variables using the LACE_ME macro.
*/
lace_init(workers, 1000000);
lace_startup(0, NULL, NULL);
LACE_ME;
// Init Sylvan
// Nodes table size: 24 bytes * 2**N_nodes
// Cache table size: 36 bytes * 2**N_cache
// With: N_nodes=25, N_cache=24: 1.3 GB memory
sylvan_set_sizes(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
/**
* Initialize Sylvan.
*
* First: set memory limits
* - 2 GB memory, nodes table twice as big as cache, initial size halved 6x
* (that means it takes 6 garbage collections to get to the maximum nodes&cache size)
* Second: initialize package and subpackages
* Third: add hooks to report garbage collection
*/
sylvan_set_limits(2LL<<30, 1, 6);
sylvan_init_package();
sylvan_set_granularity(6); // granularity 6 is decent default value - 1 means "use cache for every operation"
sylvan_init_bdd();
sylvan_gc_hook_pregc(TASK(gc_start));
sylvan_gc_hook_postgc(TASK(gc_end));
/* Load domain information */
if ((fread(&vector_size, sizeof(int), 1, f) != 1) ||
(fread(&statebits, sizeof(int), 1, f) != 1) ||
(fread(&actionbits, sizeof(int), 1, f) != 1)) {
Abort("Invalid input file!\n");
}
/**
* Read the model from file
*/
bits_per_integer = statebits;
statebits *= vector_size;
/* Open the file */
FILE *f = fopen(model_filename, "r");
if (f == NULL) Abort("Cannot open file '%s'!\n", model_filename);
// Read initial state
/* Read domain data */
if (fread(&vectorsize, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
statebits = (int*)malloc(sizeof(int[vectorsize]));
if (fread(statebits, sizeof(int), vectorsize, f) != (size_t)vectorsize) Abort("Invalid input file!\n");
if (fread(&actionbits, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
totalbits = 0;
for (int i=0; i<vectorsize; i++) totalbits += statebits[i];
/* Read initial state */
set_t states = set_load(f);
// Read transitions
/* Read number of transition relations */
if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
next = (rel_t*)malloc(sizeof(rel_t) * next_count);
int i;
for (i=0; i<next_count; i++) {
next[i] = rel_load(f);
}
/* Read transition relations */
for (int i=0; i<next_count; i++) next[i] = rel_load_proj(f);
for (int i=0; i<next_count; i++) rel_load(next[i], f);
/* Done */
/* We ignore the reachable states and action labels that are stored after the relations */
/* Close the file */
fclose(f);
if (print_transition_matrix) {
for (i=0; i<next_count; i++) {
INFO("");
print_matrix(next[i]->variables);
fprintf(stdout, "\n");
/**
* Pre-processing and some statistics reporting
*/
if (strategy == 2 || strategy == 3) {
// for SAT and CHAINING, sort the transition relations (gnome sort because I like gnomes)
int i = 1, j = 2;
rel_t t;
while (i < next_count) {
rel_t *p = &next[i], *q = p-1;
if (sylvan_var((*q)->variables) > sylvan_var((*p)->variables)) {
t = *q;
*q = *p;
*p = t;
if (--i) continue;
}
i = j++;
}
}
// Report statistics
INFO("Read file '%s'\n", model_filename);
INFO("%d integers per state, %d bits per integer, %d transition groups\n", vector_size, bits_per_integer, next_count);
INFO("%d integers per state, %d bits per state, %d transition groups\n", vectorsize, totalbits, next_count);
if (merge_relations) {
BDD prime_variables = sylvan_set_empty();
for (int i=statebits-1; i>=0; i--) {
bdd_refs_push(prime_variables);
prime_variables = sylvan_set_add(prime_variables, i*2+1);
bdd_refs_pop(1);
/* if requested, print the transition matrix */
if (print_transition_matrix) {
for (int i=0; i<next_count; i++) {
INFO(""); // print time prefix
print_matrix_row(next[i]); // print row
fprintf(stdout, "\n"); // print newline
}
}
bdd_refs_push(prime_variables);
/* merge all relations to one big transition relation if requested */
if (merge_relations) {
BDD newvars = sylvan_set_empty();
bdd_refs_pushptr(&newvars);
for (int i=totalbits-1; i>=0; i--) {
newvars = sylvan_set_add(newvars, i*2+1);
newvars = sylvan_set_add(newvars, i*2);
}
INFO("Extending transition relations to full domain.\n");
for (int i=0; i<next_count; i++) {
next[i]->bdd = extend_relation(next[i]->bdd, next[i]->variables);
next[i]->variables = prime_variables;
next[i]->variables = newvars;
}
bdd_refs_popptr(1);
INFO("Taking union of all transition relations.\n");
next[0]->bdd = big_union(0, next_count);
for (int i=1; i<next_count; i++) {
next[i]->bdd = sylvan_false;
next[i]->variables = sylvan_true;
}
next_count = 1;
}
if (report_nodes) {
INFO("BDD nodes:\n");
INFO("Initial states: %zu BDD nodes\n", sylvan_nodecount(states->bdd));
for (i=0; i<next_count; i++) {
for (int i=0; i<next_count; i++) {
INFO("Transition %d: %zu BDD nodes\n", i, sylvan_nodecount(next[i]->bdd));
}
}
print_memory_usage();
#ifdef HAVE_PROFILER
if (profile_filename != NULL) ProfilerStart(profile_filename);
#endif
if (strategy == 1) {
if (strategy == 0) {
double t1 = wctime();
CALL(bfs, states);
double t2 = wctime();
INFO("BFS Time: %f\n", t2-t1);
} else if (strategy == 1) {
double t1 = wctime();
CALL(par, states);
double t2 = wctime();
INFO("PAR Time: %f\n", t2-t1);
} else {
} else if (strategy == 2) {
double t1 = wctime();
CALL(bfs, states);
CALL(sat, states);
double t2 = wctime();
INFO("BFS Time: %f\n", t2-t1);
INFO("SAT Time: %f\n", t2-t1);
} else if (strategy == 3) {
double t1 = wctime();
CALL(chaining, states);
double t2 = wctime();
INFO("CHAINING Time: %f\n", t2-t1);
} else {
Abort("Invalid strategy set?!\n");
}
#ifdef HAVE_PROFILER
if (profile_filename != NULL) ProfilerStop();
#endif
@ -615,6 +925,8 @@ main(int argc, char **argv)
INFO("Final states: %'zu BDD nodes\n", sylvan_nodecount(states->bdd));
}
print_memory_usage();
sylvan_stats_report(stdout);
return 0;

BIN
resources/3rdparty/sylvan/models/anderson.4.bdd

BIN
resources/3rdparty/sylvan/models/anderson.4.ldd

BIN
resources/3rdparty/sylvan/models/anderson.6.ldd

BIN
resources/3rdparty/sylvan/models/anderson.8.ldd

BIN
resources/3rdparty/sylvan/models/at.5.8-rgs.bdd

BIN
resources/3rdparty/sylvan/models/at.6.8-rgs.bdd

BIN
resources/3rdparty/sylvan/models/at.7.8-rgs.bdd

BIN
resources/3rdparty/sylvan/models/bakery.4.bdd

BIN
resources/3rdparty/sylvan/models/bakery.4.ldd

BIN
resources/3rdparty/sylvan/models/bakery.5.ldd

BIN
resources/3rdparty/sylvan/models/bakery.6.ldd

BIN
resources/3rdparty/sylvan/models/bakery.7.ldd

BIN
resources/3rdparty/sylvan/models/blocks.2.ldd

BIN
resources/3rdparty/sylvan/models/blocks.3.ldd

BIN
resources/3rdparty/sylvan/models/blocks.4.ldd

BIN
resources/3rdparty/sylvan/models/collision.4.9-rgs.bdd

BIN
resources/3rdparty/sylvan/models/collision.4.bdd

BIN
resources/3rdparty/sylvan/models/collision.4.ldd

BIN
resources/3rdparty/sylvan/models/collision.5.9-rgs.bdd

BIN
resources/3rdparty/sylvan/models/collision.5.bdd

BIN
resources/3rdparty/sylvan/models/collision.5.ldd

BIN
resources/3rdparty/sylvan/models/collision.6.bdd

BIN
resources/3rdparty/sylvan/models/collision.6.ldd

BIN
resources/3rdparty/sylvan/models/lifts.6.bdd

BIN
resources/3rdparty/sylvan/models/lifts.6.ldd

BIN
resources/3rdparty/sylvan/models/lifts.7.bdd

BIN
resources/3rdparty/sylvan/models/lifts.7.ldd

BIN
resources/3rdparty/sylvan/models/schedule_world.2.8-rgs.bdd

BIN
resources/3rdparty/sylvan/models/schedule_world.2.bdd

BIN
resources/3rdparty/sylvan/models/schedule_world.2.ldd

BIN
resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd

BIN
resources/3rdparty/sylvan/models/schedule_world.3.bdd

BIN
resources/3rdparty/sylvan/models/schedule_world.3.ldd

8
resources/3rdparty/sylvan/src/CMakeLists.txt

@ -37,6 +37,8 @@ set(HEADERS
sylvan_table.h
sylvan_tls.h
storm_wrapper.h
sylvan_bdd_storm.h
sylvan_mtbdd_storm.h
sylvan_storm_rational_function.h
sylvan_storm_rational_number.h
)
@ -47,10 +49,8 @@ option(BUILD_STATIC_LIBS "Enable/disable creation of static libraries" ON)
add_library(sylvan ${SOURCES})
find_package(GMP REQUIRED)
find_package(Hwloc REQUIRED)
include_directories(sylvan ${HWLOC_INCLUDE_DIR} ${GMP_INCLUDE_DIR})
target_link_libraries(sylvan m pthread ${GMP_LIBRARIES} ${HWLOC_LIBRARIES})
target_link_libraries(sylvan m pthread ${GMP_LIBRARIES})
if(UNIX AND NOT APPLE)
target_link_libraries(sylvan rt)
@ -60,12 +60,10 @@ option(SYLVAN_STATS "Collect statistics" OFF)
if(SYLVAN_STATS)
set_target_properties(sylvan PROPERTIES COMPILE_DEFINITIONS "SYLVAN_STATS")
endif()
set_target_properties(sylvan PROPERTIES COMPILE_DEFINITIONS "STORM_SILENCE_WARNINGS")
install(TARGETS sylvan DESTINATION "${CMAKE_INSTALL_LIBDIR}")
install(FILES ${HEADERS} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
# MODIFICATIONS NEEDED MADE FOR STORM
# We need to make sure that the binary is put into a folder that is independent of the

2
resources/3rdparty/sylvan/src/avl.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

406
resources/3rdparty/sylvan/src/lace.c

@ -15,6 +15,7 @@
* limitations under the License.
*/
#define _GNU_SOURCE
#include <errno.h> // for errno
#include <sched.h> // for sched_getaffinity
#include <stdio.h> // for fprintf
@ -26,28 +27,46 @@
#include <unistd.h>
#include <assert.h>
// work around for missing MAP_ANONYMOUS definition in sys/mman.h on
// older OS X versions
#if !(defined MAP_ANONYMOUS) && defined MAP_ANON
#define MAP_ANONYMOUS MAP_ANON
#endif
#include <lace.h>
#include <hwloc.h>
// public Worker data
static Worker **workers = NULL;
static size_t default_stacksize = 0; // set by lace_init
static size_t default_dqsize = 100000;
#if LACE_USE_HWLOC
#include <hwloc.h>
/**
* HWLOC information
*/
static hwloc_topology_t topo;
static unsigned int n_nodes, n_cores, n_pus;
#endif
/**
* (public) Worker data
*/
static Worker **workers = NULL;
/**
* Default sizes for program stack and task deque
*/
static size_t default_stacksize = 0; // 0 means "set by lace_init"
static size_t default_dqsize = 100000;
/**
* Verbosity flag, set with lace_set_verbosity
*/
static int verbosity = 0;
static int n_workers = 0;
static int enabled_workers = 0;
/**
* Number of workers and number of enabled/active workers
*/
static unsigned int n_workers = 0;
static unsigned int enabled_workers = 0;
/**
* Datastructure of the task deque etc for each worker.
* - first public cachelines (accessible via global "workers" variable)
* - then private cachelines
* - then the deque array
*/
typedef struct {
Worker worker_public;
char pad1[PAD(sizeof(Worker), LINE_SIZE)];
@ -56,26 +75,51 @@ typedef struct {
Task deque[];
} worker_data;
/**
* (Secret) holds pointers to the memory block allocated for each worker
*/
static worker_data **workers_memory = NULL;
/**
* Number of bytes allocated for each worker's worker data.
*/
static size_t workers_memory_size = 0;
// private Worker data (just for stats at end )
/**
* (Secret) holds pointer to private Worker data, just for stats collection at end
*/
static WorkerP **workers_p;
// set to 0 when quitting
/**
* Flag to signal all workers to quit.
*/
static int lace_quits = 0;
// for storing private Worker data
/**
* Thread-specific mechanism to access current worker data
*/
#ifdef __linux__ // use gcc thread-local storage (i.e. __thread variables)
static __thread WorkerP *current_worker;
#else
static pthread_key_t worker_key;
#endif
/**
* worker_attr used for creating threads
* - initialized by lace_init
* - used by lace_spawn_worker
*/
static pthread_attr_t worker_attr;
/**
* The condition/mutex pair for when the root thread sleeps until the end of the program
*/
static pthread_cond_t wait_until_done = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t wait_until_done_mutex = PTHREAD_MUTEX_INITIALIZER;
/**
* Data structure that contains the stack and stack size for each worker.
*/
struct lace_worker_init
{
void* stack;
@ -84,8 +128,14 @@ struct lace_worker_init
static struct lace_worker_init *workers_init;
/**
* Global newframe variable used for the implementation of NEWFRAME and TOGETHER
*/
lace_newframe_t lace_newframe;
/**
* Get the private Worker data of the current thread
*/
WorkerP*
lace_get_worker()
{
@ -96,14 +146,20 @@ lace_get_worker()
#endif
}
/**
* Find the head of the task deque, using the given private Worker data
*/
Task*
lace_get_head(WorkerP *self)
{
Task *dq = self->dq;
/* First check the first tasks linearly */
if (dq[0].thief == 0) return dq;
if (dq[1].thief == 0) return dq+1;
if (dq[2].thief == 0) return dq+2;
/* Then fast search for a low/high bound using powers of 2: 4, 8, 16... */
size_t low = 2;
size_t high = self->end - self->dq;
@ -118,6 +174,7 @@ lace_get_head(WorkerP *self)
}
}
/* Finally zoom in using binary search */
while (low < high) {
size_t mid = low + (high-low)/2;
if (dq[mid].thief == 0) high = mid;
@ -127,22 +184,27 @@ lace_get_head(WorkerP *self)
return dq+low;
}
size_t
/**
* Get the number of workers
*/
unsigned int
lace_workers()
{
return n_workers;
}
/**
* Get the default stack size (or 0 for automatically determine)
*/
size_t
lace_default_stacksize()
{
return default_stacksize;
}
#ifndef cas
#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new)))
#endif
/**
* If we are collecting PIE times, then we need some helper functions.
*/
#if LACE_PIE_TIMES
static uint64_t count_at_start, count_at_end;
static long long unsigned us_elapsed_timer;
@ -169,7 +231,9 @@ us_elapsed(void)
}
#endif
/* Barrier */
/**
* Lace barrier implementation that synchronizes on all currently enabled workers.
*/
typedef struct {
volatile int __attribute__((aligned(LINE_SIZE))) count;
volatile int __attribute__((aligned(LINE_SIZE))) leaving;
@ -178,11 +242,14 @@ typedef struct {
barrier_t lace_bar;
/**
* Enter the Lace barrier and wait until all workers have entered the Lace barrier.
*/
void
lace_barrier()
{
int wait = lace_bar.wait;
if (enabled_workers == __sync_add_and_fetch(&lace_bar.count, 1)) {
if ((int)enabled_workers == __sync_add_and_fetch(&lace_bar.count, 1)) {
lace_bar.count = 0;
lace_bar.leaving = enabled_workers;
lace_bar.wait = 1 - wait; // flip wait
@ -193,12 +260,18 @@ lace_barrier()
__sync_add_and_fetch(&lace_bar.leaving, -1);
}
/**
* Initialize the Lace barrier
*/
static void
lace_barrier_init()
{
memset(&lace_bar, 0, sizeof(barrier_t));
}
/**
* Destroy the Lace barrier (just wait until all are exited)
*/
static void
lace_barrier_destroy()
{
@ -206,9 +279,13 @@ lace_barrier_destroy()
while (lace_bar.leaving != 0) continue;
}
static void
/**
* For debugging purposes, check if memory is allocated on the correct memory nodes.
*/
static void __attribute__((unused))
lace_check_memory(void)
{
#if LACE_USE_HWLOC
// get our current worker
WorkerP *w = lace_get_worker();
void* mem = workers_memory[w->worker];
@ -229,14 +306,10 @@ lace_check_memory(void)
hwloc_membind_policy_t policy;
int res = hwloc_get_area_membind_nodeset(topo, mem, sizeof(worker_data), memlocation, &policy, HWLOC_MEMBIND_STRICT);
if (res == -1) {
#ifndef STORM_SILENCE_WARNINGS
fprintf(stderr, "Lace warning: hwloc_get_area_membind_nodeset returned -1!\n");
#endif
}
if (policy != HWLOC_MEMBIND_BIND) {
#ifndef STORM_SILENCE_WARNINGS
fprintf(stderr, "Lace warning: Lace worker memory not bound with BIND policy!\n");
#endif
}
#endif
@ -258,22 +331,27 @@ lace_check_memory(void)
hwloc_bitmap_free(cpuset);
hwloc_bitmap_free(cpunodes);
hwloc_bitmap_free(memlocation);
#endif
}
WorkerP *
lace_init_worker(int worker)
void
lace_pin_worker(void)
{
// Get our core
#if LACE_USE_HWLOC
// Get our worker
unsigned int worker = lace_get_worker()->worker;
// Get our core (hwloc object)
hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_CORE, worker % n_cores);
// Get our copy of the bitmap
hwloc_cpuset_t bmp = hwloc_bitmap_dup(pu->cpuset);
// Get number of PUs in set
// Get number of PUs in bitmap
int n = -1, count=0;
while ((n=hwloc_bitmap_next(bmp, n)) != -1) count++;
// Check if we actually have logical processors
// Check if we actually have any logical processors
if (count == 0) {
fprintf(stderr, "Lace error: trying to pin a worker on an empty core?\n");
exit(-1);
@ -293,18 +371,46 @@ lace_init_worker(int worker)
// Pin our thread...
if (hwloc_set_cpubind(topo, bmp, HWLOC_CPUBIND_THREAD) == -1) {
#ifndef STORM_SILENCE_WARNINGS
fprintf(stderr, "Lace warning: hwloc_set_cpubind returned -1!\n");
#endif
}
// Free allocated memory
// Free our copy of the bitmap
hwloc_bitmap_free(bmp);
// Get allocated memory
Worker *wt = &workers_memory[worker]->worker_public;
WorkerP *w = &workers_memory[worker]->worker_private;
// Pin the memory area (using the appropriate hwloc function)
#ifdef HWLOC_MEMBIND_BYNODESET
int res = hwloc_set_area_membind(topo, workers_memory[worker], workers_memory_size, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE | HWLOC_MEMBIND_BYNODESET);
#else
int res = hwloc_set_area_membind_nodeset(topo, workers_memory[worker], workers_memory_size, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE);
#endif
if (res != 0) {
fprintf(stderr, "Lace error: Unable to bind worker memory to node!\n");
}
// Check if everything is on the correct node
lace_check_memory();
#endif
}
void
lace_init_worker(unsigned int worker)
{
// Allocate our memory
workers_memory[worker] = mmap(NULL, workers_memory_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (workers_memory[worker] == MAP_FAILED) {
fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n");
exit(1);
}
// Set pointers
Worker *wt = workers[worker] = &workers_memory[worker]->worker_public;
WorkerP *w = workers_p[worker] = &workers_memory[worker]->worker_private;
w->dq = workers_memory[worker]->deque;
#ifdef __linux__
current_worker = w;
#else
pthread_setspecific(worker_key, w);
#endif
// Initialize public worker data
wt->dq = w->dq;
@ -318,7 +424,11 @@ lace_init_worker(int worker)
w->split = w->dq;
w->allstolen = 0;
w->worker = worker;
#if LACE_USE_HWLOC
w->pu = worker % n_cores;
#else
w->pu = -1;
#endif
w->enabled = 1;
if (workers_init[worker].stack != 0) {
w->stack_trigger = ((size_t)workers_init[worker].stack) + workers_init[worker].stacksize/20;
@ -328,20 +438,10 @@ lace_init_worker(int worker)
w->rng = (((uint64_t)rand())<<32 | rand());
#if LACE_COUNT_EVENTS
// Reset counters
// Initialize counters
{ int k; for (k=0; k<CTR_MAX; k++) w->ctr[k] = 0; }
#endif
// Set pointers
#ifdef __linux__
current_worker = w;
#else
pthread_setspecific(worker_key, w);
#endif
// Check if everything is on the correct node
lace_check_memory();
// Synchronize with others
lace_barrier();
@ -350,9 +450,14 @@ lace_init_worker(int worker)
w->level = 0;
#endif
return w;
if (worker == 0) {
lace_time_event(w, 1);
}
}
/**
* Some OSX systems do not implement pthread_barrier_t, so we provide an implementation here.
*/
#if defined(__APPLE__) && !defined(pthread_barrier_t)
typedef int pthread_barrierattr_t;
@ -442,13 +547,13 @@ lace_resume()
}
/**
* With set_workers, all workers 0..(N-1) are enabled and N..max are disabled.
* You can never disable the current worker or reduce the number of workers below 1.
* Disable worker <worker>.
* If the given worker is the current worker, this function does nothing.
*/
void
lace_disable_worker(int worker)
lace_disable_worker(unsigned int worker)
{
int self = lace_get_worker()->worker;
unsigned int self = lace_get_worker()->worker;
if (worker == self) return;
if (workers_p[worker]->enabled == 1) {
workers_p[worker]->enabled = 0;
@ -456,10 +561,14 @@ lace_disable_worker(int worker)
}
}
/**
* Enable worker <worker>.
* If the given worker is the current worker, this function does nothing.
*/
void
lace_enable_worker(int worker)
lace_enable_worker(unsigned int worker)
{
int self = lace_get_worker()->worker;
unsigned int self = lace_get_worker()->worker;
if (worker == self) return;
if (workers_p[worker]->enabled == 0) {
workers_p[worker]->enabled = 1;
@ -467,26 +576,38 @@ lace_enable_worker(int worker)
}
}
/**
* Enables all workers 0..(N-1) and disables workers N..max.
* This function _should_ be called by worker 0.
* Ignores the current worker if >= N.
* The number of workers is never reduced below 1.
*/
void
lace_set_workers(int workercount)
lace_set_workers(unsigned int workercount)
{
if (workercount < 1) workercount = 1;
if (workercount > n_workers) workercount = n_workers;
enabled_workers = workercount;
int self = lace_get_worker()->worker;
unsigned int self = lace_get_worker()->worker;
if (self >= workercount) workercount--;
int i;
for (i=0; i<n_workers; i++) {
for (unsigned int i=0; i<n_workers; i++) {
workers_p[i]->enabled = (i < workercount || i == self) ? 1 : 0;
}
}
int
/**
* Get the number of currently enabled workers.
*/
unsigned int
lace_enabled_workers()
{
return enabled_workers;
}
/**
* Simple random number generator (like rand) using the given seed.
* Used for thread-specific (scalable) random number generation.
*/
static inline uint32_t
rng(uint32_t *seed, int max)
{
@ -500,6 +621,9 @@ rng(uint32_t *seed, int max)
return next % max;
}
/**
* (Try to) steal and execute a task from a random worker.
*/
VOID_TASK_0(lace_steal_random)
{
Worker *victim = workers[(__lace_worker->worker + 1 + rng(&__lace_worker->seed, n_workers-1)) % n_workers];
@ -515,26 +639,19 @@ VOID_TASK_0(lace_steal_random)
}
}
VOID_TASK_1(lace_steal_random_loop, int*, quit)
{
while(!(*(volatile int*)quit)) {
lace_steal_random();
if (must_suspend) {
lace_barrier();
do {
pthread_barrier_wait(&suspend_barrier);
} while (__lace_worker->enabled == 0);
}
}
}
/**
* Variable to hold the main/root task.
*/
static lace_startup_cb main_cb;
/**
* Wrapper around the main/root task.
*/
static void*
lace_main_wrapper(void *arg)
{
lace_init_main();
lace_init_worker(0);
lace_pin_worker();
LACE_ME;
WRAP(main_cb, arg);
lace_exit();
@ -547,7 +664,10 @@ lace_main_wrapper(void *arg)
return NULL;
}
#define lace_steal_loop(quit) CALL(lace_steal_loop, quit)
/**
* Main Lace worker implementation.
* Steal from random victims until "quit" is set.
*/
VOID_TASK_1(lace_steal_loop, int*, quit)
{
// Determine who I am
@ -599,12 +719,12 @@ VOID_TASK_1(lace_steal_loop, int*, quit)
/**
* Initialize worker 0.
* Calls lace_init_worker and then signals the event.
*/
void
lace_init_main()
{
WorkerP * __attribute__((unused)) __lace_worker = lace_init_worker(0);
lace_time_event(__lace_worker, 1);
lace_init_worker(0);
}
/**
@ -614,16 +734,13 @@ lace_init_main()
* For worker 0, use lace_init_main
*/
void
lace_run_worker(int worker)
lace_run_worker(void)
{
// Initialize local datastructure
WorkerP *__lace_worker = lace_init_worker(worker);
Task *__lace_dq_head = __lace_worker->dq;
// Steal for a while
lace_steal_loop(&lace_quits);
// Run the steal loop
LACE_ME;
CALL(lace_steal_loop, &lace_quits);
// Time the quit event
// Time worker exit event
lace_time_event(__lace_worker, 9);
// Synchronize with lace_exit
@ -633,7 +750,10 @@ lace_run_worker(int worker)
static void*
lace_default_worker_thread(void* arg)
{
lace_run_worker((int)(size_t)arg);
int worker = (int)(size_t)arg;
lace_init_worker(worker);
lace_pin_worker();
lace_run_worker();
return NULL;
}
@ -646,6 +766,7 @@ lace_spawn_worker(int worker, size_t stacksize, void* (*fun)(void*), void* arg)
size_t pagesize = sysconf(_SC_PAGESIZE);
stacksize = (stacksize + pagesize - 1) & ~(pagesize - 1); // ceil(stacksize, pagesize)
#if LACE_USE_HWLOC
// Get our logical processor
hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, worker % n_pus);
@ -655,6 +776,9 @@ lace_spawn_worker(int worker, size_t stacksize, void* (*fun)(void*), void* arg)
fprintf(stderr, "Lace error: Unable to allocate memory for the pthread stack!\n");
exit(1);
}
#else
void *stack_location = mmap(NULL, stacksize + pagesize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
#endif
if (0 != mprotect(stack_location, pagesize, PROT_NONE)) {
fprintf(stderr, "Lace error: Unable to protect the allocated program stack with a guard page!\n");
@ -679,22 +803,23 @@ lace_spawn_worker(int worker, size_t stacksize, void* (*fun)(void*), void* arg)
return res;
}
static int
get_cpu_count()
{
int count = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU);
return count < 1 ? 1 : count;
}
/**
* Set the verbosity of Lace.
*/
void
lace_set_verbosity(int level)
{
verbosity = level;
}
/**
* Initialize Lace for work-stealing with <n> workers, where
* each worker gets a task deque with <dqsize> elements.
*/
void
lace_init(int _n_workers, size_t dqsize)
lace_init(unsigned int _n_workers, size_t dqsize)
{
#if LACE_USE_HWLOC
// Initialize topology and information about cpus
hwloc_topology_init(&topo);
hwloc_topology_load(topo);
@ -702,15 +827,23 @@ lace_init(int _n_workers, size_t dqsize)
n_nodes = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_NODE);
n_cores = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_CORE);
n_pus = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU);
#elif defined(sched_getaffinity)
cpu_set_t cs;
CPU_ZERO(&cs);
sched_getaffinity(0, sizeof(cs), &cs);
unsigned int n_pus = CPU_COUNT(&cs);
#else
unsigned int n_pus = sysconf(_SC_NPROCESSORS_ONLN);
#endif
// Initialize globals
n_workers = _n_workers;
if (n_workers == 0) n_workers = get_cpu_count();
n_workers = _n_workers == 0 ? n_pus : _n_workers;
enabled_workers = n_workers;
if (dqsize != 0) default_dqsize = dqsize;
else dqsize = default_dqsize;
lace_quits = 0;
// Create barrier for all workers
// Initialize Lace barrier
lace_barrier_init();
// Create suspend barrier
@ -724,37 +857,9 @@ lace_init(int _n_workers, size_t dqsize)
exit(1);
}
// Allocate memory for each worker
// Compute memory size for each worker
workers_memory_size = sizeof(worker_data) + sizeof(Task) * dqsize;
for (int i=0; i<n_workers; i++) {
workers_memory[i] = mmap(NULL, workers_memory_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (workers_memory[i] == MAP_FAILED) {
fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n");
exit(1);
}
workers[i] = &workers_memory[i]->worker_public;
workers_p[i] = &workers_memory[i]->worker_private;
}
// Pin allocated memory of each worker
for (int i=0; i<n_workers; i++) {
// Get our core
hwloc_obj_t core = hwloc_get_obj_by_type(topo, HWLOC_OBJ_CORE, i % n_cores);
// Pin the memory area
#ifdef HWLOC_MEMBIND_BYNODESET
int res = hwloc_set_area_membind(topo, workers_memory[i], workers_memory_size, core->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE | HWLOC_MEMBIND_BYNODESET);
#else
int res = hwloc_set_area_membind_nodeset(topo, workers_memory[i], workers_memory_size, core->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE);
#endif
if (res != 0) {
#ifndef STORM_SILENCE_WARNINGS
fprintf(stderr, "Lace error: Unable to bind worker memory to node!\n");
#endif
}
}
// Create pthread key
#ifndef __linux__
pthread_key_create(&worker_key, NULL);
@ -773,7 +878,11 @@ lace_init(int _n_workers, size_t dqsize)
}
if (verbosity) {
#if LACE_USE_HWLOC
fprintf(stderr, "Initializing Lace, %u nodes, %u cores, %u logical processors, %d workers.\n", n_nodes, n_cores, n_pus, n_workers);
#else
fprintf(stderr, "Initializing Lace, %u available cores, %d workers.\n", n_pus, n_workers);
#endif
}
// Prepare lace_init structure
@ -788,11 +897,18 @@ lace_init(int _n_workers, size_t dqsize)
#endif
}
/**
* Start the worker threads.
* If cb is set, then the current thread is suspended and Worker 0 is a new thread that starts with
* the given cb(arg) as the root task.
* If cb is not set, then the current thread is Worker 0 and this function returns.
*/
void
lace_startup(size_t stacksize, lace_startup_cb cb, void *arg)
{
if (stacksize == 0) stacksize = default_stacksize;
/* Report startup if verbose */
if (verbosity) {
if (cb != 0) {
fprintf(stderr, "Lace startup, creating %d worker threads with program stack %zu bytes.\n", n_workers, stacksize);
@ -803,22 +919,21 @@ lace_startup(size_t stacksize, lace_startup_cb cb, void *arg)
}
}
/* Spawn workers */
int i;
for (i=1; i<n_workers; i++) lace_spawn_worker(i, stacksize, 0, 0);
/* Spawn all other workers */
for (unsigned int i=1; i<n_workers; i++) lace_spawn_worker(i, stacksize, 0, 0);
if (cb != 0) {
/* If cb set, spawn worker 0 */
main_cb = cb;
lace_spawn_worker(0, stacksize, lace_main_wrapper, arg);
// Suspend this thread until cb returns
/* Suspend this thread until cb returns */
pthread_mutex_lock(&wait_until_done_mutex);
if (lace_quits == 0) pthread_cond_wait(&wait_until_done, &wait_until_done_mutex);
pthread_mutex_unlock(&wait_until_done_mutex);
} else {
// use this thread as worker and return control
/* If cb not set, use current thread as worker 0 */
lace_init_worker(0);
lace_time_event(lace_get_worker(), 1);
}
}
@ -826,6 +941,9 @@ lace_startup(size_t stacksize, lace_startup_cb cb, void *arg)
static uint64_t ctr_all[CTR_MAX];
#endif
/**
* Reset the counters of Lace.
*/
void
lace_count_reset()
{
@ -851,6 +969,9 @@ lace_count_reset()
#endif
}
/**
* Report counters to the given file.
*/
void
lace_count_report_file(FILE *file)
{
@ -948,11 +1069,15 @@ lace_count_report_file(FILE *file)
(void)file;
}
/**
* End Lace. All disabled threads are re-enabled, and then all Workers are signaled to quit.
* This function waits until all threads are done, then returns.
*/
void lace_exit()
{
lace_time_event(lace_get_worker(), 2);
// first suspend all other threads
// first suspend all enabled threads
lace_suspend();
// now enable all threads and tell them to quit
@ -1030,7 +1155,7 @@ VOID_TASK_2(lace_together_helper, Task*, t, volatile int*, finished)
for (;;) {
int f = *finished;
if (cas(finished, f, f-1)) break;
if (__sync_bool_compare_and_swap(finished, f, f-1)) break;
}
while (*finished != 0) STEAL_RANDOM();
@ -1086,7 +1211,7 @@ lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t)
t2->d.args.arg_1 = t;
t2->d.args.arg_2 = &done;
while (!cas(&lace_newframe.t, 0, &_t2)) lace_yield(__lace_worker, __lace_dq_head);
while (!__sync_bool_compare_and_swap(&lace_newframe.t, 0, &_t2)) lace_yield(__lace_worker, __lace_dq_head);
lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2);
}
@ -1113,10 +1238,13 @@ lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t)
compiler_barrier();
while (!cas(&lace_newframe.t, 0, &_s)) lace_yield(__lace_worker, __lace_dq_head);
while (!__sync_bool_compare_and_swap(&lace_newframe.t, 0, &_s)) lace_yield(__lace_worker, __lace_dq_head);
lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2);
}
/**
* Called by _SPAWN functions when the Task stack is full.
*/
void
lace_abort_stack_overflow(void)
{

424
resources/3rdparty/sylvan/src/lace.h

@ -23,40 +23,281 @@
#ifndef __LACE_H__
#define __LACE_H__
#ifdef __has_include
# if __has_include("lace_config.h")
# include <lace_config.h>
# else
# define LACE_PIE_TIMES 0
# define LACE_COUNT_TASKS 0
# define LACE_COUNT_STEALS 0
# define LACE_COUNT_SPLITS 0
# define LACE_USE_HWLOC 0
# endif
#endif
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* Some flags */
/**
* Using Lace.
*
* Optionally set the verbosity level with lace_set_verbosity.
* Then call lace_init to initialize the system.
* - lace_init(n_workers, deque_size);
* set both parameters to 0 for reasonable defaults, using all available cores.
*
* You can create Worker threads yourself or let Lace create threads with lace_startup.
*
* When creating threads yourself, call the following functions:
* - lace_init_worker to allocate and initialize the worker data structures
* this method returns when all workers have called lace_init_worker
* - lace_pin_worker (optional) to pin the thread and memory to a core
* The main worker can now start its root task. All other workers:
* - lace_run_worker to perform work-stealing until the main worker calls lace_exit
*
* When letting Lace create threads with lace_startup
* - Call lace_startup with a callback to create N threads.
* Returns after the callback has returned and all created threads are destroyed
* - Call lace_startup without a callback to create N-1 threads.
* Returns control to the caller. When lace_exit is called, all created threads are terminated.
*/
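/*
 * Hedged usage sketch (not part of this header): the lace_startup pattern without a
 * callback, as used by the examples shipped with this package. "some_root_task" is
 * an illustrative name.
 */
int main(void)
{
    lace_init(0, 0);              // autodetect workers, default deque size
    lace_startup(0, NULL, NULL);  // spawn the other workers; this thread becomes Worker 0
    LACE_ME;                      // set up __lace_worker / __lace_dq_head
    /* CALL(some_root_task); */   // run the root task(s) here
    lace_exit();                  // signal all workers to quit
    return 0;
}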
#ifndef LACE_DEBUG_PROGRAMSTACK /* Write to stderr when 95% program stack reached */
#define LACE_DEBUG_PROGRAMSTACK 0
#endif
/**
* Type definitions used in the functions below.
* - WorkerP contains the (private) Worker data
* - Task contains a single Task
*/
typedef struct _WorkerP WorkerP;
typedef struct _Task Task;
#ifndef LACE_LEAP_RANDOM /* Use random leaping when leapfrogging fails */
#define LACE_LEAP_RANDOM 0
#endif
/**
* The macro LACE_TYPEDEF_CB(typedefname, taskname, parametertypes) defines
* a Task for use as a callback function.
*/
#define LACE_TYPEDEF_CB(t, f, ...) typedef t (*f)(WorkerP *, Task *, ##__VA_ARGS__);
#ifndef LACE_PIE_TIMES /* Record time spent stealing and leapfrogging */
#define LACE_PIE_TIMES 0
#endif
/**
* The lace_startup_cb type for a void Task with one void* parameter.
*/
LACE_TYPEDEF_CB(void, lace_startup_cb, void*);
#ifndef LACE_COUNT_TASKS /* Count number of tasks executed */
#define LACE_COUNT_TASKS 0
#endif
/**
* Set verbosity level (0 = no startup messages, 1 = startup messages)
* Default level: 0
*/
void lace_set_verbosity(int level);
/**
* Initialize Lace for <n_workers> workers with a deque size of <dqsize> per worker.
* If <n_workers> is set to 0, automatically detects available cores.
* If <dqsize> is set to 0, uses a reasonable default value.
*/
void lace_init(unsigned int n_workers, size_t dqsize);
/**
* Let Lace create worker threads.
* If <stacksize> is set to 0, uses a reasonable default value.
* If cb, arg are set to 0, then the current thread is initialized as the main Worker (Worker 0).
*
* If cb,arg are set, then the current thread is suspended. A new thread is made for Worker 0 and
* the task cb with parameter arg is called; when cb returns, Lace is exited automatically.
*/
void lace_startup(size_t stacksize, lace_startup_cb, void* arg);
/**
* Initialize worker <worker>, allocating memory.
* If <worker> is 0, then the current thread is the main worker.
*/
void lace_init_worker(unsigned int worker);
/**
* Use hwloc to pin the current thread to a CPU and its allocated memory in the closest domain.
* Call this *after* lace_init_worker and *before* lace_run_worker.
*/
void lace_pin_worker(void);
/**
* Perform work-stealing until lace_exit is called.
*/
void lace_run_worker(void);
/**
* Steal a random task.
*/
#define lace_steal_random() CALL(lace_steal_random)
void lace_steal_random_CALL(WorkerP*, Task*);
/**
* Enter the Lace barrier. (all active workers must enter it before we can continue)
*/
void lace_barrier();
/**
* Suspend all workers except the current worker.
* May only be used when all other workers are idle.
*/
void lace_suspend();
/**
* Resume all workers.
*/
void lace_resume();
/**
* When all other workers are suspended, some workers can be disabled using the following functions.
* With set_workers, all workers 0..(N-1) are enabled and N..max are disabled.
* You can never disable the current worker or reduce the number of workers below 1.
* You cannot add workers.
*/
void lace_set_workers(unsigned int workercount);
/**
* Disable a suspended worker.
*/
void lace_disable_worker(unsigned int worker);
/**
* Enable a suspended worker.
*/
void lace_enable_worker(unsigned int worker);
/**
* Retrieve the number of enabled/active workers.
*/
unsigned int lace_enabled_workers();
/**
* Retrieve the number of Lace workers
*/
unsigned int lace_workers();
/**
* Retrieve the default program stack size
*/
size_t lace_default_stacksize();
/**
* Retrieve the current worker data.
*/
WorkerP *lace_get_worker();
/**
* Retrieve the current head of the deque
*/
Task *lace_get_head(WorkerP *);
/**
* Exit Lace.
* This function is automatically called when lace_startup is called with a callback.
* This function must be called to exit Lace when lace_startup is called without a callback.
*/
void lace_exit();
/**
* Create a pointer to a Task's main function.
*/
#define TASK(f) ( f##_CALL )
/**
* Call a Task's implementation (adds the Lace variables to the call)
*/
#define WRAP(f, ...) ( f((WorkerP *)__lace_worker, (Task *)__lace_dq_head, ##__VA_ARGS__) )
/**
* Sync a task.
*/
#define SYNC(f) ( __lace_dq_head--, WRAP(f##_SYNC) )
/**
* Sync a task, but if the task is not stolen, then do not execute it.
*/
#define DROP() ( __lace_dq_head--, WRAP(lace_drop) )
/**
* Spawn a task.
*/
#define SPAWN(f, ...) ( WRAP(f##_SPAWN, ##__VA_ARGS__), __lace_dq_head++ )
/**
* Directly execute a task.
*/
#define CALL(f, ...) ( WRAP(f##_CALL, ##__VA_ARGS__) )
/**
* Signal all workers to interrupt their current tasks and instead perform (a personal copy of) the given task.
*/
#define TOGETHER(f, ...) ( WRAP(f##_TOGETHER, ##__VA_ARGS__) )
/**
* Signal all workers to interrupt their current tasks and help the current thread with the given task.
*/
#define NEWFRAME(f, ...) ( WRAP(f##_NEWFRAME, ##__VA_ARGS__) )
/**
* (Try to) steal a task from a random worker.
*/
#define STEAL_RANDOM() ( CALL(lace_steal_random) )
/**
* Get the current worker id.
*/
#define LACE_WORKER_ID ( __lace_worker->worker )
/**
* Get the core where the current worker is pinned.
*/
#define LACE_WORKER_PU ( __lace_worker->pu )
/**
* Initialize local variables __lace_worker and __lace_dq_head which are required for most Lace functionality.
*/
#define LACE_ME WorkerP * __attribute__((unused)) __lace_worker = lace_get_worker(); Task * __attribute__((unused)) __lace_dq_head = lace_get_head(__lace_worker);
#ifndef LACE_COUNT_STEALS /* Count number of steals performed */
#define LACE_COUNT_STEALS 0
/**
* Check if current tasks must be interrupted, and if so, interrupt.
*/
void lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head);
#define YIELD_NEWFRAME() { if (unlikely((*(Task* volatile *)&lace_newframe.t) != NULL)) lace_yield(__lace_worker, __lace_dq_head); }
/**
* True if the given task is stolen, False otherwise.
*/
#define TASK_IS_STOLEN(t) ((size_t)t->thief > 1)
/**
* True if the given task is completed, False otherwise.
*/
#define TASK_IS_COMPLETED(t) ((size_t)t->thief == 2)
/**
* Retrieves a pointer to the result of the given task.
*/
#define TASK_RESULT(t) (&t->d[0])
/**
* Compute a random number, thread-local (so scalable)
*/
#define LACE_TRNG (__lace_worker->rng = 2862933555777941757ULL * __lace_worker->rng + 3037000493ULL)
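/*
 * Hedged sketch (illustrative, not part of this header): defining and running a task
 * with the macros above; TASK_1, SPAWN, SYNC and CALL are used as in the examples
 * shipped with this package, and "pfib" is an illustrative name.
 */
TASK_1(int, pfib, int, n)
{
    if (n < 2) return n;
    SPAWN(pfib, n - 1);           // push a subtask that idle workers may steal
    int b = CALL(pfib, n - 2);    // compute the other branch directly
    int a = SYNC(pfib);           // wait for (or run) the spawned subtask
    return a + b;
}
/* From a thread with a Lace context: LACE_ME; int f10 = CALL(pfib, 10); */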
/* Some flags that influence Lace behavior */
#ifndef LACE_DEBUG_PROGRAMSTACK /* Write to stderr when 95% program stack reached */
#define LACE_DEBUG_PROGRAMSTACK 0
#endif
#ifndef LACE_COUNT_SPLITS /* Count number of times the split point is moved */
#define LACE_COUNT_SPLITS 0
#ifndef LACE_LEAP_RANDOM /* Use random leaping when leapfrogging fails */
#define LACE_LEAP_RANDOM 1
#endif
#ifndef LACE_COUNT_EVENTS
#define LACE_COUNT_EVENTS (LACE_PIE_TIMES || LACE_COUNT_TASKS || LACE_COUNT_STEALS || LACE_COUNT_SPLITS)
#endif
/**
* Now follows the implementation of Lace
*/
/* Typical cacheline size of system architectures */
#ifndef LINE_SIZE
#define LINE_SIZE 64
@ -167,10 +408,6 @@ typedef enum {
CTR_MAX
} CTR_index;
struct _WorkerP;
struct _Worker;
struct _Task;
#define THIEF_EMPTY ((struct _Worker*)0x0)
#define THIEF_TASK ((struct _Worker*)0x1)
#define THIEF_COMPLETED ((struct _Worker*)0x2)
@ -215,7 +452,7 @@ typedef struct _WorkerP {
size_t stack_trigger; // for stack overflow detection
uint64_t rng; // my random seed (for lace_trng)
uint32_t seed; // my random seed (for lace_steal_random)
int16_t worker; // what is my worker id?
uint16_t worker; // what is my worker id?
uint8_t allstolen; // my allstolen
volatile int8_t enabled; // if this worker is enabled
@ -228,145 +465,10 @@ typedef struct _WorkerP {
int16_t pu; // my pu (for HWLOC)
} WorkerP;
#define LACE_TYPEDEF_CB(t, f, ...) typedef t (*f)(WorkerP *, Task *, ##__VA_ARGS__);
LACE_TYPEDEF_CB(void, lace_startup_cb, void*);
/**
* Using Lace.
*
* Optionally set the verbosity level with lace_set_verbosity.
* Call lace_init to allocate all data structures.
*
* You can create threads yourself or let Lace create threads with lace_startup.
*
* When creating threads yourself:
* - call lace_init_main for worker 0
* this method returns when all other workers have started
* - call lace_run_worker for all other workers
* workers perform work-stealing until worker 0 calls lace_exit
*
* When letting Lace create threads with lace_startup
* - calling with startup callback creates N threads and returns
* after the callback has returned, and all created threads are destroyed
* - calling without a startup callback creates N-1 threads and returns
* control to the caller. When lace_exit is called, all created threads are terminated.
*/
/**
* Set verbosity level (0 = no startup messages, 1 = startup messages)
* Default level: 0
*/
void lace_set_verbosity(int level);
/**
* Initialize master structures for Lace with <n_workers> workers
* and default deque size of <dqsize>.
* Does not create new threads.
* Tries to detect number of cpus, if n_workers equals 0.
*/
void lace_init(int n_workers, size_t dqsize);
/**
* After lace_init, start all worker threads.
* If cb,arg are set, suspend this thread, call cb(arg) in a new thread
* and exit Lace upon return
* Otherwise, the current thread is initialized as worker 0.
*/
void lace_startup(size_t stacksize, lace_startup_cb, void* arg);
/**
* Initialize worker 0. This method returns when all other workers are initialized
* (using lace_run_worker).
*
* When done, run lace_exit so all worker threads return from lace_run_worker.
*/
void lace_init_main();
/**
* Initialize the current thread as the Lace thread of worker <worker>, and perform
* work-stealing until lace_exit is called.
*
* For worker 0, call lace_init_main instead.
*/
void lace_run_worker(int worker);
/**
* Steal a random task.
*/
#define lace_steal_random() CALL(lace_steal_random)
void lace_steal_random_CALL(WorkerP*, Task*);
/**
* Barrier (all workers must enter it before progressing)
*/
void lace_barrier();
/**
* Suspend and resume all other workers.
* May only be used when all other workers are idle.
*/
void lace_suspend();
void lace_resume();
/**
* When all tasks are suspended, workers can be temporarily disabled.
* With set_workers, all workers 0..(N-1) are enabled and N..max are disabled.
* You can never disable the current worker or reduce the number of workers below 1.
* You cannot add workers.
*/
void lace_disable_worker(int worker);
void lace_enable_worker(int worker);
void lace_set_workers(int workercount);
int lace_enabled_workers();
/**
* Retrieve number of Lace workers
*/
size_t lace_workers();
/**
* Retrieve default program stack size
*/
size_t lace_default_stacksize();
/**
* Retrieve current worker.
*/
WorkerP *lace_get_worker();
/**
* Retrieve the current head of the deque
*/
Task *lace_get_head(WorkerP *);
/**
* Exit Lace. Automatically called when started with cb,arg.
*/
void lace_exit();
#define LACE_STOLEN ((Worker*)0)
#define LACE_BUSY ((Worker*)1)
#define LACE_NOWORK ((Worker*)2)
#define TASK(f) ( f##_CALL )
#define WRAP(f, ...) ( f((WorkerP *)__lace_worker, (Task *)__lace_dq_head, ##__VA_ARGS__) )
#define SYNC(f) ( __lace_dq_head--, WRAP(f##_SYNC) )
#define DROP() ( __lace_dq_head--, WRAP(lace_drop) )
#define SPAWN(f, ...) ( WRAP(f##_SPAWN, ##__VA_ARGS__), __lace_dq_head++ )
#define CALL(f, ...) ( WRAP(f##_CALL, ##__VA_ARGS__) )
#define TOGETHER(f, ...) ( WRAP(f##_TOGETHER, ##__VA_ARGS__) )
#define NEWFRAME(f, ...) ( WRAP(f##_NEWFRAME, ##__VA_ARGS__) )
#define STEAL_RANDOM() ( CALL(lace_steal_random) )
#define LACE_WORKER_ID ( __lace_worker->worker )
#define LACE_WORKER_PU ( __lace_worker->pu )
/* Use LACE_ME to initialize Lace variables, in case you want to call multiple Lace tasks */
#define LACE_ME WorkerP * __attribute__((unused)) __lace_worker = lace_get_worker(); Task * __attribute__((unused)) __lace_dq_head = lace_get_head(__lace_worker);
#define TASK_IS_STOLEN(t) ((size_t)t->thief > 1)
#define TASK_IS_COMPLETED(t) ((size_t)t->thief == 2)
#define TASK_RESULT(t) (&t->d[0])
#if LACE_DEBUG_PROGRAMSTACK
static inline void CHECKSTACK(WorkerP *w)
{
@ -402,14 +504,6 @@ extern lace_newframe_t lace_newframe;
void lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task);
void lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task);
void lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head);
#define YIELD_NEWFRAME() { if (unlikely((*(Task* volatile *)&lace_newframe.t) != NULL)) lace_yield(__lace_worker, __lace_dq_head); }
/**
* Compute a random number, thread-local
*/
#define LACE_TRNG (__lace_worker->rng = 2862933555777941757ULL * __lace_worker->rng + 3037000493ULL)
/**
* Make all tasks of the current worker shared.
*/

37
resources/3rdparty/sylvan/src/sylvan.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -17,24 +17,49 @@
/**
* Sylvan: parallel MTBDD/ListDD package.
*
* This is a multi-core implementation of MTBDDs with complement edges.
*
* This package requires the parallel work-stealing framework Lace.
* Lace must be initialized before initializing Sylvan.
* Include this file.
*/
#include <sylvan_config.h>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h> // for FILE
#include <stdlib.h> // for realloc
#include <unistd.h>
#include <pthread.h>
#if SYLVAN_STATS
#ifdef __MACH__
#include <mach/mach_time.h>
#else
#include <time.h>
#endif
#endif
/**
* Sylvan header files outside the namespace
*/
#include <lace.h>
#include <sylvan_tls.h>
#ifdef __cplusplus
//namespace sylvan {
#endif
/**
* Sylvan header files inside the namespace
*/
#include <sylvan_common.h>
#include <sylvan_stats.h>
#include <sylvan_mt.h>
#include <sylvan_mtbdd.h>
#include <sylvan_bdd.h>
#include <sylvan_ldd.h>
#ifdef __cplusplus
//}
#endif
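/*
 * Hedged initialization sketch (not part of this header): the order follows the
 * examples shipped with this package; the function name and the arguments to
 * sylvan_set_limits are illustrative, and sylvan_quit is the assumed shutdown
 * counterpart of sylvan_init_package.
 */
void example_setup(void)
{
    lace_init(0, 0);                  // Lace must be initialized before Sylvan
    lace_startup(0, NULL, NULL);
    LACE_ME;                          // needed by the BDD operations later on
    sylvan_set_limits(2LL<<30, 1, 6); // memory cap, table:cache ratio, initial size ratio
    sylvan_init_package();
    sylvan_init_bdd();                // the MTBDD/LDD subpackages have analogous initializers
    /* ... build and manipulate decision diagrams ... */
    sylvan_quit();                    // free Sylvan's data structures
    lace_exit();
}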

22
resources/3rdparty/sylvan/src/sylvan_bdd.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,20 +15,12 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <sylvan_int.h>
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan.h>
#include <sylvan_int.h>
#include <avl.h>
static int granularity = 1; // default
@ -45,12 +37,6 @@ sylvan_get_granularity()
return granularity;
}
BDD
sylvan_ithvar(BDDVAR level)
{
return sylvan_makenode(level, sylvan_false, sylvan_true);
}
/**
* Implementation of unary, binary and if-then-else operators.
*/
@ -1834,10 +1820,10 @@ TASK_IMPL_3(BDD, sylvan_union_cube, BDD, bdd, BDDSET, vars, uint8_t *, cube)
} else if (v > n_level) {
BDD high = node_high(bdd, n);
BDD low = node_low(bdd, n);
SPAWN(sylvan_union_cube, high, vars, cube);
bdd_refs_spawn(SPAWN(sylvan_union_cube, high, vars, cube));
BDD new_low = sylvan_union_cube(low, vars, cube);
bdd_refs_push(new_low);
BDD new_high = SYNC(sylvan_union_cube);
BDD new_high = bdd_refs_sync(SYNC(sylvan_union_cube));
bdd_refs_pop(1);
if (new_low != low || new_high != high) {
result = sylvan_makenode(n_level, new_low, new_high);

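Since the hunk above interleaves removed and added lines, the resulting fragment reads, in one piece, as follows; it shows the reference discipline this update introduces for internal references: the pending task is registered with bdd_refs_spawn and its result is retrieved through bdd_refs_sync, so both halves of the recursion stay protected if garbage collection runs while the spawned task is stolen.

bdd_refs_spawn(SPAWN(sylvan_union_cube, high, vars, cube));   /* register the pending task for GC marking */
BDD new_low = sylvan_union_cube(low, vars, cube);
bdd_refs_push(new_low);                                       /* protect the local intermediate result    */
BDD new_high = bdd_refs_sync(SYNC(sylvan_union_cube));        /* collect the spawned result, still marked */
bdd_refs_pop(1);
if (new_low != low || new_high != high) {
    result = sylvan_makenode(n_level, new_low, new_high);
}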
41
resources/3rdparty/sylvan/src/sylvan_bdd.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,10 +23,19 @@
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* For strictly non-MT BDDs */
#define sylvan_isconst(bdd) (bdd == sylvan_true || bdd == sylvan_false)
#define sylvan_isnode(bdd) (bdd != sylvan_true && bdd != sylvan_false)
static inline int
sylvan_isconst(MTBDD bdd)
{
return bdd == mtbdd_true || bdd == mtbdd_false ? 1 : 0;
}
static inline int
sylvan_isnode(MTBDD bdd)
{
return bdd != mtbdd_true && bdd != mtbdd_false ? 1 : 0;
}
/**
* Granularity (BDD only) determines usage of the operation cache.
@ -43,15 +52,16 @@ extern "C" {
void sylvan_set_granularity(int granularity);
int sylvan_get_granularity(void);
/* Create a BDD representing just <var> or the negation of <var> */
BDD sylvan_ithvar(BDDVAR var);
#define sylvan_nithvar(var) sylvan_not(sylvan_ithvar(var))
/*
* Unary, binary and if-then-else operations.
* These operations are all implemented by NOT, AND and XOR.
*/
#define sylvan_not(a) (((BDD)a)^sylvan_complement)
static inline BDD
sylvan_not(BDD a)
{
return a ^ sylvan_complement;
}
TASK_DECL_4(BDD, sylvan_ite, BDD, BDD, BDD, BDDVAR);
#define sylvan_ite(a,b,c) (CALL(sylvan_ite,a,b,c,0))
TASK_DECL_3(BDD, sylvan_and, BDD, BDD, BDDVAR);
@ -68,6 +78,13 @@ TASK_DECL_3(BDD, sylvan_xor, BDD, BDD, BDDVAR);
#define sylvan_diff(a,b) sylvan_and(a,sylvan_not(b))
#define sylvan_less(a,b) sylvan_and(sylvan_not(a),b)
/* Create a BDD representing just <var> or the negation of <var> */
static inline BDD
sylvan_nithvar(uint32_t var)
{
return sylvan_not(sylvan_ithvar(var));
}
/**
* Existential and universal quantification.
*/
@ -265,7 +282,11 @@ sylvan_fprint(FILE *f, BDD bdd)
sylvan_serialize_totext(f);
}
#define sylvan_print(dd) sylvan_fprint(stdout, dd)
static void __attribute__((unused))
sylvan_print(BDD bdd)
{
return sylvan_fprint(stdout, bdd);
}
#include "sylvan_bdd_storm.h"

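A small sketch of the inline BDD API above; the demo function and the chosen variable indices are hypothetical, and it assumes Lace and Sylvan's BDD layer have been initialized, since sylvan_diff expands to a Lace CALL:

void demo_x0_and_x1(void)                 /* hypothetical */
{
    LACE_ME;                              /* sylvan_and/sylvan_diff are Lace tasks under the hood */
    BDD x0  = sylvan_ithvar(0);           /* variable 0                                           */
    BDD nx1 = sylvan_nithvar(1);          /* sylvan_not(sylvan_ithvar(1))                         */
    BDD d   = sylvan_diff(x0, nx1);       /* x0 AND NOT(NOT x1)  ==  x0 AND x1                    */
    sylvan_print(d);                      /* sylvan_fprint(stdout, d)                             */
}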
8
resources/3rdparty/sylvan/src/sylvan_bdd_storm.h

@ -1,6 +1,14 @@
#ifdef __cplusplus
extern "C" {
#endif
#define bdd_isnegated(dd) ((dd & sylvan_complement) ? 1 : 0)
#define bdd_regular(dd) (dd & ~sylvan_complement)
#define bdd_isterminal(dd) (dd == sylvan_false || dd == sylvan_true)
TASK_DECL_3(BDD, sylvan_existsRepresentative, BDD, BDD, BDDVAR);
#define sylvan_existsRepresentative(a, vars) (CALL(sylvan_existsRepresentative, a, vars, 0))
#ifdef __cplusplus
}
#endif

13
resources/3rdparty/sylvan/src/sylvan_cache.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,19 +15,20 @@
* limitations under the License.
*/
#include <sylvan_int.h>
#include <errno.h> // for errno
#include <stdio.h> // for fprintf
#include <stdint.h> // for uint32_t etc
#include <stdlib.h> // for exit
#include <string.h> // for strerror
#include <sys/mman.h> // for mmap
#include <sylvan_cache.h>
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#ifndef CACHE_MASK
#define CACHE_MASK 1
#endif
#ifndef compiler_barrier
#define compiler_barrier() { asm volatile("" ::: "memory"); }
#endif

14
resources/3rdparty/sylvan/src/sylvan_cache.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,21 +15,15 @@
* limitations under the License.
*/
#include <sylvan_config.h>
/* Do not include this file directly. Instead, include sylvan_int.h */
#include <stdint.h> // for uint32_t etc
#ifndef CACHE_H
#define CACHE_H
#ifndef SYLVAN_CACHE_H
#define SYLVAN_CACHE_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#ifndef CACHE_MASK
#define CACHE_MASK 1
#endif
/**
* Operation cache
*

2
resources/3rdparty/sylvan/src/sylvan_common.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

4
resources/3rdparty/sylvan/src/sylvan_common.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,6 +15,8 @@
* limitations under the License.
*/
/* Do not include this file directly. Instead, include sylvan.h */
#ifndef SYLVAN_COMMON_H
#define SYLVAN_COMMON_H

7
resources/3rdparty/sylvan/src/sylvan_gmp.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,14 +16,11 @@
*/
#include <sylvan_int.h>
#include <sylvan_gmp.h>
#include <assert.h>
#include <math.h>
#include <string.h>
#include <sylvan_gmp.h>
#include <gmp.h>
static uint32_t gmp_type;
/**

10
resources/3rdparty/sylvan/src/sylvan_gmp.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,13 +19,14 @@
* This is an implementation of GMP mpq custom leaves of MTBDDs
*/
#ifndef SYLVAN_GMP_H
#define SYLVAN_GMP_H
#include <sylvan.h>
#include <gmp.h>
#ifndef SYLVAN_GMP_H
#define SYLVAN_GMP_H
#ifdef __cplusplus
namespace sylvan {
extern "C" {
#endif /* __cplusplus */
@ -185,6 +186,7 @@ TASK_DECL_2(MTBDD, gmp_strict_threshold_d, MTBDD, double);
#ifdef __cplusplus
}
}
#endif /* __cplusplus */
#endif

134
resources/3rdparty/sylvan/src/sylvan_int.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,14 +16,22 @@
*/
/**
* Internals of Sylvan
* Sylvan: parallel MTBDD/ListDD package.
* Include this file for access to internals.
*/
#include <sylvan.h>
#ifdef __cplusplus
namespace sylvan {
#endif
/**
* Sylvan internal header files inside the namespace
*/
#include <sylvan_cache.h>
#include <sylvan_table.h>
#include <sylvan_stats.h>
#ifndef SYLVAN_INT_H
#define SYLVAN_INT_H
@ -42,68 +50,68 @@ extern llmsset_t nodes;
*/
// BDD operations
#define CACHE_BDD_ITE (0LL<<40)
#define CACHE_BDD_AND (1LL<<40)
#define CACHE_BDD_XOR (2LL<<40)
#define CACHE_BDD_EXISTS (3LL<<40)
#define CACHE_BDD_PROJECT (4LL<<40)
#define CACHE_BDD_AND_EXISTS (5LL<<40)
#define CACHE_BDD_AND_PROJECT (6LL<<40)
#define CACHE_BDD_RELNEXT (7LL<<40)
#define CACHE_BDD_RELPREV (8LL<<40)
#define CACHE_BDD_SATCOUNT (9LL<<40)
#define CACHE_BDD_COMPOSE (10LL<<40)
#define CACHE_BDD_RESTRICT (11LL<<40)
#define CACHE_BDD_CONSTRAIN (12LL<<40)
#define CACHE_BDD_CLOSURE (13LL<<40)
#define CACHE_BDD_ISBDD (14LL<<40)
#define CACHE_BDD_SUPPORT (15LL<<40)
#define CACHE_BDD_PATHCOUNT (16LL<<40)
static const uint64_t CACHE_BDD_ITE = (0LL<<40);
static const uint64_t CACHE_BDD_AND = (1LL<<40);
static const uint64_t CACHE_BDD_XOR = (2LL<<40);
static const uint64_t CACHE_BDD_EXISTS = (3LL<<40);
static const uint64_t CACHE_BDD_PROJECT = (4LL<<40);
static const uint64_t CACHE_BDD_AND_EXISTS = (5LL<<40);
static const uint64_t CACHE_BDD_AND_PROJECT = (6LL<<40);
static const uint64_t CACHE_BDD_RELNEXT = (7LL<<40);
static const uint64_t CACHE_BDD_RELPREV = (8LL<<40);
static const uint64_t CACHE_BDD_SATCOUNT = (9LL<<40);
static const uint64_t CACHE_BDD_COMPOSE = (10LL<<40);
static const uint64_t CACHE_BDD_RESTRICT = (11LL<<40);
static const uint64_t CACHE_BDD_CONSTRAIN = (12LL<<40);
static const uint64_t CACHE_BDD_CLOSURE = (13LL<<40);
static const uint64_t CACHE_BDD_ISBDD = (14LL<<40);
static const uint64_t CACHE_BDD_SUPPORT = (15LL<<40);
static const uint64_t CACHE_BDD_PATHCOUNT = (16LL<<40);
// MDD operations
#define CACHE_MDD_RELPROD (20LL<<40)
#define CACHE_MDD_MINUS (21LL<<40)
#define CACHE_MDD_UNION (22LL<<40)
#define CACHE_MDD_INTERSECT (23LL<<40)
#define CACHE_MDD_PROJECT (24LL<<40)
#define CACHE_MDD_JOIN (25LL<<40)
#define CACHE_MDD_MATCH (26LL<<40)
#define CACHE_MDD_RELPREV (27LL<<40)
#define CACHE_MDD_SATCOUNT (28LL<<40)
#define CACHE_MDD_SATCOUNTL1 (29LL<<40)
#define CACHE_MDD_SATCOUNTL2 (30LL<<40)
static const uint64_t CACHE_MDD_RELPROD = (20LL<<40);
static const uint64_t CACHE_MDD_MINUS = (21LL<<40);
static const uint64_t CACHE_MDD_UNION = (22LL<<40);
static const uint64_t CACHE_MDD_INTERSECT = (23LL<<40);
static const uint64_t CACHE_MDD_PROJECT = (24LL<<40);
static const uint64_t CACHE_MDD_JOIN = (25LL<<40);
static const uint64_t CACHE_MDD_MATCH = (26LL<<40);
static const uint64_t CACHE_MDD_RELPREV = (27LL<<40);
static const uint64_t CACHE_MDD_SATCOUNT = (28LL<<40);
static const uint64_t CACHE_MDD_SATCOUNTL1 = (29LL<<40);
static const uint64_t CACHE_MDD_SATCOUNTL2 = (30LL<<40);
// MTBDD operations
#define CACHE_MTBDD_APPLY (40LL<<40)
#define CACHE_MTBDD_UAPPLY (41LL<<40)
#define CACHE_MTBDD_ABSTRACT (42LL<<40)
#define CACHE_MTBDD_ITE (43LL<<40)
#define CACHE_MTBDD_AND_ABSTRACT_PLUS (44LL<<40)
#define CACHE_MTBDD_AND_ABSTRACT_MAX (45LL<<40)
#define CACHE_MTBDD_SUPPORT (46LL<<40)
#define CACHE_MTBDD_COMPOSE (47LL<<40)
#define CACHE_MTBDD_EQUAL_NORM (48LL<<40)
#define CACHE_MTBDD_EQUAL_NORM_REL (49LL<<40)
#define CACHE_MTBDD_MINIMUM (50LL<<40)
#define CACHE_MTBDD_MAXIMUM (51LL<<40)
#define CACHE_MTBDD_LEQ (52LL<<40)
#define CACHE_MTBDD_LESS (53LL<<40)
#define CACHE_MTBDD_GEQ (54LL<<40)
#define CACHE_MTBDD_GREATER (55LL<<40)
#define CACHE_MTBDD_EVAL_COMPOSE (56LL<<40)
#define CACHE_MTBDD_NONZERO_COUNT (57LL<<40)
#define CACHE_MTBDD_AND_EXISTS_RN (58LL<<40)
#define CACHE_MTBDD_MINIMUM_RN (59LL<<40)
#define CACHE_MTBDD_MAXIMUM_RN (60LL<<40)
#define CACHE_MTBDD_EQUAL_NORM_RN (61LL<<40)
#define CACHE_MTBDD_EQUAL_NORM_REL_RN (62LL<<40)
#define CACHE_MTBDD_AND_EXISTS_RF (63LL<<40)
#define CACHE_MTBDD_MINIMUM_RF (64LL<<40)
#define CACHE_MTBDD_MAXIMUM_RF (65LL<<40)
#define CACHE_MTBDD_EQUAL_NORM_RF (66LL<<40)
#define CACHE_MTBDD_EQUAL_NORM_REL_RF (67LL<<40)
#define CACHE_MTBDD_ABSTRACT_REPRESENTATIVE (68LL<<40)
static const uint64_t CACHE_MTBDD_APPLY = (40LL<<40);
static const uint64_t CACHE_MTBDD_UAPPLY = (41LL<<40);
static const uint64_t CACHE_MTBDD_ABSTRACT = (42LL<<40);
static const uint64_t CACHE_MTBDD_ITE = (43LL<<40);
static const uint64_t CACHE_MTBDD_AND_ABSTRACT_PLUS = (44LL<<40);
static const uint64_t CACHE_MTBDD_AND_ABSTRACT_MAX = (45LL<<40);
static const uint64_t CACHE_MTBDD_SUPPORT = (46LL<<40);
static const uint64_t CACHE_MTBDD_COMPOSE = (47LL<<40);
static const uint64_t CACHE_MTBDD_EQUAL_NORM = (48LL<<40);
static const uint64_t CACHE_MTBDD_EQUAL_NORM_REL = (49LL<<40);
static const uint64_t CACHE_MTBDD_MINIMUM = (50LL<<40);
static const uint64_t CACHE_MTBDD_MAXIMUM = (51LL<<40);
static const uint64_t CACHE_MTBDD_LEQ = (52LL<<40);
static const uint64_t CACHE_MTBDD_LESS = (53LL<<40);
static const uint64_t CACHE_MTBDD_GEQ = (54LL<<40);
static const uint64_t CACHE_MTBDD_GREATER = (55LL<<40);
static const uint64_t CACHE_MTBDD_EVAL_COMPOSE = (56LL<<40);
static const uint64_t CACHE_MTBDD_NONZERO_COUNT = (57LL<<40);
static const uint64_t CACHE_MTBDD_AND_EXISTS_RN = (58LL<<40);
static const uint64_t CACHE_MTBDD_MINIMUM_RN = (59LL<<40);
static const uint64_t CACHE_MTBDD_MAXIMUM_RN = (60LL<<40);
static const uint64_t CACHE_MTBDD_EQUAL_NORM_RN = (61LL<<40);
static const uint64_t CACHE_MTBDD_EQUAL_NORM_REL_RN = (62LL<<40);
static const uint64_t CACHE_MTBDD_AND_EXISTS_RF = (63LL<<40);
static const uint64_t CACHE_MTBDD_MINIMUM_RF = (64LL<<40);
static const uint64_t CACHE_MTBDD_MAXIMUM_RF = (65LL<<40);
static const uint64_t CACHE_MTBDD_EQUAL_NORM_RF = (66LL<<40);
static const uint64_t CACHE_MTBDD_EQUAL_NORM_REL_RF = (67LL<<40);
static const uint64_t CACHE_MTBDD_ABSTRACT_REPRESENTATIVE = (68LL<<40);
#ifdef __cplusplus
}
@ -112,4 +120,8 @@ extern llmsset_t nodes;
#include <sylvan_mtbdd_int.h>
#include <sylvan_ldd_int.h>
#ifdef __cplusplus
} /* namespace */
#endif
#endif

248
resources/3rdparty/sylvan/src/sylvan_ldd.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,20 +15,12 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <sylvan_int.h>
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan.h>
#include <sylvan_int.h>
#include <avl.h>
#include <sylvan_refs.h>
#include <sha2.h>
@ -54,13 +46,15 @@ VOID_TASK_IMPL_1(lddmc_gc_mark_rec, MDD, mdd)
* External references
*/
refs_table_t mdd_refs;
refs_table_t lddmc_refs;
refs_table_t lddmc_protected;
static int lddmc_protected_created = 0;
MDD
lddmc_ref(MDD a)
{
if (a == lddmc_true || a == lddmc_false) return a;
refs_up(&mdd_refs, a);
refs_up(&lddmc_refs, a);
return a;
}
@ -68,13 +62,36 @@ void
lddmc_deref(MDD a)
{
if (a == lddmc_true || a == lddmc_false) return;
refs_down(&mdd_refs, a);
refs_down(&lddmc_refs, a);
}
size_t
lddmc_count_refs()
{
return refs_count(&mdd_refs);
return refs_count(&lddmc_refs);
}
void
lddmc_protect(MDD *a)
{
if (!lddmc_protected_created) {
// In C++, sometimes lddmc_protect is called before Sylvan is initialized. Just create a table.
protect_create(&lddmc_protected, 4096);
lddmc_protected_created = 1;
}
protect_up(&lddmc_protected, (size_t)a);
}
void
lddmc_unprotect(MDD *a)
{
if (lddmc_protected.refs_table != NULL) protect_down(&lddmc_protected, (size_t)a);
}
size_t
lddmc_count_protected(void)
{
return protect_count(&lddmc_protected);
}
/* Called during garbage collection */
@ -82,9 +99,24 @@ VOID_TASK_0(lddmc_gc_mark_external_refs)
{
// iterate through refs hash table, mark all found
size_t count=0;
uint64_t *it = refs_iter(&mdd_refs, 0, mdd_refs.refs_size);
uint64_t *it = refs_iter(&lddmc_refs, 0, lddmc_refs.refs_size);
while (it != NULL) {
SPAWN(lddmc_gc_mark_rec, refs_next(&mdd_refs, &it, mdd_refs.refs_size));
SPAWN(lddmc_gc_mark_rec, refs_next(&lddmc_refs, &it, lddmc_refs.refs_size));
count++;
}
while (count--) {
SYNC(lddmc_gc_mark_rec);
}
}
VOID_TASK_0(lddmc_gc_mark_protected)
{
// iterate through refs hash table, mark all found
size_t count=0;
uint64_t *it = protect_iter(&lddmc_protected, 0, lddmc_protected.refs_size);
while (it != NULL) {
MDD *to_mark = (MDD*)protect_next(&lddmc_protected, &it, lddmc_protected.refs_size);
SPAWN(lddmc_gc_mark_rec, *to_mark);
count++;
}
while (count--) {
@ -93,33 +125,77 @@ VOID_TASK_0(lddmc_gc_mark_external_refs)
}
/* Infrastructure for internal markings */
typedef struct lddmc_refs_task
{
Task *t;
void *f;
} *lddmc_refs_task_t;
typedef struct lddmc_refs_internal
{
const MDD **pbegin, **pend, **pcur;
MDD *rbegin, *rend, *rcur;
lddmc_refs_task_t sbegin, send, scur;
} *lddmc_refs_internal_t;
DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
VOID_TASK_0(lddmc_refs_mark_task)
VOID_TASK_2(lddmc_refs_mark_p_par, const MDD**, begin, size_t, count)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
size_t i, j=0;
for (i=0; i<lddmc_refs_key->r_count; i++) {
if (j >= 40) {
while (j--) SYNC(lddmc_gc_mark_rec);
j=0;
if (count < 32) {
while (count) {
lddmc_gc_mark_rec(**(begin++));
count--;
}
SPAWN(lddmc_gc_mark_rec, lddmc_refs_key->results[i]);
j++;
}
for (i=0; i<lddmc_refs_key->s_count; i++) {
Task *t = lddmc_refs_key->spawns[i];
if (!TASK_IS_STOLEN(t)) break;
if (TASK_IS_COMPLETED(t)) {
if (j >= 40) {
while (j--) SYNC(lddmc_gc_mark_rec);
j=0;
} else {
SPAWN(lddmc_refs_mark_p_par, begin, count / 2);
CALL(lddmc_refs_mark_p_par, begin + (count / 2), count - count / 2);
SYNC(lddmc_refs_mark_p_par);
}
}
VOID_TASK_2(lddmc_refs_mark_r_par, MDD*, begin, size_t, count)
{
if (count < 32) {
while (count) {
lddmc_gc_mark_rec(*begin++);
count--;
}
} else {
SPAWN(lddmc_refs_mark_r_par, begin, count / 2);
CALL(lddmc_refs_mark_r_par, begin + (count / 2), count - count / 2);
SYNC(lddmc_refs_mark_r_par);
}
}
VOID_TASK_2(lddmc_refs_mark_s_par, lddmc_refs_task_t, begin, size_t, count)
{
if (count < 32) {
while (count) {
Task *t = begin->t;
if (!TASK_IS_STOLEN(t)) return;
if (t->f == begin->f && TASK_IS_COMPLETED(t)) {
lddmc_gc_mark_rec(*(BDD*)TASK_RESULT(t));
}
SPAWN(lddmc_gc_mark_rec, *(BDD*)TASK_RESULT(t));
j++;
begin += 1;
count -= 1;
}
} else {
if (!TASK_IS_STOLEN(begin->t)) return;
SPAWN(lddmc_refs_mark_s_par, begin, count / 2);
CALL(lddmc_refs_mark_s_par, begin + (count / 2), count - count / 2);
SYNC(lddmc_refs_mark_s_par);
}
while (j--) SYNC(lddmc_gc_mark_rec);
}
VOID_TASK_0(lddmc_refs_mark_task)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
SPAWN(lddmc_refs_mark_p_par, lddmc_refs_key->pbegin, lddmc_refs_key->pcur-lddmc_refs_key->pbegin);
SPAWN(lddmc_refs_mark_r_par, lddmc_refs_key->rbegin, lddmc_refs_key->rcur-lddmc_refs_key->rbegin);
CALL(lddmc_refs_mark_s_par, lddmc_refs_key->sbegin, lddmc_refs_key->scur-lddmc_refs_key->sbegin);
SYNC(lddmc_refs_mark_r_par);
SYNC(lddmc_refs_mark_p_par);
}
VOID_TASK_0(lddmc_refs_mark)
@ -130,12 +206,12 @@ VOID_TASK_0(lddmc_refs_mark)
VOID_TASK_0(lddmc_refs_init_task)
{
lddmc_refs_internal_t s = (lddmc_refs_internal_t)malloc(sizeof(struct lddmc_refs_internal));
s->r_size = 128;
s->r_count = 0;
s->s_size = 128;
s->s_count = 0;
s->results = (BDD*)malloc(sizeof(BDD) * 128);
s->spawns = (Task**)malloc(sizeof(Task*) * 128);
s->pcur = s->pbegin = (const MDD**)malloc(sizeof(MDD*) * 1024);
s->pend = s->pbegin + 1024;
s->rcur = s->rbegin = (MDD*)malloc(sizeof(MDD) * 1024);
s->rend = s->rbegin + 1024;
s->scur = s->sbegin = (lddmc_refs_task_t)malloc(sizeof(struct lddmc_refs_task) * 1024);
s->send = s->sbegin + 1024;
SET_THREAD_LOCAL(lddmc_refs_key, s);
}
@ -146,6 +222,83 @@ VOID_TASK_0(lddmc_refs_init)
sylvan_gc_add_mark(TASK(lddmc_refs_mark));
}
void
lddmc_refs_ptrs_up(lddmc_refs_internal_t lddmc_refs_key)
{
size_t size = lddmc_refs_key->pend - lddmc_refs_key->pbegin;
lddmc_refs_key->pbegin = (const MDD**)realloc(lddmc_refs_key->pbegin, sizeof(MDD*) * size * 2);
lddmc_refs_key->pcur = lddmc_refs_key->pbegin + size;
lddmc_refs_key->pend = lddmc_refs_key->pbegin + (size * 2);
}
MDD __attribute__((noinline))
lddmc_refs_refs_up(lddmc_refs_internal_t lddmc_refs_key, MDD res)
{
long size = lddmc_refs_key->rend - lddmc_refs_key->rbegin;
lddmc_refs_key->rbegin = (MDD*)realloc(lddmc_refs_key->rbegin, sizeof(MDD) * size * 2);
lddmc_refs_key->rcur = lddmc_refs_key->rbegin + size;
lddmc_refs_key->rend = lddmc_refs_key->rbegin + (size * 2);
return res;
}
void __attribute__((noinline))
lddmc_refs_tasks_up(lddmc_refs_internal_t lddmc_refs_key)
{
long size = lddmc_refs_key->send - lddmc_refs_key->sbegin;
lddmc_refs_key->sbegin = (lddmc_refs_task_t)realloc(lddmc_refs_key->sbegin, sizeof(struct lddmc_refs_task) * size * 2);
lddmc_refs_key->scur = lddmc_refs_key->sbegin + size;
lddmc_refs_key->send = lddmc_refs_key->sbegin + (size * 2);
}
void __attribute__((unused))
lddmc_refs_pushptr(const MDD *ptr)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
*lddmc_refs_key->pcur++ = ptr;
if (lddmc_refs_key->pcur == lddmc_refs_key->pend) lddmc_refs_ptrs_up(lddmc_refs_key);
}
void __attribute__((unused))
lddmc_refs_popptr(size_t amount)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
lddmc_refs_key->pcur -= amount;
}
MDD __attribute__((unused))
lddmc_refs_push(MDD lddmc)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
*(lddmc_refs_key->rcur++) = lddmc;
if (lddmc_refs_key->rcur == lddmc_refs_key->rend) return lddmc_refs_refs_up(lddmc_refs_key, lddmc);
else return lddmc;
}
void __attribute__((unused))
lddmc_refs_pop(long amount)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
lddmc_refs_key->rcur -= amount;
}
void __attribute__((unused))
lddmc_refs_spawn(Task *t)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
lddmc_refs_key->scur->t = t;
lddmc_refs_key->scur->f = t->f;
lddmc_refs_key->scur += 1;
if (lddmc_refs_key->scur == lddmc_refs_key->send) lddmc_refs_tasks_up(lddmc_refs_key);
}
MDD __attribute__((unused))
lddmc_refs_sync(MDD result)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
lddmc_refs_key->scur -= 1;
return result;
}
VOID_TASK_DECL_0(lddmc_gc_mark_serialize);
/**
@ -155,7 +308,7 @@ VOID_TASK_DECL_0(lddmc_gc_mark_serialize);
static void
lddmc_quit()
{
refs_free(&mdd_refs);
refs_free(&lddmc_refs);
}
void
@ -163,9 +316,14 @@ sylvan_init_ldd()
{
sylvan_register_quit(lddmc_quit);
sylvan_gc_add_mark(TASK(lddmc_gc_mark_external_refs));
sylvan_gc_add_mark(TASK(lddmc_gc_mark_protected));
sylvan_gc_add_mark(TASK(lddmc_gc_mark_serialize));
refs_create(&mdd_refs, 1024);
refs_create(&lddmc_refs, 1024);
if (!lddmc_protected_created) {
protect_create(&lddmc_protected, 4096);
lddmc_protected_created = 1;
}
LACE_ME;
CALL(lddmc_refs_init);
@ -2000,7 +2158,7 @@ VOID_TASK_3(lddmc_match_sat, struct lddmc_match_sat_info *, info, lddmc_enum_cb,
ri->mdd = mddnode_getright(na);
di->mdd = mddnode_getdown(na);
ri->match = b;
di->match = mddnode_getdown(nb);
di->match = p_val == 1 ? mddnode_getdown(nb) : b;
ri->proj = proj;
di->proj = mddnode_getdown(p_node);
ri->count = info->count;

140
resources/3rdparty/sylvan/src/sylvan_ldd.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -24,11 +24,10 @@
extern "C" {
#endif /* __cplusplus */
typedef uint64_t MDD; // Note: low 40 bits only
#define lddmc_false ((MDD)0)
#define lddmc_true ((MDD)1)
static const MDD lddmc_false = 0;
static const MDD lddmc_true = 1;
/* Initialize LDD functionality */
void sylvan_init_ldd(void);
@ -53,19 +52,49 @@ MDD lddmc_make_copynode(MDD ifeq, MDD ifneq);
int lddmc_iscopy(MDD mdd);
MDD lddmc_followcopy(MDD mdd);
/* Add or remove external reference to MDD */
MDD lddmc_ref(MDD a);
void lddmc_deref(MDD a);
/**
* Infrastructure for external references using a hash table.
* Two hash tables store external references: a pointers table and a values table.
* The pointers table stores pointers to MDD variables, manipulated with protect and unprotect.
* The values table stores MDDs, manipulated with ref and deref.
* We strongly recommend using the pointers table whenever possible.
*/
/* For use in custom mark functions */
VOID_TASK_DECL_1(lddmc_gc_mark_rec, MDD)
#define lddmc_gc_mark_rec(mdd) CALL(lddmc_gc_mark_rec, mdd)
/**
* Store the pointer <ptr> in the pointers table.
*/
void lddmc_protect(MDD* ptr);
/**
* Delete the pointer <ptr> from the pointers table.
*/
void lddmc_unprotect(MDD* ptr);
/**
* Compute the number of pointers in the pointers table.
*/
size_t lddmc_count_protected(void);
/**
* Store the MDD <dd> in the values table.
*/
MDD lddmc_ref(MDD dd);
/**
* Delete the MDD <dd> from the values table.
*/
void lddmc_deref(MDD dd);
/* Return the number of external references */
/**
* Compute the number of values in the values table.
*/
size_t lddmc_count_refs(void);
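A hypothetical usage sketch of the two tables (the variable names are made up; the LDD layer is assumed to be initialized via sylvan_init_ldd as above):

MDD states = lddmc_false;            /* long-lived root of a state set                        */
lddmc_protect(&states);              /* pointers table: &states is followed at every GC       */
/* ... states can now be reassigned freely; its current value is always kept alive ...        */

MDD snapshot = lddmc_ref(states);    /* values table: pins this particular MDD                */
/* ... */
lddmc_deref(snapshot);
lddmc_unprotect(&states);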
/* Mark MDD for "notify on dead" */
#define lddmc_notify_ondead(mdd) llmsset_notify_ondead(nodes, mdd)
/**
* Call mtbdd_gc_mark_rec for every mtbdd you want to keep in your custom mark functions.
*/
VOID_TASK_DECL_1(lddmc_gc_mark_rec, MDD)
#define lddmc_gc_mark_rec(mdd) CALL(lddmc_gc_mark_rec, mdd)
/* Sanity check - returns depth of MDD including 'true' terminal or 0 for empty set */
#ifndef NDEBUG
@ -233,54 +262,49 @@ void lddmc_serialize_totext(FILE *out);
void lddmc_serialize_tofile(FILE *out);
void lddmc_serialize_fromfile(FILE *in);
/* Infrastructure for internal markings */
typedef struct lddmc_refs_internal
{
size_t r_size, r_count;
size_t s_size, s_count;
MDD *results;
Task **spawns;
} *lddmc_refs_internal_t;
extern DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
static inline MDD
lddmc_refs_push(MDD ldd)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
if (lddmc_refs_key->r_count >= lddmc_refs_key->r_size) {
lddmc_refs_key->r_size *= 2;
lddmc_refs_key->results = (MDD*)realloc(lddmc_refs_key->results, sizeof(MDD) * lddmc_refs_key->r_size);
}
lddmc_refs_key->results[lddmc_refs_key->r_count++] = ldd;
return ldd;
}
/**
* Infrastructure for internal references.
* Every thread has its own reference stacks. There are three stacks: a pointers stack, a values stack, and a tasks stack.
* The pointers stack stores pointers to LDD variables, manipulated with pushptr and popptr.
* The values stack stores LDDs, manipulated with push and pop.
* The tasks stack stores Lace tasks (that return LDD), manipulated with spawn and sync.
*
* It is recommended to use the pointers stack for local variables and the tasks stack for tasks.
*/
static inline void
lddmc_refs_pop(int amount)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
lddmc_refs_key->r_count-=amount;
}
/**
* Push a LDD variable to the pointer reference stack.
* During garbage collection the variable will be inspected and the contents will be marked.
*/
void lddmc_refs_pushptr(const MDD *ptr);
static inline void
lddmc_refs_spawn(Task *t)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
if (lddmc_refs_key->s_count >= lddmc_refs_key->s_size) {
lddmc_refs_key->s_size *= 2;
lddmc_refs_key->spawns = (Task**)realloc(lddmc_refs_key->spawns, sizeof(Task*) * lddmc_refs_key->s_size);
}
lddmc_refs_key->spawns[lddmc_refs_key->s_count++] = t;
}
/**
* Pop the last <amount> LDD variables from the pointer reference stack.
*/
void lddmc_refs_popptr(size_t amount);
static inline MDD
lddmc_refs_sync(MDD result)
{
LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
lddmc_refs_key->s_count--;
return result;
}
/**
* Push an LDD to the values reference stack.
* During garbage collection the references LDD will be marked.
*/
MDD lddmc_refs_push(MDD dd);
/**
* Pop the last <amount> LDD from the values reference stack.
*/
void lddmc_refs_pop(long amount);
/**
* Push a Task that returns an LDD to the tasks reference stack.
* Usage: lddmc_refs_spawn(SPAWN(function, ...));
*/
void lddmc_refs_spawn(Task *t);
/**
* Pop a Task from the task reference stack.
* Usage: MDD result = lddmc_refs_sync(SYNC(function));
*/
MDD lddmc_refs_sync(MDD dd);
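Putting the three stacks together, a hypothetical LDD task might look as follows; lddmc_union, lddmc_intersect and lddmc_minus are the existing LDD set operations, everything else is made up:

TASK_2(MDD, my_symdiff, MDD, a, MDD, b)            /* hypothetical: symmetric difference */
{
    MDD uni = lddmc_false, isect = lddmc_false;
    lddmc_refs_pushptr(&uni);                      /* pointers stack: protect locals by address */
    lddmc_refs_pushptr(&isect);

    lddmc_refs_spawn(SPAWN(lddmc_union, a, b));    /* tasks stack: protect the pending spawn    */
    isect = lddmc_intersect(a, b);
    uni   = lddmc_refs_sync(SYNC(lddmc_union));    /* spawned result, still protected           */

    MDD res = lddmc_minus(uni, isect);
    lddmc_refs_popptr(2);                          /* pop both pointer slots before returning   */
    return res;
}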
#ifdef __cplusplus
}

27
resources/3rdparty/sylvan/src/sylvan_ldd_int.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,24 +15,7 @@
* limitations under the License.
*/
/*#include <sylvan_config.h>
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan.h>
#include <sylvan_int.h>
#include <avl.h>
#include <sylvan_refs.h>
#include <sha2.h>
*/
/* Do not include this file directly. Instead, include sylvan_int.h */
/**
* Internals for LDDs
@ -51,7 +34,11 @@ typedef struct __attribute__((packed)) mddnode {
uint64_t a, b;
} * mddnode_t; // 16 bytes
#define LDD_GETNODE(mdd) ((mddnode_t)llmsset_index_to_ptr(nodes, mdd))
static inline mddnode_t
LDD_GETNODE(MDD mdd)
{
return ((mddnode_t)llmsset_index_to_ptr(nodes, mdd));
}
static inline uint32_t __attribute__((unused))
mddnode_getvalue(mddnode_t n)

7
resources/3rdparty/sylvan/src/sylvan_mt.c

@ -15,16 +15,11 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <sylvan_int.h> // for llmsset*, nodes, sylvan_register_quit
#include <assert.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan_mt.h>
#include <sylvan_int.h> // for llmsset*, nodes, sylvan_register_quit
/**
* Handling of custom leaves "registry"
*/

6
resources/3rdparty/sylvan/src/sylvan_mt.h

@ -19,13 +19,11 @@
* This file contains declarations for custom Multi-Terminal support.
*/
/* Do not include this file directly. Instead, include sylvan.h */
#ifndef SYLVAN_MT_H
#define SYLVAN_MT_H
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

394
resources/3rdparty/sylvan/src/sylvan_mtbdd.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,26 +15,16 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <sylvan_int.h>
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan.h>
#include <sylvan_int.h>
#include <sylvan_refs.h>
#include <sylvan_sl.h>
#include <sha2.h>
#define BDD MTBDD
/* Primitives */
int
mtbdd_isleaf(MTBDD bdd)
@ -194,33 +184,77 @@ VOID_TASK_0(mtbdd_gc_mark_protected)
}
/* Infrastructure for internal markings */
typedef struct mtbdd_refs_task
{
Task *t;
void *f;
} *mtbdd_refs_task_t;
typedef struct mtbdd_refs_internal
{
const MTBDD **pbegin, **pend, **pcur;
MTBDD *rbegin, *rend, *rcur;
mtbdd_refs_task_t sbegin, send, scur;
} *mtbdd_refs_internal_t;
DECLARE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
VOID_TASK_0(mtbdd_refs_mark_task)
VOID_TASK_2(mtbdd_refs_mark_p_par, const MTBDD**, begin, size_t, count)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
size_t i, j=0;
for (i=0; i<mtbdd_refs_key->r_count; i++) {
if (j >= 40) {
while (j--) SYNC(mtbdd_gc_mark_rec);
j=0;
if (count < 32) {
while (count) {
mtbdd_gc_mark_rec(**(begin++));
count--;
}
SPAWN(mtbdd_gc_mark_rec, mtbdd_refs_key->results[i]);
j++;
}
for (i=0; i<mtbdd_refs_key->s_count; i++) {
Task *t = mtbdd_refs_key->spawns[i];
if (!TASK_IS_STOLEN(t)) break;
if (TASK_IS_COMPLETED(t)) {
if (j >= 40) {
while (j--) SYNC(mtbdd_gc_mark_rec);
j=0;
} else {
SPAWN(mtbdd_refs_mark_p_par, begin, count / 2);
CALL(mtbdd_refs_mark_p_par, begin + (count / 2), count - count / 2);
SYNC(mtbdd_refs_mark_p_par);
}
}
VOID_TASK_2(mtbdd_refs_mark_r_par, MTBDD*, begin, size_t, count)
{
if (count < 32) {
while (count) {
mtbdd_gc_mark_rec(*begin++);
count--;
}
} else {
SPAWN(mtbdd_refs_mark_r_par, begin, count / 2);
CALL(mtbdd_refs_mark_r_par, begin + (count / 2), count - count / 2);
SYNC(mtbdd_refs_mark_r_par);
}
}
VOID_TASK_2(mtbdd_refs_mark_s_par, mtbdd_refs_task_t, begin, size_t, count)
{
if (count < 32) {
while (count > 0) {
Task *t = begin->t;
if (!TASK_IS_STOLEN(t)) return;
if (t->f == begin->f && TASK_IS_COMPLETED(t)) {
mtbdd_gc_mark_rec(*(MTBDD*)TASK_RESULT(t));
}
SPAWN(mtbdd_gc_mark_rec, *(BDD*)TASK_RESULT(t));
j++;
begin += 1;
count -= 1;
}
} else {
if (!TASK_IS_STOLEN(begin->t)) return;
SPAWN(mtbdd_refs_mark_s_par, begin, count / 2);
CALL(mtbdd_refs_mark_s_par, begin + (count / 2), count - count / 2);
SYNC(mtbdd_refs_mark_s_par);
}
while (j--) SYNC(mtbdd_gc_mark_rec);
}
VOID_TASK_0(mtbdd_refs_mark_task)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
SPAWN(mtbdd_refs_mark_p_par, mtbdd_refs_key->pbegin, mtbdd_refs_key->pcur-mtbdd_refs_key->pbegin);
SPAWN(mtbdd_refs_mark_r_par, mtbdd_refs_key->rbegin, mtbdd_refs_key->rcur-mtbdd_refs_key->rbegin);
CALL(mtbdd_refs_mark_s_par, mtbdd_refs_key->sbegin, mtbdd_refs_key->scur-mtbdd_refs_key->sbegin);
SYNC(mtbdd_refs_mark_r_par);
SYNC(mtbdd_refs_mark_p_par);
}
VOID_TASK_0(mtbdd_refs_mark)
@ -231,12 +265,12 @@ VOID_TASK_0(mtbdd_refs_mark)
VOID_TASK_0(mtbdd_refs_init_task)
{
mtbdd_refs_internal_t s = (mtbdd_refs_internal_t)malloc(sizeof(struct mtbdd_refs_internal));
s->r_size = 128;
s->r_count = 0;
s->s_size = 128;
s->s_count = 0;
s->results = (BDD*)malloc(sizeof(BDD) * 128);
s->spawns = (Task**)malloc(sizeof(Task*) * 128);
s->pcur = s->pbegin = (const MTBDD**)malloc(sizeof(MTBDD*) * 1024);
s->pend = s->pbegin + 1024;
s->rcur = s->rbegin = (MTBDD*)malloc(sizeof(MTBDD) * 1024);
s->rend = s->rbegin + 1024;
s->scur = s->sbegin = (mtbdd_refs_task_t)malloc(sizeof(struct mtbdd_refs_task) * 1024);
s->send = s->sbegin + 1024;
SET_THREAD_LOCAL(mtbdd_refs_key, s);
}
@ -247,6 +281,84 @@ VOID_TASK_0(mtbdd_refs_init)
sylvan_gc_add_mark(TASK(mtbdd_refs_mark));
}
void
mtbdd_refs_ptrs_up(mtbdd_refs_internal_t mtbdd_refs_key)
{
size_t cur = mtbdd_refs_key->pcur - mtbdd_refs_key->pbegin;
size_t size = mtbdd_refs_key->pend - mtbdd_refs_key->pbegin;
mtbdd_refs_key->pbegin = (const MTBDD**)realloc(mtbdd_refs_key->pbegin, sizeof(MTBDD*) * size * 2);
mtbdd_refs_key->pcur = mtbdd_refs_key->pbegin + cur;
mtbdd_refs_key->pend = mtbdd_refs_key->pbegin + (size * 2);
}
MTBDD __attribute__((noinline))
mtbdd_refs_refs_up(mtbdd_refs_internal_t mtbdd_refs_key, MTBDD res)
{
long size = mtbdd_refs_key->rend - mtbdd_refs_key->rbegin;
mtbdd_refs_key->rbegin = (MTBDD*)realloc(mtbdd_refs_key->rbegin, sizeof(MTBDD) * size * 2);
mtbdd_refs_key->rcur = mtbdd_refs_key->rbegin + size;
mtbdd_refs_key->rend = mtbdd_refs_key->rbegin + (size * 2);
return res;
}
void __attribute__((noinline))
mtbdd_refs_tasks_up(mtbdd_refs_internal_t mtbdd_refs_key)
{
long size = mtbdd_refs_key->send - mtbdd_refs_key->sbegin;
mtbdd_refs_key->sbegin = (mtbdd_refs_task_t)realloc(mtbdd_refs_key->sbegin, sizeof(struct mtbdd_refs_task) * size * 2);
mtbdd_refs_key->scur = mtbdd_refs_key->sbegin + size;
mtbdd_refs_key->send = mtbdd_refs_key->sbegin + (size * 2);
}
void __attribute__((unused))
mtbdd_refs_pushptr(const MTBDD *ptr)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
*mtbdd_refs_key->pcur++ = ptr;
if (mtbdd_refs_key->pcur == mtbdd_refs_key->pend) mtbdd_refs_ptrs_up(mtbdd_refs_key);
}
void __attribute__((unused))
mtbdd_refs_popptr(size_t amount)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
mtbdd_refs_key->pcur -= amount;
}
MTBDD __attribute__((unused))
mtbdd_refs_push(MTBDD mtbdd)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
*(mtbdd_refs_key->rcur++) = mtbdd;
if (mtbdd_refs_key->rcur == mtbdd_refs_key->rend) return mtbdd_refs_refs_up(mtbdd_refs_key, mtbdd);
else return mtbdd;
}
void __attribute__((unused))
mtbdd_refs_pop(long amount)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
mtbdd_refs_key->rcur -= amount;
}
void
mtbdd_refs_spawn(Task *t)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
mtbdd_refs_key->scur->t = t;
mtbdd_refs_key->scur->f = t->f;
mtbdd_refs_key->scur += 1;
if (mtbdd_refs_key->scur == mtbdd_refs_key->send) mtbdd_refs_tasks_up(mtbdd_refs_key);
}
MTBDD
mtbdd_refs_sync(MTBDD result)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
mtbdd_refs_key->scur -= 1;
return result;
}
/**
* Initialize and quit functions
*/
@ -394,6 +506,12 @@ mtbdd_makemapnode(uint32_t var, MTBDD low, MTBDD high)
return index;
}
MTBDD
mtbdd_ithvar(uint32_t var)
{
return mtbdd_makenode(var, mtbdd_false, mtbdd_true);
}
/* Operations */
/**
@ -449,31 +567,6 @@ mtbdd_fraction(int64_t nom, uint64_t denom)
return mtbdd_makeleaf(2, (nom<<32)|denom);
}
/**
* Create the cube of variables in arr.
*/
MTBDD
mtbdd_fromarray(uint32_t* arr, size_t length)
{
if (length == 0) return mtbdd_true;
else if (length == 1) return mtbdd_makenode(*arr, mtbdd_false, mtbdd_true);
else return mtbdd_makenode(*arr, mtbdd_false, mtbdd_fromarray(arr+1, length-1));
}
/**
* Given a cube of variables, write each variable to arr.
* WARNING: arr must be sufficiently long!
*/
void
mtbdd_toarray(MTBDD set, uint32_t *arr)
{
while (set != mtbdd_true) {
mtbddnode_t n = MTBDD_GETNODE(set);
*arr++ = mtbddnode_getvariable(n);
set = node_gethigh(set, n);
}
}
/**
* Create a MTBDD cube representing the conjunction of variables in their positive or negative
* form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any).
@ -538,10 +631,10 @@ TASK_IMPL_4(MTBDD, mtbdd_union_cube, MTBDD, mtbdd, MTBDD, vars, uint8_t*, cube,
if (va < v) {
MTBDD low = node_getlow(mtbdd, na);
MTBDD high = node_gethigh(mtbdd, na);
SPAWN(mtbdd_union_cube, high, vars, cube, terminal);
mtbdd_refs_spawn(SPAWN(mtbdd_union_cube, high, vars, cube, terminal));
BDD new_low = mtbdd_union_cube(low, vars, cube, terminal);
mtbdd_refs_push(new_low);
BDD new_high = SYNC(mtbdd_union_cube);
BDD new_high = mtbdd_refs_sync(SYNC(mtbdd_union_cube));
mtbdd_refs_pop(1);
if (new_low != low || new_high != high) return mtbdd_makenode(va, new_low, new_high);
else return mtbdd;
@ -563,10 +656,10 @@ TASK_IMPL_4(MTBDD, mtbdd_union_cube, MTBDD, mtbdd, MTBDD, vars, uint8_t*, cube,
}
case 2:
{
SPAWN(mtbdd_union_cube, high, node_gethigh(vars, nv), cube+1, terminal);
mtbdd_refs_spawn(SPAWN(mtbdd_union_cube, high, node_gethigh(vars, nv), cube+1, terminal));
MTBDD new_low = mtbdd_union_cube(low, node_gethigh(vars, nv), cube+1, terminal);
mtbdd_refs_push(new_low);
MTBDD new_high = SYNC(mtbdd_union_cube);
MTBDD new_high = mtbdd_refs_sync(SYNC(mtbdd_union_cube));
mtbdd_refs_pop(1);
if (new_low != low || new_high != high) return mtbdd_makenode(v, new_low, new_high);
return mtbdd;
@ -592,10 +685,10 @@ TASK_IMPL_4(MTBDD, mtbdd_union_cube, MTBDD, mtbdd, MTBDD, vars, uint8_t*, cube,
}
case 2:
{
SPAWN(mtbdd_union_cube, mtbdd, node_gethigh(vars, nv), cube+1, terminal);
mtbdd_refs_spawn(SPAWN(mtbdd_union_cube, mtbdd, node_gethigh(vars, nv), cube+1, terminal));
MTBDD new_low = mtbdd_union_cube(mtbdd, node_gethigh(vars, nv), cube+1, terminal);
mtbdd_refs_push(new_low);
MTBDD new_high = SYNC(mtbdd_union_cube);
MTBDD new_high = mtbdd_refs_sync(SYNC(mtbdd_union_cube));
mtbdd_refs_pop(1);
return mtbdd_makenode(v, new_low, new_high);
}
@ -1272,6 +1365,36 @@ TASK_IMPL_2(MTBDD, mtbdd_op_max, MTBDD*, pa, MTBDD*, pb)
return mtbdd_invalid;
}
TASK_IMPL_2(MTBDD, mtbdd_op_cmpl, MTBDD, a, size_t, k)
{
// if a is false, then it is a partial function. Keep partial!
if (a == mtbdd_false) return mtbdd_false;
// a != constant
mtbddnode_t na = MTBDD_GETNODE(a);
if (mtbddnode_isleaf(na)) {
if (mtbddnode_gettype(na) == 0) {
int64_t v = mtbdd_getint64(a);
if (v == 0) return mtbdd_int64(1);
else return mtbdd_int64(0);
} else if (mtbddnode_gettype(na) == 1) {
double d = mtbdd_getdouble(a);
if (d == 0.0) return mtbdd_double(1.0);
else return mtbdd_double(0.0);
} else if (mtbddnode_gettype(na) == 2) {
uint64_t v = mtbddnode_getvalue(na);
if (v == 1) return mtbdd_fraction(1, 1);
else return mtbdd_fraction(0, 1);
} else {
assert(0); // failure
}
}
return mtbdd_invalid;
(void)k; // unused variable
}
TASK_IMPL_2(MTBDD, mtbdd_op_negate, MTBDD, a, size_t, k)
{
// if a is false, then it is a partial function. Keep partial!
@ -2347,7 +2470,17 @@ TASK_IMPL_2(double, mtbdd_satcount, MTBDD, dd, size_t, nvars)
{
/* Trivial cases */
if (dd == mtbdd_false) return 0.0;
if (mtbdd_isleaf(dd)) return powl(2.0L, nvars);
if (mtbdd_isleaf(dd)) {
// test if 0
mtbddnode_t dd_node = MTBDD_GETNODE(dd);
if (dd != mtbdd_true) {
if (mtbddnode_gettype(dd_node) == 0 && mtbdd_getint64(dd) == 0) return 0.0;
else if (mtbddnode_gettype(dd_node) == 1 && mtbdd_getdouble(dd) == 0.0) return 0.0;
else if (mtbddnode_gettype(dd_node) == 2 && mtbdd_getvalue(dd) == 1) return 0.0;
}
return powl(2.0L, nvars);
}
/* Perhaps execute garbage collection */
sylvan_gc_test();
@ -2726,8 +2859,6 @@ mtbdd_leafcount_more(const MTBDD *mtbdds, size_t count)
static size_t
mtbdd_nodecount_mark(MTBDD mtbdd)
{
if (mtbdd == mtbdd_true) return 0; // do not count true/false leaf
if (mtbdd == mtbdd_false) return 0; // do not count true/false leaf
mtbddnode_t n = MTBDD_GETNODE(mtbdd);
if (mtbddnode_getmark(n)) return 0;
mtbddnode_setmark(n, 1);
@ -3258,11 +3389,107 @@ TASK_IMPL_3(int, mtbdd_reader_frombinary, FILE*, in, MTBDD*, dds, int, count)
}
/**
* Implementation of convenience functions for handling variable sets, i.e., cubes.
* Implementation of variable sets, i.e., cubes of (positive) variables.
*/
/**
* Create a set of variables, represented as the conjunction of (positive) variables.
*/
MTBDD
mtbdd_set_from_array(uint32_t* arr, size_t length)
{
if (length == 0) return mtbdd_true;
else if (length == 1) return mtbdd_makenode(*arr, mtbdd_false, mtbdd_true);
else return mtbdd_set_add(mtbdd_fromarray(arr+1, length-1), *arr);
}
/**
* Write all variables in a variable set to the given array.
* The array must be sufficiently large.
*/
void
mtbdd_set_to_array(MTBDD set, uint32_t *arr)
{
while (set != mtbdd_true) {
mtbddnode_t n = MTBDD_GETNODE(set);
*arr++ = mtbddnode_getvariable(n);
set = node_gethigh(set, n);
}
}
/**
* Add the variable <var> to <set>.
*/
MTBDD
mtbdd_set_add(MTBDD set, uint32_t var)
{
if (set == mtbdd_true) return mtbdd_makenode(var, mtbdd_false, mtbdd_true);
mtbddnode_t set_node = MTBDD_GETNODE(set);
uint32_t set_var = mtbddnode_getvariable(set_node);
if (var < set_var) return mtbdd_makenode(var, mtbdd_false, set);
else if (set_var == var) return set;
else {
MTBDD sub = mtbddnode_followhigh(set, set_node);
MTBDD res = mtbdd_set_add(sub, var);
res = sub == res ? set : mtbdd_makenode(set_var, mtbdd_false, res);
return res;
}
}
/**
* Remove the variable <var> from <set>.
*/
MTBDD
mtbdd_set_remove(MTBDD set, uint32_t var)
{
if (set == mtbdd_true) return mtbdd_true;
mtbddnode_t set_node = MTBDD_GETNODE(set);
uint32_t set_var = mtbddnode_getvariable(set_node);
if (var < set_var) return set;
else if (set_var == var) return mtbddnode_followhigh(set, set_node);
else {
MTBDD sub = mtbddnode_followhigh(set, set_node);
MTBDD res = mtbdd_set_remove(sub, var);
res = sub == res ? set : mtbdd_makenode(set_var, mtbdd_false, res);
return res;
}
}
/**
* Remove variables in <set2> from <set1>.
*/
TASK_IMPL_2(MTBDD, mtbdd_set_minus, MTBDD, set1, MTBDD, set2)
{
if (set1 == mtbdd_true) return mtbdd_true;
if (set2 == mtbdd_true) return set1;
if (set1 == set2) return mtbdd_true;
mtbddnode_t set1_node = MTBDD_GETNODE(set1);
mtbddnode_t set2_node = MTBDD_GETNODE(set2);
uint32_t set1_var = mtbddnode_getvariable(set1_node);
uint32_t set2_var = mtbddnode_getvariable(set2_node);
if (set1_var == set2_var) {
return mtbdd_set_minus(mtbddnode_followhigh(set1, set1_node), mtbddnode_followhigh(set2, set2_node));
}
if (set1_var > set2_var) {
return mtbdd_set_minus(set1, mtbddnode_followhigh(set2, set2_node));
}
/* set1_var < set2_var */
MTBDD sub = mtbddnode_followhigh(set1, set1_node);
MTBDD res = mtbdd_set_minus(sub, set2);
return res == sub ? set1 : mtbdd_makenode(set1_var, mtbdd_false, res);
}
/**
* Return 1 if <set> contains <var>, 0 otherwise.
*/
int
mtbdd_set_in(MTBDD set, uint32_t var)
mtbdd_set_contains(MTBDD set, uint32_t var)
{
while (set != mtbdd_true) {
mtbddnode_t n = MTBDD_GETNODE(set);
@ -3274,6 +3501,9 @@ mtbdd_set_in(MTBDD set, uint32_t var)
return 0;
}
/**
* Compute the number of variables in a given set of variables.
*/
size_t
mtbdd_set_count(MTBDD set)
{
@ -3285,6 +3515,10 @@ mtbdd_set_count(MTBDD set)
return result;
}
/**
* Sanity check if the given MTBDD is a conjunction of positive variables,
* and if all nodes are marked in the nodes table (detects violations after garbage collection).
*/
void
mtbdd_test_isset(MTBDD set)
{
@ -3336,7 +3570,9 @@ mtbdd_map_count(MTBDDMAP map)
MTBDDMAP
mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value)
{
if (mtbdd_map_isempty(map)) return mtbdd_makemapnode(key, mtbdd_map_empty(), value);
if (mtbdd_map_isempty(map)) {
return mtbdd_makemapnode(key, mtbdd_map_empty(), value);
}
mtbddnode_t n = MTBDD_GETNODE(map);
uint32_t k = mtbddnode_getvariable(n);
@ -3357,7 +3593,7 @@ mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value)
* Add all values from map2 to map1, overwrites if key already in map1.
*/
MTBDDMAP
mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2)
mtbdd_map_update(MTBDDMAP map1, MTBDDMAP map2)
{
if (mtbdd_map_isempty(map1)) return map2;
if (mtbdd_map_isempty(map2)) return map1;
@ -3369,13 +3605,13 @@ mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2)
MTBDDMAP result;
if (k1 < k2) {
MTBDDMAP low = mtbdd_map_addall(node_getlow(map1, n1), map2);
MTBDDMAP low = mtbdd_map_update(node_getlow(map1, n1), map2);
result = mtbdd_makemapnode(k1, low, node_gethigh(map1, n1));
} else if (k1 > k2) {
MTBDDMAP low = mtbdd_map_addall(map1, node_getlow(map2, n2));
MTBDDMAP low = mtbdd_map_update(map1, node_getlow(map2, n2));
result = mtbdd_makemapnode(k2, low, node_gethigh(map2, n2));
} else {
MTBDDMAP low = mtbdd_map_addall(node_getlow(map1, n1), node_getlow(map2, n2));
MTBDDMAP low = mtbdd_map_update(node_getlow(map1, n1), node_getlow(map2, n2));
result = mtbdd_makemapnode(k2, low, node_gethigh(map2, n2));
}

425
resources/3rdparty/sylvan/src/sylvan_mtbdd.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -39,8 +39,6 @@
#ifndef SYLVAN_MTBDD_H
#define SYLVAN_MTBDD_H
#include <sylvan_mt.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
@ -48,29 +46,38 @@ extern "C" {
/**
* An MTBDD is a 64-bit value. The low 40 bits are an index into the unique table.
* The highest 1 bit is the complement edge, indicating negation.
* For Boolean MTBDDs, this means "not X", for Integer and Real MTBDDs, this means "-X".
*
* Currently, negation using complement edges is only implemented for Boolean MTBDDs.
* For Integer/Real MTBDDs, negation is not well-defined, as "-0" = "0".
*
* A MTBDD node has 24 bits for the variable.
* A set of MTBDD variables is represented by the MTBDD of the conjunction of these variables.
* A MTBDDMAP uses special "MAP" nodes in the MTBDD nodes table.
*/
typedef uint64_t MTBDD;
typedef uint64_t BDD;
typedef MTBDD MTBDDMAP;
/**
* mtbdd_true is only used in Boolean MTBDDs. mtbdd_false has multiple roles (see above).
* mtbdd_true and mtbdd_false are the Boolean leaves representing True and False.
* False is also used in Integer/Real/Fraction MTBDDs for partially defined functions.
*/
#define mtbdd_complement ((MTBDD)0x8000000000000000LL)
#define mtbdd_false ((MTBDD)0)
#define mtbdd_true (mtbdd_false|mtbdd_complement)
#define mtbdd_invalid ((MTBDD)0xffffffffffffffffLL)
static const MTBDD mtbdd_complement = 0x8000000000000000LL;
static const MTBDD mtbdd_false = 0;
static const MTBDD mtbdd_true = 0x8000000000000000LL;
static const MTBDD mtbdd_invalid = 0xffffffffffffffffLL;
/* Compatibility */
// #define BDD MTBDD
#define BDDMAP MTBDDMAP
#define BDDSET MTBDD
#define BDDVAR uint32_t
#define sylvan_complement mtbdd_complement
#define sylvan_false mtbdd_false
#define sylvan_true mtbdd_true
#define sylvan_invalid mtbdd_invalid
/**
* Definitions for backward compatibility...
* We now consider BDDs to be a special case of MTBDDs.
*/
typedef MTBDD BDD;
typedef MTBDDMAP BDDMAP;
typedef MTBDD BDDSET;
typedef uint32_t BDDVAR;
static const MTBDD sylvan_complement = 0x8000000000000000LL;
static const MTBDD sylvan_false = 0;
static const MTBDD sylvan_true = 0x8000000000000000LL;
static const MTBDD sylvan_invalid = 0xffffffffffffffffLL;
#define sylvan_init_bdd sylvan_init_mtbdd
#define sylvan_ref mtbdd_ref
#define sylvan_deref mtbdd_deref
@ -79,7 +86,9 @@ typedef MTBDD MTBDDMAP;
#define sylvan_unprotect mtbdd_unprotect
#define sylvan_count_protected mtbdd_count_protected
#define sylvan_gc_mark_rec mtbdd_gc_mark_rec
#define sylvan_notify_ondead mtbdd_notify_ondead
#define sylvan_ithvar mtbdd_ithvar
#define bdd_refs_pushptr mtbdd_refs_pushptr
#define bdd_refs_popptr mtbdd_refs_popptr
#define bdd_refs_push mtbdd_refs_push
#define bdd_refs_pop mtbdd_refs_pop
#define bdd_refs_spawn mtbdd_refs_spawn
@ -147,59 +156,199 @@ static inline MTBDD mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high)
}
/**
* Returns 1 if the MTBDD is a terminal, or 0 otherwise.
* Return 1 if the MTBDD is a terminal, or 0 otherwise.
*/
int mtbdd_isleaf(MTBDD mtbdd);
#define mtbdd_isnode(mtbdd) (mtbdd_isleaf(mtbdd) ? 0 : 1)
/**
* For MTBDD terminals, returns <type> and <value>
* Return 1 if the MTBDD is an internal node, or 0 otherwise.
*/
static inline int mtbdd_isnode(MTBDD mtbdd) { return mtbdd_isleaf(mtbdd) ? 0 : 1; }
/**
* Return the <type> field of the given leaf.
*/
uint32_t mtbdd_gettype(MTBDD leaf);
/**
* Return the <value> field of the given leaf.
*/
uint32_t mtbdd_gettype(MTBDD terminal);
uint64_t mtbdd_getvalue(MTBDD terminal);
uint64_t mtbdd_getvalue(MTBDD leaf);
/**
* For internal MTBDD nodes, returns <var>, <low> and <high>
* Return the variable field of the given internal node.
*/
uint32_t mtbdd_getvar(MTBDD node);
/**
* Follow the low/false edge of the given internal node.
* Also takes complement edges into account.
*/
MTBDD mtbdd_getlow(MTBDD node);
/**
* Follow the high/true edge of the given internal node.
* Also takes complement edges into account.
*/
MTBDD mtbdd_gethigh(MTBDD node);
/**
* Compute the complement of the MTBDD.
* For Boolean MTBDDs, this means "not X".
* Obtain the complement of the MTBDD.
* This is only valid for Boolean MTBDDs or custom implementations that support it.
*/
#define mtbdd_hascomp(dd) ((dd & mtbdd_complement) ? 1 : 0)
#define mtbdd_comp(dd) (dd ^ mtbdd_complement)
#define mtbdd_not(dd) (dd ^ mtbdd_complement)
static inline int
mtbdd_hascomp(MTBDD dd)
{
return (dd & mtbdd_complement) ? 1 : 0;
}
static inline MTBDD
mtbdd_comp(MTBDD dd)
{
return dd ^ mtbdd_complement;
}
static inline MTBDD
mtbdd_not(MTBDD dd)
{
return dd ^ mtbdd_complement;
}
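Since mtbdd_true is just mtbdd_false with the complement bit set, these helpers are pure bit operations on the constants above and need no initialized node table; for instance:

assert(mtbdd_hascomp(mtbdd_true) == 1);            /* high bit 0x8000000000000000 is set     */
assert(mtbdd_not(mtbdd_true)  == mtbdd_false);     /* XOR with the complement bit clears it  */
assert(mtbdd_comp(mtbdd_false) == mtbdd_true);     /* XOR with the complement bit sets it    */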
/**
* Create terminals representing int64_t (type 0), double (type 1), or fraction (type 2) values
* Create an Integer leaf with the given value.
*/
MTBDD mtbdd_int64(int64_t value);
/**
* Create a Real leaf with the given value.
*/
MTBDD mtbdd_double(double value);
/**
* Create a Fraction leaf with the given numerator and denominator.
*/
MTBDD mtbdd_fraction(int64_t numer, uint64_t denom);
/**
* Get the value of a terminal (for Integer, Real and Fraction terminals, types 0, 1 and 2)
* Obtain the value of an Integer leaf.
*/
int64_t mtbdd_getint64(MTBDD terminal);
/**
* Obtain the value of a Real leaf.
*/
double mtbdd_getdouble(MTBDD terminal);
#define mtbdd_getnumer(terminal) ((int32_t)(mtbdd_getvalue(terminal)>>32))
#define mtbdd_getdenom(terminal) ((uint32_t)(mtbdd_getvalue(terminal)&0xffffffff))
/**
* Create the conjunction of variables in arr,
* i.e. arr[0] \and arr[1] \and ... \and arr[length-1]
* The variables in arr must be ordered.
* Obtain the numerator of a Fraction leaf.
*/
static inline int32_t
mtbdd_getnumer(MTBDD terminal)
{
return (int32_t)(mtbdd_getvalue(terminal)>>32);
}
/**
* Obtain the denominator of a Fraction leaf.
*/
static inline uint32_t
mtbdd_getdenom(MTBDD terminal)
{
return (uint32_t)(mtbdd_getvalue(terminal)&0xffffffff);
}
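As mtbdd_fraction earlier in this diff packs the numerator into the upper 32 bits and the denominator into the lower 32 bits of the leaf value, the two accessors simply undo that packing; for the (already coprime) fraction 1/2, assuming an initialized MTBDD layer:

MTBDD half  = mtbdd_fraction(1, 2);      /* leaf value: (1 << 32) | 2 */
int32_t  n  = mtbdd_getnumer(half);      /* 1 */
uint32_t d  = mtbdd_getdenom(half);      /* 2 */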
/**
* Create the Boolean MTBDD representing "if <var> then True else False"
*/
MTBDD mtbdd_ithvar(uint32_t var);
/**
* Functions to manipulate sets of MTBDD variables.
*
* A set of variables is represented by a cube/conjunction of (positive) variables.
*/
static inline MTBDD
mtbdd_set_empty()
{
return mtbdd_true;
}
static inline int
mtbdd_set_isempty(MTBDD set)
{
return (set == mtbdd_true) ? 1 : 0;
}
static inline uint32_t
mtbdd_set_first(MTBDD set)
{
return mtbdd_getvar(set);
}
static inline MTBDD
mtbdd_set_next(MTBDD set)
{
return mtbdd_gethigh(set);
}
/**
* Create a set of variables, represented as the conjunction of (positive) variables.
*/
MTBDD mtbdd_set_from_array(uint32_t* arr, size_t length);
/**
* Write all variables in a variable set to the given array.
* The array must be sufficiently large.
*/
void mtbdd_set_to_array(MTBDD set, uint32_t *arr);
/**
* Compute the number of variables in a given set of variables.
*/
size_t mtbdd_set_count(MTBDD set);
/**
* Compute the union of <set1> and <set2>
*/
#define mtbdd_set_union(set1, set2) sylvan_and(set1, set2)
/**
* Remove variables in <set2> from <set1>
*/
MTBDD mtbdd_fromarray(uint32_t* arr, size_t length);
#define mtbdd_set_minus(set1, set2) CALL(mtbdd_set_minus, set1, set2)
TASK_DECL_2(MTBDD, mtbdd_set_minus, MTBDD, MTBDD);
/**
* Given a cube of variables, write each variable to arr.
* WARNING: arr must be sufficiently long!
* Return 1 if <set> contains <var>, 0 otherwise.
*/
void mtbdd_toarray(MTBDD set, uint32_t *arr);
int mtbdd_set_contains(MTBDD set, uint32_t var);
/**
* Add the variable <var> to <set>.
*/
MTBDD mtbdd_set_add(MTBDD set, uint32_t var);
/**
* Remove the variable <var> from <set>.
*/
MTBDD mtbdd_set_remove(MTBDD set, uint32_t var);
/**
* Sanity check if the given MTBDD is a conjunction of positive variables,
* and if all nodes are marked in the nodes table (detects violations after garbage collection).
*/
void mtbdd_test_isset(MTBDD set);
/**
* Definitions for backwards compatibility
*/
#define mtbdd_fromarray mtbdd_set_from_array
#define mtbdd_set_fromarray mtbdd_set_from_array
#define mtbdd_set_toarray mtbdd_set_to_array
#define mtbdd_set_addall mtbdd_set_union
#define mtbdd_set_removeall mtbdd_set_minus
#define mtbdd_set_in mtbdd_set_contains
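A short sketch of the renamed variable-set API (the variable indices are arbitrary; an initialized MTBDD layer is assumed):

uint32_t vars[3] = { 1, 3, 5 };
MTBDD set = mtbdd_set_from_array(vars, 3);     /* cube x1 /\ x3 /\ x5  */
set = mtbdd_set_add(set, 2);                   /* x1 /\ x2 /\ x3 /\ x5 */
set = mtbdd_set_remove(set, 5);                /* x1 /\ x2 /\ x3       */
int    has3 = mtbdd_set_contains(set, 3);      /* 1                    */
size_t n    = mtbdd_set_count(set);            /* 3                    */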
/**
* Create a MTBDD cube representing the conjunction of variables in their positive or negative
@ -292,6 +441,12 @@ TASK_DECL_3(MTBDD, mtbdd_abstract, MTBDD, MTBDD, mtbdd_abstract_op);
*/
TASK_DECL_2(MTBDD, mtbdd_op_negate, MTBDD, size_t);
/**
* Unary opeation Complement.
* Supported domains: Integer, Real, Fraction
*/
TASK_DECL_2(MTBDD, mtbdd_op_cmpl, MTBDD, size_t);
/**
* Binary operation Plus (for MTBDDs of same type)
* Only for MTBDDs where either all leaves are Boolean, or Integer, or Double.
@ -336,9 +491,17 @@ TASK_DECL_3(MTBDD, mtbdd_abstract_op_max, MTBDD, MTBDD, int);
/**
* Compute -a
* (negation, where 0 stays 0, and x into -x)
*/
#define mtbdd_negate(a) mtbdd_uapply(a, TASK(mtbdd_op_negate), 0)
/**
* Compute ~a for partial MTBDDs.
* Does not negate Boolean True/False.
* (complement, where 0 is turned into 1, and non-0 into 0)
*/
#define mtbdd_cmpl(a) mtbdd_uapply(a, TASK(mtbdd_op_cmpl), 0)
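Following the mtbdd_op_cmpl implementation earlier in this diff, the complement maps 0 to 1, every other value to 0, and leaves the partial-function terminal mtbdd_false untouched; a hypothetical call site (a Lace context is needed because mtbdd_uapply is a task):

void cmpl_demo(void)                               /* hypothetical */
{
    LACE_ME;                                       /* mtbdd_cmpl expands to a Lace CALL via mtbdd_uapply */
    MTBDD a = mtbdd_cmpl(mtbdd_int64(0));          /* Integer leaf 1                        */
    MTBDD b = mtbdd_cmpl(mtbdd_int64(5));          /* Integer leaf 0                        */
    MTBDD c = mtbdd_cmpl(mtbdd_false);             /* stays mtbdd_false (partial preserved) */
    (void)a; (void)b; (void)c;
}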
/**
* Compute a + b
*/
@ -776,34 +939,39 @@ MTBDD mtbdd_reader_get(uint64_t* arr, uint64_t identifier);
*/
void mtbdd_reader_end(uint64_t *arr);
/**
* MTBDDSET
* Just some convenience functions for handling sets of variables represented as a
* cube (conjunction) of positive literals
*/
#define mtbdd_set_empty() mtbdd_true
#define mtbdd_set_isempty(set) (set == mtbdd_true)
#define mtbdd_set_add(set, var) sylvan_and(set, sylvan_ithvar(var))
#define mtbdd_set_addall(set, set2) sylvan_and(set, set2)
#define mtbdd_set_remove(set, var) sylvan_exists(set, var)
#define mtbdd_set_removeall(set, set2) sylvan_exists(set, set2)
#define mtbdd_set_first(set) sylvan_var(set)
#define mtbdd_set_next(set) sylvan_high(set)
#define mtbdd_set_fromarray(arr, count) mtbdd_fromarray(arr, count)
#define mtbdd_set_toarray(set, arr) mtbdd_toarray(set, arr)
int mtbdd_set_in(BDDSET set, BDDVAR var);
size_t mtbdd_set_count(BDDSET set);
void mtbdd_test_isset(BDDSET set);
/**
* MTBDDMAP, maps uint32_t variables to MTBDDs.
* A MTBDDMAP node has variable level, low edge going to the next MTBDDMAP, high edge to the mapped MTBDD.
*/
#define mtbdd_map_empty() mtbdd_false
#define mtbdd_map_isempty(map) (map == mtbdd_false ? 1 : 0)
#define mtbdd_map_key(map) mtbdd_getvar(map)
#define mtbdd_map_value(map) mtbdd_gethigh(map)
#define mtbdd_map_next(map) mtbdd_getlow(map)
static inline MTBDD
mtbdd_map_empty()
{
return mtbdd_false;
}
static inline int
mtbdd_map_isempty(MTBDD map)
{
return (map == mtbdd_false) ? 1 : 0;
}
static inline uint32_t
mtbdd_map_key(MTBDD map)
{
return mtbdd_getvar(map);
}
static inline MTBDD
mtbdd_map_value(MTBDD map)
{
return mtbdd_gethigh(map);
}
static inline MTBDD
mtbdd_map_next(MTBDD map)
{
return mtbdd_getlow(map);
}
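/* [Illustrative sketch, not part of the Sylvan diff.] Walking an MTBDDMAP with
 * the accessors above, printing each key together with the 64-bit identifier
 * of the MTBDD it maps to. */
#include <stdio.h>
#include <inttypes.h>
#include <sylvan.h>
static void dump_map(MTBDDMAP map)
{
    while (!mtbdd_map_isempty(map)) {
        printf("variable %" PRIu32 " -> MTBDD %" PRIx64 "\n",
               mtbdd_map_key(map), (uint64_t)mtbdd_map_value(map));
        map = mtbdd_map_next(map);
    }
}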
/**
* Return 1 if the map contains the key, 0 otherwise.
@ -823,7 +991,8 @@ MTBDDMAP mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value);
/**
* Add all values from map2 to map1, overwrites if key already in map1.
*/
MTBDDMAP mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2);
MTBDDMAP mtbdd_map_update(MTBDDMAP map1, MTBDDMAP map2);
#define mtbdd_map_addall mtbdd_map_update
/**
* Remove the key <key> from the map and return the result
@ -850,85 +1019,87 @@ VOID_TASK_DECL_1(mtbdd_gc_mark_rec, MTBDD);
#define mtbdd_gc_mark_rec(mtbdd) CALL(mtbdd_gc_mark_rec, mtbdd)
/**
* Default external referencing. During garbage collection, MTBDDs marked with mtbdd_ref will
* be kept in the forest.
* It is recommended to prefer mtbdd_protect and mtbdd_unprotect.
* Infrastructure for external references using a hash table.
* Two hash tables store external references: a pointers table and a values table.
* The pointers table stores pointers to MTBDD variables, manipulated with protect and unprotect.
* The values table stores MTBDDs, manipulated with ref and deref.
* We strongly recommend using the pointers table whenever possible.
*/
MTBDD mtbdd_ref(MTBDD a);
void mtbdd_deref(MTBDD a);
size_t mtbdd_count_refs(void);
/**
* Default external pointer referencing. During garbage collection, the pointers are followed and the MTBDDs
* that they refer to are kept in the forest.
* Store the pointer <ptr> in the pointers table.
*/
void mtbdd_protect(MTBDD* ptr);
/**
* Delete the pointer <ptr> from the pointers table.
*/
void mtbdd_unprotect(MTBDD* ptr);
/**
* Compute the number of pointers in the pointers table.
*/
size_t mtbdd_count_protected(void);
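/* [Illustrative sketch, not part of the Sylvan diff.] The recommended
 * pointers-table style: register the address of a local variable so that
 * whatever it currently holds survives garbage collection, then deregister it
 * when done. By contrast, the values table (mtbdd_ref/mtbdd_deref below) stores
 * the MTBDD value itself, so every reassignment would need a new ref. */
#include <sylvan.h>
static void demo_protect(MTBDD some_dd)
{
    MTBDD result = mtbdd_false;
    mtbdd_protect(&result);       /* &result is followed during GC marking */
    result = some_dd;             /* may be reassigned freely afterwards   */
    /* ... operations that may trigger garbage collection ...              */
    mtbdd_unprotect(&result);     /* remove the pointer again              */
}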
/**
* If mtbdd_set_ondead is set to a callback, then this function marks MTBDDs (terminals).
* When they are dead after the mark phase in garbage collection, the callback is called for marked MTBDDs.
* The ondead callback can either perform cleanup or resurrect dead terminals.
* Store the MTBDD <dd> in the values table.
*/
#define mtbdd_notify_ondead(dd) llmsset_notify_ondead(nodes, dd&~mtbdd_complement)
MTBDD mtbdd_ref(MTBDD dd);
/**
* Infrastructure for internal references (per-thread, e.g. during MTBDD operations)
* Use mtbdd_refs_push and mtbdd_refs_pop to put MTBDDs on a thread-local reference stack.
* Use mtbdd_refs_spawn and mtbdd_refs_sync around SPAWN and SYNC operations when the result
* of the spawned Task is an MTBDD that must be kept during garbage collection.
* Delete the MTBDD <dd> from the values table.
*/
typedef struct mtbdd_refs_internal
{
size_t r_size, r_count;
size_t s_size, s_count;
MTBDD *results;
Task **spawns;
} *mtbdd_refs_internal_t;
void mtbdd_deref(MTBDD dd);
extern DECLARE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
/**
* Compute the number of values in the values table.
*/
size_t mtbdd_count_refs(void);
static inline MTBDD
mtbdd_refs_push(MTBDD mtbdd)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
if (mtbdd_refs_key->r_count >= mtbdd_refs_key->r_size) {
mtbdd_refs_key->r_size *= 2;
mtbdd_refs_key->results = (MTBDD*)realloc(mtbdd_refs_key->results, sizeof(MTBDD) * mtbdd_refs_key->r_size);
}
mtbdd_refs_key->results[mtbdd_refs_key->r_count++] = mtbdd;
return mtbdd;
}
/**
* Infrastructure for internal references.
* Every thread has its own reference stacks. There are three: a pointers stack, a values stack, and a tasks stack.
* The pointers stack stores pointers to MTBDD variables, manipulated with pushptr and popptr.
* The values stack stores MTBDDs, manipulated with push and pop.
* The tasks stack stores Lace tasks (that return MTBDDs), manipulated with spawn and sync.
*
* It is recommended to use the pointers stack for local variables and the tasks stack for tasks.
*/
static inline void
mtbdd_refs_pop(int amount)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
mtbdd_refs_key->r_count-=amount;
}
/**
* Push a MTBDD variable to the pointer reference stack.
* During garbage collection the variable will be inspected and the contents will be marked.
*/
void mtbdd_refs_pushptr(const MTBDD *ptr);
static inline void
mtbdd_refs_spawn(Task *t)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
if (mtbdd_refs_key->s_count >= mtbdd_refs_key->s_size) {
mtbdd_refs_key->s_size *= 2;
mtbdd_refs_key->spawns = (Task**)realloc(mtbdd_refs_key->spawns, sizeof(Task*) * mtbdd_refs_key->s_size);
}
mtbdd_refs_key->spawns[mtbdd_refs_key->s_count++] = t;
}
/**
* Pop the last <amount> MTBDD variables from the pointer reference stack.
*/
void mtbdd_refs_popptr(size_t amount);
static inline MTBDD
mtbdd_refs_sync(MTBDD result)
{
LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
mtbdd_refs_key->s_count--;
return result;
}
/**
* Push an MTBDD to the values reference stack.
* During garbage collection the referenced MTBDD will be marked.
*/
MTBDD mtbdd_refs_push(MTBDD mtbdd);
/**
* Pop the last <amount> MTBDDs from the values reference stack.
*/
void mtbdd_refs_pop(long amount);
/**
* Push a Task that returns an MTBDD to the tasks reference stack.
* Usage: mtbdd_refs_spawn(SPAWN(function, ...));
*/
void mtbdd_refs_spawn(Task *t);
/**
* Pop a Task from the task reference stack.
* Usage: MTBDD result = mtbdd_refs_sync(SYNC(function));
*/
MTBDD mtbdd_refs_sync(MTBDD mtbdd);
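/* [Illustrative sketch, not part of the Sylvan diff.] The documented pattern
 * for the tasks and values reference stacks inside a Lace task. The task
 * `my_op` (returning an MTBDD) and the task `my_combine` are hypothetical
 * names; mtbdd_plus is assumed to be the Plus operation from this header. */
#include <sylvan.h>
TASK_DECL_1(MTBDD, my_op, MTBDD);
TASK_DECL_2(MTBDD, my_combine, MTBDD, MTBDD);
TASK_IMPL_2(MTBDD, my_combine, MTBDD, a, MTBDD, b)
{
    mtbdd_refs_spawn(SPAWN(my_op, a));                          /* keep the pending result alive */
    MTBDD low  = mtbdd_refs_push(CALL(my_op, b));               /* protect across SYNC and plus  */
    MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(my_op))); /* collect the spawned result    */
    MTBDD result = mtbdd_plus(low, high);                       /* may itself trigger GC         */
    mtbdd_refs_pop(2);                                          /* release low and high again    */
    return result;
}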
#include "sylvan_mtbdd_storm.h"
#ifdef __cplusplus
}
#endif /* __cplusplus */

49
resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,6 +15,8 @@
* limitations under the License.
*/
/* Do not include this file directly. Instead, include sylvan_int.h */
/**
* Internals for MTBDDs
*/
@ -29,17 +31,48 @@ typedef struct __attribute__((packed)) mtbddnode {
uint64_t a, b;
} * mtbddnode_t; // 16 bytes
#define MTBDD_GETNODE(mtbdd) ((mtbddnode_t)llmsset_index_to_ptr(nodes, mtbdd&0x000000ffffffffff))
static inline mtbddnode_t
MTBDD_GETNODE(MTBDD dd)
{
return (mtbddnode_t)llmsset_index_to_ptr(nodes, dd&0x000000ffffffffff);
}
/**
* Complement handling macros
*/
#define MTBDD_HASMARK(s) (s&mtbdd_complement?1:0)
#define MTBDD_TOGGLEMARK(s) (s^mtbdd_complement)
#define MTBDD_STRIPMARK(s) (s&~mtbdd_complement)
#define MTBDD_TRANSFERMARK(from, to) (to ^ (from & mtbdd_complement))
// Equal under mark
#define MTBDD_EQUALM(a, b) ((((a)^(b))&(~mtbdd_complement))==0)
static inline int
MTBDD_HASMARK(MTBDD dd)
{
return (dd & mtbdd_complement) ? 1 : 0;
}
static inline MTBDD
MTBDD_TOGGLEMARK(MTBDD dd)
{
return dd ^ mtbdd_complement;
}
static inline MTBDD
MTBDD_STRIPMARK(MTBDD dd)
{
return dd & (~mtbdd_complement);
}
static inline MTBDD
MTBDD_TRANSFERMARK(MTBDD from, MTBDD to)
{
return (to ^ (from & mtbdd_complement));
}
/**
* Are two MTBDDs equal modulo mark?
*/
static inline int
MTBDD_EQUALM(MTBDD a, MTBDD b)
{
return ((a^b)&(~mtbdd_complement)) ? 0 : 1;
}
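/* [Illustrative sketch, not part of the Sylvan diff.] Basic identities of the
 * complement-mark helpers above: stripping a mark yields an unmarked edge, and
 * transferring the original edge's mark back reproduces the original edge. */
#include <assert.h>
#include <sylvan_int.h>   /* pulls in this internal header */
static void check_mark_helpers(MTBDD dd)
{
    MTBDD stripped = MTBDD_STRIPMARK(dd);
    assert(MTBDD_HASMARK(stripped) == 0);
    assert(MTBDD_TRANSFERMARK(dd, stripped) == dd);
    assert(MTBDD_EQUALM(dd, MTBDD_TOGGLEMARK(dd)));
}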
// Leaf: a = L=1, M, type; b = value
// Node: a = L=0, C, M, high; b = variable, low

10
resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c

@ -1,6 +1,9 @@
#include <sylvan_mtbdd_int.h>
#include <stdint.h>
#include <math.h>
#include "sylvan_int.h"
#include "storm_wrapper.h"
#include "sylvan_mtbdd_storm.h"
// Import the types created for rational numbers and functions.
extern uint32_t srn_type;
@ -554,11 +557,6 @@ int mtbdd_isnonzero(MTBDD dd) {
return mtbdd_iszero(dd) ? 0 : 1;
}
MTBDD
mtbdd_ithvar(uint32_t level) {
return mtbdd_makenode(level, mtbdd_false, mtbdd_true);
}
TASK_IMPL_2(MTBDD, mtbdd_op_complement, MTBDD, a, size_t, k)
{
// if a is false, then it is a partial function. Keep partial!

8
resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h

@ -1,3 +1,7 @@
#ifdef __cplusplus
extern "C" {
#endif
/**
* Binary operation Divide (for MTBDDs of same type)
* Only for MTBDDs where all leaves are Integer or Double.
@ -148,3 +152,7 @@ TASK_DECL_3(BDD, mtbdd_max_abstract_representative, MTBDD, MTBDD, uint32_t);
TASK_DECL_3(MTBDD, mtbdd_uapply_nocache, MTBDD, mtbdd_uapply_op, size_t);
#define mtbdd_uapply_nocache(dd, op, param) CALL(mtbdd_uapply_nocache, dd, op, param)
#ifdef __cplusplus
}
#endif

1
resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp

@ -1,4 +1,5 @@
#include "storm_wrapper.h"
#include "sylvan_mtbdd_storm.h"
#include "sylvan_storm_rational_number.h"
#include "sylvan_storm_rational_function.h"

11
resources/3rdparty/sylvan/src/sylvan_refs.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,18 +15,13 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <sylvan.h>
#include <sylvan_refs.h>
#include <assert.h> // for assert
#include <errno.h> // for errno
#include <stdio.h> // for fprintf
#include <stdint.h> // for uint32_t etc
#include <stdlib.h> // for exit
#include <string.h> // for strerror
#include <sys/mman.h> // for mmap
#include <sylvan_refs.h>
#ifndef compiler_barrier
#define compiler_barrier() { asm volatile("" ::: "memory"); }
#endif

5
resources/3rdparty/sylvan/src/sylvan_refs.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,8 +15,7 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <stdint.h> // for uint32_t etc
/* Do not include this file directly. Instead, include sylvan.h */
#ifndef REFS_INLINE_H
#define REFS_INLINE_H

8
resources/3rdparty/sylvan/src/sylvan_sl.c

@ -1,5 +1,5 @@
/*
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,13 +14,11 @@
* limitations under the License.
*/
#include <assert.h>
#include <stdio.h>
#include <sys/mman.h> // for mmap, munmap, etc
#include <sylvan.h>
#include <sylvan_sl.h>
#include <sys/mman.h> // for mmap, munmap, etc
/* An SL_DEPTH of 6 means 32 bytes per bucket; an SL_DEPTH of 14 means 64 bytes per bucket.
However, there is a very large performance drop with only 6 levels. */
#define SL_DEPTH 14

2
resources/3rdparty/sylvan/src/sylvan_sl.h

@ -1,5 +1,5 @@
/*
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

17
resources/3rdparty/sylvan/src/sylvan_stats.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,14 +15,13 @@
* limitations under the License.
*/
#include <sylvan_int.h>
#include <errno.h> // for errno
#include <string.h> // memset
#include <sylvan_stats.h>
#include <sys/mman.h>
#include <inttypes.h>
#include <sylvan_int.h>
#if SYLVAN_STATS
#ifdef __ELF__
@ -31,9 +30,6 @@ __thread sylvan_stats_t sylvan_stats;
pthread_key_t sylvan_stats_key;
#endif
#include <hwloc.h>
static hwloc_topology_t topo;
/**
* Instructions for sylvan_stats_report
*/
@ -127,11 +123,8 @@ VOID_TASK_0(sylvan_stats_reset_perthread)
fprintf(stderr, "sylvan_stats: Unable to allocate memory: %s!\n", strerror(errno));
exit(1);
}
// Ensure the stats object is on our pu
hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, LACE_WORKER_PU);
hwloc_set_area_membind(topo, sylvan_stats, sizeof(sylvan_stats_t), pu->cpuset, HWLOC_MEMBIND_BIND, 0);
pthread_setspecific(sylvan_stats_key, sylvan_stats);
}
pthread_setspecific(sylvan_stats_key, sylvan_stats);
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
sylvan_stats->counters[i] = 0;
}
@ -146,8 +139,6 @@ VOID_TASK_IMPL_0(sylvan_stats_init)
#ifndef __ELF__
pthread_key_create(&sylvan_stats_key, NULL);
#endif
hwloc_topology_init(&topo);
hwloc_topology_load(topo);
TOGETHER(sylvan_stats_reset_perthread);
}

10
resources/3rdparty/sylvan/src/sylvan_stats.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,8 +15,7 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <lace.h>
/* Do not include this file directly. Instead, include sylvan.h */
#ifndef SYLVAN_STATS_H
#define SYLVAN_STATS_H
@ -93,6 +92,8 @@ typedef enum {
SYLVAN_COUNTER_COUNTER
} Sylvan_Counters;
#undef OPCOUNTER
typedef enum
{
SYLVAN_GC,
@ -134,10 +135,8 @@ void sylvan_stats_report(FILE* target);
#if SYLVAN_STATS
#ifdef __MACH__
#include <mach/mach_time.h>
#define getabstime() mach_absolute_time()
#else
#include <time.h>
static uint64_t
getabstime(void)
{
@ -153,7 +152,6 @@ getabstime(void)
#ifdef __ELF__
extern __thread sylvan_stats_t sylvan_stats;
#else
#include <pthread.h>
extern pthread_key_t sylvan_stats_key;
#endif

201
resources/3rdparty/sylvan/src/sylvan_table.c

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,23 +15,12 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <sylvan_int.h>
#include <errno.h> // for errno
#include <stdint.h> // for uint64_t etc
#include <stdio.h> // for printf
#include <stdlib.h>
#include <string.h> // memset
#include <sys/mman.h> // for mmap
#include <sylvan_table.h>
#include <sylvan_stats.h>
#include <sylvan_tls.h>
#include <hwloc.h>
static hwloc_topology_t topo;
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
@ -120,15 +109,178 @@ is_custom_bucket(const llmsset_t dbs, uint64_t index)
return (*ptr & mask) ? 1 : 0;
}
/**
* This tricks the compiler into generating the bit-wise rotation instruction
*/
static uint64_t __attribute__((unused))
rotr64 (uint64_t n, unsigned int c)
{
return (n >> c) | (n << (64-c));
}
/**
* Pseudo-RNG for initializing the hashtab tables.
* Implementation of xorshift128+ by Vigna 2016, which is
* based on "Xorshift RNGs", Marsaglia 2003
*/
static uint64_t __attribute__((unused))
xor64(void)
{
// For the initial state of s, we select two numbers:
// - the initializer of Marsaglia's original xorshift
// - the FNV-1a 64-bit offset basis
static uint64_t s[2] = {88172645463325252LLU, 14695981039346656037LLU};
uint64_t s1 = s[0];
const uint64_t s0 = s[1];
const uint64_t result = s0 + s1;
s[0] = s0;
s1 ^= s1 << 23; // a
s[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); // b, c
return result;
}
/**
* The table for tabulation hashing
*/
static uint64_t hashtab[256*16];
/**
* Implementation of simple tabulation.
* Proposed by e.g. Thorup 2017 "Fast and Powerful Hashing using Tabulation"
*/
uint64_t
llmsset_tabhash(uint64_t a, uint64_t b, uint64_t seed)
{
// we use the seed as base
uint64_t *t = hashtab;
for (int i=0; i<8; i++) {
seed ^= t[(uint8_t)a];
t += 256; // next table
a >>= 8;
}
for (int i=0; i<8; i++) {
seed ^= t[(uint8_t)b];
t += 256; // next table
b >>= 8;
}
return seed;
}
/**
* Encoding of the prime 2^89-1 for CWhash
*/
static const uint64_t Prime89_0 = (((uint64_t)1)<<32)-1;
static const uint64_t Prime89_1 = (((uint64_t)1)<<32)-1;
static const uint64_t Prime89_2 = (((uint64_t)1)<<25)-1;
static const uint64_t Prime89_21 = (((uint64_t)1)<<57)-1;
typedef uint64_t INT96[3];
/**
* Computes (r mod Prime89) mod 2^64
* (for CWhash, implementation by Thorup et al.)
*/
static uint64_t
Mod64Prime89(INT96 r)
{
uint64_t r0, r1, r2;
r2 = r[2];
r1 = r[1];
r0 = r[0] + (r2>>25);
r2 &= Prime89_2;
return (r2 == Prime89_2 && r1 == Prime89_1 && r0 >= Prime89_0) ? (r0 - Prime89_0) : (r0 + (r1<<32));
}
/**
* Computes a 96-bit r such that r = ax+b (mod Prime89)
* (for CWhash, implementation by Thorup et al.)
*/
static void
MultAddPrime89(INT96 r, uint64_t x, const INT96 a, const INT96 b)
{
#define LOW(x) ((x)&0xFFFFFFFF)
#define HIGH(x) ((x)>>32)
uint64_t x1, x0, c21, c20, c11, c10, c01, c00;
uint64_t d0, d1, d2, d3;
uint64_t s0, s1, carry;
x1 = HIGH(x);
x0 = LOW(x);
c21 = a[2]*x1;
c11 = a[1]*x1;
c01 = a[0]*x1;
c20 = a[2]*x0;
c10 = a[1]*x0;
c00 = a[0]*x0;
d0 = (c20>>25)+(c11>>25)+(c10>>57)+(c01>>57);
d1 = (c21<<7);
d2 = (c10&Prime89_21) + (c01&Prime89_21);
d3 = (c20&Prime89_2) + (c11&Prime89_2) + (c21>>57);
s0 = b[0] + LOW(c00) + LOW(d0) + LOW(d1);
r[0] = LOW(s0);
carry = HIGH(s0);
s1 = b[1] + HIGH(c00) + HIGH(d0) + HIGH(d1) + LOW(d2) + carry;
r[1] = LOW(s1);
carry = HIGH(s1);
r[2] = b[2] + HIGH(d2) + d3 + carry;
#undef LOW
#undef HIGH
}
/**
* Compute Carter/Wegman k-independent hash
* Implementation by Thorup et al.
* - compute polynomial on prime field of 2^89-1 (10th Mersenne prime)
* - random coefficients from random.org
*/
static uint64_t
CWhash(uint64_t x)
{
INT96 A = {0xcf90094b0ab9939e, 0x817f998697604ff3, 0x1a6e6f08b65440ea};
INT96 B = {0xb989a05a5dcf57f1, 0x7c007611f28daee7, 0xd8bd809d68c26854};
INT96 C = {0x1041070633a92679, 0xba9379fd71cd939d, 0x271793709e1cd781};
INT96 D = {0x5c240a710b0c6beb, 0xc24ac3b68056ea1c, 0xd46c9c7f2adfaf71};
INT96 E = {0xa527cea74b053a87, 0x69ba4a5e23f90577, 0x707b6e053c7741e7};
INT96 F = {0xa6c0812cdbcdb982, 0x8cb0c8b73f701489, 0xee08c4dc1dbef243};
INT96 G = {0xcf3ab0ec9d538853, 0x982a8457b6db03a9, 0x8659cf6b636c9d37};
INT96 H = {0x905d5d14efefc0dd, 0x7e9870e018ead6a2, 0x47e2c9af0ea9325a};
INT96 I = {0xc59351a9bf283b09, 0x4a39e35dbc280c7f, 0xc5f160732996be4f};
INT96 J = {0x4d58e0b7a57ccddf, 0xc362a25c267d1db4, 0x7c79d2fcd89402b2};
INT96 K = {0x62ac342c4393930c, 0xdb2fd2740ebef2a0, 0xc672fd5e72921377};
INT96 L = {0xbdae267838862c6d, 0x0e0ee206fdbaf1d1, 0xc270e26fd8dfbae7};
INT96 r;
MultAddPrime89(r, x, A, B);
MultAddPrime89(r, x, r, C);
MultAddPrime89(r, x, r, D);
MultAddPrime89(r, x, r, E);
MultAddPrime89(r, x, r, F);
MultAddPrime89(r, x, r, G);
MultAddPrime89(r, x, r, H);
MultAddPrime89(r, x, r, I);
MultAddPrime89(r, x, r, J);
MultAddPrime89(r, x, r, K);
MultAddPrime89(r, x, r, L);
return Mod64Prime89(r);
}
/**
* The well-known FNV-1a hash for 64 bits.
* Typical seed value (base offset) is 14695981039346656037LLU.
*
* NOTE: this particular hash is bad for certain nodes, resulting in
* early garbage collection and failure. We xor with a shifted copy of the
* hash, which suffices as a band-aid, but this is obviously not an ideal solution.
*/
uint64_t
llmsset_hash(const uint64_t a, const uint64_t b, const uint64_t seed)
llmsset_fnvhash(const uint64_t a, const uint64_t b, const uint64_t seed)
{
// The FNV-1a hash for 64 bits
const uint64_t prime = 1099511628211;
uint64_t hash = seed;
hash = (hash ^ a) * prime;
hash = (hash ^ b) * prime;
return hash;
return hash ^ (hash>>32);
}
/*
@ -247,6 +399,7 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
const int custom = is_custom_bucket(dbs, d_idx) ? 1 : 0;
if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash);
else hash_rehash = llmsset_hash(a, b, hash_rehash);
const uint64_t step = (((hash_rehash >> 20) | 1) << 3);
const uint64_t new_v = (hash_rehash & MASK_HASH) | d_idx;
int i=0;
@ -271,8 +424,7 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
}
// go to next cache line in probe sequence
if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash);
else hash_rehash = llmsset_hash(a, b, hash_rehash);
hash_rehash += step;
#if LLMSSET_MASK
last = idx = hash_rehash & dbs->mask;
@ -286,9 +438,6 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
llmsset_t
llmsset_create(size_t initial_size, size_t max_size)
{
hwloc_topology_init(&topo);
hwloc_topology_load(topo);
llmsset_t dbs = NULL;
if (posix_memalign((void**)&dbs, LINE_SIZE, sizeof(struct llmsset)) != 0) {
fprintf(stderr, "llmsset_create: Unable to allocate memory!\n");
@ -347,12 +496,6 @@ llmsset_create(size_t initial_size, size_t max_size)
madvise(dbs->table, dbs->max_size * 8, MADV_RANDOM);
#endif
hwloc_set_area_membind(topo, dbs->table, dbs->max_size * 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
hwloc_set_area_membind(topo, dbs->data, dbs->max_size * 16, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
hwloc_set_area_membind(topo, dbs->bitmapc, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
// forbid first two positions (index 0 and 1)
dbs->bitmap2[0] = 0xc000000000000000LL;
@ -369,6 +512,9 @@ llmsset_create(size_t initial_size, size_t max_size)
INIT_THREAD_LOCAL(my_region);
TOGETHER(llmsset_reset_region);
// initialize hashtab
for (int i=0; i<256*16; i++) hashtab[i] = CWhash(i);
return dbs;
}
@ -392,13 +538,11 @@ VOID_TASK_IMPL_1(llmsset_clear, llmsset_t, dbs)
VOID_TASK_IMPL_1(llmsset_clear_data, llmsset_t, dbs)
{
if (mmap(dbs->bitmap1, dbs->max_size / (512*8), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
} else {
memset(dbs->bitmap1, 0, dbs->max_size / (512*8));
}
if (mmap(dbs->bitmap2, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
} else {
memset(dbs->bitmap2, 0, dbs->max_size / 8);
}
@ -416,7 +560,6 @@ VOID_TASK_IMPL_1(llmsset_clear_hashes, llmsset_t, dbs)
#if defined(madvise) && defined(MADV_RANDOM)
madvise(dbs->table, sizeof(uint64_t[dbs->max_size]), MADV_RANDOM);
#endif
hwloc_set_area_membind(topo, dbs->table, sizeof(uint64_t[dbs->max_size]), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
} else {
// reallocate failed... expensive fallback
memset(dbs->table, 0, dbs->max_size * 8);

31
resources/3rdparty/sylvan/src/sylvan_table.h

@ -1,6 +1,6 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,24 +15,15 @@
* limitations under the License.
*/
#include <sylvan_config.h>
/* Do not include this file directly. Instead, include sylvan_int.h */
#include <stdint.h>
#include <unistd.h>
#include <lace.h>
#ifndef LLMSSET_H
#define LLMSSET_H
#ifndef SYLVAN_TABLE_H
#define SYLVAN_TABLE_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#ifndef LLMSSET_MASK
#define LLMSSET_MASK 0 // set to 1 to use bit mask instead of modulo
#endif
/**
* Lockless hash table (set) to store 16-byte keys.
* Each unique key is associated with a 42-bit number.
@ -210,9 +201,19 @@ VOID_TASK_DECL_1(llmsset_destroy_unmarked, llmsset_t);
void llmsset_set_custom(const llmsset_t dbs, llmsset_hash_cb hash_cb, llmsset_equals_cb equals_cb, llmsset_create_cb create_cb, llmsset_destroy_cb destroy_cb);
/**
* Default hashing function
* Default hashing functions.
*/
#define llmsset_hash llmsset_tabhash
/**
* FNV-1a hash
*/
uint64_t llmsset_fnvhash(uint64_t a, uint64_t b, uint64_t seed);
/**
* Twisted tabulation hash
*/
uint64_t llmsset_hash(const uint64_t a, const uint64_t b, const uint64_t seed);
uint64_t llmsset_tabhash(uint64_t a, uint64_t b, uint64_t seed);
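/* [Illustrative sketch, not part of the Sylvan diff.] Both hash functions share
 * the (a, b, seed) signature, so they are interchangeable here. The seed is the
 * FNV-1a offset basis mentioned above; note that the tabulation tables are only
 * filled once a node table has been created with llmsset_create. */
#include <stdio.h>
#include <inttypes.h>
#include <sylvan_int.h>   /* pulls in sylvan_table.h */
static void compare_hashes(uint64_t a, uint64_t b)
{
    const uint64_t seed = 14695981039346656037LLU;
    printf("tabhash: %016" PRIx64 "\n", llmsset_tabhash(a, b, seed));
    printf("fnvhash: %016" PRIx64 "\n", llmsset_fnvhash(a, b, seed));
}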
#ifdef __cplusplus
}

3
resources/3rdparty/sylvan/src/sylvan_tls.h

@ -5,7 +5,6 @@
* A platform independent wrapper around thread-local storage. On platforms that don't support
* __thread variables (e.g. Mac OS X), we have to use the pthreads library for thread-local storage
*/
#include <assert.h>
#ifndef TLS_H
#define TLS_H
@ -18,8 +17,6 @@
#else//!__ELF__
#include <pthread.h>
#define DECLARE_THREAD_LOCAL(name, type) pthread_key_t name##_KEY
#define INIT_THREAD_LOCAL(name) \

2
src/storm/storage/dd/sylvan/InternalSylvanBdd.cpp

@ -213,7 +213,7 @@ namespace storm {
uint_fast64_t InternalBdd<DdType::Sylvan>::getNodeCount() const {
// We have to add one to also count the false-leaf, which is the only leaf appearing in BDDs.
return static_cast<uint_fast64_t>(this->sylvanBdd.NodeCount()) + 1;
return static_cast<uint_fast64_t>(this->sylvanBdd.NodeCount());
}
bool InternalBdd<DdType::Sylvan>::isOne() const {

1
src/storm/utility/sylvan.h

@ -13,6 +13,7 @@
#pragma GCC system_header // Only way to suppress some warnings atm.
#include "sylvan_obj.hpp"
#include "sylvan_mtbdd_storm.h"
#include "sylvan_storm_rational_number.h"
#include "sylvan_storm_rational_function.h"
