
moved to new sylvan version and made everything work again

main
dehnert 8 years ago
parent
commit
0354c9024a
1. resources/3rdparty/CMakeLists.txt (5)
2. resources/3rdparty/sylvan/.gitignore (36)
3. resources/3rdparty/sylvan/.travis.yml (82)
4. resources/3rdparty/sylvan/CHANGELOG.md (53)
5. resources/3rdparty/sylvan/CMakeLists.txt (86)
6. resources/3rdparty/sylvan/LICENSE (0)
7. resources/3rdparty/sylvan/Makefile.am (5)
8. resources/3rdparty/sylvan/README.md (111)
9. resources/3rdparty/sylvan/cmake/FindGMP.cmake (68)
10. resources/3rdparty/sylvan/cmake/FindHwloc.cmake (24)
11. resources/3rdparty/sylvan/cmake/FindSphinx.cmake (72)
12. resources/3rdparty/sylvan/cmake/UpdateGHPages.cmake (76)
13. resources/3rdparty/sylvan/configure.ac (21)
14. resources/3rdparty/sylvan/docs/conf.py.in (58)
15. resources/3rdparty/sylvan/docs/index.rst (282)
16. resources/3rdparty/sylvan/examples/CMakeLists.txt (20)
17. resources/3rdparty/sylvan/examples/getrss.c (0)
18. resources/3rdparty/sylvan/examples/getrss.h (0)
19. resources/3rdparty/sylvan/examples/ldd2bdd.c (777)
20. resources/3rdparty/sylvan/examples/lddmc.c (83)
21. resources/3rdparty/sylvan/examples/mc.c (23)
22. resources/3rdparty/sylvan/examples/nqueens.c (328)
23. resources/3rdparty/sylvan/examples/simple.cpp (7)
24. resources/3rdparty/sylvan/examples/storm.cpp (127)
25. resources/3rdparty/sylvan/m4/.gitignore (5)
26. resources/3rdparty/sylvan/m4/m4_ax_check_compile_flag.m4 (72)
27. resources/3rdparty/sylvan/models/at.5.8-rgs.bdd (0)
28. resources/3rdparty/sylvan/models/at.6.8-rgs.bdd (0)
29. resources/3rdparty/sylvan/models/at.7.8-rgs.bdd (0)
30. resources/3rdparty/sylvan/models/blocks.2.ldd (0)
31. resources/3rdparty/sylvan/models/blocks.4.ldd (0)
32. resources/3rdparty/sylvan/models/collision.4.9-rgs.bdd (0)
33. resources/3rdparty/sylvan/models/collision.5.9-rgs.bdd (0)
34. resources/3rdparty/sylvan/models/schedule_world.2.8-rgs.bdd (0)
35. resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd (0)
36. resources/3rdparty/sylvan/src/CMakeLists.txt (111)
37. resources/3rdparty/sylvan/src/Makefile.am (39)
38. resources/3rdparty/sylvan/src/avl.h (5)
39. resources/3rdparty/sylvan/src/lace.c (352)
40. resources/3rdparty/sylvan/src/lace.h (139)
41. resources/3rdparty/sylvan/src/sha2.c (15)
42. resources/3rdparty/sylvan/src/sha2.h (0)
43. resources/3rdparty/sylvan/src/stats.c (245)
44. resources/3rdparty/sylvan/src/storm_function_wrapper.cpp (11)
45. resources/3rdparty/sylvan/src/storm_function_wrapper.h (2)
46. resources/3rdparty/sylvan/src/sylvan.h (162)
47. resources/3rdparty/sylvan/src/sylvan_bdd.c (1478)
48. resources/3rdparty/sylvan/src/sylvan_bdd.h (277)
49. resources/3rdparty/sylvan/src/sylvan_bdd_int.h (87)
50. resources/3rdparty/sylvan/src/sylvan_bdd_storm.c (6)
51. resources/3rdparty/sylvan/src/sylvan_cache.c (101)
52. resources/3rdparty/sylvan/src/sylvan_cache.h (37)
53. resources/3rdparty/sylvan/src/sylvan_common.c (415)
54. resources/3rdparty/sylvan/src/sylvan_common.h (242)
55. resources/3rdparty/sylvan/src/sylvan_config.h (0)
56. resources/3rdparty/sylvan/src/sylvan_gmp.c (211)
57. resources/3rdparty/sylvan/src/sylvan_gmp.h (16)
58. resources/3rdparty/sylvan/src/sylvan_int.h (106)
59. resources/3rdparty/sylvan/src/sylvan_ldd.c (491)
60. resources/3rdparty/sylvan/src/sylvan_ldd.h (9)
61. resources/3rdparty/sylvan/src/sylvan_ldd_int.h (125)
62. resources/3rdparty/sylvan/src/sylvan_mt.c (266)
63. resources/3rdparty/sylvan/src/sylvan_mt.h (132)
64. resources/3rdparty/sylvan/src/sylvan_mtbdd.c (1437)
65. resources/3rdparty/sylvan/src/sylvan_mtbdd.h (408)
66. resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h (74)
67. resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c (134)
68. resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h (3)
69. resources/3rdparty/sylvan/src/sylvan_obj.cpp (68)
70. resources/3rdparty/sylvan/src/sylvan_obj.hpp (59)
71. resources/3rdparty/sylvan/src/sylvan_obj_mtbdd_storm.hpp (6)
72. resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp (19)
73. resources/3rdparty/sylvan/src/sylvan_obj_sylvan_storm.hpp (1)
74. resources/3rdparty/sylvan/src/sylvan_refs.c (5)
75. resources/3rdparty/sylvan/src/sylvan_refs.h (3)
76. resources/3rdparty/sylvan/src/sylvan_sl.c (172)
77. resources/3rdparty/sylvan/src/sylvan_sl.h (70)
78. resources/3rdparty/sylvan/src/sylvan_stats.c (296)
79. resources/3rdparty/sylvan/src/sylvan_stats.h (179)
80. resources/3rdparty/sylvan/src/sylvan_storm_rational_function.c (42)
81. resources/3rdparty/sylvan/src/sylvan_table.c (87)
82. resources/3rdparty/sylvan/src/sylvan_table.h (23)
83. resources/3rdparty/sylvan/src/sylvan_tls.h (0)
84. resources/3rdparty/sylvan/sylvan.pc.cmake.in (10)
85. resources/3rdparty/sylvan/test/.gitignore (5)
86. resources/3rdparty/sylvan/test/CMakeLists.txt (7)
87. resources/3rdparty/sylvan/test/main.c (350)
88. resources/3rdparty/sylvan/test/test_assert.h (0)
89. resources/3rdparty/sylvan/test/test_basic.c (260)
90. resources/3rdparty/sylvan/test/test_cxx.cpp (13)
91. src/storm/modelchecker/results/ExplicitQualitativeCheckResult.cpp (2)
92. src/storm/modelchecker/results/ExplicitQualitativeCheckResult.h (2)
93. src/storm/solver/SymbolicEliminationLinearEquationSolver.cpp (2)
94. src/storm/storage/bisimulation/BisimulationDecomposition.h (1)
95. src/storm/storage/dd/Bdd.cpp (24)
96. src/storm/storage/dd/Bdd.h (21)
97. src/storm/storage/dd/cudd/InternalCuddBdd.cpp (53)
98. src/storm/storage/dd/cudd/InternalCuddBdd.h (36)
99. src/storm/storage/dd/sylvan/InternalSylvanBdd.cpp (52)
100. src/storm/storage/dd/sylvan/InternalSylvanBdd.h (35)

resources/3rdparty/CMakeLists.txt (5)

@@ -370,7 +370,7 @@ ExternalProject_Add(
DOWNLOAD_COMMAND ""
PREFIX "sylvan"
SOURCE_DIR ${STORM_3RDPARTY_SOURCE_DIR}/sylvan
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DSYLVAN_BUILD_TEST=Off -DSYLVAN_BUILD_EXAMPLES=Off -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DUSE_CARL=ON -Dcarl_INCLUDE_DIR=${carl_INCLUDE_DIR} -DSYLVAN_PORTABLE=${STORM_PORTABLE} -Dcarl_LIBRARIES=${carl_LIBRARIES}
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DSYLVAN_BUILD_DOCS=OFF -DSYLVAN_BUILD_EXAMPLES=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DUSE_CARL=ON -Dcarl_INCLUDE_DIR=${carl_INCLUDE_DIR} -DSYLVAN_PORTABLE=${STORM_PORTABLE} -Dcarl_LIBRARIES=${carl_LIBRARIES} -DBUILD_SHARED_LIBS=OFF -DSYLVAN_BUILD_TESTS=OFF
BINARY_DIR ${STORM_3RDPARTY_BINARY_DIR}/sylvan
BUILD_IN_SOURCE 0
INSTALL_COMMAND ""
@@ -378,6 +378,9 @@ ExternalProject_Add(
LOG_CONFIGURE ON
LOG_BUILD ON
BUILD_BYPRODUCTS ${STORM_3RDPARTY_BINARY_DIR}/sylvan/src/libsylvan${STATIC_EXT}
# TODO: REMOVE AFTER DEBUGGING
BUILD_ALWAYS 1
)
ExternalProject_Get_Property(sylvan source_dir)
ExternalProject_Get_Property(sylvan binary_dir)

resources/3rdparty/sylvan/.gitignore (36)

@@ -1,36 +1,3 @@
# autotools
**/Makefile
/autom4te.cache/
config.*
.dirstamp
aclocal.m4
configure
m4/*
tools
Makefile.in
# cmake
**/CMakeCache.txt
**/CMakeFiles
**/cmake_install.cmake
# libtool
.deps/
.libs/
/libtool
# object files
*.lo
*.o
*.la
# output files
examples/mc
examples/lddmc
test/sylvan_test
test/test_cxx
src/libsylvan.a
# MacOS file
.DS_Store
@@ -38,6 +5,3 @@ src/libsylvan.a
.cproject
.project
.settings
# coverage output
coverage

resources/3rdparty/sylvan/.travis.yml (82)

@@ -3,83 +3,53 @@ sudo: false
matrix:
include:
- os: linux
env: TOOLSET=gcc CC=gcc-4.7 CXX=g++-4.7 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON"
env: TOOLSET=gcc CC=gcc-4.9 CXX=g++-4.9 BUILD_TYPE="Release" SYLVAN_STATS="ON" SYLVAN_BUILD_DOCS="OFF"
addons:
apt:
packages: ["gcc-4.7", "g++-4.7", "libstd++-4.7-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test"]
packages: ["gcc-4.9", "g++-4.9", "libgmp-dev", "cmake", "cmake-data", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "george-edison55-precise-backports"]
- os: linux
env: TOOLSET=gcc CC=gcc-4.8 CXX=g++-4.8 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON"
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" SYLVAN_STATS="ON" SYLVAN_BUILD_DOCS="OFF"
addons:
apt:
packages: ["gcc-4.8", "g++-4.8", "libstd++-4.8-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test"]
packages: ["gcc-5", "g++-5", "libgmp-dev", "cmake", "cmake-data", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "george-edison55-precise-backports"]
- os: linux
env: TOOLSET=gcc CC=gcc-4.9 CXX=g++-4.9 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON"
env: TOOLSET=gcc CC=gcc-6 CXX=g++-6 BUILD_TYPE="Release" SYLVAN_STATS="ON" SYLVAN_BUILD_DOCS="OFF"
addons:
apt:
packages: ["gcc-4.9", "g++-4.9", "libstd++-4.9-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test"]
packages: ["gcc-6", "g++-6", "libgmp-dev", "cmake", "cmake-data", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "george-edison55-precise-backports"]
- os: linux
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Debug" HWLOC="OFF" SYLVAN_STATS="OFF"
env: TOOLSET=gcc CC=gcc-6 CXX=g++-6 BUILD_TYPE="Debug" SYLVAN_STATS="ON" SYLVAN_BUILD_DOCS="OFF"
addons:
apt:
packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test"]
packages: ["gcc-6", "g++-6", "libgmp-dev", "cmake", "cmake-data", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "george-edison55-precise-backports"]
- os: linux
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Debug" HWLOC="ON" SYLVAN_STATS="ON"
env: TOOLSET=gcc CC=gcc-6 CXX=g++-6 BUILD_TYPE="Release" SYLVAN_STATS="OFF" SYLVAN_BUILD_DOCS="OFF"
addons:
apt:
packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test"]
packages: ["gcc-6", "g++-6", "libgmp-dev", "cmake", "cmake-data", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "george-edison55-precise-backports"]
- os: linux
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON"
env: TOOLSET=clang CC=clang-3.7 CXX=clang++-3.7 BUILD_TYPE="Release" SYLVAN_STATS="ON" SYLVAN_BUILD_DOCS="OFF"
addons:
apt:
packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test"]
packages: ["clang-3.7", "libstdc++-5-dev", "libgmp-dev", "cmake", "cmake-data", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "llvm-toolchain-precise-3.7", "george-edison55-precise-backports"]
- os: linux
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="OFF"
env: TOOLSET=clang CC=clang-3.8 CXX=clang++-3.8 BUILD_TYPE="Release" SYLVAN_STATS="ON" SYLVAN_BUILD_DOCS="OFF"
addons:
apt:
packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test"]
- os: linux
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" VARIANT="coverage"
addons:
apt:
packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test"]
- os: linux
env: TOOLSET=clang CC=/usr/local/clang-3.4/bin/clang CXX=/usr/local/clang-3.4/bin/clang++ BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON"
addons:
apt:
packages: ["clang-3.4", "libstdc++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "llvm-toolchain-precise-3.4"]
- os: linux
env: TOOLSET=clang CC=clang-3.6 CXX=clang++-3.6 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON"
addons:
apt:
packages: ["clang-3.6", "libstdc++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "llvm-toolchain-precise-3.6"]
- os: linux
env: TOOLSET=clang CC=clang-3.7 CXX=clang++-3.7 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON"
addons:
apt:
packages: ["clang-3.7", "libstdc++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "llvm-toolchain-precise-3.7"]
- os: osx
env: TOOLSET=clang CC=clang CXX=clang++ BUILD_TYPE="Debug" HWLOC="ON" SYLVAN_STATS="ON"
- os: osx
env: TOOLSET=clang CC=clang CXX=clang++ BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="OFF"
- os: osx
env: TOOLSET=gcc CC=gcc-4.9 CXX=g++-4.9 BUILD_TYPE="Debug" HWLOC="ON" SYLVAN_STATS="OFF"
packages: ["clang-3.8", "libstdc++-5-dev", "libgmp-dev", "cmake", "cmake-data", "libhwloc-dev"]
sources: ["ubuntu-toolchain-r-test", "llvm-toolchain-precise-3.8", "george-edison55-precise-backports"]
- os: osx
env: TOOLSET=gcc CC=gcc-4.9 CXX=g++-4.9 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="OFF"
env: TOOLSET=clang CC=clang CXX=clang++ BUILD_TYPE="Debug" SYLVAN_STATS="ON" SYLVAN_BUILD_DOCS="OFF"
- os: osx
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Debug" HWLOC="ON" SYLVAN_STATS="OFF"
env: TOOLSET=clang CC=clang CXX=clang++ BUILD_TYPE="Release" SYLVAN_STATS="OFF" SYLVAN_BUILD_DOCS="OFF"
- os: osx
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="OFF"
env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" SYLVAN_STATS="OFF" SYLVAN_BUILD_DOCS="OFF"
install:
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; brew install argp-standalone homebrew/science/hwloc; fi
@@ -88,7 +58,7 @@ install:
script:
- ${CC} --version
- ${CXX} --version
- cmake . -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DUSE_HWLOC=${HWLOC} -DSYLVAN_STATS=${SYLVAN_STATS} -DWITH_COVERAGE=${COVERAGE}
- cmake . -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSYLVAN_STATS=${SYLVAN_STATS} -DWITH_COVERAGE=${COVERAGE} -DSYLVAN_BUILD_DOCS=${SYLVAN_BUILD_DOCS}
- make -j 2
- make test
- examples/simple
@@ -98,3 +68,5 @@ script:
notifications:
email: false
branches:
only: master

resources/3rdparty/sylvan/CHANGELOG.md (53)

@@ -0,0 +1,53 @@
# Change Log
All notable changes to Sylvan will be documented in this file.
## [Unreleased]
### Added
- The embedded work-stealing framework now explicitly checks for stack overflows and aborts with an appropriate error message written to stderr.
- New functions `sylvan_project` and `sylvan_and_project` for BDDs, a dual of existential quantification, where instead of the variables to remove, the given set of variables are the variables to keep.
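A minimal sketch of how the new projection operations might be used, based on the description above; the variables-to-keep argument is assumed to follow the same cube convention as `sylvan_exists`, Lace and the BDD package are assumed to be initialized, and the helper name is illustrative:

```c
#include <sylvan.h>

// Hypothetical helper: keep only variables x1 and x2 in the BDDs a and b.
// (References/protection are omitted for brevity.)
BDD project_example(BDD a, BDD b)
{
    LACE_ME;  // Sylvan BDD operations are Lace tasks

    BDD keep = sylvan_and(sylvan_ithvar(1), sylvan_ithvar(2)); // cube of variables to keep
    BDD pa   = sylvan_project(a, keep);          // abstract away every variable not in the cube
    BDD pab  = sylvan_and_project(a, b, keep);   // like sylvan_project(sylvan_and(a, b), keep), in one pass
    return sylvan_or(pa, pab);
}
```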
### Changed
- Rewritten initialization of Sylvan. Before the call to `sylvan_init_package`, table sizes must be initialized either using `sylvan_set_sizes` or with the new function `sylvan_set_limits`. This new function allows the user to set a maximum number of bytes allocated for the nodes table and for the operation cache.
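A sketch of the new initialization sequence described above, assuming `sylvan_init_package` no longer takes size arguments and `sylvan_set_sizes` takes the same four powers of two (min/max nodes table, min/max operation cache) that the old `sylvan_init_package` took:

```c
#include <sylvan.h>

int main()
{
    lace_init(0, 0);               // 0 workers: auto-detect
    lace_startup(0, NULL, NULL);

    // Table sizes must now be set before sylvan_init_package.
    sylvan_set_sizes(1LL<<22, 1LL<<26, 1LL<<23, 1LL<<27);
    sylvan_init_package();
    sylvan_init_mtbdd();

    /* ... use Sylvan ... */

    sylvan_quit();
    lace_exit();
    return 0;
}
```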
## [1.2.0] - 2017-02-03
### Added
- Added documentation in the docs directory using Sphinx. Some documentation is removed from the README.md file.
### Changed
- The custom terminal/leaf API is slightly modified. The `read_binary_cb` has a different signature to remove the dependency upon MTBDD functionality.
- The custom terminal/leaf API functions have been renamed and moved to a separate file.
- Lace has been updated with a new version. The new version has rewritten the hardware locality code that pins worker threads and memory.
### Fixed
- A bug in `mtbdd_reader_readbinary` has been fixed.
## [1.1.2] - 2017-01-11
### Fixed
- The pkg-config file is slightly improved.
- A critical bug in `sylvan_collect` has been fixed.
## [1.1.1] - 2017-01-10
### Fixed
- The pkg-config file now includes hwloc as a requirement
## [1.1.0] - 2017-01-09
### Added
- This CHANGELOG file.
- Custom leaves can now implement custom callbacks for writing/reading to/from files.
- Implemented GMP leaf writing/reading to/from file.
- Method `mtbdd_eval_compose` for proper function composition (after partial evaluation).
- Method `mtbdd_enum_par_*` for parallel path enumeration.
- LDD methods `relprod` and `relprev` now support action labels (meta 5).
- Examples program `ldd2bdd` now converts LDD transition systems to BDDs transition systems.
- Methods `cache_get6` and `cache_put6` for operation cache entries that require two buckets.
- File `sylvan.pc` for pkg-config.
### Changed
- The API to register a custom MTBDD leaf now requires multiple calls, which is better design for future extensions.
- When rehashing during garbage collection fails (due to finite length probe sequences), Sylvan now increases the probe sequence length instead of aborting with an error message. However, Sylvan will probably still abort due to the table being full, since this error is typically triggered when garbage collection does not remove many dead nodes.
### Fixed
- Methods `mtbdd_enum_all_*` fixed and rewritten.
### Removed
- We no longer use both autoconf makefiles and CMake. Instead, we removed the autoconf files and rely solely on CMake now.

resources/3rdparty/sylvan/CMakeLists.txt (86)

@@ -1,18 +1,39 @@
cmake_minimum_required(VERSION 2.6)
project(sylvan C CXX)
enable_testing()
cmake_minimum_required(VERSION 3.1)
project(sylvan VERSION 1.2.0)
set(PROJECT_DESCRIPTION "Sylvan, a parallel decision diagram library")
set(PROJECT_URL "https://github.com/trolando/sylvan")
message(STATUS "CMake build configuration for Sylvan ${PROJECT_VERSION}")
enable_language(C CXX)
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED ON)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel." FORCE)
ENDIF(NOT CMAKE_BUILD_TYPE)
# Some info
message(STATUS "OS: ${CMAKE_SYSTEM_NAME}")
message(STATUS "Compiler: ${CMAKE_CXX_COMPILER_ID}")
set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/")
option(SYLVAN_PORTABLE "If set, the created library will be portable." OFF)
option(USE_CARL "Sets whether carl should be included." ON)
set(CMAKE_C_FLAGS "-O3 -Wextra -Wall -fno-strict-aliasing -std=gnu11 -fPIC")
set(CMAKE_CXX_FLAGS "-O3 -Wextra -Wall -fno-strict-aliasing -Wno-deprecated-register -std=c++14 -fPIC")
set(CMAKE_C_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing -fPIC ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing -Wno-deprecated -fPIC ${CMAKE_CXX_FLAGS}")
if (NOT SYLVAN_PORTABLE)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native")
endif()
option(USE_CARL "Sets whether carl should be included." ON)
option(WITH_COVERAGE "Add generation of test coverage" OFF)
if(WITH_COVERAGE)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -g -coverage")
@@ -29,7 +50,7 @@ if(WITH_COVERAGE)
COMMAND make test
# Capture counters
COMMAND ${LCOV_PATH} --gcov-tool ${GCOV_PATH} --directory . --capture --output-file coverage.info
COMMAND ${LCOV_PATH} --remove coverage.info 'test/*' '/usr/*' 'examples/*' 'src/sylvan_mtbdd*' 'src/lace*' 'src/sylvan_ldd*' 'src/avl.h' 'src/sha2.c' --output-file coverage.info.cleaned
COMMAND ${LCOV_PATH} --remove coverage.info 'test/*' '/usr/*' 'examples/*' 'src/lace*' 'src/sha2.c' --output-file coverage.info.cleaned
COMMAND ${GENHTML_PATH} -o coverage coverage.info.cleaned
COMMAND ${CMAKE_COMMAND} -E remove coverage.info coverage.info.cleaned
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
@@ -37,32 +58,55 @@ if(WITH_COVERAGE)
endif()
if(USE_CARL)
add_definitions(-DSYLVAN_HAVE_CARL)
include_directories("${carl_INCLUDE_DIR}")
message(STATUS "Sylvan - using CARL.")
add_definitions(-DSYLVAN_HAVE_CARL)
include_directories("${carl_INCLUDE_DIR}")
message(STATUS "Sylvan - Using CArL.")
else()
message(STATUS "Sylvan - not using CARL.")
message(STATUS "Sylvan - Not using CArL.")
endif()
include_directories(${CMAKE_SOURCE_DIR}/src)
include(GNUInstallDirs)
include(CTest)
set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
find_package(GMP REQUIRED)
include_directories(${GMP_INCLUDE_DIR})
include_directories("${PROJECT_SOURCE_DIR}/../../../src")
include_directories("${PROJECT_BINARY_DIR}/../../../include")
include_directories(src)
add_subdirectory(src)
option(SYLVAN_BUILD_TEST "Build test programs" ON)
if(SYLVAN_BUILD_TEST)
option(SYLVAN_BUILD_TESTS "Build example tools" ON)
if(SYLVAN_BUILD_TESTS)
add_subdirectory(test)
endif()
option(SYLVAN_BUILD_EXAMPLES "Build example tools" OFF)
option(SYLVAN_BUILD_EXAMPLES "Build example tools" ON)
if(SYLVAN_BUILD_EXAMPLES)
add_subdirectory(examples)
endif()
option(SYLVAN_BUILD_DOCS "Build documentation" ON)
if(SYLVAN_BUILD_DOCS)
configure_file("docs/conf.py.in" "docs/conf.py" @ONLY)
find_package(Sphinx REQUIRED)
Sphinx_add_targets(sylvan ${CMAKE_CURRENT_BINARY_DIR}/docs ${CMAKE_CURRENT_SOURCE_DIR}/docs ${CMAKE_CURRENT_BINARY_DIR})
add_custom_target(update_gh_pages
COMMAND "${CMAKE_COMMAND}" -P "${CMAKE_MODULE_PATH}/UpdateGHPages.cmake")
add_dependencies(update_gh_pages sylvan_html)
endif()
set(CPACK_GENERATOR "DEB")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Multi-core implementation of decision diagrams")
set(CPACK_PACKAGE_VENDOR "Tom van Dijk")
set(CPACK_PACKAGE_CONTACT "Tom van Dijk <tom@tvandijk.nl>")
set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_SOURCE_DIR}/README.md")
set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_SOURCE_DIR}/LICENSE")
set(CPACK_PACKAGE_VERSION ${PROJECT_VERSION})
set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_MAJOR_VERSION})
set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_MINOR_VERSION})
set(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_PATCH_VERSION})
include(CPack)
# pkg-config
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/sylvan.pc.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/sylvan.pc" @ONLY)
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/sylvan.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")

resources/3rdparty/sylvan/LICENSE (0)

resources/3rdparty/sylvan/Makefile.am (5)

@@ -1,5 +0,0 @@
ACLOCAL_AMFLAGS = -I m4
AM_CFLAGS = -g -O2 -Wall -Wextra -Werror -std=gnu11
SUBDIRS = src

resources/3rdparty/sylvan/README.md (111)

@@ -1,97 +1,40 @@
Sylvan [![Build Status](https://travis-ci.org/trolando/sylvan.svg?branch=master)](https://travis-ci.org/trolando/sylvan)
======
Sylvan is a parallel (multi-core) BDD library in C. Sylvan allows both sequential and parallel BDD-based algorithms to benefit from parallelism. Sylvan uses the work-stealing framework Lace and a scalable lockless hashtable to implement scalable multi-core BDD operations.
Sylvan is a parallel (multi-core) MTBDD library written in C. Sylvan
implements parallelized operations on BDDs, MTBDDs and LDDs. Both
sequential and parallel BDD-based algorithms can benefit from
parallelism. Sylvan uses the work-stealing framework Lace and parallel
datastructures to implement scalable multi-core operations on decision
diagrams.
Sylvan is developed (&copy; 2011-2016) by the [Formal Methods and Tools](http://fmt.ewi.utwente.nl/) group at the University of Twente as part of the MaDriD project, which is funded by NWO. Sylvan is licensed with the Apache 2.0 license.
Sylvan is developed (&copy; 2011-2016) by the [Formal Methods and Tools](http://fmt.ewi.utwente.nl/)
group at the University of Twente as part of the MaDriD project, which
was funded by NWO, and (&copy; 2016-2017) by the [Formal Methods and Verification](http://fmv.jku.at/)
group at the Johannes Kepler University Linz as part of the RiSE project.
Sylvan is licensed with the Apache 2.0 license.
You can contact the main author of Sylvan at <t.vandijk@utwente.nl>. Please let us know if you use Sylvan in your projects.
The main author of Sylvan is Tom van Dijk who can be reached via <tom@tvandijk.nl>.
Please let us know if you use Sylvan in your projects and if you need
decision diagram operations that are currently not implemented in Sylvan.
Sylvan is available at: https://github.com/utwente-fmt/sylvan
Java/JNI bindings: https://github.com/trolando/jsylvan
Haskell bindings: https://github.com/adamwalker/sylvan-haskell
The main repository of Sylvan is https://github.com/trolando/sylvan. A
mirror is available at https://github.com/utwente-fmt/sylvan.
Publications
------------
T. van Dijk and J. van de Pol (2015) [Sylvan: Multi-core Decision Diagrams](http://dx.doi.org/10.1007/978-3-662-46681-0_60). In: TACAS 2015, LNCS 9035. Springer.
T. van Dijk and A.W. Laarman and J. van de Pol (2012) [Multi-Core BDD Operations for Symbolic Reachability](http://eprints.eemcs.utwente.nl/22166/). In: PDMC 2012, ENTCS. Elsevier.
Usage
-----
Simple examples can be found in the `examples` subdirectory. The file `simple.cpp` contains a toy program that
uses the C++ objects to perform basic BDD manipulation.
The `mc.c` and `lddmc.c` programs are more advanced examples of symbolic model checking (with example models in the `models` subdirectory).
Sylvan depends on the [work-stealing framework Lace](http://fmt.ewi.utwente.nl/tools/lace) for its implementation. Lace is embedded in the Sylvan distribution.
To use Sylvan, Lace must be initialized first.
For more details, see the comments in `src/sylvan.h`.
### Basic functionality
To create new BDDs, you can use:
- `sylvan_true`: representation of constant `true`.
- `sylvan_false`: representation of constant `false`.
- `sylvan_ithvar(var)`: representation of literal &lt;var&gt; (negated: `sylvan_nithvar(var)`)
To follow the BDD edges and obtain the variable at the root of a BDD, you can use:
- `sylvan_var(bdd)`: obtain variable of the root node of &lt;bdd&gt; - requires that &lt;bdd&gt; is not constant `true` or `false`.
- `sylvan_high(bdd)`: follow high edge of &lt;bdd&gt;.
- `sylvan_low(bdd)`: follow low edge of &lt;bdd&gt;.
Bindings for other languages than C/C++ also exist:
You need to manually reference BDDs that you want to keep during garbage collection:
- `sylvan_ref(bdd)`: add reference to &lt;bdd&gt;.
- `sylvan_deref(bdd)`: remove reference to &lt;bdd&gt;.
- `sylvan_protect(bddptr)`: add a pointer reference to the BDD variable &lt;bddptr&gt;
- `sylvan_unprotect(bddptr)`: remove a pointer reference to the BDD variable &lt;bddptr&gt;
- Java/JNI bindings: https://github.com/utwente-fmt/jsylvan
- Haskell bindings: https://github.com/adamwalker/sylvan-haskell
- Python bindings: https://github.com/johnyf/dd
It is recommended to use `sylvan_protect` and `sylvan_unprotect`.
The C++ objects handle this automatically.
**Documentation** is available [at GitHub Pages](https://trolando.github.com/sylvan).
The following 'primitives' are implemented:
- `sylvan_not(bdd)`: negation of &lt;bdd&gt;.
- `sylvan_ite(a,b,c)`: calculate 'if &lt;a&gt; then &lt;b&gt; else &lt;c&gt;'.
- `sylvan_and(a, b)`: calculate a and b
- `sylvan_or(a, b)`: calculate a or b
- `sylvan_nand(a, b)`: calculate not (a and b)
- `sylvan_nor(a, b)`: calculate not (a or b)
- `sylvan_imp(a, b)`: calculate a implies b
- `sylvan_invimp(a, b)`: calculate b implies a
- `sylvan_xor(a, b)`: calculate a xor b
- `sylvan_equiv(a, b)`: calculate a = b
- `sylvan_diff(a, b)`: calculate a and not b
- `sylvan_less(a, b)`: calculate b and not a
- `sylvan_exists(bdd, vars)`: existential quantification of &lt;bdd&gt; with respect to variables &lt;vars&gt;. Here, &lt;vars&gt; is a conjunction of literals.
- `sylvan_forall(bdd, vars)`: universal quantification of &lt;bdd&gt; with respect to variables &lt;vars&gt;. Here, &lt;vars&gt; is a conjunction of literals.
### Other BDD operations
See `src/sylvan_bdd.h`, `src/sylvan_mtbdd.h` and `src/sylvan_ldd.h` for other implemented operations.
See `src/sylvan_obj.hpp` for the C++ interface.
### Garbage collection
Garbage collection is triggered when trying to insert a new node and no new bucket can be found within a reasonable upper bound.
Garbage collection is stop-the-world and all workers must cooperate on garbage collection. (Beware of deadlocks if you use Sylvan operations in critical sections!)
- `sylvan_gc()`: manually trigger garbage collection.
- `sylvan_gc_enable()`: enable garbage collection.
- `sylvan_gc_disable()`: disable garbage collection.
### Table resizing
During garbage collection, it is possible to resize the nodes table and the cache.
Sylvan provides two default implementations: an aggressive version that resizes every time garbage collection is performed,
and a less aggressive version that only resizes when at least half the table is full.
This can be configured in `src/sylvan_config.h`
It is not possible to decrease the size of the nodes table and the cache.
Publications
------------
T. van Dijk (2016) [Sylvan: Multi-core Decision Diagrams](http://dx.doi.org/10.3990/1.9789036541602). PhD Thesis.
### Dynamic reordering
T. van Dijk and J.C. van de Pol (2016) [Sylvan: Multi-core Framework for Decision Diagrams](http://dx.doi.org/10.1007/s10009-016-0433-2>). In: STTT (Special Issue), Springer.
Dynamic reordering is currently not supported.
For now, we suggest users find a good static variable ordering.
T. van Dijk and J. van de Pol (2015) [Sylvan: Multi-core Decision Diagrams](http://dx.doi.org/10.1007/978-3-662-46681-0_60). In: TACAS 2015, LNCS 9035. Springer.
Troubleshooting
---------------
Sylvan may require a larger than normal program stack. You may need to increase the program stack size on your system using `ulimit -s`. Segmentation faults on large computations typically indicate a program stack overflow.
T. van Dijk and A.W. Laarman and J. van de Pol (2012) [Multi-Core BDD Operations for Symbolic Reachability](http://eprints.eemcs.utwente.nl/22166/). In: PDMC 2012, ENTCS. Elsevier.
### I am getting the error "unable to allocate memory: ...!"
Sylvan allocates virtual memory using mmap. If you specify a combined size for the cache and node table larger than your actual available memory you may need to set `vm.overcommit_memory` to `1`. E.g. `echo 1 > /proc/sys/vm/overcommit_memory`. You can make this setting permanent with `echo "vm.overcommit_memory = 1" > /etc/sysctl.d/99-sylvan.conf`. You can verify the setting with `cat /proc/sys/vm/overcommit_memory`. It should report `1`.

resources/3rdparty/sylvan/cmake/FindGMP.cmake (68)

@@ -1,56 +1,24 @@
# FindGMP.cmake can be found at https://code.google.com/p/origin/source/browse/trunk/cmake/FindGMP.cmake
# Copyright (c) 2008-2010 Kent State University
# Copyright (c) 2011-2012 Texas A&M University
#
# This file is distributed under the MIT License. See the accompanying file
# LICENSE.txt or http://www.opensource.org/licenses/mit-license.php for terms
# and conditions.
# Modified by David Korzeniewski to also find MPIR as an alternative.
# Try to find GMP
# Once done this will define:
# - GMP_FOUND - True if the system has GMP
# - GMP_INCLUDE_DIRS - include directories for compiling
# - GMP_LIBRARIES - libraries for linking
# - GMP_DEFINITIONS - cflags suggested by pkg-config
# FIXME: How do I find the version of GMP that I want to use?
# What versions are available?
find_package(PkgConfig)
pkg_check_modules(PC_GMP QUIET gmp)
# NOTE: GMP prefix is understood to be the path to the root of the GMP
# installation library.
set(GMP_PREFIX "" CACHE PATH "The path to the prefix of a GMP installation")
set(GMP_DEFINITIONS ${PC_GMP_CFLAGS_OTHER})
find_path(GMP_INCLUDE_DIR gmp.h
HINTS ${PC_GMP_INCLUDEDIR} ${PC_GMP_INCLUDE_DIRS})
find_path(GMP_INCLUDE_DIR gmp.h
PATHS ${GMP_PREFIX}/include /usr/include /usr/local/include)
find_library(GMP_LIBRARIES NAMES gmp libgmp
HINTS ${PC_GMP_LIBDIR} ${PC_GMP_LIBRARY_DIRS})
find_library(GMP_LIBRARY NAMES gmp
PATHS ${GMP_PREFIX}/lib /usr/lib /usr/local/lib)
include(FindPackageHandleStandardArgs)
# handle the QUIETLY and REQUIRED arguments and set GMP_FOUND to TRUE
# if all listed variables are TRUE
find_package_handle_standard_args(GMP DEFAULT_MSG GMP_LIBRARIES GMP_INCLUDE_DIR)
find_library(GMP_MPIR_LIBRARY NAMES mpir
PATHS ${GMP_PREFIX}/lib /usr/lib /usr/local/lib)
find_library(GMP_MPIRXX_LIBRARY NAMES mpirxx
PATHS ${GMP_PREFIX}/lib /usr/lib /usr/local/lib)
if(GMP_INCLUDE_DIR AND GMP_LIBRARY)
get_filename_component(GMP_LIBRARY_DIR ${GMP_LIBRARY} PATH)
set(GMP_FOUND TRUE)
endif()
if(GMP_INCLUDE_DIR AND GMP_MPIR_LIBRARY AND GMP_MPIRXX_LIBRARY)
get_filename_component(GMP_MPIR_LIBRARY_DIR ${GMP_MPIR_LIBRARY} PATH)
get_filename_component(GMP_MPIRXX_LIBRARY_DIR ${GMP_MPIRXX_LIBRARY} PATH)
set(MPIR_FOUND TRUE)
endif()
if(GMP_FOUND)
if(NOT GMP_FIND_QUIETLY)
MESSAGE(STATUS "Found GMP: ${GMP_LIBRARY}")
endif()
elseif(MPIR_FOUND)
if(NOT GMP_FIND_QUIETLY)
MESSAGE(STATUS "Found GMP alternative MPIR: ${GMP_MPIR_LIBRARY} and ${GMP_MPIRXX_LIBRARY}")
endif()
elseif(GMP_FOUND)
if(GMP_FIND_REQUIRED)
message(FATAL_ERROR "Could not find GMP")
endif()
endif()
MARK_AS_ADVANCED(GMP_MPIRXX_LIBRARY GMP_MPIR_LIBRARY GMP_INCLUDE_DIR GMP_LIBRARY)
mark_as_advanced(GMP_INCLUDE_DIR GMP_LIBRARIES)

resources/3rdparty/sylvan/cmake/FindHwloc.cmake (24)

@@ -0,0 +1,24 @@
# Try to find HWLOC
# Once done this will define:
# - HWLOC_FOUND - True if the system has HWLOC
# - HWLOC_INCLUDE_DIRS - include directories for compiling
# - HWLOC_LIBRARIES - libraries for linking
# - HWLOC_DEFINITIONS - cflags suggested by pkg-config
find_package(PkgConfig)
pkg_check_modules(PC_HWLOC QUIET hwloc)
set(HWLOC_DEFINITIONS ${PC_HWLOC_FLAGS_OTHER})
find_path(HWLOC_INCLUDE_DIR hwloc.h
HINTS ${PC_HWLOC_INCLUDEDIR} ${PC_HWLOC_INCLUDE_DIRS})
find_library(HWLOC_LIBRARIES NAMES hwloc
HINTS ${PC_HWLOC_LIBDIR} ${PC_HWLOC_LIBRARY_DIRS})
include(FindPackageHandleStandardArgs)
# handle the QUIETLY and REQUIRED arguments and set HWLOC_FOUND to TRUE
# if all listed variables are TRUE
find_package_handle_standard_args(HWLOC DEFAULT_MSG HWLOC_LIBRARIES HWLOC_INCLUDE_DIR)
mark_as_advanced(HWLOC_INCLUDE_DIR HWLOC_LIBRARIES)

resources/3rdparty/sylvan/cmake/FindSphinx.cmake (72)

@@ -0,0 +1,72 @@
# This modules defines
# SPHINX_EXECUTABLE
# SPHINX_FOUND
find_program(SPHINX_EXECUTABLE
NAMES sphinx-build sphinx-build2
HINTS $ENV{SPHINX_DIR}
PATHS
/usr/bin
/usr/local/bin
/opt/local/bin
DOC "Sphinx documentation generator"
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Sphinx DEFAULT_MSG SPHINX_EXECUTABLE)
option( SPHINX_HTML_OUTPUT "Build a single HTML with the whole content." ON )
option( SPHINX_EPUB_OUTPUT "Build HTML pages with additional information for building a documentation collection in epub." OFF )
option( SPHINX_LATEX_OUTPUT "Build LaTeX sources that can be compiled to a PDF document using pdflatex." OFF )
option( SPHINX_MAN_OUTPUT "Build manual pages in groff format for UNIX systems." OFF )
option( SPHINX_TEXT_OUTPUT "Build plain text files." OFF )
mark_as_advanced(
SPHINX_EXECUTABLE
SPHINX_HTML_OUTPUT
SPHINX_EPUB_OUTPUT
SPHINX_LATEX_OUTPUT
SPHINX_MAN_OUTPUT
SPHINX_TEXT_OUTPUT
)
function( Sphinx_add_target target_name builder conf source destination )
add_custom_target( ${target_name} ALL
COMMAND ${SPHINX_EXECUTABLE} -b ${builder}
-c ${conf}
${source}
${destination}
COMMENT "Generating sphinx documentation: ${builder}"
)
set_property(
DIRECTORY APPEND PROPERTY
ADDITIONAL_MAKE_CLEAN_FILES
${destination}
)
endfunction()
# Target dependencies can be optionally listed at the end.
function( Sphinx_add_targets target_base_name conf source base_destination )
if( ${SPHINX_HTML_OUTPUT} )
Sphinx_add_target( ${target_base_name}_html html ${conf} ${source} ${base_destination}/html )
endif()
if( ${SPHINX_EPUB_OUTPUT} )
Sphinx_add_target( ${target_base_name}_epub epub ${conf} ${source} ${base_destination}/epub )
endif()
if( ${SPHINX_LATEX_OUTPUT} )
Sphinx_add_target( ${target_base_name}_latex latex ${conf} ${source} ${base_destination}/latex )
endif()
if( ${SPHINX_MAN_OUTPUT} )
Sphinx_add_target( ${target_base_name}_man man ${conf} ${source} ${base_destination}/man )
endif()
if( ${SPHINX_TEXT_OUTPUT} )
Sphinx_add_target( ${target_base_name}_text text ${conf} ${source} ${base_destination}/text )
endif()
endfunction()

resources/3rdparty/sylvan/cmake/UpdateGHPages.cmake (76)

@@ -0,0 +1,76 @@
# Copyright (c) 2011-2013 Thomas Heller
# Modified by Tom van Dijk
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
find_package(Git)
if(NOT GIT_FOUND)
message(FATAL_ERROR "Git not found!")
endif()
if(NOT GHPAGES_REPOSITORY)
set(GHPAGES_REPOSITORY git@github.com:trolando/sylvan.git --branch gh-pages)
endif()
if(EXISTS "${CMAKE_CURRENT_BINARY_DIR}/gh-pages")
execute_process(
COMMAND "${GIT_EXECUTABLE}" pull --rebase
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gh-pages"
RESULT_VARIABLE git_pull_result)
if(NOT "${git_pull_result}" EQUAL "0")
message(FATAL_ERROR "Updating the GitHub pages branch failed.")
endif()
else()
execute_process(
COMMAND "${GIT_EXECUTABLE}" clone ${GHPAGES_REPOSITORY} gh-pages
RESULT_VARIABLE git_clone_result)
if(NOT "${git_clone_result}" EQUAL "0")
message(FATAL_ERROR "Cloning the GitHub pages branch failed. Trying to clone ${GHPAGES_REPOSITORY}")
endif()
endif()
# first delete all files
file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/gh-pages/*")
# copy all documentation files to target branch
file(COPY "${CMAKE_CURRENT_BINARY_DIR}/html/"
DESTINATION "${CMAKE_CURRENT_BINARY_DIR}/gh-pages"
PATTERN ".doctrees" EXCLUDE
PATTERN ".buildinfo" EXCLUDE
)
# git add -A *
execute_process(
COMMAND "${GIT_EXECUTABLE}" add -A *
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gh-pages"
RESULT_VARIABLE git_add_result)
if(NOT "${git_add_result}" EQUAL "0")
message(FATAL_ERROR "Adding files to the GitHub pages branch failed.")
endif()
# check if there are changes to commit
execute_process(
COMMAND "${GIT_EXECUTABLE}" diff-index --quiet HEAD
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gh-pages"
RESULT_VARIABLE git_diff_index_result)
if(NOT "${git_diff_index_result}" EQUAL "0")
# commit changes
execute_process(
COMMAND "${GIT_EXECUTABLE}" commit -m "Updated documentation"
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gh-pages"
RESULT_VARIABLE git_commit_result)
if(NOT "${git_commit_result}" EQUAL "0")
message(FATAL_ERROR "Commiting to the GitHub pages branch failed.")
endif()
# push everything up to github
execute_process(
COMMAND "${GIT_EXECUTABLE}" push
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/gh-pages"
RESULT_VARIABLE git_push_result)
if(NOT "${git_push_result}" EQUAL "0")
message(FATAL_ERROR "Pushing to the GitHub pages branch failed.")
endif()
endif()

resources/3rdparty/sylvan/configure.ac (21)

@@ -1,21 +0,0 @@
AC_PREREQ([2.60])
AC_INIT([sylvan], [1.0])
AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_AUX_DIR([tools])
AM_INIT_AUTOMAKE([foreign])
AC_PROG_CC
AX_CHECK_COMPILE_FLAG([-std=c11],,[AC_MSG_FAILURE([no acceptable C11 compiler found.])])
AC_PROG_CXX
LT_INIT
AC_CHECKING([for any suitable hwloc installation])
AC_CHECK_LIB([hwloc], [hwloc_topology_init], [AC_CHECK_HEADER([hwloc.h], [hwloc=yes])])
AM_CONDITIONAL([HAVE_LIBHWLOC], [test "$hwloc" = "yes"])
AC_CANONICAL_HOST
AM_CONDITIONAL([DARWIN], [case $host_os in darwin*) true;; *) false;; esac])
# test x$(uname) == "xDarwin"])
AC_CONFIG_FILES([Makefile src/Makefile])
AC_OUTPUT

resources/3rdparty/sylvan/docs/conf.py.in (58)

@@ -0,0 +1,58 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
extensions = ['sphinx.ext.todo', 'sphinx.ext.imgmath', 'sphinx.ext.githubpages']
# templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
project = 'Sylvan'
copyright = '2017, Tom van Dijk'
author = 'Tom van Dijk'
version = '@PROJECT_VERSION@'
release = '@PROJECT_VERSION@'
language = None
exclude_patterns = ['Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# html_theme = 'alabaster'
# html_theme = 'default'
import os
if os.environ.get('READTHEDOCS', None) != 'True':
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Sylvan.tex', 'Sylvan Documentation',
'Tom van Dijk', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sylvan', 'Sylvan Documentation',
[author], 1)
]

resources/3rdparty/sylvan/docs/index.rst (282)

@@ -0,0 +1,282 @@
Sylvan
=====================
Sylvan is a parallel (multi-core) MTBDD library written in C. Sylvan
implements parallelized operations on BDDs, MTBDDs and LDDs. Both
sequential and parallel BDD-based algorithms can benefit from
parallelism. Sylvan uses the work-stealing framework Lace and parallel
datastructures to implement scalable multi-core operations on decision
diagrams.
Sylvan is developed (© 2011-2016) by the `Formal Methods and
Tools <http://fmt.ewi.utwente.nl/>`__ group at the University of Twente
as part of the MaDriD project, which is funded by NWO, and (© 2016-2017)
by the `Formal Methods and Verification <http://fmv.jku.at/>`__ group at
the Johannes Kepler University Linz as part of the RiSE project. Sylvan
is licensed with the Apache 2.0 license.
The main author of the project is Tom van Dijk who can be reached via
tom@tvandijk.nl.
Please let us know if you use Sylvan in your projects and if you need
decision diagram operations that are currently not implemented in Sylvan.
The main repository of Sylvan is https://github.com/trolando/sylvan. A
mirror is available at https://github.com/utwente-fmt/sylvan.
Bindings for other languages than C/C++ also exist:
- Java/JNI bindings: https://github.com/utwente-fmt/jsylvan
- Haskell bindings: https://github.com/adamwalker/sylvan-haskell
- Python bindings: https://github.com/johnyf/dd
Dependencies
------------
Sylvan has the following required dependencies:
- **CMake** for compiling.
- **gmp** (``libgmp-dev``) for the GMP leaves in MTBDDs.
- **hwloc** (``libhwloc-dev``) for pinning worker threads to processors.
Sylvan depends on the `work-stealing framework
Lace <http://fmt.ewi.utwente.nl/tools/lace>`__ for its implementation.
Lace is embedded in the Sylvan distribution.
Building
--------
It is recommended to build Sylvan in a separate build directory:
.. code:: bash
mkdir build
cd build
cmake ..
make && make test && make install
It is recommended to use ``ccmake`` to configure the build settings of Sylvan. For example,
you can choose whether you want shared/static libraries, whether you want to enable
statistics gathering and whether you want a ``Debug`` or a ``Release`` build.
Using Sylvan
------------
To use Sylvan, the library and its dependency Lace must be initialized:
.. code:: c
#include <sylvan.h>
int main() {
int n_workers = 0; // auto-detect
lace_init(n_workers, 0);
lace_startup(0, NULL, NULL);
size_t nodes_minsize = 1LL<<22;
size_t nodes_maxsize = 1LL<<26;
size_t cache_minsize = 1LL<<23;
size_t cache_maxsize = 1LL<<27;
sylvan_init_package(nodes_minsize, nodes_maxsize, cache_minsize, cache_maxsize);
sylvan_init_mtbdd();
...
sylvan_stats_report(stdout);
sylvan_quit();
lace_exit();
}
The call to ``lace_init`` initializes the Lace framework, which sets up the data structures
for work-stealing. The parameter ``n_workers`` can be set to 0 for auto-detection. The
function ``lace_startup`` then creates all other worker threads. The worker threads run
until ``lace_exit`` is called. Lace must be started before Sylvan can be initialized.
Sylvan is initialized with a call to ``sylvan_init_package``. Here we choose the initial
and maximum sizes of the nodes table and the operation cache. In the example, we choose a maximum
nodes table size of 2^26 and a maximum cache size of 2^27. The initial sizes are
set to 2^22 and 2^23, respectively. The sizes must be powers of 2.
Sylvan allocates memory for the maximum sizes *in virtual memory* but only uses the space
needed for the initial sizes. The sizes are doubled during garbage collection, until the maximum
size has been reached.
After ``sylvan_init_package``, the subpackages ``mtbdd`` and ``ldd`` can be initialized with
``sylvan_init_mtbdd`` and ``sylvan_init_ldd``. This mainly allocates auxiliary datastructures for
garbage collection.
If you enable statistics generation (via CMake) then you can use ``sylvan_stats_report`` to report
the obtained statistics to a given ``FILE*``.
The Lace framework
~~~~~~~~~~~~~~~~~~
Sylvan uses the Lace framework to offer 'automatic' parallelization of decision diagram operations.
Many functions in Sylvan are Lace tasks. To call a Lace task, the variables
``__lace_worker`` and ``__lace_dq_head`` must be initialized **locally**.
Use the macro ``LACE_ME`` to initialize the variables in every function that calls Sylvan functions
and is not itself a Lace task.
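For example, a minimal sketch (``make_conjunction`` is a hypothetical helper, not part of Sylvan):

.. code:: c

    #include <sylvan.h>

    /* Not itself a Lace task, so it needs LACE_ME before calling Sylvan operations. */
    BDD make_conjunction(BDD a, BDD b)
    {
        LACE_ME;  /* declares and initializes __lace_worker and __lace_dq_head locally */
        return sylvan_and(a, b);
    }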
Garbage collection and referencing nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Like all decision diagram implementations, Sylvan performs garbage collection.
Garbage collection is triggered when trying to insert a new node and no
empty space can be found in the table within a reasonable upper bound.
To ensure that no decision diagram nodes are overwritten, you must ensure that
Sylvan knows which decision diagrams you care about.
The easiest way to do this is with ``sylvan_protect`` and ``sylvan_unprotect`` to protect
a given pointer.
These functions protect the decision diagram referenced to by that pointer at the time
that garbage collection is performed.
Unlike some other implementations of decision diagrams,
you can modify the variable between the calls to ``sylvan_protect`` and ``sylvan_unprotect``
without explicitly changing the reference.
To manually trigger garbage collection, call ``sylvan_gc``.
You can use ``sylvan_gc_disable`` and ``sylvan_gc_enable`` to disable garbage collection or
enable it again. If garbage collection is disabled, the program will abort when the nodes table
is full.
**Warning**: Sylvan is a multi-threaded library and all workers must cooperate for garbage collection. If you use locking mechanisms in your code, beware of deadlocks!
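For example, a small sketch (assuming Lace and Sylvan are already initialized):

.. code:: c

    BDD reachable = sylvan_false;
    sylvan_protect(&reachable);     /* Sylvan tracks the pointer, not its current value */

    LACE_ME;
    reachable = sylvan_ithvar(0);
    reachable = sylvan_or(reachable, sylvan_ithvar(1));
    /* reachable may be reassigned freely; whatever it points to at GC time is kept */

    /* ... */
    sylvan_unprotect(&reachable);   /* stop tracking once the BDD is no longer needed */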
Basic BDD functionality
~~~~~~~~~~~~~~~~~~~~~~~
To create new BDDs, you can use:
- ``sylvan_true``: representation of constant ``true``.
- ``sylvan_false``: representation of constant ``false``.
- ``sylvan_ithvar(var)``: representation of literal <var> (negated: ``sylvan_nithvar(var)``)
To follow the BDD edges and obtain the variable at the root of a BDD,
you can use (only for internal nodes, not for leaves ``sylvan_true`` and ``sylvan_false``):
- ``sylvan_var(bdd)``: obtain the variable of the root node of <bdd>.
- ``sylvan_high(bdd)``: follow the high edge of <bdd>.
- ``sylvan_low(bdd)``: follow the low edge of <bdd>.
You need to manually reference BDDs that you want to keep during garbage
collection:
- ``sylvan_protect(bddptr)``: add a pointer reference to <bddptr>.
- ``sylvan_unprotect(bddptr)``: remove a pointer reference to <bddptr>.
- ``sylvan_ref(bdd)``: add a reference to <bdd>.
- ``sylvan_deref(bdd)``: remove a reference to <bdd>.
It is recommended to use ``sylvan_protect`` and ``sylvan_unprotect``.
The C++ objects (defined in ``sylvan_obj.hpp``) handle this automatically.
The following basic operations are implemented:
- ``sylvan_not(bdd)``: compute the negation of <bdd>.
- ``sylvan_ite(a,b,c)``: compute 'if <a> then <b> else <c>'.
- ``sylvan_and(a, b)``: compute '<a> and <b>'
- ``sylvan_or(a, b)``: compute '<a> or <b>'
- ``sylvan_nand(a, b)``: compute 'not (<a> and <b>)'
- ``sylvan_nor(a, b)``: compute 'not (<a> or <b>)'
- ``sylvan_imp(a, b)``: compute '<a> then <b>'
- ``sylvan_invimp(a, b)``: compute '<b> then <a>'
- ``sylvan_xor(a, b)``: compute '<a> xor <b>'
- ``sylvan_equiv(a, b)``: compute '<a> = <b>'
- ``sylvan_diff(a, b)``: compute '<a> and not <b>'
- ``sylvan_less(a, b)``: compute '<b> and not <a>'
- ``sylvan_exists(bdd, vars)``: existential quantification of <bdd> with respect to variables <vars>.
- ``sylvan_forall(bdd, vars)``: universal quantification of <bdd> with respect to variables <vars>.
A set of variables (like <vars> above) is a BDD representing the conjunction of the variables.
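Putting these together, a small sketch (the function name is illustrative and protection of intermediate results is omitted for brevity):

.. code:: c

    #include <sylvan.h>

    void bdd_basics(void)
    {
        LACE_ME;

        BDD x1 = sylvan_ithvar(1);
        BDD x2 = sylvan_ithvar(2);
        BDD x3 = sylvan_ithvar(3);

        BDD f = sylvan_ite(x1, x2, sylvan_not(x3));   /* if x1 then x2 else not x3 */

        BDD vars = sylvan_and(x1, x3);                /* the set {x1, x3} as a conjunction */
        BDD ex   = sylvan_exists(f, vars);            /* a function of x2 only */
        BDD all  = sylvan_forall(f, vars);
        (void)ex; (void)all;
    }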
Other BDD operations
~~~~~~~~~~~~~~~~~~~~
See ``src/sylvan_bdd.h`` for other operations on BDDs, especially operations
that are relevant for model checking.
Basic MTBDD functionality
~~~~~~~~~~~~~~~~~~~~~~~~~
See ``src/sylvan_mtbdd.h`` for operations on multi-terminal BDDs.
Basic LDD functionality
~~~~~~~~~~~~~~~~~~~~~~~
See ``src/sylvan_ldd.h`` for operations on List DDs.
Support for C++
~~~~~~~~~~~~~~~
See ``src/sylvan_obj.hpp`` for the C++ interface.
.. Adding custom decision diagram operations
.. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Table resizing
~~~~~~~~~~~~~~
During garbage collection, it is possible to resize the nodes table and
the cache. Sylvan provides two default implementations: an aggressive
version that resizes every time garbage collection is performed, and a
less aggressive version that only resizes when at least half the table is
full. This can be configured in ``src/sylvan_config.h``. It is not
possible to decrease the size of the nodes table and the cache.
Dynamic reordering
~~~~~~~~~~~~~~~~~~
Dynamic reordering is not yet supported. For now, we suggest users
find a good static variable ordering.
Examples
--------
Simple examples can be found in the ``examples`` subdirectory. The file
``simple.cpp`` contains a toy program that uses the C++ objects to
perform basic BDD manipulation. The ``mc.c`` and ``lddmc.c`` programs
are more advanced examples of symbolic model checking (with example
models in the ``models`` subdirectory).
Troubleshooting
---------------
Sylvan may require a larger than normal program stack. You may need to
increase the program stack size on your system using ``ulimit -s``.
Segmentation faults on large computations typically indicate a program
stack overflow.
I am getting the error "unable to allocate memory: ...!"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sylvan allocates virtual memory using mmap. If you specify a combined
size for the cache and node table larger than your actual available
memory you may need to set ``vm.overcommit_memory`` to ``1``. E.g.
``echo 1 > /proc/sys/vm/overcommit_memory``. You can make this setting
permanent with
``echo "vm.overcommit_memory = 1" > /etc/sysctl.d/99-sylvan.conf``. You
can verify the setting with ``cat /proc/sys/vm/overcommit_memory``. It
should report ``1``.
I get errors about ``__lace_worker`` and ``__lace_dq_head``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Many Sylvan operations are implemented as Lace tasks. To call a Lace
task, the variables ``__lace_worker`` and ``__lace_dq_head`` must be
initialized. Use the macro ``LACE_ME`` to do this. Only use ``LACE_ME``
locally (in a function), never globally!
Publications
------------
T. van Dijk (2016) `Sylvan: Multi-core Decision
Diagrams <http://dx.doi.org/10.3990/1.9789036541602>`__. PhD Thesis.
T. van Dijk and J.C. van de Pol (2016) `Sylvan: Multi-core Framework
for Decision Diagrams <http://dx.doi.org/10.1007/s10009-016-0433-2>`__.
In: STTT (Special Issue), Springer.
T. van Dijk and J.C. van de Pol (2015) `Sylvan: Multi-core Decision
Diagrams <http://dx.doi.org/10.1007/978-3-662-46681-0_60>`__. In: TACAS
2015, LNCS 9035. Springer.
T. van Dijk and A.W. Laarman and J.C. van de Pol (2012) `Multi-Core BDD
Operations for Symbolic
Reachability <http://eprints.eemcs.utwente.nl/22166/>`__. In: PDMC 2012,
ENTCS. Elsevier.

resources/3rdparty/sylvan/examples/CMakeLists.txt (20)

@@ -1,6 +1,3 @@
cmake_minimum_required(VERSION 2.6)
project(sylvan C CXX)
include_directories(.)
add_executable(mc mc.c getrss.h getrss.c)
@@ -9,15 +6,15 @@ target_link_libraries(mc sylvan)
add_executable(lddmc lddmc.c getrss.h getrss.c)
target_link_libraries(lddmc sylvan)
add_executable(ldd2bdd ldd2bdd.c)
target_link_libraries(ldd2bdd sylvan)
add_executable(nqueens nqueens.c)
target_link_libraries(nqueens sylvan)
add_executable(simple simple.cpp)
target_link_libraries(simple sylvan stdc++)
if(USE_CARL)
message(STATUS "Sylvan - Example for Storm enabled.")
add_executable(storm-rf storm.cpp)
target_link_libraries(storm-rf sylvan stdc++ ${carl_LIBRARIES})
endif(USE_CARL)
include(CheckIncludeFiles)
check_include_files("gperftools/profiler.h" HAVE_PROFILER)
@@ -27,12 +24,17 @@ if(HAVE_PROFILER)
set_target_properties(lddmc PROPERTIES COMPILE_DEFINITIONS "HAVE_PROFILER")
target_link_libraries(lddmc profiler)
set_target_properties(nqueens PROPERTIES COMPILE_DEFINITIONS "HAVE_PROFILER")
target_link_libraries(nqueens profiler)
endif()
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
# add argp library for OSX
target_link_libraries(mc argp)
target_link_libraries(lddmc argp)
target_link_libraries(ldd2bdd argp)
target_link_libraries(nqueens argp)
endif()

resources/3rdparty/sylvan/examples/getrss.c (0)

resources/3rdparty/sylvan/examples/getrss.h (0)

resources/3rdparty/sylvan/examples/ldd2bdd.c (777)

@@ -0,0 +1,777 @@
#include <argp.h>
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sylvan_int.h>
/* Configuration */
static int workers = 0; // autodetect
static int verbose = 0;
static char* model_filename = NULL; // filename of model
static char* bdd_filename = NULL; // filename of output BDD
static char* sizes = "22,27,21,26"; // default sizes
static int check_results = 0;
/* argp configuration */
static struct argp_option options[] =
{
{"workers", 'w', "<workers>", 0, "Number of workers (default=0: autodetect)", 0},
{"table-sizes", 1, "<tablesize>,<tablemax>,<cachesize>,<cachemax>", 0, "Sizes of nodes table and operation cache as powers of 2", 0},
{"check-results", 2, 0, 0, "Check new transition relations ", 0},
{"verbose", 'v', 0, 0, "Set verbose", 0},
{0, 0, 0, 0, 0, 0}
};
static error_t
parse_opt(int key, char *arg, struct argp_state *state)
{
switch (key) {
case 'w':
workers = atoi(arg);
break;
case 'v':
verbose = 1;
break;
case 1:
sizes = arg;
break;
case 2:
check_results = 1;
break;
case ARGP_KEY_ARG:
if (state->arg_num == 0) model_filename = arg;
if (state->arg_num == 1) bdd_filename = arg;
if (state->arg_num >= 2) argp_usage(state);
break;
case ARGP_KEY_END:
if (state->arg_num < 2) argp_usage(state);
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
static struct argp argp = { options, parse_opt, "<model> [<output-bdd>]", 0, 0, 0, 0 };
/* Globals */
typedef struct set
{
MDD mdd;
MDD proj;
} *set_t;
typedef struct relation
{
MDD mdd;
MDD meta;
} *rel_t;
static size_t vector_size; // size of vector
static int next_count; // number of partitions of the transition relation
static rel_t *next; // each partition of the transition relation
static int actionbits = 0;
static int has_actions = 0;
#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); }
/* Load a set from file */
#define set_load(f) CALL(set_load, f)
TASK_1(set_t, set_load, FILE*, f)
{
lddmc_serialize_fromfile(f);
size_t mdd;
size_t proj;
int size;
if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
if (fread(&proj, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
if (fread(&size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
set_t set = (set_t)malloc(sizeof(struct set));
set->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd));
set->proj = lddmc_ref(lddmc_serialize_get_reversed(proj));
return set;
}
/* Load a relation from file */
#define rel_load(f) CALL(rel_load, f)
TASK_1(rel_t, rel_load, FILE*, f)
{
lddmc_serialize_fromfile(f);
size_t mdd;
size_t meta;
if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
if (fread(&meta, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
rel_t rel = (rel_t)malloc(sizeof(struct relation));
rel->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd));
rel->meta = lddmc_ref(lddmc_serialize_get_reversed(meta));
return rel;
}
/**
* Compute the highest value for each variable level.
* This method is called for the set of reachable states.
*/
static uint64_t compute_highest_id;
#define compute_highest(dd, arr) CALL(compute_highest, dd, arr)
VOID_TASK_2(compute_highest, MDD, dd, uint32_t*, arr)
{
if (dd == lddmc_true || dd == lddmc_false) return;
uint64_t result = 1;
if (cache_get3(compute_highest_id, dd, 0, 0, &result)) return;
cache_put3(compute_highest_id, dd, 0, 0, result);
mddnode_t n = LDD_GETNODE(dd);
SPAWN(compute_highest, mddnode_getright(n), arr);
CALL(compute_highest, mddnode_getdown(n), arr+1);
SYNC(compute_highest);
if (!mddnode_getcopy(n)) {
const uint32_t v = mddnode_getvalue(n);
while (1) {
const uint32_t cur = *(volatile uint32_t*)arr;
if (v <= cur) break;
if (__sync_bool_compare_and_swap(arr, cur, v)) break;
}
}
}
/**
* Compute the highest value for the action label.
* This method is called for each transition relation.
*/
static uint64_t compute_highest_action_id;
#define compute_highest_action(dd, meta, arr) CALL(compute_highest_action, dd, meta, arr)
VOID_TASK_3(compute_highest_action, MDD, dd, MDD, meta, uint32_t*, target)
{
if (dd == lddmc_true || dd == lddmc_false) return;
if (meta == lddmc_true) return;
uint64_t result = 1;
if (cache_get3(compute_highest_action_id, dd, meta, 0, &result)) return;
cache_put3(compute_highest_action_id, dd, meta, 0, result);
/* meta:
* 0 is skip
* 1 is read
* 2 is write
* 3 is only-read
* 4 is only-write
* 5 is action label (at end, before -1)
* -1 is end
*/
const mddnode_t n = LDD_GETNODE(dd);
const mddnode_t nmeta = LDD_GETNODE(meta);
const uint32_t vmeta = mddnode_getvalue(nmeta);
if (vmeta == (uint32_t)-1) return;
SPAWN(compute_highest_action, mddnode_getright(n), meta, target);
CALL(compute_highest_action, mddnode_getdown(n), mddnode_getdown(nmeta), target);
SYNC(compute_highest_action);
if (vmeta == 5) {
has_actions = 1;
const uint32_t v = mddnode_getvalue(n);
while (1) {
const uint32_t cur = *(volatile uint32_t*)target;
if (v <= cur) break;
if (__sync_bool_compare_and_swap(target, cur, v)) break;
}
}
}
/**
* Compute the BDD equivalent of the LDD of a set of states.
*/
static uint64_t bdd_from_ldd_id;
#define bdd_from_ldd(dd, bits, firstvar) CALL(bdd_from_ldd, dd, bits, firstvar)
TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
{
/* simple for leaves */
if (dd == lddmc_false) return mtbdd_false;
if (dd == lddmc_true) return mtbdd_true;
MTBDD result;
/* get from cache */
/* cached on (dd, bits_mdd, firstvar); these three values are assumed to determine the result */
if (cache_get3(bdd_from_ldd_id, dd, bits_mdd, firstvar, &result)) return result;
mddnode_t n = LDD_GETNODE(dd);
mddnode_t nbits = LDD_GETNODE(bits_mdd);
int bits = (int)mddnode_getvalue(nbits);
/* spawn right, same bits_mdd and firstvar */
mtbdd_refs_spawn(SPAWN(bdd_from_ldd, mddnode_getright(n), bits_mdd, firstvar));
/* call down, with next bits_mdd and firstvar */
MTBDD down = CALL(bdd_from_ldd, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits);
/* encode current value */
uint32_t val = mddnode_getvalue(n);
for (int i=0; i<bits; i++) {
/* encode with high bit first */
int bit = bits-i-1;
if (val & (1LL<<i)) down = mtbdd_makenode(firstvar + 2*bit, mtbdd_false, down);
else down = mtbdd_makenode(firstvar + 2*bit, down, mtbdd_false);
}
/* sync right */
mtbdd_refs_push(down);
MTBDD right = mtbdd_refs_sync(SYNC(bdd_from_ldd));
/* take union of current and right */
mtbdd_refs_push(right);
result = sylvan_or(down, right);
mtbdd_refs_pop(2);
/* put in cache */
cache_put3(bdd_from_ldd_id, dd, bits_mdd, firstvar, result);
return result;
}
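Concretely, a level value is written most-significant bit first onto the even BDD variables firstvar, firstvar+2, ..., leaving the odd variables free for the primed (next-state) copies used by the transition relations. A small standalone illustration of that mapping (plain C, no Sylvan calls; the expected output is shown in the comment):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t val = 5, firstvar = 0;  /* value 5 = binary 101, encoded in 3 bits */
    int bits = 3;
    for (int b = 0; b < bits; b++) {
        printf("BDD variable %u <- value bit %d = %d\n",
               firstvar + 2*b, bits - 1 - b, (int)((val >> (bits - 1 - b)) & 1));
    }
    /* prints:
     *   BDD variable 0 <- value bit 2 = 1
     *   BDD variable 2 <- value bit 1 = 0
     *   BDD variable 4 <- value bit 0 = 1
     */
    return 0;
}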
/**
* Compute the BDD equivalent of an LDD transition relation.
*/
static uint64_t bdd_from_ldd_rel_id;
#define bdd_from_ldd_rel(dd, bits, firstvar, meta) CALL(bdd_from_ldd_rel, dd, bits, firstvar, meta)
TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD, meta)
{
if (dd == lddmc_false) return mtbdd_false;
if (dd == lddmc_true) return mtbdd_true;
assert(meta != lddmc_false && meta != lddmc_true);
/* meta:
* -1 is end
* 0 is skip
* 1 is read
* 2 is write
* 3 is only-read
* 4 is only-write
* 5 is action label (at end, before -1)
*/
MTBDD result;
/* cached on (dd, bits_mdd, firstvar, meta); these four values are assumed to determine the result */
if (cache_get4(bdd_from_ldd_rel_id, dd, bits_mdd, firstvar, meta, &result)) return result;
const mddnode_t n = LDD_GETNODE(dd);
const mddnode_t nmeta = LDD_GETNODE(meta);
const mddnode_t nbits = LDD_GETNODE(bits_mdd);
const int bits = (int)mddnode_getvalue(nbits);
const uint32_t vmeta = mddnode_getvalue(nmeta);
assert(vmeta != (uint32_t)-1);
if (vmeta == 0) {
/* skip level */
result = bdd_from_ldd_rel(dd, mddnode_getdown(nbits), firstvar + 2*bits, mddnode_getdown(nmeta));
} else if (vmeta == 1) {
/* read level */
assert(!mddnode_getcopy(n)); // do not process read copy nodes for now
assert(mddnode_getright(n) != lddmc_true);
/* spawn right */
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
/* compute down with same bits / firstvar */
MTBDD down = bdd_from_ldd_rel(mddnode_getdown(n), bits_mdd, firstvar, mddnode_getdown(nmeta));
mtbdd_refs_push(down);
/* encode read value */
uint32_t val = mddnode_getvalue(n);
MTBDD part = mtbdd_true;
for (int i=0; i<bits; i++) {
/* encode with high bit first */
int bit = bits-i-1;
if (val & (1LL<<i)) part = mtbdd_makenode(firstvar + 2*bit, mtbdd_false, part);
else part = mtbdd_makenode(firstvar + 2*bit, part, mtbdd_false);
}
/* intersect read value with down result */
mtbdd_refs_push(part);
down = sylvan_and(part, down);
mtbdd_refs_pop(2);
/* sync right */
mtbdd_refs_push(down);
MTBDD right = mtbdd_refs_sync(SYNC(bdd_from_ldd_rel));
/* take union of current and right */
mtbdd_refs_push(right);
result = sylvan_or(down, right);
mtbdd_refs_pop(2);
} else if (vmeta == 2 || vmeta == 4) {
/* write or only-write level */
/* spawn right */
assert(mddnode_getright(n) != lddmc_true);
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
/* get recursive result */
MTBDD down = CALL(bdd_from_ldd_rel, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits, mddnode_getdown(nmeta));
if (mddnode_getcopy(n)) {
/* encode a copy node */
for (int i=0; i<bits; i++) {
int bit = bits-i-1;
MTBDD low = mtbdd_makenode(firstvar + 2*bit + 1, down, mtbdd_false);
mtbdd_refs_push(low);
MTBDD high = mtbdd_makenode(firstvar + 2*bit + 1, mtbdd_false, down);
mtbdd_refs_pop(1);
down = mtbdd_makenode(firstvar + 2*bit, low, high);
}
} else {
/* encode written value */
uint32_t val = mddnode_getvalue(n);
for (int i=0; i<bits; i++) {
/* encode with high bit first */
int bit = bits-i-1;
if (val & (1LL<<i)) down = mtbdd_makenode(firstvar + 2*bit + 1, mtbdd_false, down);
else down = mtbdd_makenode(firstvar + 2*bit + 1, down, mtbdd_false);
}
}
/* sync right */
mtbdd_refs_push(down);
MTBDD right = mtbdd_refs_sync(SYNC(bdd_from_ldd_rel));
/* take union of current and right */
mtbdd_refs_push(right);
result = sylvan_or(down, right);
mtbdd_refs_pop(2);
} else if (vmeta == 3) {
/* only-read level */
assert(!mddnode_getcopy(n)); // do not process read copy nodes
/* spawn right */
mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
/* get recursive result */
MTBDD down = CALL(bdd_from_ldd_rel, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits, mddnode_getdown(nmeta));
/* encode read value */
uint32_t val = mddnode_getvalue(n);
for (int i=0; i<bits; i++) {
/* encode with high bit first */
int bit = bits-i-1;
/* only-read, so write same value */
if (val & (1LL<<i)) down = mtbdd_makenode(firstvar + 2*bit + 1, mtbdd_false, down);
else down = mtbdd_makenode(firstvar + 2*bit + 1, down, mtbdd_false);
if (val & (1LL<<i)) down = mtbdd_makenode(firstvar + 2*bit, mtbdd_false, down);
else down = mtbdd_makenode(firstvar + 2*bit, down, mtbdd_false);
}
/* sync right */
mtbdd_refs_push(down);
MTBDD right = mtbdd_refs_sync(SYNC(bdd_from_ldd_rel));
/* take union of current and right */
mtbdd_refs_push(right);
result = sylvan_or(down, right);
mtbdd_refs_pop(2);
} else if (vmeta == 5) {
assert(!mddnode_getcopy(n)); // not allowed!
/* we assume this is the last value */
result = mtbdd_true;
/* encode action value */
uint32_t val = mddnode_getvalue(n);
for (int i=0; i<actionbits; i++) {
/* encode with high bit first */
int bit = actionbits-i-1;
/* action label bits use their own variable range, starting at 1000000 */
if (val & (1LL<<i)) result = mtbdd_makenode(1000000 + bit, mtbdd_false, result);
else result = mtbdd_makenode(1000000 + bit, result, mtbdd_false);
}
} else {
assert(vmeta <= 5); // unreachable: every valid meta value is handled above
}
cache_put4(bdd_from_ldd_rel_id, dd, bits_mdd, firstvar, meta, result);
return result;
}
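For the relations, each level uses both variable copies: the read (unprimed) copy of its b-th bit sits at firstvar + 2*b, the written (primed) copy at firstvar + 2*b + 1, and action label bits are moved out of the way to 1000000 + bit. A small sketch of that layout (the helper names are ours, purely for illustration):

#include <stdint.h>
#include <stdio.h>

static uint32_t read_var(uint32_t firstvar, int b)  { return firstvar + 2*b; }     /* unprimed copy */
static uint32_t write_var(uint32_t firstvar, int b) { return firstvar + 2*b + 1; } /* primed copy */
static uint32_t action_var(int b)                   { return 1000000 + b; }        /* action label bit */

int main(void)
{
    /* a level that starts at variable 6 and uses 3 bits occupies variables 6..11 */
    for (int b = 0; b < 3; b++)
        printf("bit %d: read var %u, write var %u\n", b, read_var(6, b), write_var(6, b));
    printf("action bit 0: var %u\n", action_var(0));
    return 0;
}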
/**
* Convert the meta MDD into the cube of BDD variables used by one transition relation.
*/
MTBDD
meta_to_bdd(MDD meta, MDD bits_mdd, uint32_t firstvar)
{
if (meta == lddmc_false || meta == lddmc_true) return mtbdd_true;
/* meta:
* -1 is end
* 0 is skip (no variables)
* 1 is read (variables added by write)
* 2 is write
* 3 is only-read
* 4 is only-write
*/
const mddnode_t nmeta = LDD_GETNODE(meta);
const uint32_t vmeta = mddnode_getvalue(nmeta);
if (vmeta == (uint32_t)-1) return mtbdd_true;
if (vmeta == 1) {
/* return recursive result, don't go down on bits */
return meta_to_bdd(mddnode_getdown(nmeta), bits_mdd, firstvar);
}
const mddnode_t nbits = LDD_GETNODE(bits_mdd);
const int bits = (int)mddnode_getvalue(nbits);
/* compute recursive result */
MTBDD res = meta_to_bdd(mddnode_getdown(nmeta), mddnode_getdown(nbits), firstvar + 2*bits);
/* add our variables if meta is 2,3,4 */
if (vmeta != 0 && vmeta != 5) {
for (int i=0; i<bits; i++) {
res = mtbdd_makenode(firstvar + 2*(bits-i-1) + 1, mtbdd_false, res);
res = mtbdd_makenode(firstvar + 2*(bits-i-1), mtbdd_false, res);
}
}
return res;
}
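As a worked example of the cube meta_to_bdd builds (the meta sequence and bit widths below are made up for illustration):

/* Suppose a relation has meta = [1, 2, 0, 3, -1] and the three levels use 2, 1 and 3 bits:
 *   level 0: read (1) adds nothing; the matching write (2) adds variables 0,1,2,3
 *   level 1: skip (0) only advances the position to variable 6 (= 4 + 2*1)
 *   level 2: only-read (3) adds variables 6,7,8,9,10,11
 * so the resulting cube ranges over the ten variables 0-3 and 6-11.
 */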
static char*
to_h(double size, char *buf)
{
const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
int i = 0;
for (;size>1024;size/=1024) i++;
sprintf(buf, "%.*f %s", i, size, units[i]);
return buf;
}
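For the default --table-sizes of 22,27,21,26 the two reports in main() below work out as follows (plain arithmetic with the 24 bytes per node and 36 bytes per cache entry used in those calls; to_h divides by 1024, so its units are binary):

/* maximum: (1<<27)*24 + (1<<26)*36 = 3072 MB + 2304 MB = 5376 MB, reported as "5.250 GB"
 * initial: (1<<22)*24 + (1<<21)*36 =   96 MB +   72 MB =  168 MB, reported as "168.00 MB"
 */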
VOID_TASK_0(gc_start)
{
printf("Starting garbage collection\n");
}
VOID_TASK_0(gc_end)
{
printf("Garbage collection done\n");
}
int
main(int argc, char **argv)
{
argp_parse(&argp, argc, argv, 0, 0, 0);
// Parse table sizes
int tablesize, maxtablesize, cachesize, maxcachesize;
if (sscanf(sizes, "%d,%d,%d,%d", &tablesize, &maxtablesize, &cachesize, &maxcachesize) != 4) {
Abort("Invalid string for --table-sizes, try e.g. --table-sizes=23,28,22,27");
}
if (tablesize < 10 || maxtablesize < 10 || cachesize < 10 || maxcachesize < 10 ||
tablesize > 40 || maxtablesize > 40 || cachesize > 40 || maxcachesize > 40) {
Abort("Invalid string for --table-sizes, must be between 10 and 40");
}
if (tablesize > maxtablesize) {
Abort("Invalid string for --table-sizes, tablesize is larger than maxtablesize");
}
if (cachesize > maxcachesize) {
Abort("Invalid string for --table-sizes, cachesize is larger than maxcachesize");
}
// Report table sizes
char buf[32];
to_h((1ULL<<maxtablesize)*24+(1ULL<<maxcachesize)*36, buf);
printf("Sylvan allocates %s virtual memory for nodes table and operation cache.\n", buf);
to_h((1ULL<<tablesize)*24+(1ULL<<cachesize)*36, buf);
printf("Initial nodes table and operation cache requires %s.\n", buf);
// Init Lace
lace_init(workers, 1000000); // initialize Lace with <workers> workers (0 = auto-detect) and a task deque of 1,000,000 entries
lace_startup(0, NULL, NULL); // default program stack size, no startup callback: this thread becomes worker 0
LACE_ME;
// Init Sylvan
sylvan_set_sizes(1LL<<tablesize, 1LL<<maxtablesize, 1LL<<cachesize, 1LL<<maxcachesize); // use the validated --table-sizes values
sylvan_init_package();
sylvan_init_ldd();
sylvan_init_mtbdd();
sylvan_gc_hook_pregc(TASK(gc_start));
sylvan_gc_hook_postgc(TASK(gc_end));
// Obtain operation ids for the operation cache
compute_highest_id = cache_next_opid();
compute_highest_action_id = cache_next_opid();
bdd_from_ldd_id = cache_next_opid();
bdd_from_ldd_rel_id = cache_next_opid();
// Open file
FILE *f = fopen(model_filename, "r");
if (f == NULL) Abort("Cannot open file '%s'!\n", model_filename);
// Read integers per vector
if (fread(&vector_size, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
// Read initial state
if (verbose) {
printf("Loading initial state... ");
fflush(stdout);
}
set_t initial = set_load(f);
if (verbose) printf("done.\n");
// Read number of transitions
if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
next = (rel_t*)malloc(sizeof(rel_t) * next_count);
// Read transitions
if (verbose) {
printf("Loading transition relations... ");
fflush(stdout);
}
int i;
for (i=0; i<next_count; i++) {
next[i] = rel_load(f);
if (verbose) {
printf("%d, ", i);
fflush(stdout);
}
}
if (verbose) printf("done.\n");
// Read whether reachable states are stored
int has_reachable = 0;
if (fread(&has_reachable, sizeof(int), 1, f) != 1) Abort("Input file missing reachable states!\n");
if (has_reachable == 0) Abort("Input file missing reachable states!\n");
// Read reachable states
if (verbose) {
printf("Loading reachable states... ");
fflush(stdout);
}
set_t states = set_load(f);
if (verbose) printf("done.\n");
// Read number of action labels
int action_labels_count = 0;
if (fread(&action_labels_count, sizeof(int), 1, f) != 1) Abort("Input file missing action label count!\n");
// Read action labels
char *action_labels[action_labels_count];
for (int i=0; i<action_labels_count; i++) {
uint32_t len;
if (fread(&len, sizeof(uint32_t), 1, f) != 1) Abort("Invalid input file!\n");
action_labels[i] = (char*)malloc(sizeof(char[len+1]));
if (fread(action_labels[i], sizeof(char), len, f) != len) Abort("Invalid input file!\n");
action_labels[i][len] = 0;
}
// Close file
fclose(f);
// Report that we have read the input file
printf("Read file %s.\n", argv[1]);
// Report statistics
if (verbose) {
printf("%zu integers per state, %d transition groups\n", vector_size, next_count);
printf("LDD nodes:\n");
printf("Initial states: %zu LDD nodes\n", lddmc_nodecount(initial->mdd));
for (i=0; i<next_count; i++) {
printf("Transition %d: %zu LDD nodes\n", i, lddmc_nodecount(next[i]->mdd));
}
}
// Report that we prepare BDD conversion
if (verbose) printf("Preparing conversion to BDD...\n");
// Compute highest value at each level (from reachable states)
uint32_t highest[vector_size];
for (size_t i=0; i<vector_size; i++) highest[i] = 0;
compute_highest(states->mdd, highest);
// Compute highest action label value (from transition relations)
uint32_t highest_action = 0;
for (int i=0; i<next_count; i++) {
compute_highest_action(next[i]->mdd, next[i]->meta, &highest_action);
}
// Report highest integers
/*
printf("Highest integer per level: ");
for (size_t i=0; i<vector_size; i++) {
if (i>0) printf(", ");
printf("%u", highest[i]);
}
printf("\n");
*/
// Compute number of bits for each level
int bits[vector_size];
for (size_t i=0; i<vector_size; i++) {
bits[i] = 0;
while (highest[i] != 0) {
bits[i]++;
highest[i]>>=1;
}
if (bits[i] == 0) bits[i] = 1;
}
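// e.g. a highest value of 5 (binary 101) needs 3 bits; a constant level (highest value 0) still gets 1 bit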
// Compute number of bits for action label
actionbits = 0;
while (highest_action != 0) {
actionbits++;
highest_action>>=1;
}
if (actionbits == 0 && has_actions) actionbits = 1;
// Report number of bits
if (verbose) {
printf("Bits per level: ");
for (size_t i=0; i<vector_size; i++) {
if (i>0) printf(", ");
printf("%d", bits[i]);
}
printf("\n");
printf("Action bits: %d.\n", actionbits);
}
// Compute bits MDD
MDD bits_mdd = lddmc_true;
for (size_t i=0; i<vector_size; i++) {
bits_mdd = lddmc_makenode(bits[vector_size-i-1], bits_mdd, lddmc_false);
}
lddmc_ref(bits_mdd);
// Compute total number of bits
int totalbits = 0;
for (size_t i=0; i<vector_size; i++) {
totalbits += bits[i];
}
// Compute state variables
MTBDD state_vars = mtbdd_true;
for (int i=0; i<totalbits; i++) {
state_vars = mtbdd_makenode(2*(totalbits-i-1), mtbdd_false, state_vars);
}
mtbdd_protect(&state_vars);
// Report that we begin the actual conversion
if (verbose) printf("Converting to BDD...\n");
// Create BDD file
f = fopen(bdd_filename, "w");
if (f == NULL) Abort("Cannot open file '%s'!\n", bdd_filename);
// Write domain...
int bits_per_integer = 1;
fwrite(&totalbits, sizeof(int), 1, f); // the BDD file uses one "integer" per state bit, so totalbits is the vector size
fwrite(&bits_per_integer, sizeof(int), 1, f); // and each integer is 1 bit wide
fwrite(&actionbits, sizeof(int), 1, f);
// Write initial state...
MTBDD new_initial = bdd_from_ldd(initial->mdd, bits_mdd, 0);
assert((size_t)mtbdd_satcount(new_initial, totalbits) == (size_t)lddmc_satcount_cached(initial->mdd));
mtbdd_refs_push(new_initial);
{
size_t a = sylvan_serialize_add(new_initial);
size_t b = sylvan_serialize_add(state_vars);
size_t s = totalbits;
sylvan_serialize_tofile(f);
fwrite(&a, sizeof(size_t), 1, f);
fwrite(&s, sizeof(size_t), 1, f);
fwrite(&b, sizeof(size_t), 1, f);
}
// Convert the reachable states with the same custom LDD-to-BDD operation
MTBDD new_states = bdd_from_ldd(states->mdd, bits_mdd, 0);
assert((size_t)mtbdd_satcount(new_states, totalbits) == (size_t)lddmc_satcount_cached(states->mdd));
mtbdd_refs_push(new_states);
// Report size of BDD
if (verbose) {
printf("Initial states: %zu BDD nodes\n", mtbdd_nodecount(new_initial));
printf("Reachable states: %zu BDD nodes\n", mtbdd_nodecount(new_states));
}
// Write number of transitions
fwrite(&next_count, sizeof(int), 1, f);
// Write transitions
for (int i=0; i<next_count; i++) {
// Compute new transition relation
MTBDD new_rel = bdd_from_ldd_rel(next[i]->mdd, bits_mdd, 0, next[i]->meta);
mtbdd_refs_push(new_rel);
// Compute new <variables> for the current transition relation
MTBDD new_vars = meta_to_bdd(next[i]->meta, bits_mdd, 0);
mtbdd_refs_push(new_vars);
if (check_results) {
// Test if the transition is correctly converted
MTBDD test = sylvan_relnext(new_states, new_rel, new_vars);
mtbdd_refs_push(test);
MDD succ = lddmc_relprod(states->mdd, next[i]->mdd, next[i]->meta);
lddmc_refs_push(succ);
MTBDD test2 = bdd_from_ldd(succ, bits_mdd, 0);
if (test != test2) Abort("Conversion error!\n");
mtbdd_refs_pop(1);
lddmc_refs_pop(1);
}
// Report number of nodes
if (verbose) printf("Transition %d: %zu BDD nodes\n", i, mtbdd_nodecount(new_rel));
size_t a = sylvan_serialize_add(new_rel);
size_t b = sylvan_serialize_add(new_vars);
sylvan_serialize_tofile(f);
fwrite(&a, sizeof(size_t), 1, f);
fwrite(&b, sizeof(size_t), 1, f);
}
// Write reachable states
has_reachable = 1;
fwrite(&has_reachable, sizeof(int), 1, f);
{
size_t a = sylvan_serialize_add(new_states);
size_t b = sylvan_serialize_add(state_vars);
size_t s = totalbits;
sylvan_serialize_tofile(f);
fwrite(&a, sizeof(size_t), 1, f);
fwrite(&s, sizeof(size_t), 1, f);
fwrite(&b, sizeof(size_t), 1, f);
}
// Write action labels
fwrite(&action_labels_count, sizeof(int), 1, f);
for (int i=0; i<action_labels_count; i++) {
uint32_t len = strlen(action_labels[i]);
fwrite(&len, sizeof(uint32_t), 1, f);
fwrite(action_labels[i], sizeof(char), len, f);
}
// Close the file
fclose(f);
// Report to the user
printf("Written file %s.\n", bdd_filename);
// Report Sylvan statistics (if SYLVAN_STATS is set)
if (verbose) sylvan_stats_report(stdout);
return 0;
}
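For reference, the layout of the .bdd file written above, reconstructed from the fwrite calls in main() (a description of what this tool emits, not an official Sylvan format):

/*   int     totalbits            // vector size: one "integer" per state bit
 *   int     bits_per_integer     // always 1
 *   int     actionbits
 *   <sylvan serialization>       // then size_t: initial-state BDD, totalbits, state_vars cube
 *   int     next_count
 *   next_count times:
 *     <sylvan serialization>     // then size_t: relation BDD, variables cube
 *   int     has_reachable        // always 1
 *   <sylvan serialization>       // then size_t: reachable-states BDD, totalbits, state_vars cube
 *   int     action_labels_count
 *   action_labels_count times:   // uint32_t length, then that many bytes (no terminating 0)
 */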

83
resources/3rdparty/sylvan/examples/lddmc.c

@@ -12,7 +12,7 @@
#include <getrss.h>
#include <sylvan.h>
#include <llmsset.h>
#include <sylvan_table.h>
/* Configuration */
static int report_levels = 0; // report states at start of every level
@@ -22,6 +22,7 @@ static int check_deadlocks = 0; // set to 1 to check for deadlocks
static int print_transition_matrix = 1; // print transition relation matrix
static int workers = 0; // autodetect
static char* model_filename = NULL; // filename of model
static char* out_filename = NULL; // filename of output BDD
#ifdef HAVE_PROFILER
static char* profile_filename = NULL; // filename for profiling
#endif
@@ -39,6 +40,7 @@ static struct argp_option options[] =
{"count-table", 2, 0, 0, "Report table usage at each level", 1},
{0, 0, 0, 0, 0, 0}
};
static error_t
parse_opt(int key, char *arg, struct argp_state *state)
{
@@ -67,8 +69,9 @@ parse_opt(int key, char *arg, struct argp_state *state)
break;
#endif
case ARGP_KEY_ARG:
if (state->arg_num >= 1) argp_usage(state);
model_filename = arg;
if (state->arg_num == 0) model_filename = arg;
if (state->arg_num == 1) out_filename = arg;
if (state->arg_num >= 2) argp_usage(state);
break;
case ARGP_KEY_END:
if (state->arg_num < 1) argp_usage(state);
@@ -78,7 +81,8 @@ parse_opt(int key, char *arg, struct argp_state *state)
}
return 0;
}
static struct argp argp = { options, parse_opt, "<model>", 0, 0, 0, 0 };
static struct argp argp = { options, parse_opt, "<model> [<output-bdd>]", 0, 0, 0, 0 };
/* Globals */
typedef struct set
@@ -125,6 +129,38 @@ set_load(FILE* f)
return set;
}
/* Save a set to file */
static void
set_save(FILE* f, set_t set)
{
size_t mdd = lddmc_serialize_add(set->mdd);
size_t proj = lddmc_serialize_add(set->proj);
lddmc_serialize_tofile(f);
fwrite(&mdd, sizeof(size_t), 1, f);
fwrite(&proj, sizeof(size_t), 1, f);
fwrite(&set->size, sizeof(int), 1, f);
}
static void
rel_save(FILE* f, rel_t rel)
{
size_t mdd = lddmc_serialize_add(rel->mdd);
size_t meta = lddmc_serialize_add(rel->meta);
lddmc_serialize_tofile(f);
fwrite(&mdd, sizeof(size_t), 1, f);
fwrite(&meta, sizeof(size_t), 1, f);
}
static set_t
set_clone(set_t source)
{
set_t set = (set_t)malloc(sizeof(struct set));
set->mdd = lddmc_ref(source->mdd);
set->proj = lddmc_ref(source->proj);
set->size = source->size;
return set;
}
static int
calculate_size(MDD meta)
{
@@ -424,8 +460,10 @@ main(int argc, char **argv)
// Nodes table size: 24 bytes * 2**N_nodes
// Cache table size: 36 bytes * 2**N_cache
// With: N_nodes=25, N_cache=24: 1.3 GB memory
sylvan_init_package(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
sylvan_set_sizes(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
sylvan_init_package();
sylvan_init_ldd();
sylvan_init_mtbdd();
// Read and report domain info (integers per vector and bits per integer)
if (fread(&vector_size, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
@@ -435,7 +473,8 @@ main(int argc, char **argv)
// Read initial state
printf("Loading initial state... ");
fflush(stdout);
set_t states = set_load(f);
set_t initial = set_load(f);
set_t states = set_clone(initial);
printf("done.\n");
// Read transitions
@@ -493,7 +532,37 @@ main(int argc, char **argv)
printf("Final states: %zu states\n", (size_t)lddmc_satcount_cached(states->mdd));
printf("Final states: %zu MDD nodes\n", lddmc_nodecount(states->mdd));
sylvan_stats_report(stdout, 1);
if (out_filename != NULL) {
printf("Writing to %s.\n", out_filename);
// Create LDD file
FILE *f = fopen(out_filename, "w");
lddmc_serialize_reset();
// Write domain...
fwrite(&vector_size, sizeof(size_t), 1, f);
// Write initial state...
set_save(f, initial);
// Write number of transitions
fwrite(&next_count, sizeof(int), 1, f);
// Write transitions
for (int i=0; i<next_count; i++) {
rel_save(f, next[i]);
}
// Write reachable states
int has_reachable = 1;
fwrite(&has_reachable, sizeof(int), 1, f);
set_save(f, states);
// Write action labels
fclose(f);
}
sylvan_stats_report(stdout);
return 0;
}

23
resources/3rdparty/sylvan/examples/mc.c

@@ -11,7 +11,7 @@
#endif
#include <sylvan.h>
#include <llmsset.h>
#include <sylvan_table.h>
/* Configuration */
static int report_levels = 0; // report states at end of every level
@@ -68,6 +68,9 @@ parse_opt(int key, char *arg, struct argp_state *state)
case 2:
report_table = 1;
break;
case 5:
report_nodes = 1;
break;
case 6:
merge_relations = 1;
break;
@@ -420,7 +423,7 @@ TASK_2(BDD, extend_relation, BDD, relation, BDDSET, variables)
for (int i=0; i<statebits; i++) has[i] = 0;
BDDSET s = variables;
while (!sylvan_set_isempty(s)) {
BDDVAR v = sylvan_set_var(s);
BDDVAR v = sylvan_set_first(s);
if (v/2 >= (unsigned)statebits) break; // action labels
has[v/2] = 1;
s = sylvan_set_next(s);
@@ -468,12 +471,12 @@ print_matrix(BDD vars)
fprintf(stdout, "-");
} else {
BDDVAR next_s = 2*((i+1)*bits_per_integer);
if (sylvan_set_var(vars) < next_s) {
if (sylvan_set_first(vars) < next_s) {
fprintf(stdout, "+");
for (;;) {
vars = sylvan_set_next(vars);
if (sylvan_set_isempty(vars)) break;
if (sylvan_set_var(vars) >= next_s) break;
if (sylvan_set_first(vars) >= next_s) break;
}
} else {
fprintf(stdout, "-");
@@ -515,10 +518,12 @@ main(int argc, char **argv)
// Nodes table size: 24 bytes * 2**N_nodes
// Cache table size: 36 bytes * 2**N_cache
// With: N_nodes=25, N_cache=24: 1.3 GB memory
sylvan_init_package(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
sylvan_init_bdd(6); // granularity 6 is decent default value - 1 means "use cache for every operation"
sylvan_gc_add_mark(0, TASK(gc_start));
sylvan_gc_add_mark(40, TASK(gc_end));
sylvan_set_sizes(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
sylvan_init_package();
sylvan_set_granularity(6); // granularity 6 is decent default value - 1 means "use cache for every operation"
sylvan_init_bdd();
sylvan_gc_hook_pregc(TASK(gc_start));
sylvan_gc_hook_postgc(TASK(gc_end));
/* Load domain information */
if ((fread(&vector_size, sizeof(int), 1, f) != 1) ||
@@ -610,7 +615,7 @@ main(int argc, char **argv)
INFO("Final states: %'zu BDD nodes\n", sylvan_nodecount(states->bdd));
}
sylvan_stats_report(stdout, 1);
sylvan_stats_report(stdout);
return 0;
}

328
resources/3rdparty/sylvan/examples/nqueens.c

@@ -0,0 +1,328 @@
/**
* N-queens example.
* Based on work by Robert Meolic, released by him into the public domain.
*/
#include <argp.h>
#include <inttypes.h>
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#ifdef HAVE_PROFILER
#include <gperftools/profiler.h>
#endif
#include <sylvan.h>
#include <sylvan_table.h>
/* Configuration */
static int report_minterms = 0; // report minterms at every major step
static int report_minor = 0; // report minor steps
static int report_stats = 0; // report stats at end
static int workers = 0; // autodetect number of workers by default
static size_t size = 0; // will be set by caller
#ifdef HAVE_PROFILER
static char* profile_filename = NULL; // filename for profiling
#endif
/* argp configuration */
static struct argp_option options[] =
{
{"workers", 'w', "<workers>", 0, "Number of workers (default=0: autodetect)", 0},
#ifdef HAVE_PROFILER
{"profiler", 'p', "<filename>", 0, "Filename for profiling", 0},
#endif
{"report-minterms", 1, 0, 0, "Report #minterms at every major step", 1},
{"report-minor", 2, 0, 0, "Report minor steps", 1},
{"report-stats", 3, 0, 0, "Report statistics at end", 1},
{0, 0, 0, 0, 0, 0}
};
static error_t
parse_opt(int key, char *arg, struct argp_state *state)
{
switch (key) {
case 'w':
workers = atoi(arg);
break;
case 1:
report_minterms = 1;
break;
case 2:
report_minor = 1;
break;
case 3:
report_stats = 1;
break;
#ifdef HAVE_PROFILER
case 'p':
profile_filename = arg;
break;
#endif
case ARGP_KEY_ARG:
if (state->arg_num >= 1) argp_usage(state);
size = atoi(arg);
break;
case ARGP_KEY_END:
if (state->arg_num < 1) argp_usage(state);
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
static struct argp argp = { options, parse_opt, "<size>", 0, 0, 0, 0 };
/* Obtain current wallclock time */
static double
wctime()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec + 1E-6 * tv.tv_usec);
}
static double t_start;
#define INFO(s, ...) fprintf(stdout, "[% 8.2f] " s, wctime()-t_start, ##__VA_ARGS__)
#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); }
VOID_TASK_0(gc_start)
{
if (report_minor) {
printf("\n");
}
INFO("(GC) Starting garbage collection...\n");
}
VOID_TASK_0(gc_end)
{
INFO("(GC) Garbage collection done.\n");
}
int
main(int argc, char** argv)
{
argp_parse(&argp, argc, argv, 0, 0, 0);
setlocale(LC_NUMERIC, "en_US.utf-8");
t_start = wctime();
// Init Lace
lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue
lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup
// Lace is initialized, now set local variables
LACE_ME;
// Init Sylvan
// Nodes table size of 1LL<<20 is 1048576 entries
// Cache size of 1LL<<18 is 262144 entries
// Nodes table size: 24 bytes * nodes
// Cache table size: 36 bytes * cache entries
// With 2^20 nodes and 2^18 cache entries, that's 33 MB
// With 2^24 nodes and 2^22 cache entries, that's 528 MB
sylvan_set_sizes(1LL<<20, 1LL<<24, 1LL<<18, 1LL<<22);
sylvan_init_package();
sylvan_set_granularity(3); // granularity 3 is decent value for this small problem - 1 means "use cache for every operation"
sylvan_init_bdd();
// Before and after garbage collection, call gc_start and gc_end
sylvan_gc_hook_pregc(TASK(gc_start));
sylvan_gc_hook_postgc(TASK(gc_end));
#ifdef HAVE_PROFILER
if (profile_filename != NULL) ProfilerStart(profile_filename);
#endif
double t1 = wctime();
BDD zero = sylvan_false;
BDD one = sylvan_true;
// Variables 0 ... (SIZE*SIZE-1)
BDD board[size*size];
for (size_t i=0; i<size*size; i++) {
board[i] = sylvan_ithvar(i);
sylvan_protect(board+i);
}
BDD res = one, temp = one;
// we use sylvan's "protect" marking mechanism...
// that means we hardly need to do manual ref/deref when the variables change
sylvan_protect(&res);
sylvan_protect(&temp);
// Old satcount function still requires a silly variables cube
BDD vars = one;
sylvan_protect(&vars);
for (size_t i=0; i<size*size; i++) vars = sylvan_and(vars, board[i]);
INFO("Initialisation complete!\n");
if (report_minor) {
INFO("Encoding rows... ");
} else {
INFO("Encoding rows...\n");
}
for (size_t i=0; i<size; i++) {
if (report_minor) {
printf("%zu... ", i);
fflush(stdout);
}
for (size_t j=0; j<size; j++) {
// compute "\BigAnd (!board[i][k]) \or !board[i][j]" with k != j
temp = one;
for (size_t k=0; k<size; k++) {
if (j==k) continue;
temp = sylvan_and(temp, sylvan_not(board[i*size+k]));
}
temp = sylvan_or(temp, sylvan_not(board[i*size+j]));
// add cube to "res"
res = sylvan_and(res, temp);
}
}
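/* In propositional terms the loop above adds, for every row i, the constraint
 *   AND_j ( board[i][j] -> AND_{k != j} !board[i][k] ),
 * i.e. a queen on (i,j) excludes every other square of row i; the column and
 * diagonal loops below follow the same pattern. */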
if (report_minor) {
printf("\n");
}
if (report_minterms) {
INFO("We have %.0f minterms\n", sylvan_satcount(res, vars));
}
if (report_minor) {
INFO("Encoding columns... ");
} else {
INFO("Encoding columns...\n");
}
for (size_t j=0; j<size; j++) {
if (report_minor) {
printf("%zu... ", j);
fflush(stdout);
}
for (size_t i=0; i<size; i++) {
// compute "\BigAnd (!board[k][j]) \or !board[i][j]" with k != i
temp = one;
for (size_t k=0; k<size; k++) {
if (i==k) continue;
temp = sylvan_and(temp, sylvan_not(board[k*size+j]));
}
temp = sylvan_or(temp, sylvan_not(board[i*size+j]));
// add cube to "res"
res = sylvan_and(res, temp);
}
}
if (report_minor) {
printf("\n");
}
if (report_minterms) {
INFO("We have %.0f minterms\n", sylvan_satcount(res, vars));
}
if (report_minor) {
INFO("Encoding rising diagonals... ");
} else {
INFO("Encoding rising diagonals...\n");
}
for (size_t i=0; i<size; i++) {
if (report_minor) {
printf("%zu... ", i);
fflush(stdout);
}
for (size_t j=0; j<size; j++) {
temp = one;
for (size_t k=0; k<size; k++) {
// if (j+k-i >= 0 && j+k-i < size && k != i)
if (j+k >= i && j+k < size+i && k != i) {
temp = sylvan_and(temp, sylvan_not(board[k*size + (j+k-i)]));
}
}
temp = sylvan_or(temp, sylvan_not(board[i*size+j]));
// add cube to "res"
res = sylvan_and(res, temp);
}
}
if (report_minor) {
printf("\n");
}
if (report_minterms) {
INFO("We have %.0f minterms\n", sylvan_satcount(res, vars));
}
if (report_minor) {
INFO("Encoding falling diagonals... ");
} else {
INFO("Encoding falling diagonals...\n");
}
for (size_t i=0; i<size; i++) {
if (report_minor) {
printf("%zu... ", i);
fflush(stdout);
}
for (size_t j=0; j<size; j++) {
temp = one;
for (size_t k=0; k<size; k++) {
// if (j+i-k >= 0 && j+i-k < size && k != i)
if (j+i >= k && j+i < size+k && k != i) {
temp = sylvan_and(temp, sylvan_not(board[k*size + (j+i-k)]));
}
}
temp = sylvan_or(temp, sylvan_not(board[i*size + j]));
// add cube to "res"
res = sylvan_and(res, temp);
}
}
if (report_minor) {
printf("\n");
}
if (report_minterms) {
INFO("We have %.0f minterms\n", sylvan_satcount(res, vars));
}
if (report_minor) {
INFO("Final computation to place a queen on every row... ");
} else {
INFO("Final computation to place a queen on every row...\n");
}
for (size_t i=0; i<size; i++) {
if (report_minor) {
printf("%zu... ", i);
fflush(stdout);
}
temp = zero;
for (size_t j=0; j<size; j++) {
temp = sylvan_or(temp, board[i*size+j]);
}
res = sylvan_and(res, temp);
}
if (report_minor) {
printf("\n");
}
double t2 = wctime();
#ifdef HAVE_PROFILER
if (profile_filename != NULL) ProfilerStop();
#endif
INFO("Result: NQueens(%zu) has %.0f solutions.\n", size, sylvan_satcount(res, vars));
INFO("Result BDD has %zu nodes.\n", sylvan_nodecount(res));
INFO("Computation time: %f sec.\n", t2-t1);
if (report_stats) {
sylvan_stats_report(stdout);
}
sylvan_quit();
lace_exit();
}

7
resources/3rdparty/sylvan/examples/simple.cpp

@@ -81,17 +81,18 @@ VOID_TASK_1(_main, void*, arg)
// - 1<<25 cache: 1152 MB
// - 1<<26 cache: 2304 MB
// - 1<<27 cache: 4608 MB
sylvan_init_package(1LL<<22, 1LL<<26, 1LL<<22, 1LL<<26);
sylvan_set_sizes(1LL<<22, 1LL<<26, 1LL<<22, 1LL<<26);
sylvan_init_package();
// Initialize the BDD module with granularity 1 (cache every operation)
// A higher granularity (e.g. 6) often results in better performance in practice
sylvan_init_bdd(1);
sylvan_init_bdd();
// Now we can do some simple stuff using the C++ objects.
CALL(simple_cxx);
// Report statistics (if SYLVAN_STATS is 1 in the configuration)
sylvan_stats_report(stdout, 1);
sylvan_stats_report(stdout);
// And quit, freeing memory
sylvan_quit();

127
resources/3rdparty/sylvan/examples/storm.cpp

@@ -1,127 +0,0 @@
#ifdef NDEBUG
#undef NDEBUG
#endif
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <sylvan.h>
#include <sylvan_obj.hpp>
#include <storm_function_wrapper.h>
#include <sylvan_storm_rational_function.h>
using namespace sylvan;
VOID_TASK_0(storm_rf)
{
Bdd one = Bdd::bddOne(); // the True terminal
Bdd zero = Bdd::bddZero(); // the False terminal
// check if they really are the True/False terminal
assert(one.GetBDD() == sylvan_true);
assert(zero.GetBDD() == sylvan_false);
Bdd a = Bdd::bddVar(0); // create a BDD variable x_0
Bdd b = Bdd::bddVar(1); // create a BDD variable x_1
// check if a really is the Boolean formula "x_0"
assert(!a.isConstant());
assert(a.TopVar() == 0);
assert(a.Then() == one);
assert(a.Else() == zero);
// check if b really is the Boolean formula "x_1"
assert(!b.isConstant());
assert(b.TopVar() == 1);
assert(b.Then() == one);
assert(b.Else() == zero);
// compute !a
Bdd not_a = !a;
// check if !!a is really a
assert((!not_a) == a);
// compute a * b and !(!a + !b) and check if they are equivalent
Bdd a_and_b = a * b;
Bdd not_not_a_or_not_b = !(!a + !b);
assert(a_and_b == not_not_a_or_not_b);
// perform some simple quantification and check the results
Bdd ex = a_and_b.ExistAbstract(a); // \exists a . a * b
assert(ex == b);
Bdd andabs = a.AndAbstract(b, a); // \exists a . a * b using AndAbstract
assert(ex == andabs);
Bdd univ = a_and_b.UnivAbstract(a); // \forall a . a * b
assert(univ == zero);
// alternative method to get the cube "ab" using bddCube
BddSet variables = a * b;
std::vector<unsigned char> vec = {1, 1};
assert(a_and_b == Bdd::bddCube(variables, vec));
// test the bddCube method for all combinations
assert((!a * !b) == Bdd::bddCube(variables, std::vector<uint8_t>({0, 0})));
assert((!a * b) == Bdd::bddCube(variables, std::vector<uint8_t>({0, 1})));
assert((!a) == Bdd::bddCube(variables, std::vector<uint8_t>({0, 2})));
assert((a * !b) == Bdd::bddCube(variables, std::vector<uint8_t>({1, 0})));
assert((a * b) == Bdd::bddCube(variables, std::vector<uint8_t>({1, 1})));
assert((a) == Bdd::bddCube(variables, std::vector<uint8_t>({1, 2})));
assert((!b) == Bdd::bddCube(variables, std::vector<uint8_t>({2, 0})));
assert((b) == Bdd::bddCube(variables, std::vector<uint8_t>({2, 1})));
assert(one == Bdd::bddCube(variables, std::vector<uint8_t>({2, 2})));
}
VOID_TASK_1(_main, void*, arg)
{
// Initialize Sylvan
// With starting size of the nodes table 1 << 21, and maximum size 1 << 27.
// With starting size of the cache table 1 << 20, and maximum size 1 << 20.
// Memory usage: 24 bytes per node, and 36 bytes per cache bucket
// - 1<<24 nodes: 384 MB
// - 1<<25 nodes: 768 MB
// - 1<<26 nodes: 1536 MB
// - 1<<27 nodes: 3072 MB
// - 1<<24 cache: 576 MB
// - 1<<25 cache: 1152 MB
// - 1<<26 cache: 2304 MB
// - 1<<27 cache: 4608 MB
sylvan_init_package(1LL<<22, 1LL<<26, 1LL<<22, 1LL<<26);
// Initialize the BDD module with granularity 1 (cache every operation)
// A higher granularity (e.g. 6) often results in better performance in practice
sylvan_init_bdd(1);
// Now we can do some simple stuff using the C++ objects.
CALL(storm_rf);
// Report statistics (if SYLVAN_STATS is 1 in the configuration)
sylvan_stats_report(stdout, 1);
// And quit, freeing memory
sylvan_quit();
// We didn't use arg
(void)arg;
}
int
main (int argc, char *argv[])
{
int n_workers = 0; // automatically detect number of workers
size_t deque_size = 0; // default value for the size of task deques for the workers
size_t program_stack_size = 0; // default value for the program stack of each pthread
// Initialize the Lace framework for <n_workers> workers.
lace_init(n_workers, deque_size);
// Spawn and start all worker pthreads; suspends current thread until done.
lace_startup(program_stack_size, TASK(_main), NULL);
// The lace_startup command also exits Lace after _main is completed.
return 0;
(void)argc; // unused variable
(void)argv; // unused variable
}

5
resources/3rdparty/sylvan/m4/.gitignore

@@ -1,5 +0,0 @@
# Ignore everything in this directory
*
# Except:
!.gitignore
!m4_ax_check_compile_flag.m4

72
resources/3rdparty/sylvan/m4/m4_ax_check_compile_flag.m4

@@ -1,72 +0,0 @@
# ===========================================================================
# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html
# ===========================================================================
#
# SYNOPSIS
#
# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS])
#
# DESCRIPTION
#
# Check whether the given FLAG works with the current language's compiler
# or gives an error. (Warnings, however, are ignored)
#
# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on
# success/failure.
#
# If EXTRA-FLAGS is defined, it is added to the current language's default
# flags (e.g. CFLAGS) when the check is done. The check is thus made with
# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to
# force the compiler to issue an error when a bad flag is given.
#
# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this
# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG.
#
# LICENSE
#
# Copyright (c) 2008 Guido U. Draheim <guidod@gmx.de>
# Copyright (c) 2011 Maarten Bosmans <mkbosmans@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# As a special exception, the respective Autoconf Macro's copyright owner
# gives unlimited permission to copy, distribute and modify the configure
# scripts that are the output of Autoconf when processing the Macro. You
# need not follow the terms of the GNU General Public License when using
# or distributing such scripts, even though portions of the text of the
# Macro appear in them. The GNU General Public License (GPL) does govern
# all other use of the material that constitutes the Autoconf Macro.
#
# This special exception to the GPL applies to versions of the Autoconf
# Macro released by the Autoconf Archive. When you make and distribute a
# modified version of the Autoconf Macro, you may extend this special
# exception to the GPL to apply to your modified version as well.
#serial 2
AC_DEFUN([AX_CHECK_COMPILE_FLAG],
[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX
AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl
AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [
ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS
_AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM()],
[AS_VAR_SET(CACHEVAR,[yes])],
[AS_VAR_SET(CACHEVAR,[no])])
_AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags])
AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes],
[m4_default([$2], :)],
[m4_default([$3], :)])
AS_VAR_POPDEF([CACHEVAR])dnl
])dnl AX_CHECK_COMPILE_FLAGS

0
resources/3rdparty/sylvan/models/at.5.8-rgs.bdd

0
resources/3rdparty/sylvan/models/at.6.8-rgs.bdd

0
resources/3rdparty/sylvan/models/at.7.8-rgs.bdd

0
resources/3rdparty/sylvan/models/blocks.2.ldd

0
resources/3rdparty/sylvan/models/blocks.4.ldd

0
resources/3rdparty/sylvan/models/collision.4.9-rgs.bdd

0
resources/3rdparty/sylvan/models/collision.5.9-rgs.bdd

0
resources/3rdparty/sylvan/models/schedule_world.2.8-rgs.bdd

0
resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd

111
resources/3rdparty/sylvan/src/CMakeLists.txt

@@ -1,90 +1,79 @@
cmake_minimum_required(VERSION 2.6)
project(sylvan C CXX)
add_library(sylvan
avl.h
lace.h
set(SOURCES
lace.c
llmsset.c
llmsset.h
refs.h
refs.c
sha2.h
sha2.c
stats.h
stats.c
storm_function_wrapper.h
sylvan_bdd.c
sylvan_cache.c
sylvan_common.c
sylvan_gmp.c
sylvan_ldd.c
sylvan_mt.c
sylvan_mtbdd.c
sylvan_obj.cpp
sylvan_refs.c
sylvan_sl.c
sylvan_stats.c
sylvan_table.c
storm_function_wrapper.cpp
sylvan_storm_rational_function.c
)
set(HEADERS
lace.h
sylvan.h
sylvan_bdd.h
sylvan_bdd.c
sylvan_cache.h
sylvan_cache.c
sylvan_config.h
sylvan_common.h
sylvan_common.c
sylvan_gmp.h
sylvan_gmp.c
sylvan_int.h
sylvan_ldd.h
sylvan_ldd.c
sylvan_ldd_int.h
sylvan_mt.h
sylvan_mtbdd.h
sylvan_mtbdd.c
sylvan_mtbdd_int.h
sylvan_obj.hpp
sylvan_obj.cpp
sylvan_stats.h
sylvan_table.h
sylvan_tls.h
storm_function_wrapper.h
sylvan_storm_rational_function.h
sylvan_storm_rational_function.c
tls.h
)
# We need to make sure that the binary is put into a folder that is independent of the
# build type. Otherwise -- for example when using Xcode -- the binary might end up in a
# sub-folder "Debug" or "Release".
set_target_properties(sylvan PROPERTIES
ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_CURRENT_BINARY_DIR}
ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${CMAKE_CURRENT_BINARY_DIR})
option(BUILD_SHARED_LIBS "Enable/disable creation of shared libraries" ON)
option(BUILD_STATIC_LIBS "Enable/disable creation of static libraries" ON)
target_link_libraries(sylvan m pthread gmp)
add_library(sylvan ${SOURCES})
if(USE_CARL)
message(STATUS "Sylvan - linking CARL.")
target_link_libraries(sylvan ${carl_LIBRARIES})
endif(USE_CARL)
find_package(GMP REQUIRED)
find_package(Hwloc REQUIRED)
include_directories(sylvan ${HWLOC_INCLUDE_DIR} ${GMP_INCLUDE_DIR})
target_link_libraries(sylvan m pthread ${GMP_LIBRARIES} ${HWLOC_LIBRARIES})
if(UNIX AND NOT APPLE)
target_link_libraries(sylvan rt)
endif()
option(USE_HWLOC "Use HWLOC library if available" ON)
if(USE_HWLOC)
include(CheckIncludeFiles)
check_include_files(hwloc.h HAVE_HWLOC)
if(HAVE_HWLOC)
set_target_properties(sylvan PROPERTIES COMPILE_DEFINITIONS "USE_HWLOC=1")
target_link_libraries(sylvan hwloc)
endif()
endif()
option(SYLVAN_STATS "Collect statistics" OFF)
if(SYLVAN_STATS)
set_target_properties(sylvan PROPERTIES COMPILE_DEFINITIONS "SYLVAN_STATS")
endif()
set_target_properties(sylvan PROPERTIES COMPILE_DEFINITIONS "STORM_SILENCE_WARNINGS")
install(TARGETS
sylvan
DESTINATION "lib")
install(TARGETS sylvan DESTINATION "${CMAKE_INSTALL_LIBDIR}")
install(FILES ${HEADERS} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
install(FILES
lace.h
llmsset.h
sylvan.h
sylvan_cache.h
sylvan_common.h
sylvan_config.h
sylvan_bdd.h
sylvan_ldd.h
sylvan_mtbdd.h
sylvan_obj.hpp
tls.h
DESTINATION "include")
# MODIFICATIONS NEEDED MADE FOR STORM
# We need to make sure that the binary is put into a folder that is independent of the
# build type. Otherwise -- for example when using Xcode -- the binary might end up in a
# sub-folder "Debug" or "Release".
set_target_properties(sylvan PROPERTIES
ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_CURRENT_BINARY_DIR}
ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${CMAKE_CURRENT_BINARY_DIR})
if(USE_CARL)
message(STATUS "Sylvan - linking CArL.")
target_link_libraries(sylvan ${carl_LIBRARIES})
endif(USE_CARL)

39
resources/3rdparty/sylvan/src/Makefile.am

@@ -1,39 +0,0 @@
lib_LTLIBRARIES = libsylvan.la
libsylvan_la_CFLAGS = $(AM_CFLAGS) -fno-strict-aliasing -std=gnu11
libsylvan_la_SOURCES = \
avl.h \
lace.c \
lace.h \
llmsset.c \
llmsset.h \
refs.h \
refs.c \
sha2.c \
sha2.h \
stats.h \
stats.c \
sylvan.h \
sylvan_config.h \
sylvan_bdd.h \
sylvan_bdd.c \
sylvan_ldd.h \
sylvan_ldd.c \
sylvan_cache.h \
sylvan_cache.c \
sylvan_common.c \
sylvan_common.h \
sylvan_mtbdd.h \
sylvan_mtbdd.c \
sylvan_mtbdd_int.h \
sylvan_obj.hpp \
sylvan_obj.cpp \
tls.h
libsylvan_la_LIBADD = -lm
if HAVE_LIBHWLOC
libsylvan_la_LIBADD += -lhwloc
libsylvan_la_CFLAGS += -DUSE_HWLOC=1
endif

5
resources/3rdparty/sylvan/src/avl.h

@@ -1,5 +1,6 @@
/*
* Copyright 2011-2014 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -282,7 +283,7 @@ NAME##_put(avl_node_t **root, TYPE *data, int *inserted)
static __attribute__((unused)) int \
NAME##_insert(avl_node_t **root, TYPE *data) \
{ \
int inserted; \
int inserted = 0; \
NAME##_put(root, data, &inserted); \
return inserted; \
} \

352
resources/3rdparty/sylvan/src/lace.c

@@ -1,5 +1,6 @@
/*
* Copyright 2013-2015 Formal Methods and Tools, University of Twente
* Copyright 2013-2016 Formal Methods and Tools, University of Twente
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,30 +27,32 @@
#include <assert.h>
#include <lace.h>
#ifndef USE_HWLOC
#define USE_HWLOC 0
#endif
#if USE_HWLOC
#include <hwloc.h>
#endif
// public Worker data
static Worker **workers;
static Worker **workers = NULL;
static size_t default_stacksize = 0; // set by lace_init
static size_t default_dqsize = 100000;
#if USE_HWLOC
static hwloc_topology_t topo;
static unsigned int n_nodes, n_cores, n_pus;
#endif
static int verbosity = 0;
static int n_workers = 0;
static int enabled_workers = 0;
typedef struct {
Worker worker_public;
char pad1[PAD(sizeof(Worker), LINE_SIZE)];
WorkerP worker_private;
char pad2[PAD(sizeof(WorkerP), LINE_SIZE)];
Task deque[];
} worker_data;
static worker_data **workers_memory = NULL;
static size_t workers_memory_size = 0;
// private Worker data (just for stats at end )
static WorkerP **workers_p;
@@ -160,38 +163,11 @@ us_elapsed(void)
}
#endif
#if USE_HWLOC
// Lock used only during parallel lace_init_worker...
static volatile int __attribute__((aligned(64))) lock = 0;
static inline void
lock_acquire()
{
while (1) {
while (lock) {}
if (cas(&lock, 0, 1)) return;
}
}
static inline void
lock_release()
{
lock=0;
}
#endif
/* Barrier */
#define BARRIER_MAX_THREADS 128
typedef union __attribute__((__packed__))
{
volatile size_t val;
char pad[LINE_SIZE];
} asize_t;
typedef struct {
volatile int __attribute__((aligned(LINE_SIZE))) count;
volatile int __attribute__((aligned(LINE_SIZE))) leaving;
volatile int __attribute__((aligned(LINE_SIZE))) wait;
/* the following is needed only for destroy: */
asize_t entered[BARRIER_MAX_THREADS];
} barrier_t;
barrier_t lace_bar;
@@ -199,25 +175,21 @@ barrier_t lace_bar;
void
lace_barrier()
{
int id = lace_get_worker()->worker;
lace_bar.entered[id].val = 1; // signal entry
int wait = lace_bar.wait;
if (enabled_workers == __sync_add_and_fetch(&lace_bar.count, 1)) {
lace_bar.count = 0; // reset counter
lace_bar.count = 0;
lace_bar.leaving = enabled_workers;
lace_bar.wait = 1 - wait; // flip wait
lace_bar.entered[id].val = 0; // signal exit
} else {
while (wait == lace_bar.wait) {} // wait
lace_bar.entered[id].val = 0; // signal exit
}
__sync_add_and_fetch(&lace_bar.leaving, -1);
}
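The lace_barrier above is a sense-reversing barrier: each worker atomically counts itself in, the last arrival resets the counter and flips the shared wait flag, and everyone else spins until the flip; the extra leaving counter only exists so lace_barrier_destroy can wait for stragglers. A minimal standalone sketch of the core idea with the same GCC builtin (the type and function names are ours, not Lace's):

#include <string.h>

/* Illustrative sketch, not the Lace implementation. */
typedef struct {
    volatile int count;      /* number of threads that have arrived this round */
    volatile int sense;      /* flipped by the last arrival */
    int n_threads;
} simple_barrier_t;

static void simple_barrier_init(simple_barrier_t *b, int n_threads)
{
    memset(b, 0, sizeof(*b));
    b->n_threads = n_threads;
}

static void simple_barrier_wait(simple_barrier_t *b)
{
    int my_sense = b->sense;                                /* remember the current sense */
    if (__sync_add_and_fetch(&b->count, 1) == b->n_threads) {
        b->count = 0;                                       /* last thread: reset the counter */
        b->sense = 1 - my_sense;                            /* and release everyone by flipping */
    } else {
        while (b->sense == my_sense) {}                     /* spin until the flip */
    }
}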
static void
lace_barrier_init()
{
assert(n_workers <= BARRIER_MAX_THREADS);
memset(&lace_bar, 0, sizeof(barrier_t));
}
@@ -225,44 +197,108 @@ static void
lace_barrier_destroy()
{
// wait for all to exit
for (int i=0; i<n_workers; i++) {
while (1 == lace_bar.entered[i].val) {}
while (lace_bar.leaving != 0) continue;
}
static void
lace_check_memory(void)
{
// get our current worker
WorkerP *w = lace_get_worker();
void* mem = workers_memory[w->worker];
// get pinned PUs
hwloc_cpuset_t cpuset = hwloc_bitmap_alloc();
hwloc_get_cpubind(topo, cpuset, HWLOC_CPUBIND_THREAD);
// get nodes of pinned PUs
hwloc_nodeset_t cpunodes = hwloc_bitmap_alloc();
hwloc_cpuset_to_nodeset(topo, cpuset, cpunodes);
// get location of memory
hwloc_nodeset_t memlocation = hwloc_bitmap_alloc();
#ifdef hwloc_get_area_memlocation
hwloc_get_area_memlocation(topo, mem, sizeof(worker_data), memlocation, HWLOC_MEMBIND_BYNODESET);
#else
hwloc_membind_policy_t policy;
int res = hwloc_get_area_membind_nodeset(topo, mem, sizeof(worker_data), memlocation, &policy, HWLOC_MEMBIND_STRICT);
if (res == -1) {
#ifndef STORM_SILENCE_WARNINGS
fprintf(stderr, "Lace warning: hwloc_get_area_membind_nodeset returned -1!\n");
#endif
}
if (policy != HWLOC_MEMBIND_BIND) {
#ifndef STORM_SILENCE_WARNINGS
fprintf(stderr, "Lace warning: Lace worker memory not bound with BIND policy!\n");
#endif
}
#endif
// check if CPU and node are on the same place
if (!hwloc_bitmap_isincluded(memlocation, cpunodes)) {
fprintf(stderr, "Lace warning: Lace thread not on same memory domain as data!\n");
char *strp, *strp2, *strp3;
hwloc_bitmap_list_asprintf(&strp, cpuset);
hwloc_bitmap_list_asprintf(&strp2, cpunodes);
hwloc_bitmap_list_asprintf(&strp3, memlocation);
fprintf(stderr, "Worker %d is pinned on PUs %s, node %s; memory is pinned on node %s\n", w->worker, strp, strp2, strp3);
free(strp);
free(strp2);
free(strp3);
}
// free allocated memory
hwloc_bitmap_free(cpuset);
hwloc_bitmap_free(cpunodes);
hwloc_bitmap_free(memlocation);
}
void
lace_init_worker(int worker, size_t dq_size)
WorkerP *
lace_init_worker(int worker)
{
Worker *wt = NULL;
WorkerP *w = NULL;
// Get our core
hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_CORE, worker % n_cores);
if (dq_size == 0) dq_size = default_dqsize;
// Get our copy of the bitmap
hwloc_cpuset_t bmp = hwloc_bitmap_dup(pu->cpuset);
#if USE_HWLOC
// Get our logical processor
hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, worker % n_pus);
// Get number of PUs in set
int n = -1, count=0;
while ((n=hwloc_bitmap_next(bmp, n)) != -1) count++;
// Pin our thread...
hwloc_set_cpubind(topo, pu->cpuset, HWLOC_CPUBIND_THREAD);
// Allocate memory on our node...
lock_acquire();
wt = (Worker *)hwloc_alloc_membind(topo, sizeof(Worker), pu->cpuset, HWLOC_MEMBIND_BIND, 0);
w = (WorkerP *)hwloc_alloc_membind(topo, sizeof(WorkerP), pu->cpuset, HWLOC_MEMBIND_BIND, 0);
if (wt == NULL || w == NULL || (w->dq = (Task*)hwloc_alloc_membind(topo, dq_size * sizeof(Task), pu->cpuset, HWLOC_MEMBIND_BIND, 0)) == NULL) {
fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n");
exit(1);
// Check if we actually have logical processors
if (count == 0) {
fprintf(stderr, "Lace error: trying to pin a worker on an empty core?\n");
exit(-1);
}
lock_release();
#else
// Allocate memory...
if (posix_memalign((void**)&wt, LINE_SIZE, sizeof(Worker)) ||
posix_memalign((void**)&w, LINE_SIZE, sizeof(WorkerP)) ||
posix_memalign((void**)&w->dq, LINE_SIZE, dq_size * sizeof(Task))) {
fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n");
exit(1);
// Select the correct PU on the core (in case of hyperthreading)
int idx = worker / n_cores;
if (idx >= count) {
fprintf(stderr, "Lace warning: more workers than available logical processors!\n");
idx %= count;
}
// Find index of PU and restrict bitmap
n = -1;
for (int i=0; i<=idx; i++) n = hwloc_bitmap_next(bmp, n);
hwloc_bitmap_only(bmp, n);
// Pin our thread...
if (hwloc_set_cpubind(topo, bmp, HWLOC_CPUBIND_THREAD) == -1) {
#ifndef STORM_SILENCE_WARNINGS
fprintf(stderr, "Lace warning: hwloc_set_cpubind returned -1!\n");
#endif
}
// Free allocated memory
hwloc_bitmap_free(bmp);
// Get allocated memory
Worker *wt = &workers_memory[worker]->worker_public;
WorkerP *w = &workers_memory[worker]->worker_private;
w->dq = workers_memory[worker]->deque;
// Initialize public worker data
wt->dq = w->dq;
@@ -272,21 +308,18 @@ lace_init_worker(int worker, size_t dq_size)
// Initialize private worker data
w->_public = wt;
w->end = w->dq + dq_size;
w->end = w->dq + default_dqsize;
w->split = w->dq;
w->allstolen = 0;
w->worker = worker;
#if USE_HWLOC
w->pu = worker % n_pus;
#else
w->pu = -1;
#endif
w->pu = worker % n_cores;
w->enabled = 1;
if (workers_init[worker].stack != 0) {
w->stack_trigger = ((size_t)workers_init[worker].stack) + workers_init[worker].stacksize/20;
} else {
w->stack_trigger = 0;
}
w->rng = (((uint64_t)rand())<<32 | rand());
#if LACE_COUNT_EVENTS
// Reset counters
@@ -299,8 +332,9 @@ lace_init_worker(int worker, size_t dq_size)
#else
pthread_setspecific(worker_key, w);
#endif
workers[worker] = wt;
workers_p[worker] = w;
// Check if everything is on the correct node
lace_check_memory();
// Synchronize with others
lace_barrier();
@@ -309,6 +343,8 @@ lace_init_worker(int worker, size_t dq_size)
w->time = gethrtime();
w->level = 0;
#endif
return w;
}
#if defined(__APPLE__) && !defined(pthread_barrier_t)
@@ -433,7 +469,8 @@ lace_set_workers(int workercount)
enabled_workers = workercount;
int self = lace_get_worker()->worker;
if (self >= workercount) workercount--;
for (int i=0; i<n_workers; i++) {
int i;
for (i=0; i<n_workers; i++) {
workers_p[i]->enabled = (i < workercount || i == self) ? 1 : 0;
}
}
@@ -457,7 +494,7 @@ rng(uint32_t *seed, int max)
return next % max;
}
VOID_TASK_IMPL_0(lace_steal_random)
VOID_TASK_0(lace_steal_random)
{
Worker *victim = workers[(__lace_worker->worker + 1 + rng(&__lace_worker->seed, n_workers-1)) % n_workers];
@@ -472,7 +509,7 @@ VOID_TASK_IMPL_0(lace_steal_random)
}
}
VOID_TASK_IMPL_1(lace_steal_random_loop, int*, quit)
VOID_TASK_1(lace_steal_random_loop, int*, quit)
{
while(!(*(volatile int*)quit)) {
lace_steal_random();
@@ -491,22 +528,21 @@ static lace_startup_cb main_cb;
static void*
lace_main_wrapper(void *arg)
{
lace_init_worker(0, 0);
WorkerP *self = lace_get_worker();
#if LACE_PIE_TIMES
self->time = gethrtime();
#endif
lace_time_event(self, 1);
main_cb(self, self->dq, arg);
lace_init_main();
LACE_ME;
WRAP(main_cb, arg);
lace_exit();
// Now signal that we're done
pthread_mutex_lock(&wait_until_done_mutex);
pthread_cond_broadcast(&wait_until_done);
pthread_mutex_unlock(&wait_until_done_mutex);
return NULL;
}
VOID_TASK_IMPL_1(lace_steal_loop, int*, quit)
#define lace_steal_loop(quit) CALL(lace_steal_loop, quit)
VOID_TASK_1(lace_steal_loop, int*, quit)
{
// Determine who I am
const int worker_id = __lace_worker->worker;
@@ -555,15 +591,43 @@ VOID_TASK_IMPL_1(lace_steal_loop, int*, quit)
}
}
static void*
lace_default_worker(void* arg)
/**
* Initialize worker 0.
*/
void
lace_init_main()
{
lace_init_worker((size_t)arg, 0);
WorkerP *__lace_worker = lace_get_worker();
WorkerP * __attribute__((unused)) __lace_worker = lace_init_worker(0);
lace_time_event(__lace_worker, 1);
}
/**
* Initialize the current thread as a Lace thread, and perform work-stealing
* as worker <worker> until lace_exit() is called.
*
* For worker 0, use lace_init_main
*/
void
lace_run_worker(int worker)
{
// Initialize local datastructure
WorkerP *__lace_worker = lace_init_worker(worker);
Task *__lace_dq_head = __lace_worker->dq;
// Steal for a while
lace_steal_loop(&lace_quits);
// Time the quit event
lace_time_event(__lace_worker, 9);
// Synchronize with lace_exit
lace_barrier();
}
static void*
lace_default_worker_thread(void* arg)
{
lace_run_worker((int)(size_t)arg);
return NULL;
}
@@ -576,25 +640,15 @@ lace_spawn_worker(int worker, size_t stacksize, void* (*fun)(void*), void* arg)
size_t pagesize = sysconf(_SC_PAGESIZE);
stacksize = (stacksize + pagesize - 1) & ~(pagesize - 1); // ceil(stacksize, pagesize)
#if USE_HWLOC
// Get our logical processor
hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, worker % n_pus);
// Allocate memory for the program stack
lock_acquire();
void *stack_location = hwloc_alloc_membind(topo, stacksize + pagesize, pu->cpuset, HWLOC_MEMBIND_BIND, 0);
lock_release();
if (stack_location == 0) {
fprintf(stderr, "Lace error: Unable to allocate memory for the pthread stack!\n");
exit(1);
}
#else
void *stack_location = mmap(NULL, stacksize + pagesize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
if (stack_location == MAP_FAILED) {
fprintf(stderr, "Lace error: Cannot allocate program stack: %s!\n", strerror(errno));
exit(1);
}
#endif
if (0 != mprotect(stack_location, pagesize, PROT_NONE)) {
fprintf(stderr, "Lace error: Unable to protect the allocated program stack with a guard page!\n");
@@ -610,7 +664,7 @@ lace_spawn_worker(int worker, size_t stacksize, void* (*fun)(void*), void* arg)
workers_init[worker].stacksize = stacksize;
if (fun == 0) {
fun = lace_default_worker;
fun = lace_default_worker_thread;
arg = (void*)(size_t)worker;
}
@@ -622,21 +676,7 @@ static int
static int
get_cpu_count()
{
#if USE_HWLOC
int count = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU);
#elif defined(sched_getaffinity)
/* Best solution: find actual available cpus */
cpu_set_t cs;
CPU_ZERO(&cs);
sched_getaffinity(0, sizeof(cs), &cs);
int count = CPU_COUNT(&cs);
#elif defined(_SC_NPROCESSORS_ONLN)
/* Fallback */
int count = sysconf(_SC_NPROCESSORS_ONLN);
#else
/* Okay... */
int count = 1;
#endif
return count < 1 ? 1 : count;
}
@ -647,19 +687,18 @@ lace_set_verbosity(int level)
}
void
lace_init(int n, size_t dqsize)
lace_init(int _n_workers, size_t dqsize)
{
#if USE_HWLOC
// Initialize topology and information about cpus
hwloc_topology_init(&topo);
hwloc_topology_load(topo);
n_nodes = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_NODE);
n_cores = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_CORE);
n_pus = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU);
#endif
// Initialize globals
n_workers = n;
n_workers = _n_workers;
if (n_workers == 0) n_workers = get_cpu_count();
enabled_workers = n_workers;
if (dqsize != 0) default_dqsize = dqsize;
@ -673,11 +712,43 @@ lace_init(int n, size_t dqsize)
// Allocate array with all workers
if (posix_memalign((void**)&workers, LINE_SIZE, n_workers*sizeof(Worker*)) != 0 ||
posix_memalign((void**)&workers_p, LINE_SIZE, n_workers*sizeof(WorkerP*)) != 0) {
posix_memalign((void**)&workers_p, LINE_SIZE, n_workers*sizeof(WorkerP*)) != 0 ||
posix_memalign((void**)&workers_memory, LINE_SIZE, n_workers*sizeof(worker_data*)) != 0) {
fprintf(stderr, "Lace error: unable to allocate memory!\n");
exit(1);
}
// Allocate memory for each worker
workers_memory_size = sizeof(worker_data) + sizeof(Task) * dqsize;
for (int i=0; i<n_workers; i++) {
workers_memory[i] = mmap(NULL, workers_memory_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
if (workers_memory[i] == MAP_FAILED) {
fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n");
exit(1);
}
workers[i] = &workers_memory[i]->worker_public;
workers_p[i] = &workers_memory[i]->worker_private;
}
// Pin allocated memory of each worker
for (int i=0; i<n_workers; i++) {
// Get our core
hwloc_obj_t core = hwloc_get_obj_by_type(topo, HWLOC_OBJ_CORE, i % n_cores);
// Pin the memory area
#ifdef HWLOC_MEMBIND_BYNODESET
int res = hwloc_set_area_membind(topo, workers_memory[i], workers_memory_size, core->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE | HWLOC_MEMBIND_BYNODESET);
#else
int res = hwloc_set_area_membind_nodeset(topo, workers_memory[i], workers_memory_size, core->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE);
#endif
if (res != 0) {
#ifndef STORM_SILENCE_WARNINGS
fprintf(stderr, "Lace error: Unable to bind worker memory to node!\n");
#endif
}
}
// Create pthread key
#ifndef __linux__
pthread_key_create(&worker_key, NULL);
@ -696,11 +767,7 @@ lace_init(int n, size_t dqsize)
}
if (verbosity) {
#if USE_HWLOC
fprintf(stderr, "Initializing Lace, %u nodes, %u cores, %u logical processors, %d workers.\n", n_nodes, n_cores, n_pus, n_workers);
#else
fprintf(stderr, "Initializing Lace, %d workers.\n", n_workers);
#endif
}
// Prepare lace_init structure
@ -740,11 +807,11 @@ lace_startup(size_t stacksize, lace_startup_cb cb, void *arg)
// Suspend this thread until cb returns
pthread_mutex_lock(&wait_until_done_mutex);
pthread_cond_wait(&wait_until_done, &wait_until_done_mutex);
if (lace_quits == 0) pthread_cond_wait(&wait_until_done, &wait_until_done_mutex);
pthread_mutex_unlock(&wait_until_done_mutex);
} else {
// use this thread as worker and return control
lace_init_worker(0, 0);
lace_init_worker(0);
lace_time_event(lace_get_worker(), 1);
}
}
@ -945,7 +1012,7 @@ lace_exec_in_new_frame(WorkerP *__lace_worker, Task *__lace_dq_head, Task *root)
}
}
VOID_TASK_IMPL_2(lace_steal_loop_root, Task*, t, int*, done)
VOID_TASK_2(lace_steal_loop_root, Task*, t, int*, done)
{
t->f(__lace_worker, __lace_dq_head, t);
*done = 1;
@ -971,7 +1038,7 @@ lace_sync_and_exec(WorkerP *__lace_worker, Task *__lace_dq_head, Task *root)
// one worker sets t to 0 again
if (LACE_WORKER_ID == 0) lace_newframe.t = 0;
// else while (*(volatile Task**)&lace_newframe.t != 0) {}
// else while (*(Task* volatile *)&lace_newframe.t != 0) {}
// the above line is commented out since lace_exec_in_new_frame includes
// a lace_barrier before the task is executed
@ -991,7 +1058,7 @@ lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head)
// one worker sets t to 0 again
if (LACE_WORKER_ID == 0) lace_newframe.t = 0;
// else while (*(volatile Task**)&lace_newframe.t != 0) {}
// else while (*(Task* volatile *)&lace_newframe.t != 0) {}
// the above line is commented out since lace_exec_in_new_frame includes
// a lace_barrier before the task is executed
@ -1043,3 +1110,10 @@ lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t)
while (!cas(&lace_newframe.t, 0, &_s)) lace_yield(__lace_worker, __lace_dq_head);
lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2);
}
void
lace_abort_stack_overflow(void)
{
fprintf(stderr, "Lace fatal error: Task stack overflow! Aborting.\n");
exit(-1);
}

139
resources/3rdparty/sylvan/src/lace.h

@ -1,5 +1,6 @@
/*
* Copyright 2013-2015 Formal Methods and Tools, University of Twente
* Copyright 2013-2016 Formal Methods and Tools, University of Twente
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
@ -33,7 +34,7 @@ extern "C" {
#endif
#ifndef LACE_LEAP_RANDOM /* Use random leaping when leapfrogging fails */
#define LACE_LEAP_RANDOM 1
#define LACE_LEAP_RANDOM 0
#endif
#ifndef LACE_PIE_TIMES /* Record time spent stealing and leapfrogging */
@ -212,8 +213,9 @@ typedef struct _WorkerP {
Task *end; // dq+dq_size
Worker *_public; // pointer to public Worker struct
size_t stack_trigger; // for stack overflow detection
uint64_t rng; // my random seed (for lace_trng)
uint32_t seed; // my random seed (for lace_steal_random)
int16_t worker; // what is my worker id?
int16_t pu; // my pu (for HWLOC)
uint8_t allstolen; // my allstolen
volatile int8_t enabled; // if this worker is enabled
@ -223,12 +225,33 @@ typedef struct _WorkerP {
volatile int level;
#endif
uint32_t seed; // my random seed (for lace_steal_random)
int16_t pu; // my pu (for HWLOC)
} WorkerP;
#define LACE_TYPEDEF_CB(t, f, ...) typedef t (*f)(WorkerP *, Task *, ##__VA_ARGS__);
LACE_TYPEDEF_CB(void, lace_startup_cb, void*);
/**
* Using Lace.
*
* Optionally set the verbosity level with lace_set_verbosity.
* Call lace_init to allocate all data structures.
*
* You can create threads yourself or let Lace create threads with lace_startup.
*
* When creating threads yourself:
* - call lace_init_main for worker 0
* this method returns when all other workers have started
* - call lace_run_worker for all other workers
* workers perform work-stealing until worker 0 calls lace_exit
*
 * When letting Lace create threads with lace_startup:
 * - calling with a startup callback creates N threads and returns after the
 *   callback has returned; all created threads are then destroyed
 * - calling without a startup callback creates N-1 threads and returns
 *   control to the caller; when lace_exit is called, all created threads are terminated.
*/
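A minimal sketch of the "create threads yourself" path described above, assuming four workers, the default deque size and plain pthread_create (lace_spawn_worker with its guard page would also do); error handling and the actual task workload are omitted.

#include <pthread.h>
#include "lace.h"

static void *steal_thread(void *arg)
{
    lace_run_worker((int)(size_t)arg);      /* work-steals until lace_exit() is called */
    return NULL;
}

int main(void)
{
    const int n = 4;
    pthread_t t[3];

    lace_init(n, 0);                        /* 0 = keep the default deque size */
    for (int i = 1; i < n; i++)
        pthread_create(&t[i-1], NULL, steal_thread, (void*)(size_t)i);

    lace_init_main();                       /* returns once workers 1..3 have started */
    /* ... CALL/SPAWN/SYNC Lace tasks here as worker 0 ... */
    lace_exit();                            /* all workers return from lace_run_worker */

    for (int i = 1; i < n; i++)
        pthread_join(t[i-1], NULL);
    return 0;
}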
/**
* Set verbosity level (0 = no startup messages, 1 = startup messages)
* Default level: 0
@ -247,22 +270,25 @@ void lace_init(int n_workers, size_t dqsize);
* After lace_init, start all worker threads.
* If cb,arg are set, suspend this thread, call cb(arg) in a new thread
* and exit Lace upon return
* Otherwise, the current thread is initialized as a Lace thread.
* Otherwise, the current thread is initialized as worker 0.
*/
void lace_startup(size_t stacksize, lace_startup_cb, void* arg);
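For comparison, a sketch of the lace_startup path where Lace creates the threads itself; my_main is an illustrative task name, and I assume the TASK() macro from this header to turn it into the lace_startup_cb callback and that a stacksize of 0 selects the default.

VOID_TASK_1(my_main, void*, arg)
{
    (void)arg;
    /* ... spawn and sync Lace tasks here; this body runs as worker 0 ... */
}

int main(void)
{
    lace_init(0, 0);                        /* 0 workers = autodetect the CPU count */
    lace_startup(0, TASK(my_main), NULL);   /* blocks until my_main returns,        */
                                            /* then all created threads are gone    */
    return 0;
}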
/**
* Initialize current thread as worker <idx> and allocate a deque with size <dqsize>.
* Use this when manually creating worker threads.
* Initialize worker 0. This method returns when all other workers are initialized
* (using lace_run_worker).
*
* When done, run lace_exit so all worker threads return from lace_run_worker.
*/
void lace_init_worker(int idx, size_t dqsize);
void lace_init_main();
/**
* Manually spawn worker <idx> with (optional) program stack size <stacksize>.
* If fun,arg are set, overrides default startup method.
* Typically: for workers 1...(n_workers-1): lace_spawn_worker(i, stack_size, 0, 0);
* Initialize the current thread as the Lace thread of worker <worker>, and perform
* work-stealing until lace_exit is called.
*
* For worker 0, call lace_init_main instead.
*/
pthread_t lace_spawn_worker(int idx, size_t stacksize, void *(*fun)(void*), void* arg);
void lace_run_worker(int worker);
/**
* Steal a random task.
@ -270,13 +296,6 @@ pthread_t lace_spawn_worker(int idx, size_t stacksize, void *(*fun)(void*), void
#define lace_steal_random() CALL(lace_steal_random)
void lace_steal_random_CALL(WorkerP*, Task*);
/**
* Steal random tasks until parameter *quit is set
* Note: task declarations at end; quit is of type int*
*/
#define lace_steal_random_loop(quit) CALL(lace_steal_random_loop, quit)
#define lace_steal_loop(quit) CALL(lace_steal_loop, quit)
/**
* Barrier (all workers must enter it before progressing)
*/
@ -364,6 +383,8 @@ static inline void CHECKSTACK(WorkerP *w)
#define CHECKSTACK(w) {}
#endif
void lace_abort_stack_overflow(void) __attribute__((noreturn));
typedef struct
{
Task *t;
@ -382,7 +403,25 @@ void lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task);
void lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task);
void lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head);
#define YIELD_NEWFRAME() { if (unlikely((*(volatile Task**)&lace_newframe.t) != NULL)) lace_yield(__lace_worker, __lace_dq_head); }
#define YIELD_NEWFRAME() { if (unlikely((*(Task* volatile *)&lace_newframe.t) != NULL)) lace_yield(__lace_worker, __lace_dq_head); }
/**
* Compute a random number, thread-local
*/
#define LACE_TRNG (__lace_worker->rng = 2862933555777941757ULL * __lace_worker->rng + 3037000493ULL)
/**
* Make all tasks of the current worker shared.
*/
#define LACE_MAKE_ALL_SHARED() lace_make_all_shared(__lace_worker, __lace_dq_head)
static inline void __attribute__((unused))
lace_make_all_shared( WorkerP *w, Task *__lace_dq_head)
{
if (w->split != __lace_dq_head) {
w->split = __lace_dq_head;
w->_public->ts.ts.split = __lace_dq_head - w->dq;
}
}
#if LACE_PIE_TIMES
static void lace_time_event( WorkerP *w, int event )
@ -620,7 +659,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
RTYPE NAME##_CALL(WorkerP *, Task * ); \
@ -636,7 +675,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head )
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -770,7 +809,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
void NAME##_CALL(WorkerP *, Task * ); \
@ -786,7 +825,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head )
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -923,7 +962,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1); \
@ -939,7 +978,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1)
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -1073,7 +1112,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1); \
@ -1089,7 +1128,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1)
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -1226,7 +1265,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2); \
@ -1242,7 +1281,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2)
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -1376,7 +1415,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2); \
@ -1392,7 +1431,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2)
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -1529,7 +1568,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3); \
@ -1545,7 +1584,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, AT
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -1679,7 +1718,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3); \
@ -1695,7 +1734,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, AT
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -1832,7 +1871,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4);\
@ -1848,7 +1887,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, AT
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -1982,7 +2021,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4);\
@ -1998,7 +2037,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, AT
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -2135,7 +2174,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5);\
@ -2151,7 +2190,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, AT
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -2285,7 +2324,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5);\
@ -2301,7 +2340,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, AT
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -2438,7 +2477,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6);\
@ -2454,7 +2493,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, AT
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -2588,7 +2627,7 @@ typedef struct _TD_##NAME {
} TD_##NAME; \
\
/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 1 : -1];\
typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\
\
void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \
void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6);\
@ -2604,7 +2643,7 @@ void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, AT
TailSplit ts; \
uint32_t head, split, newsplit; \
\
/* assert(__dq_head < w->end); */ /* Assuming to be true */ \
if (__dq_head == w->end) lace_abort_stack_overflow(); \
\
t = (TD_##NAME *)__dq_head; \
t->f = &NAME##_WRAP; \
@ -2731,10 +2770,6 @@ void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq
#define VOID_TASK_6(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) VOID_TASK_DECL_6(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6) VOID_TASK_IMPL_6(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6)
VOID_TASK_DECL_0(lace_steal_random);
VOID_TASK_DECL_1(lace_steal_random_loop, int*);
VOID_TASK_DECL_1(lace_steal_loop, int*);
VOID_TASK_DECL_2(lace_steal_loop_root, Task *, int*);
#ifdef __cplusplus
}

15
resources/3rdparty/sylvan/src/sha2.c

@ -509,9 +509,6 @@ void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) {
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = T2 = 0;
}
#endif /* SHA2_UNROLL_TRANSFORM */
@ -543,8 +540,6 @@ void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
/* The buffer is not yet full */
MEMCPY_BCOPY(&context->buffer[usedspace], data, len);
context->bitcount += len << 3;
/* Clean up: */
usedspace = freespace = 0;
return;
}
}
@ -560,8 +555,6 @@ void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) {
MEMCPY_BCOPY(context->buffer, data, len);
context->bitcount += len << 3;
}
/* Clean up: */
usedspace = freespace = 0;
}
void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
@ -625,7 +618,6 @@ void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) {
/* Clean up state data: */
MEMSET_BZERO(context, sizeof(SHA256_CTX));
usedspace = 0;
}
char *SHA256_End(SHA256_CTX* context, char buffer[]) {
@ -832,9 +824,6 @@ void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) {
context->state[5] += f;
context->state[6] += g;
context->state[7] += h;
/* Clean up */
a = b = c = d = e = f = g = h = T1 = T2 = 0;
}
#endif /* SHA2_UNROLL_TRANSFORM */
@ -866,8 +855,6 @@ void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
/* The buffer is not yet full */
MEMCPY_BCOPY(&context->buffer[usedspace], data, len);
ADDINC128(context->bitcount, len << 3);
/* Clean up: */
usedspace = freespace = 0;
return;
}
}
@ -883,8 +870,6 @@ void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) {
MEMCPY_BCOPY(context->buffer, data, len);
ADDINC128(context->bitcount, len << 3);
}
/* Clean up: */
usedspace = freespace = 0;
}
void SHA512_Last(SHA512_CTX* context) {

0
resources/3rdparty/sylvan/src/sha2.h

245
resources/3rdparty/sylvan/src/stats.c

@ -1,245 +0,0 @@
/*
* Copyright 2011-2014 Formal Methods and Tools, University of Twente
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h> // for errno
#include <string.h> // memset
#include <stats.h>
#include <sys/mman.h>
#include <inttypes.h>
#include <sylvan.h> // for nodes table
#if SYLVAN_STATS
#ifdef __ELF__
__thread sylvan_stats_t sylvan_stats;
#else
pthread_key_t sylvan_stats_key;
#endif
#ifndef USE_HWLOC
#define USE_HWLOC 0
#endif
#if USE_HWLOC
#include <hwloc.h>
static hwloc_topology_t topo;
#endif
VOID_TASK_0(sylvan_stats_reset_perthread)
{
#ifdef __ELF__
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
sylvan_stats.counters[i] = 0;
}
for (int i=0; i<SYLVAN_TIMER_COUNTER; i++) {
sylvan_stats.timers[i] = 0;
}
#else
sylvan_stats_t *sylvan_stats = pthread_getspecific(sylvan_stats_key);
if (sylvan_stats == NULL) {
sylvan_stats = mmap(0, sizeof(sylvan_stats_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
if (sylvan_stats == (sylvan_stats_t *)-1) {
fprintf(stderr, "sylvan_stats: Unable to allocate memory: %s!\n", strerror(errno));
exit(1);
}
#if USE_HWLOC
// Ensure the stats object is on our pu
hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, LACE_WORKER_PU);
hwloc_set_area_membind(topo, sylvan_stats, sizeof(sylvan_stats_t), pu->cpuset, HWLOC_MEMBIND_BIND, 0);
#endif
pthread_setspecific(sylvan_stats_key, sylvan_stats);
}
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
sylvan_stats->counters[i] = 0;
}
for (int i=0; i<SYLVAN_TIMER_COUNTER; i++) {
sylvan_stats->timers[i] = 0;
}
#endif
}
VOID_TASK_IMPL_0(sylvan_stats_init)
{
#ifndef __ELF__
pthread_key_create(&sylvan_stats_key, NULL);
#endif
#if USE_HWLOC
hwloc_topology_init(&topo);
hwloc_topology_load(topo);
#endif
TOGETHER(sylvan_stats_reset_perthread);
}
/**
* Reset all counters (for statistics)
*/
VOID_TASK_IMPL_0(sylvan_stats_reset)
{
TOGETHER(sylvan_stats_reset_perthread);
}
#define BLACK "\33[22;30m"
#define GRAY "\33[01;30m"
#define RED "\33[22;31m"
#define LRED "\33[01;31m"
#define GREEN "\33[22;32m"
#define LGREEN "\33[01;32m"
#define BLUE "\33[22;34m"
#define LBLUE "\33[01;34m"
#define BROWN "\33[22;33m"
#define YELLOW "\33[01;33m"
#define CYAN "\33[22;36m"
#define LCYAN "\33[22;36m"
#define MAGENTA "\33[22;35m"
#define LMAGENTA "\33[01;35m"
#define NC "\33[0m"
#define BOLD "\33[1m"
#define ULINE "\33[4m" //underline
#define BLINK "\33[5m"
#define INVERT "\33[7m"
VOID_TASK_1(sylvan_stats_sum, sylvan_stats_t*, target)
{
#ifdef __ELF__
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
__sync_fetch_and_add(&target->counters[i], sylvan_stats.counters[i]);
}
for (int i=0; i<SYLVAN_TIMER_COUNTER; i++) {
__sync_fetch_and_add(&target->timers[i], sylvan_stats.timers[i]);
}
#else
sylvan_stats_t *sylvan_stats = pthread_getspecific(sylvan_stats_key);
if (sylvan_stats != NULL) {
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
__sync_fetch_and_add(&target->counters[i], sylvan_stats->counters[i]);
}
for (int i=0; i<SYLVAN_TIMER_COUNTER; i++) {
__sync_fetch_and_add(&target->timers[i], sylvan_stats->timers[i]);
}
}
#endif
}
void
sylvan_stats_report(FILE *target, int color)
{
#if !SYLVAN_STATS
(void)target;
(void)color;
return;
#else
(void)color;
sylvan_stats_t totals;
memset(&totals, 0, sizeof(sylvan_stats_t));
LACE_ME;
TOGETHER(sylvan_stats_sum, &totals);
// fix timers for MACH
#ifdef __MACH__
mach_timebase_info_data_t timebase;
mach_timebase_info(&timebase);
uint64_t c = timebase.numer/timebase.denom;
for (int i=0;i<SYLVAN_TIMER_COUNTER;i++) totals.timers[i]*=c;
#endif
if (color) fprintf(target, LRED "*** " BOLD "Sylvan stats" NC LRED " ***" NC);
else fprintf(target, "*** Sylvan stats ***");
if (totals.counters[BDD_NODES_CREATED]) {
if (color) fprintf(target, ULINE LBLUE);
fprintf(target, "\nBDD operations count (cache reuse, cache put)\n");
if (color) fprintf(target, NC);
if (totals.counters[BDD_ITE]) fprintf(target, "ITE: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_ITE], totals.counters[BDD_ITE_CACHED], totals.counters[BDD_ITE_CACHEDPUT]);
if (totals.counters[BDD_AND]) fprintf(target, "AND: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_AND], totals.counters[BDD_AND_CACHED], totals.counters[BDD_AND_CACHEDPUT]);
if (totals.counters[BDD_XOR]) fprintf(target, "XOR: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_XOR], totals.counters[BDD_XOR_CACHED], totals.counters[BDD_XOR_CACHEDPUT]);
if (totals.counters[BDD_EXISTS]) fprintf(target, "Exists: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_EXISTS], totals.counters[BDD_EXISTS_CACHED], totals.counters[BDD_EXISTS_CACHEDPUT]);
if (totals.counters[BDD_AND_EXISTS]) fprintf(target, "AndExists: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_AND_EXISTS], totals.counters[BDD_AND_EXISTS_CACHED], totals.counters[BDD_AND_EXISTS_CACHEDPUT]);
if (totals.counters[BDD_RELNEXT]) fprintf(target, "RelNext: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_RELNEXT], totals.counters[BDD_RELNEXT_CACHED], totals.counters[BDD_RELNEXT_CACHEDPUT]);
if (totals.counters[BDD_RELPREV]) fprintf(target, "RelPrev: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_RELPREV], totals.counters[BDD_RELPREV_CACHED], totals.counters[BDD_RELPREV_CACHEDPUT]);
if (totals.counters[BDD_CLOSURE]) fprintf(target, "Closure: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_CLOSURE], totals.counters[BDD_CLOSURE_CACHED], totals.counters[BDD_CLOSURE_CACHEDPUT]);
if (totals.counters[BDD_COMPOSE]) fprintf(target, "Compose: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_COMPOSE], totals.counters[BDD_COMPOSE_CACHED], totals.counters[BDD_COMPOSE_CACHEDPUT]);
if (totals.counters[BDD_RESTRICT]) fprintf(target, "Restrict: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_RESTRICT], totals.counters[BDD_RESTRICT_CACHED], totals.counters[BDD_RESTRICT_CACHEDPUT]);
if (totals.counters[BDD_CONSTRAIN]) fprintf(target, "Constrain: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_CONSTRAIN], totals.counters[BDD_CONSTRAIN_CACHED], totals.counters[BDD_CONSTRAIN_CACHEDPUT]);
if (totals.counters[BDD_SUPPORT]) fprintf(target, "Support: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_SUPPORT], totals.counters[BDD_SUPPORT_CACHED], totals.counters[BDD_SUPPORT_CACHEDPUT]);
if (totals.counters[BDD_SATCOUNT]) fprintf(target, "SatCount: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_SATCOUNT], totals.counters[BDD_SATCOUNT_CACHED], totals.counters[BDD_SATCOUNT_CACHEDPUT]);
if (totals.counters[BDD_PATHCOUNT]) fprintf(target, "PathCount: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_PATHCOUNT], totals.counters[BDD_PATHCOUNT_CACHED], totals.counters[BDD_PATHCOUNT_CACHEDPUT]);
if (totals.counters[BDD_ISBDD]) fprintf(target, "IsBDD: %'"PRIu64 " (%'"PRIu64", %'"PRIu64 ")\n", totals.counters[BDD_ISBDD], totals.counters[BDD_ISBDD_CACHED], totals.counters[BDD_ISBDD_CACHEDPUT]);
fprintf(target, "BDD Nodes created: %'"PRIu64"\n", totals.counters[BDD_NODES_CREATED]);
fprintf(target, "BDD Nodes reused: %'"PRIu64"\n", totals.counters[BDD_NODES_REUSED]);
}
if (totals.counters[LDD_NODES_CREATED]) {
if (color) fprintf(target, ULINE LBLUE);
fprintf(target, "\nLDD operations count (cache reuse, cache put)\n");
if (color) fprintf(target, NC);
if (totals.counters[LDD_UNION]) fprintf(target, "Union: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_UNION], totals.counters[LDD_UNION_CACHED], totals.counters[LDD_UNION_CACHEDPUT]);
if (totals.counters[LDD_MINUS]) fprintf(target, "Minus: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_MINUS], totals.counters[LDD_MINUS_CACHED], totals.counters[LDD_MINUS_CACHEDPUT]);
if (totals.counters[LDD_INTERSECT]) fprintf(target, "Intersect: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_INTERSECT], totals.counters[LDD_INTERSECT_CACHED], totals.counters[LDD_INTERSECT_CACHEDPUT]);
if (totals.counters[LDD_RELPROD]) fprintf(target, "RelProd: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_RELPROD], totals.counters[LDD_RELPROD_CACHED], totals.counters[LDD_RELPROD_CACHEDPUT]);
if (totals.counters[LDD_RELPREV]) fprintf(target, "RelPrev: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_RELPREV], totals.counters[LDD_RELPREV_CACHED], totals.counters[LDD_RELPREV_CACHEDPUT]);
if (totals.counters[LDD_PROJECT]) fprintf(target, "Project: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_PROJECT], totals.counters[LDD_PROJECT_CACHED], totals.counters[LDD_PROJECT_CACHEDPUT]);
if (totals.counters[LDD_JOIN]) fprintf(target, "Join: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_JOIN], totals.counters[LDD_JOIN_CACHED], totals.counters[LDD_JOIN_CACHEDPUT]);
if (totals.counters[LDD_MATCH]) fprintf(target, "Match: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_MATCH], totals.counters[LDD_MATCH_CACHED], totals.counters[LDD_MATCH_CACHEDPUT]);
if (totals.counters[LDD_SATCOUNT]) fprintf(target, "SatCount: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_SATCOUNT], totals.counters[LDD_SATCOUNT_CACHED], totals.counters[LDD_SATCOUNT_CACHEDPUT]);
if (totals.counters[LDD_SATCOUNTL]) fprintf(target, "SatCountL: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_SATCOUNTL], totals.counters[LDD_SATCOUNTL_CACHED], totals.counters[LDD_SATCOUNTL_CACHEDPUT]);
if (totals.counters[LDD_ZIP]) fprintf(target, "Zip: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_ZIP], totals.counters[LDD_ZIP_CACHED], totals.counters[LDD_ZIP_CACHEDPUT]);
if (totals.counters[LDD_RELPROD_UNION]) fprintf(target, "RelProdUnion: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_RELPROD_UNION], totals.counters[LDD_RELPROD_UNION_CACHED], totals.counters[LDD_RELPROD_UNION_CACHEDPUT]);
if (totals.counters[LDD_PROJECT_MINUS]) fprintf(target, "ProjectMinus: %'"PRIu64 " (%'"PRIu64", %"PRIu64")\n", totals.counters[LDD_PROJECT_MINUS], totals.counters[LDD_PROJECT_MINUS_CACHED], totals.counters[LDD_PROJECT_MINUS_CACHEDPUT]);
fprintf(target, "LDD Nodes created: %'"PRIu64"\n", totals.counters[LDD_NODES_CREATED]);
fprintf(target, "LDD Nodes reused: %'"PRIu64"\n", totals.counters[LDD_NODES_REUSED]);
}
if (color) fprintf(target, ULINE LBLUE);
fprintf(target, "\nGarbage collection\n");
if (color) fprintf(target, NC);
fprintf(target, "Number of GC executions: %'"PRIu64"\n", totals.counters[SYLVAN_GC_COUNT]);
fprintf(target, "Total time spent: %'.6Lf sec.\n", (long double)totals.timers[SYLVAN_GC]/1000000000);
if (color) fprintf(target, ULINE LBLUE);
fprintf(target, "\nTables\n");
if (color) fprintf(target, NC);
fprintf(target, "Unique nodes table: %'zu of %'zu buckets filled.\n", llmsset_count_marked(nodes), llmsset_get_size(nodes));
fprintf(target, "Operation cache: %'zu of %'zu buckets filled.\n", cache_getused(), cache_getsize());
if (color) fprintf(target, ULINE LBLUE);
fprintf(target, "\nUnique table\n");
if (color) fprintf(target, NC);
fprintf(target, "Number of lookup iterations: %'"PRIu64"\n", totals.counters[LLMSSET_LOOKUP]);
#endif
}
#else
VOID_TASK_IMPL_0(sylvan_stats_init)
{
}
VOID_TASK_IMPL_0(sylvan_stats_reset)
{
}
void
sylvan_stats_report(FILE* target, int color)
{
(void)target;
(void)color;
}
#endif

11
resources/3rdparty/sylvan/src/storm_function_wrapper.cpp

@ -462,3 +462,14 @@ MTBDD storm_rational_function_leaf_parameter_replacement(MTBDD dd, storm_rationa
std::map<uint32_t, std::pair<storm::RationalFunctionVariable, std::pair<storm::RationalNumber, storm::RationalNumber>>>* replacements = (std::map<uint32_t, std::pair<storm::RationalFunctionVariable, std::pair<storm::RationalNumber, storm::RationalNumber>>>*)context;
return testiTest(srf_a, *replacements);
}
char* storm_rational_function_to_str(storm_rational_function_ptr val, char *buf, size_t buflen) {
std::lock_guard<std::mutex> lock(carlMutex);
std::stringstream ss;
storm::RationalFunction& srf_a = *(storm::RationalFunction*)val;
ss << srf_a;
std::string s = ss.str();
char* result = (char*)malloc(s.size() + 1);
std::memcpy(result, s.c_str(), s.size() + 1);
return result;
}

2
resources/3rdparty/sylvan/src/storm_function_wrapper.h

@ -49,6 +49,8 @@ MTBDD storm_rational_function_leaf_parameter_replacement(MTBDD dd, storm_rationa
double storm_rational_function_get_constant(storm_rational_function_ptr a);
char* storm_rational_function_to_str(storm_rational_function_ptr val, char *buf, size_t buflen);
#ifdef __cplusplus
}
#endif
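A short usage sketch for the new conversion helper: as the implementation above shows, the buf/buflen parameters are ignored and a freshly malloc'ed string is returned, so the caller must free it. The val argument and print_srf are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include "storm_function_wrapper.h"

void print_srf(storm_rational_function_ptr val)
{
    char *s = storm_rational_function_to_str(val, NULL, 0);  /* buf/buflen are not used  */
    printf("%s\n", s);
    free(s);                                                  /* the wrapper malloc'ed it */
}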

162
resources/3rdparty/sylvan/src/sylvan.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,168 +16,25 @@
*/
/**
* Sylvan: parallel BDD/ListDD package.
* Sylvan: parallel MTBDD/ListDD package.
*
* This is a multi-core implementation of BDDs with complement edges.
* This is a multi-core implementation of MTBDDs with complement edges.
*
 * This package requires the parallel work-stealing framework Lace.
 * Lace must be initialized before initializing Sylvan.
*
* This package uses explicit referencing.
* Use sylvan_ref and sylvan_deref to manage external references.
*
* Garbage collection requires all workers to cooperate. Garbage collection is either initiated
* by the user (calling sylvan_gc) or when the nodes table is full. All Sylvan operations
* check whether they need to cooperate on garbage collection. Garbage collection cannot occur
* otherwise. This means that it is perfectly fine to do this:
* BDD a = sylvan_ref(sylvan_and(b, c));
* since it is not possible that garbage collection occurs between the two calls.
*
* To temporarily disable garbage collection, use sylvan_gc_disable() and sylvan_gc_enable().
*/
#include <sylvan_config.h>
#include <stdint.h>
#include <stdio.h> // for FILE
#include <stdlib.h>
#include <lace.h> // for definitions
#include <sylvan_cache.h>
#include <llmsset.h>
#include <stats.h>
#include <stdlib.h> // for realloc
#ifndef SYLVAN_H
#define SYLVAN_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#ifndef SYLVAN_SIZE_FIBONACCI
#define SYLVAN_SIZE_FIBONACCI 0
#endif
// For now, only support 64-bit systems
typedef char __sylvan_check_size_t_is_8_bytes[(sizeof(uint64_t) == sizeof(size_t))?1:-1];
/**
* Initialize the Sylvan parallel decision diagrams package.
*
* After initialization, call sylvan_init_bdd and/or sylvan_init_ldd if you want to use
* the BDD and/or LDD functionality.
*
* BDDs and LDDs share a common node table and operations cache.
*
* The node table is resizable.
* The table is resized automatically when >50% of the table is filled during garbage collection.
* This behavior can be customized by overriding the gc hook.
*
* Memory usage:
* Every node requires 24 bytes memory. (16 bytes data + 8 bytes overhead)
* Every operation cache entry requires 36 bytes memory. (32 bytes data + 4 bytes overhead)
*
* Reasonable defaults: datasize of 1L<<26 (2048 MB), cachesize of 1L<<25 (1152 MB)
*/
void sylvan_init_package(size_t initial_tablesize, size_t max_tablesize, size_t initial_cachesize, size_t max_cachesize);
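A sketch of the initialization sequence described in the comment above, using the mentioned maxima; the starting sizes (2^22) are illustrative, and whether sylvan_init_bdd still takes the granularity argument depends on which side of this diff is built.

#include "sylvan.h"

int main(void)
{
    lace_init(0, 0);                  /* autodetect the number of workers */
    lace_startup(0, NULL, NULL);      /* current thread becomes worker 0  */

    /* nodes table grows from 2^22 to 2^26 buckets, cache from 2^22 to 2^25 */
    sylvan_init_package(1LL<<22, 1LL<<26, 1LL<<22, 1LL<<25);
    sylvan_init_bdd(1);               /* old API; newer API: sylvan_init_bdd() + sylvan_set_granularity(1) */

    /* ... build and manipulate BDDs/LDDs ... */

    sylvan_quit();
    lace_exit();
    return 0;
}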
/**
* Frees all Sylvan data (also calls the quit() functions of BDD/MDD parts)
*/
void sylvan_quit();
/**
* Return number of occupied buckets in nodes table and total number of buckets.
*/
VOID_TASK_DECL_2(sylvan_table_usage, size_t*, size_t*);
#define sylvan_table_usage(filled, total) (CALL(sylvan_table_usage, filled, total))
/**
* Perform garbage collection.
*
* Garbage collection is performed in a new Lace frame, interrupting all ongoing work
* until garbage collection is completed.
*
* Garbage collection procedure:
* 1) The operation cache is cleared and the hash table is reset.
* 2) All live nodes are marked (to be rehashed). This is done by the "mark" callbacks.
* 3) The "hook" callback is called.
* By default, this doubles the hash table size when it is >50% full.
* 4) All live nodes are rehashed into the hash table.
*
* The behavior of garbage collection can be customized by adding "mark" callbacks and
* replacing the "hook" callback.
*/
VOID_TASK_DECL_0(sylvan_gc);
#define sylvan_gc() (CALL(sylvan_gc))
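A small sketch of interacting with garbage collection as described above: installing one of the predefined resize hooks (declared further below) and triggering a collection manually. The TASK() macro from lace.h is assumed to yield the callable expected by sylvan_gc_set_hook, and LACE_ME provides the Lace context the sylvan_gc() macro needs.

LACE_ME;

sylvan_gc_set_hook(TASK(sylvan_gc_aggressive_resize));  /* always double the table on gc       */
sylvan_gc();                                            /* clear cache, mark, resize, rehash   */

sylvan_gc_disable();                                    /* calls to sylvan_gc() are now no-ops */
/* ... phase in which no collection may happen ... */
sylvan_gc_enable();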
/**
* Enable or disable garbage collection.
*
* This affects both automatic and manual garbage collection, i.e.,
* calling sylvan_gc() while garbage collection is disabled does not have any effect.
*/
void sylvan_gc_enable();
void sylvan_gc_disable();
/**
* Add a "mark" callback to the list of callbacks.
*
* These are called during garbage collection to recursively mark nodes.
*
* Default "mark" functions that mark external references (via sylvan_ref) and internal
 * references (inside operations) are added by sylvan_init_bdd/sylvan_init_ldd.
*
* Functions are called in order.
* level 10: marking functions of Sylvan (external/internal references)
* level 20: call the hook function (for resizing)
* level 30: rehashing
*/
LACE_TYPEDEF_CB(void, gc_mark_cb);
void sylvan_gc_add_mark(int order, gc_mark_cb callback);
/**
* Set "hook" callback. There can be only one.
*
* The hook is called after the "mark" phase and before the "rehash" phase.
* This allows users to perform certain actions, such as resizing the nodes table
* and the operation cache. Also, dynamic resizing could be performed then.
*/
LACE_TYPEDEF_CB(void, gc_hook_cb);
void sylvan_gc_set_hook(gc_hook_cb new_hook);
/**
* One of the hooks for resizing behavior.
* Default if SYLVAN_AGGRESSIVE_RESIZE is set.
* Always double size on gc() until maximum reached.
*/
VOID_TASK_DECL_0(sylvan_gc_aggressive_resize);
/**
* One of the hooks for resizing behavior.
* Default if SYLVAN_AGGRESSIVE_RESIZE is not set.
* Double size on gc() whenever >50% is used.
*/
VOID_TASK_DECL_0(sylvan_gc_default_hook);
/**
* Set "notify on dead" callback for the nodes table.
* See also documentation in llmsset.h
*/
#define sylvan_set_ondead(cb, ctx) llmsset_set_ondead(nodes, cb, ctx)
/**
* Global variables (number of workers, nodes table)
*/
extern llmsset_t nodes;
#ifdef __cplusplus
}
#endif /* __cplusplus */
#include <lace.h>
#include <sylvan_tls.h>
#include <sylvan_common.h>
#include <sylvan_stats.h>
#include <sylvan_mtbdd.h>
#include <sylvan_bdd.h>
#include <sylvan_ldd.h>
#include <sylvan_mtbdd.h>
#endif

1478
resources/3rdparty/sylvan/src/sylvan_bdd.c
File diff suppressed because it is too large

277
resources/3rdparty/sylvan/src/sylvan_bdd.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,76 +17,40 @@
/* Do not include this file directly. Instead, include sylvan.h */
#include <tls.h>
#ifndef SYLVAN_BDD_H
#define SYLVAN_BDD_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
typedef uint64_t BDD; // low 40 bits used for index, highest bit for complement, rest 0
// BDDSET uses the BDD node hash table. A BDDSET is an ordered BDD.
typedef uint64_t BDDSET; // encodes a set of variables (e.g. for exists etc.)
// BDDMAP also uses the BDD node hash table. A BDDMAP is *not* an ordered BDD.
typedef uint64_t BDDMAP; // encodes a function of variable->BDD (e.g. for substitute)
typedef uint32_t BDDVAR; // low 24 bits only
#define sylvan_complement ((uint64_t)0x8000000000000000)
#define sylvan_false ((BDD)0x0000000000000000)
#define sylvan_true (sylvan_false|sylvan_complement)
#define sylvan_invalid ((BDD)0x7fffffffffffffff)
/* For strictly non-MT BDDs */
#define sylvan_isconst(bdd) (bdd == sylvan_true || bdd == sylvan_false)
#define sylvan_isnode(bdd) (bdd != sylvan_true && bdd != sylvan_false)
/**
* Initialize BDD functionality.
*
* Granularity (BDD only) determines usage of operation cache. Smallest value is 1: use the operation cache always.
* Higher values mean that the cache is used less often. Variables are grouped such that
* the cache is used when going to the next group, i.e., with granularity=3, variables [0,1,2] are in the
* first group, [3,4,5] in the next, etc. Then no caching occur between 0->1, 1->2, 0->2. Caching occurs
* on 0->3, 1->4, 2->3, etc.
* Granularity (BDD only) determines usage of operation cache.
* The smallest value is 1: use the operation cache always.
* Higher values mean that the cache is used less often. Variables are grouped
* such that the cache is used when going to the next group, i.e., with
* granularity=3, variables [0,1,2] are in the first group, [3,4,5] in the next, etc.
 * Then no caching occurs between 0->1, 1->2, 0->2. Caching occurs on 0->3, 1->4, 2->3, etc.
*
* A reasonable default is a granularity of 4-16, strongly depending on the structure of the BDDs.
* The appropriate value depends on the number of variables and the structure of
* the decision diagrams. When in doubt, choose a low value (1-5). The performance
* gain can be around 0-10%, so it is not extremely important.
*/
void sylvan_init_bdd(int granularity);
void sylvan_set_granularity(int granularity);
int sylvan_get_granularity(void);
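A one-line sketch of the new granularity setters declared here, assuming (per this diff) that granularity is now configured separately from sylvan_init_bdd:

sylvan_set_granularity(4);               /* consult the operation cache once per 4 variable levels */
int g = sylvan_get_granularity();        /* g == 4 */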
/* Create a BDD representing just <var> or the negation of <var> */
BDD sylvan_ithvar(BDDVAR var);
static inline BDD sylvan_nithvar(BDD var) { return sylvan_ithvar(var) ^ sylvan_complement; }
/* Retrieve the <var> of the BDD node <bdd> */
BDDVAR sylvan_var(BDD bdd);
/* Follow <low> and <high> edges */
BDD sylvan_low(BDD bdd);
BDD sylvan_high(BDD bdd);
/* Add or remove external reference to BDD */
BDD sylvan_ref(BDD a);
void sylvan_deref(BDD a);
#define sylvan_nithvar(var) sylvan_not(sylvan_ithvar(var))
/* For use in custom mark functions */
VOID_TASK_DECL_1(sylvan_gc_mark_rec, BDD);
#define sylvan_gc_mark_rec(mdd) CALL(sylvan_gc_mark_rec, mdd)
/* Return the number of external references */
size_t sylvan_count_refs();
/* Add or remove BDD pointers to protect (indirect external references) */
void sylvan_protect(BDD* ptr);
void sylvan_unprotect(BDD* ptr);
/* Return the number of protected BDD pointers */
size_t sylvan_count_protected();
/* Mark BDD for "notify on dead" */
#define sylvan_notify_ondead(bdd) llmsset_notify_ondead(nodes, bdd&~sylvan_complement)
/* Unary, binary and if-then-else operations */
/*
* Unary, binary and if-then-else operations.
* These operations are all implemented by NOT, AND and XOR.
*/
#define sylvan_not(a) (((BDD)a)^sylvan_complement)
TASK_DECL_4(BDD, sylvan_ite, BDD, BDD, BDD, BDDVAR);
#define sylvan_ite(a,b,c) (CALL(sylvan_ite,a,b,c,0))
@ -93,7 +58,6 @@ TASK_DECL_3(BDD, sylvan_and, BDD, BDD, BDDVAR);
#define sylvan_and(a,b) (CALL(sylvan_and,a,b,0))
TASK_DECL_3(BDD, sylvan_xor, BDD, BDD, BDDVAR);
#define sylvan_xor(a,b) (CALL(sylvan_xor,a,b,0))
/* Do not use nested calls for xor/equiv parameter b! */
#define sylvan_equiv(a,b) sylvan_not(sylvan_xor(a,b))
#define sylvan_or(a,b) sylvan_not(sylvan_and(sylvan_not(a),sylvan_not(b)))
#define sylvan_nand(a,b) sylvan_not(sylvan_and(a,b))
@ -104,18 +68,31 @@ TASK_DECL_3(BDD, sylvan_xor, BDD, BDD, BDDVAR);
#define sylvan_diff(a,b) sylvan_and(a,sylvan_not(b))
#define sylvan_less(a,b) sylvan_and(sylvan_not(a),b)
/* Existential and Universal quantifiers */
/**
* Existential and universal quantification.
*/
TASK_DECL_3(BDD, sylvan_exists, BDD, BDD, BDDVAR);
#define sylvan_exists(a, vars) (CALL(sylvan_exists, a, vars, 0))
#define sylvan_forall(a, vars) (sylvan_not(CALL(sylvan_exists, sylvan_not(a), vars, 0)))
/**
* Compute \exists v: A(...) \and B(...)
* Parameter vars is the cube (conjunction) of all v variables.
 * Projection. (Same as existential quantification, but <vars> contains the variables to keep.)
*/
TASK_DECL_2(BDD, sylvan_project, BDD, BDD);
#define sylvan_project(a, vars) CALL(sylvan_project, a, vars)
/**
* Compute \exists <vars>: <a> \and <b>
*/
TASK_DECL_4(BDD, sylvan_and_exists, BDD, BDD, BDDSET, BDDVAR);
#define sylvan_and_exists(a,b,vars) CALL(sylvan_and_exists,a,b,vars,0)
/**
* Compute and_exists, but as a projection (only keep given variables)
*/
TASK_DECL_3(BDD, sylvan_and_project, BDD, BDD, BDDSET);
#define sylvan_and_project(a,b,vars) CALL(sylvan_and_project,a,b,vars)
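A sketch of the quantification operators above on two fresh variables; it assumes a Lace context via LACE_ME and ignores referencing (sylvan_ref/sylvan_protect) for brevity.

LACE_ME;

BDD x0 = sylvan_ithvar(0);
BDD x1 = sylvan_ithvar(1);
BDD f  = sylvan_and(x0, x1);
BDD vars = x0;                            /* a variable set is a cube; here just {x0}   */

BDD g = sylvan_exists(f, vars);           /* \exists x0: x0 & x1  ==  x1                */
BDD h = sylvan_and_exists(x0, x1, vars);  /* the same result, computed in a single pass */
BDD p = sylvan_and_project(x0, x1, x1);   /* keep only {x1}: again equal to x1          */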
/**
* Compute R(s,t) = \exists x: A(s,x) \and B(x,t)
* or R(s) = \exists x: A(s,x) \and B(x)
@ -159,106 +136,33 @@ TASK_DECL_2(BDD, sylvan_closure, BDD, BDDVAR);
#define sylvan_closure(a) CALL(sylvan_closure,a,0);
/**
* Calculate a@b (a constrain b), such that (b -> a@b) = (b -> a)
 * Compute f@c (f constrain c), such that f and f@c are the same when c is true.
 * The BDD c is also called the "care function".
* Special cases:
* - a@0 = 0
* - a@1 = f
* - 0@b = 0
* - 1@b = 1
* - a@a = 1
* - a@not(a) = 0
* - f@0 = 0
* - f@1 = f
* - 0@c = 0
* - 1@c = 1
* - f@f = 1
* - f@not(f) = 0
*/
TASK_DECL_3(BDD, sylvan_constrain, BDD, BDD, BDDVAR);
#define sylvan_constrain(f,c) (CALL(sylvan_constrain, (f), (c), 0))
TASK_DECL_3(BDD, sylvan_restrict, BDD, BDD, BDDVAR);
#define sylvan_restrict(f,c) (CALL(sylvan_restrict, (f), (c), 0))
TASK_DECL_3(BDD, sylvan_compose, BDD, BDDMAP, BDDVAR);
#define sylvan_compose(f,m) (CALL(sylvan_compose, (f), (m), 0))
/**
* Calculate the support of a BDD.
* A variable v is in the support of a Boolean function f iff f[v<-0] != f[v<-1]
* It is also the set of all variables in the BDD nodes of the BDD.
*/
TASK_DECL_1(BDD, sylvan_support, BDD);
#define sylvan_support(bdd) (CALL(sylvan_support, bdd))
/**
* A set of BDD variables is a cube (conjunction) of variables in their positive form.
* Note 2015-06-10: This used to be a union (disjunction) of variables in their positive form.
*/
// empty bddset
#define sylvan_set_empty() sylvan_true
#define sylvan_set_isempty(set) (set == sylvan_true)
// add variables to the bddset
#define sylvan_set_add(set, var) sylvan_and(set, sylvan_ithvar(var))
#define sylvan_set_addall(set, set_to_add) sylvan_and(set, set_to_add)
// remove variables from the bddset
#define sylvan_set_remove(set, var) sylvan_exists(set, var)
#define sylvan_set_removeall(set, set_to_remove) sylvan_exists(set, set_to_remove)
// iterate through all variables
#define sylvan_set_var(set) (sylvan_var(set))
#define sylvan_set_next(set) (sylvan_high(set))
int sylvan_set_in(BDDSET set, BDDVAR var);
size_t sylvan_set_count(BDDSET set);
void sylvan_set_toarray(BDDSET set, BDDVAR *arr);
// variables in arr should be ordered
TASK_DECL_2(BDDSET, sylvan_set_fromarray, BDDVAR*, size_t);
#define sylvan_set_fromarray(arr, length) ( CALL(sylvan_set_fromarray, arr, length) )
void sylvan_test_isset(BDDSET set);
/**
* BDDMAP maps BDDVAR-->BDD, implemented using BDD nodes.
* Based on disjunction of variables, but with high edges to BDDs instead of True terminals.
*/
// empty bddmap
static inline BDDMAP sylvan_map_empty() { return sylvan_false; }
static inline int sylvan_map_isempty(BDDMAP map) { return map == sylvan_false ? 1 : 0; }
// add key-value pairs to the bddmap
BDDMAP sylvan_map_add(BDDMAP map, BDDVAR key, BDD value);
BDDMAP sylvan_map_addall(BDDMAP map_1, BDDMAP map_2);
// remove key-value pairs from the bddmap
BDDMAP sylvan_map_remove(BDDMAP map, BDDVAR key);
BDDMAP sylvan_map_removeall(BDDMAP map, BDDSET toremove);
// iterate through all pairs
static inline BDDVAR sylvan_map_key(BDDMAP map) { return sylvan_var(map); }
static inline BDD sylvan_map_value(BDDMAP map) { return sylvan_high(map); }
static inline BDDMAP sylvan_map_next(BDDMAP map) { return sylvan_low(map); }
// is a key in the map
int sylvan_map_in(BDDMAP map, BDDVAR key);
// count number of keys
size_t sylvan_map_count(BDDMAP map);
// convert a BDDSET (cube of variables) to a map, with all variables pointing on the value
BDDMAP sylvan_set_to_map(BDDSET set, BDD value);
/**
* Node creation primitive.
* Careful: does not check ordering!
* Preferably only use when debugging!
*/
BDD sylvan_makenode(BDDVAR level, BDD low, BDD high);
#define sylvan_constrain(f,c) (CALL(sylvan_constrain, f, c, 0))
/**
* Write a DOT representation of a BDD
 * Compute the restrict operation f@c, which uses a heuristic to minimize the BDD f with respect to a care function c.
 * Similar to constrain, but it avoids introducing variables from c into f.
*/
void sylvan_printdot(BDD bdd);
void sylvan_fprintdot(FILE *out, BDD bdd);
TASK_DECL_3(BDD, sylvan_restrict, BDD, BDD, BDDVAR);
#define sylvan_restrict(f,c) (CALL(sylvan_restrict, f, c, 0))
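A tiny sketch of constrain and restrict on the special cases listed above (Lace context assumed); with a single positive literal as care function, both operators reduce to the cofactor.

LACE_ME;

BDD a = sylvan_ithvar(0);
BDD b = sylvan_ithvar(1);
BDD f = sylvan_or(a, b);

BDD fa = sylvan_constrain(f, a);   /* on the care set a=1, f is constantly true, so fa == sylvan_true */
BDD fr = sylvan_restrict(f, a);    /* restrict's heuristic gives the same cofactor here: also true    */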
/**
* Write a DOT representation of a BDD.
* This variant does not print complement edges.
* Function composition.
* For each node with variable <key> which has a <key,value> pair in <map>,
* replace the node by the result of sylvan_ite(<value>, <low>, <high>).
*/
void sylvan_printdot_nc(BDD bdd);
void sylvan_fprintdot_nc(FILE *out, BDD bdd);
void sylvan_print(BDD bdd);
void sylvan_fprint(FILE *f, BDD bdd);
void sylvan_printsha(BDD bdd);
void sylvan_fprintsha(FILE *f, BDD bdd);
void sylvan_getsha(BDD bdd, char *target); // target must be at least 65 bytes...
TASK_DECL_3(BDD, sylvan_compose, BDD, BDDMAP, BDDVAR);
#define sylvan_compose(f,m) (CALL(sylvan_compose, (f), (m), 0))
/**
* Calculate number of satisfying variable assignments.
@ -299,6 +203,9 @@ int sylvan_sat_one(BDD bdd, BDDSET variables, uint8_t* str);
BDD sylvan_sat_one_bdd(BDD bdd);
#define sylvan_pick_cube sylvan_sat_one_bdd
BDD sylvan_sat_single(BDD bdd, BDDSET vars);
#define sylvan_pick_single_cube sylvan_sat_single
/**
* Enumerate all satisfying variable assignments from the given <bdd> using variables <vars>.
* Calls <cb> with four parameters: a user-supplied context, the array of BDD variables in <vars>,
@ -326,11 +233,6 @@ TASK_DECL_4(BDD, sylvan_collect, BDD, BDDSET, sylvan_collect_cb, void*);
TASK_DECL_2(double, sylvan_pathcount, BDD, BDDVAR);
#define sylvan_pathcount(bdd) (CALL(sylvan_pathcount, bdd, 0))
/**
* Compute the number of BDD nodes in the BDD
*/
size_t sylvan_nodecount(BDD a);
/**
* SAVING:
* use sylvan_serialize_add on every BDD you want to store
@ -345,74 +247,25 @@ size_t sylvan_nodecount(BDD a);
* use sylvan_serialize_reset to free all allocated structures
* use sylvan_serialize_totext to write a textual list of tuples of all BDDs.
* format: [(<key>,<level>,<key_low>,<key_high>,<complement_high>),...]
*
* for the old sylvan_print functions, use sylvan_serialize_totext
*/
size_t sylvan_serialize_add(BDD bdd);
size_t sylvan_serialize_get(BDD bdd);
BDD sylvan_serialize_get_reversed(size_t value);
void sylvan_serialize_reset();
void sylvan_serialize_reset(void);
void sylvan_serialize_totext(FILE *out);
void sylvan_serialize_tofile(FILE *out);
void sylvan_serialize_fromfile(FILE *in);
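A sketch of the save/load protocol summarized above; "f.bdd" and the BDD f are illustrative, and error checking is omitted.

/* saving */
sylvan_serialize_reset();
size_t key = sylvan_serialize_add(f);        /* register f and remember its key     */
FILE *out = fopen("f.bdd", "w");
sylvan_serialize_tofile(out);
fclose(out);

/* loading, e.g. in a fresh session after the same sylvan_init_package setup */
FILE *in = fopen("f.bdd", "r");
sylvan_serialize_fromfile(in);
fclose(in);
BDD g = sylvan_serialize_get_reversed(key);  /* the stored key maps back to the BDD */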
/**
* For debugging
* if (part of) the BDD is not 'marked' in the nodes table, assertion fails
* if the BDD is not ordered, returns 0
* if nicely ordered, returns 1
*/
TASK_DECL_1(int, sylvan_test_isbdd, BDD);
#define sylvan_test_isbdd(bdd) CALL(sylvan_test_isbdd, bdd)
/* Infrastructure for internal markings */
typedef struct bdd_refs_internal
{
size_t r_size, r_count;
size_t s_size, s_count;
BDD *results;
Task **spawns;
} *bdd_refs_internal_t;
extern DECLARE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t);
static inline BDD
bdd_refs_push(BDD bdd)
static void __attribute__((unused))
sylvan_fprint(FILE *f, BDD bdd)
{
LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t);
if (bdd_refs_key->r_count >= bdd_refs_key->r_size) {
bdd_refs_key->r_size *= 2;
bdd_refs_key->results = (BDD*)realloc(bdd_refs_key->results, sizeof(BDD) * bdd_refs_key->r_size);
}
bdd_refs_key->results[bdd_refs_key->r_count++] = bdd;
return bdd;
sylvan_serialize_reset();
size_t v = sylvan_serialize_add(bdd);
fprintf(f, "%s%zu,", bdd&sylvan_complement?"!":"", v);
sylvan_serialize_totext(f);
}
static inline void
bdd_refs_pop(int amount)
{
LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t);
bdd_refs_key->r_count-=amount;
}
static inline void
bdd_refs_spawn(Task *t)
{
LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t);
if (bdd_refs_key->s_count >= bdd_refs_key->s_size) {
bdd_refs_key->s_size *= 2;
bdd_refs_key->spawns = (Task**)realloc(bdd_refs_key->spawns, sizeof(Task*) * bdd_refs_key->s_size);
}
bdd_refs_key->spawns[bdd_refs_key->s_count++] = t;
}
static inline BDD
bdd_refs_sync(BDD result)
{
LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t);
bdd_refs_key->s_count--;
return result;
}
#define sylvan_print(dd) sylvan_fprint(stdout, dd)
#include "sylvan_bdd_storm.h"

87
resources/3rdparty/sylvan/src/sylvan_bdd_int.h

@ -1,87 +0,0 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internals for BDDs
*/
#ifndef SYLVAN_BDD_INT_H
#define SYLVAN_BDD_INT_H
/**
* Complement handling macros
*/
#define BDD_HASMARK(s) (s&sylvan_complement?1:0)
#define BDD_TOGGLEMARK(s) (s^sylvan_complement)
#define BDD_STRIPMARK(s) (s&~sylvan_complement)
#define BDD_TRANSFERMARK(from, to) (to ^ (from & sylvan_complement))
// Equal under mark
#define BDD_EQUALM(a, b) ((((a)^(b))&(~sylvan_complement))==0)
/**
* BDD node structure
*/
typedef struct __attribute__((packed)) bddnode {
uint64_t a, b;
} * bddnode_t; // 16 bytes
#define BDD_GETNODE(bdd) ((bddnode_t)llmsset_index_to_ptr(nodes, bdd&0x000000ffffffffff))
static inline int __attribute__((unused))
bddnode_getcomp(bddnode_t n)
{
return n->a & 0x8000000000000000 ? 1 : 0;
}
static inline uint64_t
bddnode_getlow(bddnode_t n)
{
return n->b & 0x000000ffffffffff; // 40 bits
}
static inline uint64_t
bddnode_gethigh(bddnode_t n)
{
return n->a & 0x800000ffffffffff; // 40 bits plus high bit of first
}
static inline uint32_t
bddnode_getvariable(bddnode_t n)
{
return (uint32_t)(n->b >> 40);
}
static inline int
bddnode_getmark(bddnode_t n)
{
return n->a & 0x2000000000000000 ? 1 : 0;
}
static inline void
bddnode_setmark(bddnode_t n, int mark)
{
if (mark) n->a |= 0x2000000000000000;
else n->a &= 0xdfffffffffffffff;
}
static inline void
bddnode_makenode(bddnode_t n, uint32_t var, uint64_t low, uint64_t high)
{
n->a = high;
n->b = ((uint64_t)var)<<40 | low;
}
#endif

6
resources/3rdparty/sylvan/src/sylvan_bdd_storm.c

@ -23,7 +23,7 @@ TASK_IMPL_3(BDD, sylvan_existsRepresentative, BDD, a, BDD, variables, BDDVAR, pr
}
sylvan_ref(res);
BDD res1 = sylvan_ite(sylvan_ithvar(bddnode_getvariable(BDD_GETNODE(variables))), sylvan_false, res);
BDD res1 = sylvan_ite(sylvan_ithvar(bddnode_getvariable(MTBDD_GETNODE(variables))), sylvan_false, res);
if (res1 == sylvan_invalid) {
sylvan_deref(res);
return sylvan_invalid;
@ -39,10 +39,10 @@ TASK_IMPL_3(BDD, sylvan_existsRepresentative, BDD, a, BDD, variables, BDDVAR, pr
return a;
}
/* From now on, f and cube are non-constant. */
bddnode_t na = BDD_GETNODE(a);
bddnode_t na = MTBDD_GETNODE(a);
BDDVAR level = bddnode_getvariable(na);
bddnode_t nv = BDD_GETNODE(variables);
bddnode_t nv = MTBDD_GETNODE(variables);
BDDVAR vv = bddnode_getvariable(nv);
//printf("a level %i and cube level %i\n", level, vv);

101
resources/3rdparty/sylvan/src/sylvan_cache.c

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -43,6 +44,18 @@
* Therefore, size 2^N = 36*(2^N) bytes.
*/
struct __attribute__((packed)) cache6_entry {
uint64_t a;
uint64_t b;
uint64_t c;
uint64_t res;
uint64_t d;
uint64_t e;
uint64_t f;
uint64_t res2;
};
typedef struct cache6_entry *cache6_entry_t;
struct __attribute__((packed)) cache_entry {
uint64_t a;
uint64_t b;
@ -83,6 +96,84 @@ cache_hash(uint64_t a, uint64_t b, uint64_t c)
return hash;
}
static uint64_t
cache_hash6(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e, uint64_t f)
{
const uint64_t prime = 1099511628211;
uint64_t hash = 14695981039346656037LLU;
hash = (hash ^ (a>>32));
hash = (hash ^ a) * prime;
hash = (hash ^ b) * prime;
hash = (hash ^ c) * prime;
hash = (hash ^ d) * prime;
hash = (hash ^ e) * prime;
hash = (hash ^ f) * prime;
return hash;
}
int
cache_get6(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e, uint64_t f, uint64_t *res1, uint64_t *res2)
{
const uint64_t hash = cache_hash6(a, b, c, d, e, f);
#if CACHE_MASK
volatile uint64_t *s_bucket = (uint64_t*)cache_status + (hash & cache_mask)/2;
cache6_entry_t bucket = (cache6_entry_t)cache_table + (hash & cache_mask)/2;
#else
volatile uint64_t *s_bucket = (uint64_t*)cache_status + (hash % cache_size)/2;
cache6_entry_t bucket = (cache6_entry_t)cache_table + (hash % cache_size)/2;
#endif
const uint64_t s = *s_bucket;
compiler_barrier();
// abort if locked or second part of 2-part entry or if different hash
uint64_t x = ((hash>>32) & 0x7fff0000) | 0x04000000;
x = x | (x<<32);
if ((s & 0xffff0000ffff0000) != x) return 0;
// abort if key different
if (bucket->a != a || bucket->b != b || bucket->c != c) return 0;
if (bucket->d != d || bucket->e != e || bucket->f != f) return 0;
*res1 = bucket->res;
if (res2) *res2 = bucket->res2;
compiler_barrier();
// abort if status field changed after compiler_barrier()
return *s_bucket == s ? 1 : 0;
}
int
cache_put6(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e, uint64_t f, uint64_t res1, uint64_t res2)
{
const uint64_t hash = cache_hash6(a, b, c, d, e, f);
#if CACHE_MASK
volatile uint64_t *s_bucket = (uint64_t*)cache_status + (hash & cache_mask)/2;
cache6_entry_t bucket = (cache6_entry_t)cache_table + (hash & cache_mask)/2;
#else
volatile uint64_t *s_bucket = (uint64_t*)cache_status + (hash % cache_size)/2;
cache6_entry_t bucket = (cache6_entry_t)cache_table + (hash % cache_size)/2;
#endif
const uint64_t s = *s_bucket;
// abort if locked
if (s & 0x8000000080000000LL) return 0;
// create new
uint64_t new_s = ((hash>>32) & 0x7fff0000) | 0x04000000;
new_s |= (new_s<<32);
new_s |= (((s>>32)+1)&0xffff)<<32;
new_s |= (s+1)&0xffff;
// use cas to claim bucket
if (!cas(s_bucket, s, new_s | 0x8000000080000000LL)) return 0;
// cas successful: write data
bucket->a = a;
bucket->b = b;
bucket->c = c;
bucket->d = d;
bucket->e = e;
bucket->f = f;
bucket->res = res1;
bucket->res2 = res2;
compiler_barrier();
// after compiler_barrier(), unlock status field
*s_bucket = new_s;
return 1;
}
int
cache_get(uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
{
@ -96,10 +187,10 @@ cache_get(uint64_t a, uint64_t b, uint64_t c, uint64_t *res)
#endif
const uint32_t s = *s_bucket;
compiler_barrier();
// abort if locked
if (s & 0x80000000) return 0;
// abort if locked or if part of a 2-part cache entry
if (s & 0xc0000000) return 0;
// abort if different hash
if ((s ^ (hash>>32)) & 0x7fff0000) return 0;
if ((s ^ (hash>>32)) & 0x3fff0000) return 0;
// abort if key different
if (bucket->a != a || bucket->b != b || bucket->c != c) return 0;
*res = bucket->res;
@ -123,7 +214,7 @@ cache_put(uint64_t a, uint64_t b, uint64_t c, uint64_t res)
// abort if locked
if (s & 0x80000000) return 0;
// abort if hash identical -> no: in iscasmc this occasionally causes timeouts?!
const uint32_t hash_mask = (hash>>32) & 0x7fff0000;
const uint32_t hash_mask = (hash>>32) & 0x3fff0000;
// if ((s & 0x7fff0000) == hash_mask) return 0;
// use cas to claim bucket
const uint32_t new_s = ((s+1) & 0x0000ffff) | hash_mask;

37
resources/3rdparty/sylvan/src/sylvan_cache.h

@ -1,7 +1,24 @@
#include <stdint.h> // for uint32_t etc
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sylvan_config.h>
#include <stdint.h> // for uint32_t etc
#ifndef CACHE_H
#define CACHE_H
@ -40,10 +57,16 @@ typedef struct cache_entry *cache_entry_t;
int cache_get(uint64_t a, uint64_t b, uint64_t c, uint64_t *res);
int cache_put(uint64_t a, uint64_t b, uint64_t c, uint64_t res);
/**
* Primitives for cache get/put that use two buckets
*/
int cache_get6(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e, uint64_t f, uint64_t *res1, uint64_t *res2);
int cache_put6(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e, uint64_t f, uint64_t res1, uint64_t res2);
/**
* Helper function to get next 'operation id' (during initialization of modules)
*/
uint64_t cache_next_opid();
uint64_t cache_next_opid(void);
/**
* dd must be MTBDD, d2/d3 can be anything
@ -94,17 +117,17 @@ cache_put4(uint64_t opid, uint64_t dd, uint64_t dd2, uint64_t dd3, uint64_t dd4,
void cache_create(size_t _cache_size, size_t _max_size);
void cache_free();
void cache_free(void);
void cache_clear();
void cache_clear(void);
void cache_setsize(size_t size);
size_t cache_getused();
size_t cache_getused(void);
size_t cache_getsize();
size_t cache_getsize(void);
size_t cache_getmaxsize();
size_t cache_getmaxsize(void);
#ifdef __cplusplus
}

415
resources/3rdparty/sylvan/src/sylvan_common.c

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,166 +15,193 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <sylvan.h>
#include <sylvan_common.h>
#include <sylvan_int.h>
#ifndef cas
#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new)))
#endif
/**
* Static global variables
* Implementation of garbage collection
*/
llmsset_t nodes;
/**
* Retrieve nodes
* Whether garbage collection is enabled or not.
*/
static int gc_enabled = 1;
llmsset_t
__sylvan_get_internal_data()
/**
* Enable garbage collection (both automatic and manual).
*/
void
sylvan_gc_enable()
{
return nodes;
gc_enabled = 1;
}
/**
* Calculate table usage (in parallel)
* Disable garbage collection (both automatic and manual).
*/
VOID_TASK_IMPL_2(sylvan_table_usage, size_t*, filled, size_t*, total)
void
sylvan_gc_disable()
{
size_t tot = llmsset_get_size(nodes);
if (filled != NULL) *filled = llmsset_count_marked(nodes);
if (total != NULL) *total = tot;
gc_enabled = 0;
}
/**
* Implementation of garbage collection
* This variable is used for a cas flag so only one gc runs at one time
*/
static int gc_enabled = 1;
static volatile int gc; // variable used in cas switch to ensure only one gc at a time
static volatile int gc;
struct reg_gc_mark_entry
/**
* Structures for the marking mechanisms
*/
typedef struct gc_hook_entry
{
struct reg_gc_mark_entry *next;
gc_mark_cb cb;
int order;
};
struct gc_hook_entry *next;
gc_hook_cb cb;
} * gc_hook_entry_t;
static struct reg_gc_mark_entry *gc_mark_register = NULL;
static gc_hook_entry_t mark_list;
static gc_hook_entry_t pregc_list;
static gc_hook_entry_t postgc_list;
static gc_hook_cb main_hook;
void
sylvan_gc_add_mark(int order, gc_mark_cb cb)
sylvan_gc_hook_pregc(gc_hook_cb callback)
{
struct reg_gc_mark_entry *e = (struct reg_gc_mark_entry*)malloc(sizeof(struct reg_gc_mark_entry));
e->cb = cb;
e->order = order;
if (gc_mark_register == NULL || gc_mark_register->order>order) {
e->next = gc_mark_register;
gc_mark_register = e;
return;
}
struct reg_gc_mark_entry *f = gc_mark_register;
for (;;) {
if (f->next == NULL) {
e->next = NULL;
f->next = e;
return;
}
if (f->next->order > order) {
e->next = f->next;
f->next = e;
return;
}
f = f->next;
}
gc_hook_entry_t e = (gc_hook_entry_t)malloc(sizeof(struct gc_hook_entry));
e->cb = callback;
e->next = pregc_list;
pregc_list = e;
}
static gc_hook_cb gc_hook;
void
sylvan_gc_set_hook(gc_hook_cb new_hook)
sylvan_gc_hook_postgc(gc_hook_cb callback)
{
gc_hook = new_hook;
gc_hook_entry_t e = (gc_hook_entry_t)malloc(sizeof(struct gc_hook_entry));
e->cb = callback;
e->next = postgc_list;
postgc_list = e;
}
void
sylvan_gc_enable()
sylvan_gc_add_mark(gc_hook_cb callback)
{
gc_enabled = 1;
gc_hook_entry_t e = (gc_hook_entry_t)malloc(sizeof(struct gc_hook_entry));
e->cb = callback;
e->next = mark_list;
mark_list = e;
}
void
sylvan_gc_disable()
sylvan_gc_hook_main(gc_hook_cb callback)
{
gc_enabled = 0;
main_hook = callback;
}
/* Mark hook for cache */
VOID_TASK_0(sylvan_gc_mark_cache)
/**
* Clear the operation cache.
*/
VOID_TASK_IMPL_0(sylvan_clear_cache)
{
/* We simply clear the cache.
* Alternatively, we could implement for example some strategy
* where part of the cache is cleared and part is marked
*/
cache_clear();
cache_clear();
}
/* Default hook */
/**
* Clear the nodes table and mark all referenced nodes.
*
* This does not clear the hash data or rehash the nodes.
* After marking, the "destroy" hooks are called for all unmarked nodes,
* for example to free data of custom MTBDD leaves.
*/
VOID_TASK_IMPL_0(sylvan_clear_and_mark)
{
llmsset_clear_data(nodes);
for (gc_hook_entry_t e = mark_list; e != NULL; e = e->next) {
WRAP(e->cb);
}
llmsset_destroy_unmarked(nodes);
}
/**
* Clear the hash array of the nodes table and rehash all marked buckets.
*/
VOID_TASK_IMPL_0(sylvan_rehash_all)
{
// clear hash array
llmsset_clear_hashes(nodes);
// rehash marked nodes
if (llmsset_rehash(nodes) != 0) {
fprintf(stderr, "sylvan_gc_rehash error: not all nodes could be rehashed!\n");
exit(1);
}
}
/**
* Logic for resizing the nodes table and operation cache
*/
/**
* Helper routine to compute the next size....
*/
size_t
next_size(size_t n)
next_size(size_t current_size)
{
#if SYLVAN_SIZE_FIBONACCI
size_t f1=1, f2=1;
for (;;) {
f2 += f1;
if (f2 > n) return f2;
if (f2 > current_size) return f2;
f1 += f2;
if (f1 > n) return f1;
if (f1 > current_size) return f1;
}
#else
return n*2;
return current_size*2;
#endif
}
/**
* Resizing heuristic that always doubles the tables when running gc (until max).
* The nodes table and operation cache are both resized until their maximum size.
*/
VOID_TASK_IMPL_0(sylvan_gc_aggressive_resize)
{
/**
* Always resize when gc called
*/
size_t max_size = llmsset_get_max_size(nodes);
size_t size = llmsset_get_size(nodes);
if (size < max_size) {
size_t new_size = next_size(size);
if (new_size > max_size) new_size = max_size;
size_t nodes_size = llmsset_get_size(nodes);
size_t nodes_max = llmsset_get_max_size(nodes);
if (nodes_size < nodes_max) {
size_t new_size = next_size(nodes_size);
if (new_size > nodes_max) new_size = nodes_max;
llmsset_set_size(nodes, new_size);
size_t cache_size = cache_getsize();
size_t cache_max = cache_getmaxsize();
if (cache_size < cache_max) {
new_size = next_size(cache_size);
if (new_size > cache_max) new_size = cache_max;
cache_setsize(new_size);
}
}
size_t cache_size = cache_getsize();
size_t cache_max = cache_getmaxsize();
if (cache_size < cache_max) {
size_t new_size = next_size(cache_size);
if (new_size > cache_max) new_size = cache_max;
cache_setsize(new_size);
}
}
VOID_TASK_IMPL_0(sylvan_gc_default_hook)
/**
* Resizing heuristic that only resizes when more than 50% is marked.
* The operation cache is only resized if the nodes table is resized.
*/
VOID_TASK_IMPL_0(sylvan_gc_normal_resize)
{
/**
* Default behavior:
* if we can resize the nodes set, and if we use more than 50%, then increase size
*/
size_t max_size = llmsset_get_max_size(nodes);
size_t size = llmsset_get_size(nodes);
if (size < max_size) {
size_t nodes_size = llmsset_get_size(nodes);
size_t nodes_max = llmsset_get_max_size(nodes);
if (nodes_size < nodes_max) {
size_t marked = llmsset_count_marked(nodes);
if (marked*2 > size) {
size_t new_size = next_size(size);
if (new_size > max_size) new_size = max_size;
if (marked*2 > nodes_size) {
size_t new_size = next_size(nodes_size);
if (new_size > nodes_max) new_size = nodes_max;
llmsset_set_size(nodes, new_size);
// also increase the operation cache
size_t cache_size = cache_getsize();
size_t cache_max = cache_getmaxsize();
if (cache_size < cache_max) {
@ -185,82 +213,163 @@ VOID_TASK_IMPL_0(sylvan_gc_default_hook)
}
}
VOID_TASK_0(sylvan_gc_call_hook)
/**
* Actual implementation of garbage collection
*/
VOID_TASK_0(sylvan_gc_go)
{
// call hook function (resizing, reordering, etc)
WRAP(gc_hook);
sylvan_stats_count(SYLVAN_GC_COUNT);
sylvan_timer_start(SYLVAN_GC);
// call pre gc hooks
for (gc_hook_entry_t e = pregc_list; e != NULL; e = e->next) {
WRAP(e->cb);
}
/*
* This simply clears the cache.
* Alternatively, we could implement for example some strategy
* where part of the cache is cleared and part is marked
*/
CALL(sylvan_clear_cache);
CALL(sylvan_clear_and_mark);
// call hooks for resizing and all that
WRAP(main_hook);
CALL(sylvan_rehash_all);
// call post gc hooks
for (gc_hook_entry_t e = postgc_list; e != NULL; e = e->next) {
WRAP(e->cb);
}
sylvan_timer_stop(SYLVAN_GC);
}
VOID_TASK_0(sylvan_gc_rehash)
/**
* Perform garbage collection
*/
VOID_TASK_IMPL_0(sylvan_gc)
{
// rehash marked nodes
llmsset_rehash(nodes);
if (gc_enabled) {
if (cas(&gc, 0, 1)) {
NEWFRAME(sylvan_gc_go);
gc = 0;
} else {
/* wait for new frame to appear */
while (*(Task* volatile*)&(lace_newframe.t) == 0) {}
lace_yield(__lace_worker, __lace_dq_head);
}
}
}
VOID_TASK_0(sylvan_gc_destroy_unmarked)
/**
* The unique table
*/
llmsset_t nodes;
static size_t table_min = 0, table_max = 0, cache_min = 0, cache_max = 0;
static int
is_power_of_two(size_t size)
{
llmsset_destroy_unmarked(nodes);
return __builtin_popcountll(size) == 1 ? 1 : 0; // size_t is 64-bit
}
VOID_TASK_0(sylvan_gc_go)
void
sylvan_set_sizes(size_t min_tablesize, size_t max_tablesize, size_t min_cachesize, size_t max_cachesize)
{
sylvan_stats_count(SYLVAN_GC_COUNT);
sylvan_timer_start(SYLVAN_GC);
/* Some sanity checks */
if (min_tablesize > max_tablesize) min_tablesize = max_tablesize;
if (min_cachesize > max_cachesize) min_cachesize = max_cachesize;
// clear hash array
llmsset_clear(nodes);
if (!is_power_of_two(min_tablesize) || !is_power_of_two(max_tablesize) ||
!is_power_of_two(min_cachesize) || !is_power_of_two(max_cachesize)) {
fprintf(stderr, "sylvan_set_sizes error: parameters not powers of 2!\n");
exit(1);
}
// call mark functions, hook and rehash
struct reg_gc_mark_entry *e = gc_mark_register;
while (e != NULL) {
WRAP(e->cb);
e = e->next;
if (max_tablesize > 0x0000040000000000) {
fprintf(stderr, "sylvan_set_sizes error: tablesize must be <= 42 bits!\n");
exit(1);
}
sylvan_timer_stop(SYLVAN_GC);
table_min = min_tablesize;
table_max = max_tablesize;
cache_min = min_cachesize;
cache_max = max_cachesize;
}
/* Perform garbage collection */
VOID_TASK_IMPL_0(sylvan_gc)
void
sylvan_set_limits(size_t memorycap, int table_ratio, int initial_ratio)
{
if (!gc_enabled) return;
if (cas(&gc, 0, 1)) {
NEWFRAME(sylvan_gc_go);
gc = 0;
if (table_ratio > 10 || table_ratio < -10) {
fprintf(stderr, "sylvan_set_limits: table_ratio unreasonable (between -10 and 10)\n");
exit(1);
}
size_t max_t = 1;
size_t max_c = 1;
if (table_ratio > 0) {
max_t <<= table_ratio;
} else {
/* wait for new frame to appear */
while (*(Task* volatile*)&(lace_newframe.t) == 0) {}
lace_yield(__lace_worker, __lace_dq_head);
max_c <<= -table_ratio;
}
size_t cur = max_t * 24 + max_c * 36;
if (cur > memorycap) {
fprintf(stderr, "sylvan_set_limits: memory cap incompatible with requested table ratio\n");
}
while (2*cur < memorycap && max_t < 0x0000040000000000) {
max_t *= 2;
max_c *= 2;
cur *= 2;
}
if (initial_ratio < 0) {
fprintf(stderr, "sylvan_set_limits: initial_ratio unreasonable (may not be negative)\n");
exit(1);
}
size_t min_t = max_t, min_c = max_c;
while (initial_ratio > 0 && min_t > 0x1000 && min_c > 0x1000) {
min_t >>= 1;
min_c >>= 1;
initial_ratio--;
}
table_min = min_t;
table_max = max_t;
cache_min = min_c;
cache_max = max_c;
}
/**
* Package init and quit functions
* Initializes Sylvan.
*/
void
sylvan_init_package(size_t tablesize, size_t maxsize, size_t cachesize, size_t max_cachesize)
sylvan_init_package(void)
{
if (tablesize > maxsize) tablesize = maxsize;
if (cachesize > max_cachesize) cachesize = max_cachesize;
if (maxsize > 0x000003ffffffffff) {
fprintf(stderr, "sylvan_init_package error: tablesize must be <= 42 bits!\n");
if (table_max == 0) {
fprintf(stderr, "sylvan_init_package error: table sizes not set (sylvan_set_sizes or sylvan_set_limits)!");
exit(1);
}
nodes = llmsset_create(tablesize, maxsize);
cache_create(cachesize, max_cachesize);
/* Create tables */
nodes = llmsset_create(table_min, table_max);
cache_create(cache_min, cache_max);
/* Initialize garbage collection */
gc = 0;
#if SYLVAN_AGGRESSIVE_RESIZE
gc_hook = TASK(sylvan_gc_aggressive_resize);
main_hook = TASK(sylvan_gc_aggressive_resize);
#else
gc_hook = TASK(sylvan_gc_default_hook);
main_hook = TASK(sylvan_gc_normal_resize);
#endif
sylvan_gc_add_mark(10, TASK(sylvan_gc_mark_cache));
sylvan_gc_add_mark(19, TASK(sylvan_gc_destroy_unmarked));
sylvan_gc_add_mark(20, TASK(sylvan_gc_call_hook));
sylvan_gc_add_mark(30, TASK(sylvan_gc_rehash));
LACE_ME;
sylvan_stats_init();
@ -293,12 +402,36 @@ sylvan_quit()
free(e);
}
while (gc_mark_register != NULL) {
struct reg_gc_mark_entry *e = gc_mark_register;
gc_mark_register = e->next;
while (pregc_list != NULL) {
gc_hook_entry_t e = pregc_list;
pregc_list = e->next;
free(e);
}
while (postgc_list != NULL) {
gc_hook_entry_t e = postgc_list;
postgc_list = e->next;
free(e);
}
while (mark_list != NULL) {
gc_hook_entry_t e = mark_list;
mark_list = e->next;
free(e);
}
cache_free();
llmsset_free(nodes);
}
/**
* Calculate table usage (in parallel)
*/
VOID_TASK_IMPL_2(sylvan_table_usage, size_t*, filled, size_t*, total)
{
size_t tot = llmsset_get_size(nodes);
if (filled != NULL) *filled = llmsset_count_marked(nodes);
if (total != NULL) *total = tot;
}

242
resources/3rdparty/sylvan/src/sylvan_common.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -21,63 +22,194 @@
extern "C" {
#endif /* __cplusplus */
/* Garbage collection test task - t */
#define sylvan_gc_test() YIELD_NEWFRAME()
/**
* Initialize the Sylvan parallel decision diagrams package.
*
* First, Sylvan must know how big the nodes table and cache may be.
* Either use sylvan_set_sizes to explicitly set the table sizes, or use sylvan_set_limits
* to let Sylvan compute the sizes for you.
*
* Then, call sylvan_init_package. This allocates the tables and other support structures.
* Sylvan allocates virtual memory to accommodate the maximum sizes of both tables.
* Initially, Sylvan only uses the minimum sizes.
* During garbage collection, table sizes may be doubled until the maximum size is reached.
*
* Then, call initialization functions for the MTBDD/LDD modules like sylvan_init_mtbdd
* and sylvan_init_ldd.
*
* Memory usage:
* Every node requires 24 bytes memory. (16 bytes data + 8 bytes table overhead)
* Every operation cache entry requires 36 bytes memory. (32 bytes data + 4 bytes table overhead)
*/
void sylvan_init_package(void);
/**
* Explicitly set the sizes of the nodes table and the operation cache.
* The sizes are in numbers of buckets (table and cache entries) and must be powers of two.
* The minimum size is the size initially used.
* The maximum size is the size allocated in virtual memory.
*/
void sylvan_set_sizes(size_t min_tablesize, size_t max_tablesize, size_t min_cachesize, size_t max_cachesize);
// BDD operations
#define CACHE_BDD_ITE (0LL<<40)
#define CACHE_BDD_AND (1LL<<40)
#define CACHE_BDD_XOR (2LL<<40)
#define CACHE_BDD_EXISTS (3LL<<40)
#define CACHE_BDD_AND_EXISTS (4LL<<40)
#define CACHE_BDD_RELNEXT (5LL<<40)
#define CACHE_BDD_RELPREV (6LL<<40)
#define CACHE_BDD_SATCOUNT (7LL<<40)
#define CACHE_BDD_COMPOSE (8LL<<40)
#define CACHE_BDD_RESTRICT (9LL<<40)
#define CACHE_BDD_CONSTRAIN (10LL<<40)
#define CACHE_BDD_CLOSURE (11LL<<40)
#define CACHE_BDD_ISBDD (12LL<<40)
#define CACHE_BDD_SUPPORT (13LL<<40)
#define CACHE_BDD_PATHCOUNT (14LL<<40)
// MDD operations
#define CACHE_MDD_RELPROD (20LL<<40)
#define CACHE_MDD_MINUS (21LL<<40)
#define CACHE_MDD_UNION (22LL<<40)
#define CACHE_MDD_INTERSECT (23LL<<40)
#define CACHE_MDD_PROJECT (24LL<<40)
#define CACHE_MDD_JOIN (25LL<<40)
#define CACHE_MDD_MATCH (26LL<<40)
#define CACHE_MDD_RELPREV (27LL<<40)
#define CACHE_MDD_SATCOUNT (28LL<<40)
#define CACHE_MDD_SATCOUNTL1 (29LL<<40)
#define CACHE_MDD_SATCOUNTL2 (30LL<<40)
// MTBDD operations
#define CACHE_MTBDD_APPLY (40LL<<40)
#define CACHE_MTBDD_UAPPLY (41LL<<40)
#define CACHE_MTBDD_ABSTRACT (42LL<<40)
#define CACHE_MTBDD_ITE (43LL<<40)
#define CACHE_MTBDD_AND_EXISTS (44LL<<40)
#define CACHE_MTBDD_SUPPORT (45LL<<40)
#define CACHE_MTBDD_COMPOSE (46LL<<40)
#define CACHE_MTBDD_EQUAL_NORM (47LL<<40)
#define CACHE_MTBDD_EQUAL_NORM_REL (48LL<<40)
#define CACHE_MTBDD_MINIMUM (49LL<<40)
#define CACHE_MTBDD_MAXIMUM (50LL<<40)
#define CACHE_MTBDD_LEQ (51LL<<40)
#define CACHE_MTBDD_LESS (52LL<<40)
#define CACHE_MTBDD_GEQ (53LL<<40)
#define CACHE_MTBDD_GREATER (54LL<<40)
#define CACHE_MTBDD_NONZERO_COUNT (55LL<<40)
/**
* Registration of quit functions
*/
typedef void (*quit_cb)();
/**
* Implicitly compute and set the sizes of the nodes table and the operation cache.
*
* This function computes max_tablesize and max_cachesize to fit the memory cap.
* The memory cap is in bytes.
*
* The parameter table_ratio controls the ratio between the nodes table and the cache.
* For the value 0, both tables are of the same size.
* For values 1, 2, 3 ... the nodes table will be 2x, 4x, 8x ... as big as the cache
* For values -1, -2, -3 ... the cache will be 2x, 4x, 8x ... as big as the nodes table
*
* The parameter initial_ratio controls how much smaller the initial table sizes are.
* For values of 1, 2, 3, 4 the tables will initially be 2, 4, 8, 16 times smaller.
*/
void sylvan_set_limits(size_t memory_cap, int table_ratio, int initial_ratio);
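/*
 * Example (illustrative sketch): a typical start-up and shutdown sequence.
 * The worker count, memory cap and ratios are arbitrary example values; with
 * a 512 MB cap and table_ratio 1 the nodes table gets twice as many entries
 * as the cache (24 bytes per node, 36 bytes per cache entry, see above).
 *
 *   lace_init(2, 0);                         // 2 workers, default deque size
 *   lace_startup(0, NULL, NULL);             // default stack size, no startup task
 *   sylvan_set_limits(512*1024*1024, 1, 5);  // 512 MB cap, start 32x smaller
 *   sylvan_init_package();
 *   sylvan_init_mtbdd();                     // and/or sylvan_init_bdd, sylvan_init_ldd
 *   // ... build and manipulate decision diagrams ...
 *   sylvan_quit();
 *   lace_exit();
 */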
/**
* Frees all Sylvan data (also calls the quit() functions of BDD/LDD parts)
*/
void sylvan_quit(void);
/**
* Registers a hook callback called during sylvan_quit()
*/
typedef void (*quit_cb)(void);
void sylvan_register_quit(quit_cb cb);
/**
* Return number of occupied buckets in nodes table and total number of buckets.
*/
VOID_TASK_DECL_2(sylvan_table_usage, size_t*, size_t*);
#define sylvan_table_usage(filled, total) (CALL(sylvan_table_usage, filled, total))
/**
* GARBAGE COLLECTION
*
* Garbage collection is performed in a new Lace frame, interrupting all ongoing work
* until garbage collection is completed.
*
* By default, garbage collection is triggered when no new nodes can be added to the nodes table.
* This is detected when there are no more available buckets in the bounded probe sequence.
* Garbage collection can also be triggered manually with sylvan_gc()
*
* Garbage collection procedure:
* 1) All installed pre_gc hooks are called.
* See sylvan_gc_hook_pregc to add hooks.
* 2) The operation cache is cleared.
* 3) The nodes table (data part) is cleared.
* 4) All nodes are marked (to be rehashed) using the various marking callbacks.
* See sylvan_gc_add_mark to add marking callbacks.
* Afterwards, the ondead hook is called for all now-dead nodes with the custom flag set.
* 5) The main gc hook is called. The function of this hook is to perform resizing.
* The default implementation doubles the nodes table and operation cache sizes.
* See sylvan_gc_hook_main to set the hook.
* 6) The nodes table (hash part) is cleared.
* 7) All marked nodes are rehashed.
* 8) All installed post_gc hooks are called.
* See sylvan_gc_hook_postgc to add hooks.
*
* For parts of the garbage collection process, specific methods exist.
* - sylvan_clear_cache() clears the operation cache (step 2)
* - sylvan_clear_and_mark() performs steps 3 and 4.
* - sylvan_rehash_all() performs steps 6 and 7.
*/
/**
* Trigger garbage collection manually.
*/
VOID_TASK_DECL_0(sylvan_gc);
#define sylvan_gc() (CALL(sylvan_gc))
/**
* Enable or disable garbage collection.
*
* This affects both automatic and manual garbage collection, i.e.,
* calling sylvan_gc() while garbage collection is disabled does not have any effect.
* If no new nodes can be added, Sylvan will write an error and abort.
*/
void sylvan_gc_enable(void);
void sylvan_gc_disable(void);
/**
* Test if garbage collection must happen now.
* This is just a call to the Lace framework to see if NEWFRAME has been used.
* Before calling this, make sure all used BDDs are referenced.
*/
#define sylvan_gc_test() YIELD_NEWFRAME()
/**
* Clear the operation cache.
*/
VOID_TASK_DECL_0(sylvan_clear_cache);
#define sylvan_clear_cache() CALL(sylvan_clear_cache)
/**
* Clear the nodes table (data part) and mark all nodes with the marking mechanisms.
*/
VOID_TASK_DECL_0(sylvan_clear_and_mark);
#define sylvan_clear_and_mark() CALL(sylvan_clear_and_mark)
/**
* Clear the nodes table (hash part) and rehash all marked nodes.
*/
VOID_TASK_DECL_0(sylvan_rehash_all);
#define sylvan_rehash_all() CALL(sylvan_rehash_all)
/**
* Callback type
*/
LACE_TYPEDEF_CB(void, gc_hook_cb);
/**
* Add a hook that is called before garbage collection begins.
*/
void sylvan_gc_hook_pregc(gc_hook_cb callback);
/**
* Add a hook that is called after garbage collection is finished.
*/
void sylvan_gc_hook_postgc(gc_hook_cb callback);
/**
* Replace the hook called between node marking and rehashing.
* Typically, the hook resizes the hash table and operation cache according to some heuristic.
*/
void sylvan_gc_hook_main(gc_hook_cb callback);
/**
* Add a marking mechanism.
*
* The mark_cb callback is called during garbage collection and should call the
* appropriate recursive marking functions for the decision diagram nodes, for example
* mtbdd_gc_mark_rec() for MTBDDs or lddmc_gc_mark_rec() for LDDs.
*
* The sylvan_count_refs() function uses the count_cb callbacks to compute the number
* of references.
*/
void sylvan_gc_add_mark(gc_hook_cb mark_cb);
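/*
 * Example (illustrative sketch): a post-gc hook that reports table usage.
 * VOID_TASK_0 and TASK are the Lace macros used throughout Sylvan; the hook
 * name is hypothetical.
 *
 *   VOID_TASK_0(report_table_usage)
 *   {
 *       size_t filled, total;
 *       sylvan_table_usage(&filled, &total);
 *       fprintf(stderr, "gc done: %zu of %zu buckets in use\n", filled, total);
 *   }
 *
 *   // during initialization, after sylvan_init_package():
 *   sylvan_gc_hook_postgc(TASK(report_table_usage));
 */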
/**
* One of the hooks for resizing behavior.
* Default if SYLVAN_AGGRESSIVE_RESIZE is set.
* Always double size on gc() until maximum reached.
* Use sylvan_gc_hook_main() to set this heuristic.
*/
VOID_TASK_DECL_0(sylvan_gc_aggressive_resize);
/**
* One of the hooks for resizing behavior.
* Default if SYLVAN_AGGRESSIVE_RESIZE is not set.
* Double size on gc() whenever >50% is used.
* Use sylvan_gc_hook_main() to set this heuristic.
*/
VOID_TASK_DECL_0(sylvan_gc_normal_resize);
#ifdef __cplusplus
}
#endif /* __cplusplus */

0
resources/3rdparty/sylvan/src/sylvan_config.h

211
resources/3rdparty/sylvan/src/sylvan_gmp.c

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,22 +15,16 @@
* limitations under the License.
*/
#include <sylvan_config.h>
#include <sylvan_int.h>
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan.h>
#include <sylvan_common.h>
#include <sylvan_mtbdd_int.h>
#include <sylvan_gmp.h>
#include <gmp.h>
static uint32_t gmp_type;
/**
* helper function for hash
@ -106,8 +101,50 @@ gmp_destroy(uint64_t val)
free((void*)val);
}
static uint32_t gmp_type;
static uint64_t CACHE_GMP_AND_EXISTS;
static char*
gmp_to_str(int comp, uint64_t val, char *buf, size_t buflen)
{
mpq_ptr op = (mpq_ptr)val;
size_t minsize = mpz_sizeinbase(mpq_numref(op), 10) + mpz_sizeinbase (mpq_denref(op), 10) + 3;
if (buflen >= minsize) return mpq_get_str(buf, 10, op);
else return mpq_get_str(NULL, 10, op);
(void)comp;
}
static int
gmp_write_binary(FILE* out, uint64_t val)
{
mpq_ptr op = (mpq_ptr)val;
mpz_t i;
mpz_init(i);
mpq_get_num(i, op);
if (mpz_out_raw(out, i) == 0) return -1;
mpq_get_den(i, op);
if (mpz_out_raw(out, i) == 0) return -1;
mpz_clear(i);
return 0;
}
static int
gmp_read_binary(FILE* in, uint64_t *val)
{
mpq_ptr mres = (mpq_ptr)malloc(sizeof(__mpq_struct));
mpq_init(mres);
mpz_t i;
mpz_init(i);
if (mpz_inp_raw(i, in) == 0) return -1;
mpq_set_num(mres, i);
if (mpz_inp_raw(i, in) == 0) return -1;
mpq_set_den(mres, i);
mpz_clear(i);
*(mpq_ptr*)val = mres;
return 0;
}
/**
* Initialize gmp custom leaves
@ -115,9 +152,15 @@ static uint64_t CACHE_GMP_AND_EXISTS;
void
gmp_init()
{
/* Register custom leaf 3 */
gmp_type = mtbdd_register_custom_leaf(gmp_hash, gmp_equals, gmp_create, gmp_destroy);
CACHE_GMP_AND_EXISTS = cache_next_opid();
/* Register custom leaf */
gmp_type = sylvan_mt_create_type();
sylvan_mt_set_hash(gmp_type, gmp_hash);
sylvan_mt_set_equals(gmp_type, gmp_equals);
sylvan_mt_set_create(gmp_type, gmp_create);
sylvan_mt_set_destroy(gmp_type, gmp_destroy);
sylvan_mt_set_to_str(gmp_type, gmp_to_str);
sylvan_mt_set_write_binary(gmp_type, gmp_write_binary);
sylvan_mt_set_read_binary(gmp_type, gmp_read_binary);
}
/**
@ -144,6 +187,8 @@ TASK_IMPL_2(MTBDD, gmp_op_plus, MTBDD*, pa, MTBDD*, pb)
/* If both leaves, compute plus */
if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
assert(mtbdd_gettype(a) == gmp_type && mtbdd_gettype(b) == gmp_type);
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b);
@ -178,6 +223,8 @@ TASK_IMPL_2(MTBDD, gmp_op_minus, MTBDD*, pa, MTBDD*, pb)
/* If both leaves, compute plus */
if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
assert(mtbdd_gettype(a) == gmp_type && mtbdd_gettype(b) == gmp_type);
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b);
@ -210,6 +257,8 @@ TASK_IMPL_2(MTBDD, gmp_op_times, MTBDD*, pa, MTBDD*, pb)
/* Handle multiplication of leaves */
if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
assert(mtbdd_gettype(a) == gmp_type && mtbdd_gettype(b) == gmp_type);
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b);
@ -244,6 +293,8 @@ TASK_IMPL_2(MTBDD, gmp_op_divide, MTBDD*, pa, MTBDD*, pb)
/* Handle division of leaves */
if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
assert(mtbdd_gettype(a) == gmp_type && mtbdd_gettype(b) == gmp_type);
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b);
@ -275,6 +326,8 @@ TASK_IMPL_2(MTBDD, gmp_op_min, MTBDD*, pa, MTBDD*, pb)
/* Compute result for leaves */
if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
assert(mtbdd_gettype(a) == gmp_type && mtbdd_gettype(b) == gmp_type);
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b);
int cmp = mpq_cmp(ma, mb);
@ -306,6 +359,8 @@ TASK_IMPL_2(MTBDD, gmp_op_max, MTBDD*, pa, MTBDD*, pb)
/* Compute result for leaves */
if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
assert(mtbdd_gettype(a) == gmp_type && mtbdd_gettype(b) == gmp_type);
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b);
int cmp = mpq_cmp(ma, mb);
@ -331,6 +386,8 @@ TASK_IMPL_2(MTBDD, gmp_op_neg, MTBDD, dd, size_t, p)
/* Compute result for leaf */
if (mtbdd_isleaf(dd)) {
assert(mtbdd_gettype(dd) == gmp_type);
mpq_ptr m = (mpq_ptr)mtbdd_getvalue(dd);
mpq_t mres;
@ -355,6 +412,8 @@ TASK_IMPL_2(MTBDD, gmp_op_abs, MTBDD, dd, size_t, p)
/* Compute result for leaf */
if (mtbdd_isleaf(dd)) {
assert(mtbdd_gettype(dd) == gmp_type);
mpq_ptr m = (mpq_ptr)mtbdd_getvalue(dd);
mpq_t mres;
@ -435,6 +494,8 @@ TASK_2(MTBDD, gmp_op_threshold_d, MTBDD, a, size_t, svalue)
/* Compute result */
if (mtbdd_isleaf(a)) {
assert(mtbdd_gettype(a) == gmp_type);
double value = *(double*)&svalue;
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
return mpq_get_d(ma) >= value ? mtbdd_true : mtbdd_false;
@ -453,6 +514,8 @@ TASK_2(MTBDD, gmp_op_strict_threshold_d, MTBDD, a, size_t, svalue)
/* Compute result */
if (mtbdd_isleaf(a)) {
assert(mtbdd_gettype(a) == gmp_type);
double value = *(double*)&svalue;
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
return mpq_get_d(ma) > value ? mtbdd_true : mtbdd_false;
@ -484,6 +547,8 @@ TASK_IMPL_2(MTBDD, gmp_op_threshold, MTBDD*, pa, MTBDD*, pb)
/* Handle comparison of leaves */
if (mtbdd_isleaf(a)) {
assert(mtbdd_gettype(a) == gmp_type);
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b);
int cmp = mpq_cmp(ma, mb);
@ -506,6 +571,8 @@ TASK_IMPL_2(MTBDD, gmp_op_strict_threshold, MTBDD*, pa, MTBDD*, pb)
/* Handle comparison of leaves */
if (mtbdd_isleaf(a)) {
assert(mtbdd_gettype(a) == gmp_type);
mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a);
mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b);
int cmp = mpq_cmp(ma, mb);
@ -519,7 +586,7 @@ TASK_IMPL_2(MTBDD, gmp_op_strict_threshold, MTBDD*, pa, MTBDD*, pb)
* Multiply <a> and <b>, and abstract variables <vars> using summation.
* This is similar to the "and_exists" operation in BDDs.
*/
TASK_IMPL_3(MTBDD, gmp_and_exists, MTBDD, a, MTBDD, b, MTBDD, v)
TASK_IMPL_3(MTBDD, gmp_and_abstract_plus, MTBDD, a, MTBDD, b, MTBDD, v)
{
/* Check terminal cases */
@ -541,8 +608,14 @@ TASK_IMPL_3(MTBDD, gmp_and_exists, MTBDD, a, MTBDD, b, MTBDD, v)
/* Maybe perform garbage collection */
sylvan_gc_test();
/* Count operation */
sylvan_stats_count(MTBDD_AND_ABSTRACT_PLUS);
/* Check cache. Note that we do this now, since the times operator might swap a and b (commutative) */
if (cache_get3(CACHE_GMP_AND_EXISTS, a, b, v, &result)) return result;
if (cache_get3(CACHE_MTBDD_AND_ABSTRACT_PLUS, a, b, v, &result)) {
sylvan_stats_count(MTBDD_AND_ABSTRACT_PLUS_CACHED);
return result;
}
/* Now, v is not a constant, and either a or b is not a constant */
@ -560,7 +633,7 @@ TASK_IMPL_3(MTBDD, gmp_and_exists, MTBDD, a, MTBDD, b, MTBDD, v)
if (vv < var) {
/* Recursive, then abstract result */
result = CALL(gmp_and_exists, a, b, node_gethigh(v, nv));
result = CALL(gmp_and_abstract_plus, a, b, node_gethigh(v, nv));
mtbdd_refs_push(result);
result = mtbdd_apply(result, result, TASK(gmp_op_plus));
mtbdd_refs_pop(1);
@ -574,22 +647,112 @@ TASK_IMPL_3(MTBDD, gmp_and_exists, MTBDD, a, MTBDD, b, MTBDD, v)
if (vv == var) {
/* Recursive, then abstract result */
mtbdd_refs_spawn(SPAWN(gmp_and_exists, ahigh, bhigh, node_gethigh(v, nv)));
MTBDD low = mtbdd_refs_push(CALL(gmp_and_exists, alow, blow, node_gethigh(v, nv)));
MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(gmp_and_exists)));
mtbdd_refs_spawn(SPAWN(gmp_and_abstract_plus, ahigh, bhigh, node_gethigh(v, nv)));
MTBDD low = mtbdd_refs_push(CALL(gmp_and_abstract_plus, alow, blow, node_gethigh(v, nv)));
MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(gmp_and_abstract_plus)));
result = CALL(mtbdd_apply, low, high, TASK(gmp_op_plus));
mtbdd_refs_pop(2);
} else /* vv > v */ {
/* Recursive, then create node */
mtbdd_refs_spawn(SPAWN(gmp_and_exists, ahigh, bhigh, v));
MTBDD low = mtbdd_refs_push(CALL(gmp_and_exists, alow, blow, v));
MTBDD high = mtbdd_refs_sync(SYNC(gmp_and_exists));
mtbdd_refs_spawn(SPAWN(gmp_and_abstract_plus, ahigh, bhigh, v));
MTBDD low = mtbdd_refs_push(CALL(gmp_and_abstract_plus, alow, blow, v));
MTBDD high = mtbdd_refs_sync(SYNC(gmp_and_abstract_plus));
mtbdd_refs_pop(1);
result = mtbdd_makenode(var, low, high);
}
}
/* Store in cache */
cache_put3(CACHE_GMP_AND_EXISTS, a, b, v, result);
if (cache_put3(CACHE_MTBDD_AND_ABSTRACT_PLUS, a, b, v, result)) {
sylvan_stats_count(MTBDD_AND_ABSTRACT_PLUS_CACHEDPUT);
}
return result;
}
/**
* Multiply <a> and <b>, and abstract variables <vars> by taking the maximum.
*/
TASK_IMPL_3(MTBDD, gmp_and_abstract_max, MTBDD, a, MTBDD, b, MTBDD, v)
{
/* Check terminal cases */
/* If v == true, then <vars> is an empty set */
if (v == mtbdd_true) return mtbdd_apply(a, b, TASK(gmp_op_times));
/* Try the times operator on a and b */
MTBDD result = CALL(gmp_op_times, &a, &b);
if (result != mtbdd_invalid) {
/* Times operator successful, store reference (for garbage collection) */
mtbdd_refs_push(result);
/* ... and perform abstraction */
result = mtbdd_abstract(result, v, TASK(gmp_abstract_op_max));
mtbdd_refs_pop(1);
/* Note that the operation cache is used in mtbdd_abstract */
return result;
}
/* Now, v is not a constant, and either a or b is not a constant */
/* Get top variable */
int la = mtbdd_isleaf(a);
int lb = mtbdd_isleaf(b);
mtbddnode_t na = la ? 0 : MTBDD_GETNODE(a);
mtbddnode_t nb = lb ? 0 : MTBDD_GETNODE(b);
uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na);
uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb);
uint32_t var = va < vb ? va : vb;
mtbddnode_t nv = MTBDD_GETNODE(v);
uint32_t vv = mtbddnode_getvariable(nv);
while (vv < var) {
/* we can skip variables, because max(r,r) = r */
v = node_high(v, nv);
if (v == mtbdd_true) return mtbdd_apply(a, b, TASK(gmp_op_times));
nv = MTBDD_GETNODE(v);
vv = mtbddnode_getvariable(nv);
}
/* Maybe perform garbage collection */
sylvan_gc_test();
/* Count operation */
sylvan_stats_count(MTBDD_AND_ABSTRACT_MAX);
/* Check cache. Note that we do this now, since the times operator might swap a and b (commutative) */
if (cache_get3(CACHE_MTBDD_AND_ABSTRACT_MAX, a, b, v, &result)) {
sylvan_stats_count(MTBDD_AND_ABSTRACT_MAX_CACHED);
return result;
}
/* Get cofactors */
MTBDD alow, ahigh, blow, bhigh;
alow = (!la && va == var) ? node_getlow(a, na) : a;
ahigh = (!la && va == var) ? node_gethigh(a, na) : a;
blow = (!lb && vb == var) ? node_getlow(b, nb) : b;
bhigh = (!lb && vb == var) ? node_gethigh(b, nb) : b;
if (vv == var) {
/* Recursive, then abstract result */
mtbdd_refs_spawn(SPAWN(gmp_and_abstract_max, ahigh, bhigh, node_gethigh(v, nv)));
MTBDD low = mtbdd_refs_push(CALL(gmp_and_abstract_max, alow, blow, node_gethigh(v, nv)));
MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(gmp_and_abstract_max)));
result = CALL(mtbdd_apply, low, high, TASK(gmp_op_max));
mtbdd_refs_pop(2);
} else /* vv > v */ {
/* Recursive, then create node */
mtbdd_refs_spawn(SPAWN(gmp_and_abstract_max, ahigh, bhigh, v));
MTBDD low = mtbdd_refs_push(CALL(gmp_and_abstract_max, alow, blow, v));
MTBDD high = mtbdd_refs_sync(SYNC(gmp_and_abstract_max));
mtbdd_refs_pop(1);
result = mtbdd_makenode(var, low, high);
}
/* Store in cache */
if (cache_put3(CACHE_MTBDD_AND_ABSTRACT_MAX, a, b, v, result)) {
sylvan_stats_count(MTBDD_AND_ABSTRACT_MAX_CACHEDPUT);
}
return result;
}

16
resources/3rdparty/sylvan/src/sylvan_gmp.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -31,7 +32,7 @@ extern "C" {
/**
* Initialize GMP custom leaves
*/
void gmp_init();
void gmp_init(void);
/**
* Create MPQ leaf
@ -146,8 +147,15 @@ TASK_DECL_2(MTBDD, gmp_op_abs, MTBDD, size_t);
* Multiply <a> and <b>, and abstract variables <vars> using summation.
* This is similar to the "and_exists" operation in BDDs.
*/
TASK_DECL_3(MTBDD, gmp_and_exists, MTBDD, MTBDD, MTBDD);
#define gmp_and_exists(a, b, vars) CALL(gmp_and_exists, a, b, vars)
TASK_DECL_3(MTBDD, gmp_and_abstract_plus, MTBDD, MTBDD, MTBDD);
#define gmp_and_abstract_plus(a, b, vars) CALL(gmp_and_abstract_plus, a, b, vars)
#define gmp_and_exists gmp_and_abstract_plus
/**
* Multiply <a> and <b>, and abstract variables <vars> by taking the maximum.
*/
TASK_DECL_3(MTBDD, gmp_and_abstract_max, MTBDD, MTBDD, MTBDD);
#define gmp_and_abstract_max(a, b, vars) CALL(gmp_and_abstract_max, a, b, vars)
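/*
 * Example (illustrative sketch): multiply two GMP-valued MTBDDs and sum out
 * the variables in `vars` (roughly a symbolic matrix-vector product). `m`, `v`
 * and `vars` are assumed to exist; `vars` is the cube (conjunction) of the
 * variables to abstract. Call from a Lace context (e.g. after LACE_ME).
 *
 *   gmp_init();  // once, after sylvan_init_mtbdd()
 *   MTBDD result = gmp_and_abstract_plus(m, v, vars);
 */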
/**
* Convert to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise;

106
resources/3rdparty/sylvan/src/sylvan_int.h

@ -0,0 +1,106 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Internals of Sylvan
*/
#include <sylvan.h>
#include <sylvan_cache.h>
#include <sylvan_table.h>
#include <sylvan_stats.h>
#ifndef SYLVAN_INT_H
#define SYLVAN_INT_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
* Nodes table.
*/
extern llmsset_t nodes;
/**
* Macros for all operation identifiers for the operation cache
*/
// BDD operations
#define CACHE_BDD_ITE (0LL<<40)
#define CACHE_BDD_AND (1LL<<40)
#define CACHE_BDD_XOR (2LL<<40)
#define CACHE_BDD_EXISTS (3LL<<40)
#define CACHE_BDD_PROJECT (4LL<<40)
#define CACHE_BDD_AND_EXISTS (5LL<<40)
#define CACHE_BDD_AND_PROJECT (6LL<<40)
#define CACHE_BDD_RELNEXT (7LL<<40)
#define CACHE_BDD_RELPREV (8LL<<40)
#define CACHE_BDD_SATCOUNT (9LL<<40)
#define CACHE_BDD_COMPOSE (10LL<<40)
#define CACHE_BDD_RESTRICT (11LL<<40)
#define CACHE_BDD_CONSTRAIN (12LL<<40)
#define CACHE_BDD_CLOSURE (13LL<<40)
#define CACHE_BDD_ISBDD (14LL<<40)
#define CACHE_BDD_SUPPORT (15LL<<40)
#define CACHE_BDD_PATHCOUNT (16LL<<40)
// MDD operations
#define CACHE_MDD_RELPROD (20LL<<40)
#define CACHE_MDD_MINUS (21LL<<40)
#define CACHE_MDD_UNION (22LL<<40)
#define CACHE_MDD_INTERSECT (23LL<<40)
#define CACHE_MDD_PROJECT (24LL<<40)
#define CACHE_MDD_JOIN (25LL<<40)
#define CACHE_MDD_MATCH (26LL<<40)
#define CACHE_MDD_RELPREV (27LL<<40)
#define CACHE_MDD_SATCOUNT (28LL<<40)
#define CACHE_MDD_SATCOUNTL1 (29LL<<40)
#define CACHE_MDD_SATCOUNTL2 (30LL<<40)
// MTBDD operations
#define CACHE_MTBDD_APPLY (40LL<<40)
#define CACHE_MTBDD_UAPPLY (41LL<<40)
#define CACHE_MTBDD_ABSTRACT (42LL<<40)
#define CACHE_MTBDD_ITE (43LL<<40)
#define CACHE_MTBDD_AND_ABSTRACT_PLUS (44LL<<40)
#define CACHE_MTBDD_AND_ABSTRACT_MAX (45LL<<40)
#define CACHE_MTBDD_SUPPORT (46LL<<40)
#define CACHE_MTBDD_COMPOSE (47LL<<40)
#define CACHE_MTBDD_EQUAL_NORM (48LL<<40)
#define CACHE_MTBDD_EQUAL_NORM_REL (49LL<<40)
#define CACHE_MTBDD_MINIMUM (50LL<<40)
#define CACHE_MTBDD_MAXIMUM (51LL<<40)
#define CACHE_MTBDD_LEQ (52LL<<40)
#define CACHE_MTBDD_LESS (53LL<<40)
#define CACHE_MTBDD_GEQ (54LL<<40)
#define CACHE_MTBDD_GREATER (55LL<<40)
#define CACHE_MTBDD_EVAL_COMPOSE (56LL<<40)
#define CACHE_MTBDD_NONZERO_COUNT (57LL<<40)
#define CACHE_MTBDD_AND_EXISTS_RF (58LL<<40)
#define CACHE_MTBDD_MINIMUM_RF (59LL<<40)
#define CACHE_MTBDD_MAXIMUM_RF (60LL<<40)
#ifdef __cplusplus
}
#endif /* __cplusplus */
#include <sylvan_mtbdd_int.h>
#include <sylvan_ldd_int.h>
#endif

491
resources/3rdparty/sylvan/src/sylvan_ldd.c
File diff suppressed because it is too large

9
resources/3rdparty/sylvan/src/sylvan_ldd.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2014 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -30,7 +31,7 @@ typedef uint64_t MDD; // Note: low 40 bits only
#define lddmc_true ((MDD)1)
/* Initialize LDD functionality */
void sylvan_init_ldd();
void sylvan_init_ldd(void);
/* Primitives */
MDD lddmc_makenode(uint32_t value, MDD ifeq, MDD ifneq);
@ -61,7 +62,7 @@ VOID_TASK_DECL_1(lddmc_gc_mark_rec, MDD)
#define lddmc_gc_mark_rec(mdd) CALL(lddmc_gc_mark_rec, mdd)
/* Return the number of external references */
size_t lddmc_count_refs();
size_t lddmc_count_refs(void);
/* Mark MDD for "notify on dead" */
#define lddmc_notify_ondead(mdd) llmsset_notify_ondead(nodes, mdd)
@ -227,7 +228,7 @@ TASK_DECL_4(MDD, lddmc_compose, MDD, lddmc_compose_cb, void*, int);
size_t lddmc_serialize_add(MDD mdd);
size_t lddmc_serialize_get(MDD mdd);
MDD lddmc_serialize_get_reversed(size_t value);
void lddmc_serialize_reset();
void lddmc_serialize_reset(void);
void lddmc_serialize_totext(FILE *out);
void lddmc_serialize_tofile(FILE *out);
void lddmc_serialize_fromfile(FILE *in);

125
resources/3rdparty/sylvan/src/sylvan_ldd_int.h

@ -0,0 +1,125 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*#include <sylvan_config.h>
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan.h>
#include <sylvan_int.h>
#include <avl.h>
#include <sylvan_refs.h>
#include <sha2.h>
*/
/**
* Internals for LDDs
*/
#ifndef SYLVAN_LDD_INT_H
#define SYLVAN_LDD_INT_H
/**
* LDD node structure
*
* RmRR RRRR RRRR VVVV | VVVV DcDD DDDD DDDD (little endian - in memory)
* VVVV RRRR RRRR RRRm | DDDD DDDD DDDc VVVV (big endian)
*/
typedef struct __attribute__((packed)) mddnode {
uint64_t a, b;
} * mddnode_t; // 16 bytes
#define LDD_GETNODE(mdd) ((mddnode_t)llmsset_index_to_ptr(nodes, mdd))
static inline uint32_t __attribute__((unused))
mddnode_getvalue(mddnode_t n)
{
return *(uint32_t*)((uint8_t*)n+6);
}
static inline uint8_t __attribute__((unused))
mddnode_getmark(mddnode_t n)
{
return n->a & 1;
}
static inline uint8_t __attribute__((unused))
mddnode_getcopy(mddnode_t n)
{
return n->b & 0x10000 ? 1 : 0;
}
static inline uint64_t __attribute__((unused))
mddnode_getright(mddnode_t n)
{
return (n->a & 0x0000ffffffffffff) >> 1;
}
static inline uint64_t __attribute__((unused))
mddnode_getdown(mddnode_t n)
{
return n->b >> 17;
}
static inline void __attribute__((unused))
mddnode_setvalue(mddnode_t n, uint32_t value)
{
*(uint32_t*)((uint8_t*)n+6) = value;
}
static inline void __attribute__((unused))
mddnode_setmark(mddnode_t n, uint8_t mark)
{
n->a = (n->a & 0xfffffffffffffffe) | (mark ? 1 : 0);
}
static inline void __attribute__((unused))
mddnode_setright(mddnode_t n, uint64_t right)
{
n->a = (n->a & 0xffff000000000001) | (right << 1);
}
static inline void __attribute__((unused))
mddnode_setdown(mddnode_t n, uint64_t down)
{
n->b = (n->b & 0x000000000001ffff) | (down << 17);
}
static inline void __attribute__((unused))
mddnode_make(mddnode_t n, uint32_t value, uint64_t right, uint64_t down)
{
n->a = right << 1;
n->b = down << 17;
*(uint32_t*)((uint8_t*)n+6) = value;
}
static inline void __attribute__((unused))
mddnode_makecopy(mddnode_t n, uint64_t right, uint64_t down)
{
n->a = right << 1;
n->b = ((down << 1) | 1) << 16;
}
#endif

266
resources/3rdparty/sylvan/src/sylvan_mt.c

@ -0,0 +1,266 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sylvan_config.h>
#include <assert.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan_mt.h>
#include <sylvan_int.h> // for llmsset*, nodes, sylvan_register_quit
/**
* Handling of custom leaves "registry"
*/
typedef struct
{
sylvan_mt_hash_cb hash_cb;
sylvan_mt_equals_cb equals_cb;
sylvan_mt_create_cb create_cb;
sylvan_mt_destroy_cb destroy_cb;
sylvan_mt_to_str_cb to_str_cb;
sylvan_mt_write_binary_cb write_binary_cb;
sylvan_mt_read_binary_cb read_binary_cb;
} customleaf_t;
static customleaf_t *cl_registry;
static size_t cl_registry_count;
static size_t cl_registry_size;
/**
* Implementation of hooks for llmsset
*/
/**
* Internal helper function
*/
static inline customleaf_t*
sylvan_mt_from_node(uint64_t a, uint64_t b)
{
uint32_t type = a & 0xffffffff;
assert(type < cl_registry_count);
return cl_registry + type;
(void)b;
}
static void
_sylvan_create_cb(uint64_t *a, uint64_t *b)
{
customleaf_t *c = sylvan_mt_from_node(*a, *b);
if (c->create_cb != NULL) c->create_cb(b);
}
static void
_sylvan_destroy_cb(uint64_t a, uint64_t b)
{
// for leaf
customleaf_t *c = sylvan_mt_from_node(a, b);
if (c->destroy_cb != NULL) c->destroy_cb(b);
}
static uint64_t
_sylvan_hash_cb(uint64_t a, uint64_t b, uint64_t seed)
{
customleaf_t *c = sylvan_mt_from_node(a, b);
if (c->hash_cb != NULL) return c->hash_cb(b, seed ^ a);
else return llmsset_hash(a, b, seed);
}
static int
_sylvan_equals_cb(uint64_t a, uint64_t b, uint64_t aa, uint64_t bb)
{
if (a != aa) return 0;
customleaf_t *c = sylvan_mt_from_node(a, b);
if (c->equals_cb != NULL) return c->equals_cb(b, bb);
else return b == bb ? 1 : 0;
}
uint32_t
sylvan_mt_create_type()
{
if (cl_registry_count == cl_registry_size) {
// resize registry array
cl_registry_size += 8;
cl_registry = (customleaf_t *)realloc(cl_registry, sizeof(customleaf_t) * (cl_registry_size));
memset(cl_registry + cl_registry_count, 0, sizeof(customleaf_t) * (cl_registry_size-cl_registry_count));
}
return cl_registry_count++;
}
void sylvan_mt_set_hash(uint32_t type, sylvan_mt_hash_cb hash_cb)
{
customleaf_t *c = cl_registry + type;
c->hash_cb = hash_cb;
}
void sylvan_mt_set_equals(uint32_t type, sylvan_mt_equals_cb equals_cb)
{
customleaf_t *c = cl_registry + type;
c->equals_cb = equals_cb;
}
void sylvan_mt_set_create(uint32_t type, sylvan_mt_create_cb create_cb)
{
customleaf_t *c = cl_registry + type;
c->create_cb = create_cb;
}
void sylvan_mt_set_destroy(uint32_t type, sylvan_mt_destroy_cb destroy_cb)
{
customleaf_t *c = cl_registry + type;
c->destroy_cb = destroy_cb;
}
void sylvan_mt_set_to_str(uint32_t type, sylvan_mt_to_str_cb to_str_cb)
{
customleaf_t *c = cl_registry + type;
c->to_str_cb = to_str_cb;
}
void sylvan_mt_set_write_binary(uint32_t type, sylvan_mt_write_binary_cb write_binary_cb)
{
customleaf_t *c = cl_registry + type;
c->write_binary_cb = write_binary_cb;
}
void sylvan_mt_set_read_binary(uint32_t type, sylvan_mt_read_binary_cb read_binary_cb)
{
customleaf_t *c = cl_registry + type;
c->read_binary_cb = read_binary_cb;
}
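/*
 * Example (illustrative sketch): registering a hypothetical custom leaf type.
 * my_hash, my_equals, my_create and my_destroy are placeholder callbacks
 * matching the sylvan_mt_*_cb typedefs; callbacks left unset fall back to the
 * defaults in the _sylvan_*_cb wrappers above.
 *
 *   uint32_t my_type = sylvan_mt_create_type();
 *   sylvan_mt_set_hash(my_type, my_hash);
 *   sylvan_mt_set_equals(my_type, my_equals);
 *   sylvan_mt_set_create(my_type, my_create);
 *   sylvan_mt_set_destroy(my_type, my_destroy);
 */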
/**
* Initialize and quit functions
*/
static int mt_initialized = 0;
static void
sylvan_mt_quit()
{
if (mt_initialized == 0) return;
mt_initialized = 0;
free(cl_registry);
cl_registry = NULL;
cl_registry_count = 0;
cl_registry_size = 0;
}
void
sylvan_init_mt()
{
if (mt_initialized) return;
mt_initialized = 1;
// Register quit handler to free structures
sylvan_register_quit(sylvan_mt_quit);
// Tell llmsset to use our custom hooks
llmsset_set_custom(nodes, _sylvan_hash_cb, _sylvan_equals_cb, _sylvan_create_cb, _sylvan_destroy_cb);
// Initialize data structures
cl_registry_size = 8;
cl_registry = (customleaf_t *)calloc(sizeof(customleaf_t), cl_registry_size);
cl_registry_count = 3; // 0, 1, 2 are taken
}
/**
* Return 1 if the given <type> has a custom hash callback, or 0 otherwise.
*/
int
sylvan_mt_has_custom_hash(uint32_t type)
{
assert(type < cl_registry_count);
customleaf_t *c = cl_registry + type;
return c->hash_cb != NULL ? 1 : 0;
}
/**
* Convert a leaf (possibly complemented) to a string representation.
* If it does not fit in <buf> of size <buflen>, returns a freshly allocated char* array.
*/
char*
sylvan_mt_to_str(int complement, uint32_t type, uint64_t value, char* buf, size_t buflen)
{
assert(type < cl_registry_count);
customleaf_t *c = cl_registry + type;
if (type == 0) {
size_t required = (size_t)snprintf(NULL, 0, "%" PRId64, (int64_t)value);
char *ptr = buf;
if (buflen < required) {
ptr = (char*)malloc(required);
buflen = required;
}
if (ptr != NULL) snprintf(ptr, buflen, "%" PRId64, (int64_t)value);
return ptr;
} else if (type == 1) {
size_t required = (size_t)snprintf(NULL, 0, "%f", *(double*)&value);
char *ptr = buf;
if (buflen < required) {
ptr = (char*)malloc(required);
buflen = required;
}
if (ptr != NULL) snprintf(ptr, buflen, "%f", *(double*)&value);
return ptr;
} else if (type == 2) {
int32_t num = (int32_t)(value>>32);
uint32_t denom = value&0xffffffff;
size_t required = (size_t)snprintf(NULL, 0, "%" PRId32 "/%" PRIu32, num, denom);
char *ptr = buf;
if (buflen < required) {
ptr = (char*)malloc(required);
buflen = required;
}
if (ptr != NULL) snprintf(ptr, buflen, "%" PRId32 "/%" PRIu32, num, denom);
return ptr;
} else if (c->to_str_cb != NULL) {
return c->to_str_cb(complement, value, buf, buflen);
} else {
return NULL;
}
}
uint64_t
sylvan_mt_hash(uint32_t type, uint64_t value, uint64_t seed)
{
assert(type < cl_registry_count);
customleaf_t *c = cl_registry + type;
if (c->hash_cb != NULL) return c->hash_cb(value, seed);
else return llmsset_hash((uint64_t)type, value, seed);
}
int
sylvan_mt_write_binary(uint32_t type, uint64_t value, FILE *out)
{
assert(type < cl_registry_count);
customleaf_t *c = cl_registry + type;
if (c->write_binary_cb != NULL) return c->write_binary_cb(out, value);
else return 0;
}
int
sylvan_mt_read_binary(uint32_t type, uint64_t *value, FILE *in)
{
assert(type < cl_registry_count);
customleaf_t *c = cl_registry + type;
if (c->read_binary_cb != NULL) return c->read_binary_cb(in, value);
else return 0;
}

132
resources/3rdparty/sylvan/src/sylvan_mt.h

@ -0,0 +1,132 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file contains declarations for custom Multi-Terminal support.
*/
#ifndef SYLVAN_MT_H
#define SYLVAN_MT_H
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
* Helper implementation for custom terminal (multi-terminal support)
* Types can implement the following callback functions:
*
* hash(value, seed)
* return hash of value with given seed
* equals(value1, value2)
* return 1 if equal, 0 if not equal
* create(&value)
* optionally allocate object and update value with the pointer
* destroy(value)
* optionally destroy/free value if value points to an allocated object
* to_str(complemented, value, buf, bufsize)
* return string representation of (complemented) value to buf if bufsize large enough,
* otherwise return newly allocated string
* write_binary(fp, value)
* write binary representation of a leaf to given FILE* fp
* return 0 if successful
* read_binary(fp, &value)
* read binary representation of a leaf from given FILE* fp
* treat allocated objects like create (and destroy)
* return 0 if successful
*
* If the 64-bit value already completely describes the leaf, then the functions
* write_binary and read_binary should be set to NULL.
*
* If the 64-bit value is also already a canonical representation, then the functions
* hash, equals, create and destroy should be set to NULL.
*
* Two values that are equal (with equals) must have the same hash (with hash).
*
* A value v obtained via create must be equal to the original value (with equals):
* create(v) => equals(\old(v), \new(v))
*
* NOTE ON EXPECTED LEAF NODE STRUCTURE: the implementation expects leaves in a specific format:
* - 16-byte node { uint64_t a, b; }
* - type == a & 0x00000000ffffffff
* - value == b
*/
typedef uint64_t (*sylvan_mt_hash_cb)(uint64_t, uint64_t);
typedef int (*sylvan_mt_equals_cb)(uint64_t, uint64_t);
typedef void (*sylvan_mt_create_cb)(uint64_t*);
typedef void (*sylvan_mt_destroy_cb)(uint64_t);
typedef char* (*sylvan_mt_to_str_cb)(int, uint64_t, char*, size_t);
typedef int (*sylvan_mt_write_binary_cb)(FILE*, uint64_t);
typedef int (*sylvan_mt_read_binary_cb)(FILE*, uint64_t*);
/**
* Initialize the multi-terminal subsystem
*/
void sylvan_init_mt(void);
/**
* Register a new leaf type.
*/
uint32_t sylvan_mt_create_type(void);
/**
* Set the callback handlers for <type>
*/
void sylvan_mt_set_hash(uint32_t type, sylvan_mt_hash_cb hash_cb);
void sylvan_mt_set_equals(uint32_t type, sylvan_mt_equals_cb equals_cb);
void sylvan_mt_set_create(uint32_t type, sylvan_mt_create_cb create_cb);
void sylvan_mt_set_destroy(uint32_t type, sylvan_mt_destroy_cb destroy_cb);
void sylvan_mt_set_to_str(uint32_t type, sylvan_mt_to_str_cb to_str_cb);
void sylvan_mt_set_write_binary(uint32_t type, sylvan_mt_write_binary_cb write_binary_cb);
void sylvan_mt_set_read_binary(uint32_t type, sylvan_mt_read_binary_cb read_binary_cb);
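/*
 * Illustrative sketch: one way a client might register a string-valued leaf type
 * with the setters declared above. The my_str_* names and the FNV-1a constants are
 * hypothetical; only the sylvan_mt_* calls are taken from this header.
 */
#include <stdlib.h>
#include <string.h>

static uint64_t my_str_hash(uint64_t value, uint64_t seed)
{
    /* FNV-1a over the string bytes, mixed with the given seed */
    uint64_t h = seed ^ 14695981039346656037ULL;
    for (const unsigned char *s = (const unsigned char*)(uintptr_t)value; *s != 0; s++) {
        h = (h ^ *s) * 1099511628211ULL;
    }
    return h;
}

static int my_str_equals(uint64_t left, uint64_t right)
{
    return strcmp((const char*)(uintptr_t)left, (const char*)(uintptr_t)right) == 0 ? 1 : 0;
}

static void my_str_create(uint64_t *value)
{
    /* keep a private copy so the nodes table owns its own memory */
    *value = (uint64_t)(uintptr_t)strdup((const char*)(uintptr_t)*value);
}

static void my_str_destroy(uint64_t value)
{
    free((char*)(uintptr_t)value);
}

static uint32_t my_str_type;

static void my_str_register(void) /* call once after the MTBDD module is initialized */
{
    my_str_type = sylvan_mt_create_type();
    sylvan_mt_set_hash(my_str_type, my_str_hash);
    sylvan_mt_set_equals(my_str_type, my_str_equals);
    sylvan_mt_set_create(my_str_type, my_str_create);
    sylvan_mt_set_destroy(my_str_type, my_str_destroy);
    /* leaves are then created with mtbdd_makeleaf(my_str_type, (uint64_t)(uintptr_t)"...") */
}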
/**
* Returns 1 if the given type implements hash, or 0 otherwise.
* (used when inserting into the unique table)
*/
int sylvan_mt_has_custom_hash(uint32_t type);
/**
* Get a hash for given value (calls hash callback of type).
* If the type does not implement hash, then this is the same hash as used by the unique table.
*/
uint64_t sylvan_mt_hash(uint32_t type, uint64_t value, uint64_t seed);
/**
* Get text representation of leaf (calls to_str callback of type).
*/
char *sylvan_mt_to_str(int complement, uint32_t type, uint64_t value, char *buf, size_t buflen);
/**
* Write a leaf in binary form (calls write_binary callback of type).
*/
int sylvan_mt_write_binary(uint32_t type, uint64_t value, FILE *out);
/**
* Read a leaf in binary form (calls read_binary callback of type).
*/
int sylvan_mt_read_binary(uint32_t type, uint64_t *value, FILE *in);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif

1437
resources/3rdparty/sylvan/src/sylvan_mtbdd.c
File diff suppressed because it is too large

408
resources/3rdparty/sylvan/src/sylvan_mtbdd.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -38,6 +39,8 @@
#ifndef SYLVAN_MTBDD_H
#define SYLVAN_MTBDD_H
#include <sylvan_mt.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
@ -48,6 +51,7 @@ extern "C" {
* For Boolean MTBDDs, this means "not X", for Integer and Real MTBDDs, this means "-X".
*/
typedef uint64_t MTBDD;
typedef uint64_t BDD;
typedef MTBDD MTBDDMAP;
/**
@ -58,12 +62,72 @@ typedef MTBDD MTBDDMAP;
#define mtbdd_true (mtbdd_false|mtbdd_complement)
#define mtbdd_invalid ((MTBDD)0xffffffffffffffffLL)
/* Compatibility */
// #define BDD MTBDD
#define BDDMAP MTBDDMAP
#define BDDSET MTBDD
#define BDDVAR uint32_t
#define sylvan_complement mtbdd_complement
#define sylvan_false mtbdd_false
#define sylvan_true mtbdd_true
#define sylvan_invalid mtbdd_invalid
#define sylvan_init_bdd sylvan_init_mtbdd
#define sylvan_ref mtbdd_ref
#define sylvan_deref mtbdd_deref
#define sylvan_count_refs mtbdd_count_refs
#define sylvan_protect mtbdd_protect
#define sylvan_unprotect mtbdd_unprotect
#define sylvan_count_protected mtbdd_count_protected
#define sylvan_gc_mark_rec mtbdd_gc_mark_rec
#define sylvan_notify_ondead mtbdd_notify_ondead
#define bdd_refs_push mtbdd_refs_push
#define bdd_refs_pop mtbdd_refs_pop
#define bdd_refs_spawn mtbdd_refs_spawn
#define bdd_refs_sync mtbdd_refs_sync
#define sylvan_map_empty mtbdd_map_empty
#define sylvan_map_isempty mtbdd_map_isempty
#define sylvan_map_key mtbdd_map_key
#define sylvan_map_value mtbdd_map_value
#define sylvan_map_next mtbdd_map_next
#define sylvan_map_contains mtbdd_map_contains
#define sylvan_map_count mtbdd_map_count
#define sylvan_map_add mtbdd_map_add
#define sylvan_map_addall mtbdd_map_addall
#define sylvan_map_remove mtbdd_map_remove
#define sylvan_map_removeall mtbdd_map_removeall
#define sylvan_set_empty mtbdd_set_empty
#define sylvan_set_isempty mtbdd_set_isempty
#define sylvan_set_add mtbdd_set_add
#define sylvan_set_addall mtbdd_set_addall
#define sylvan_set_remove mtbdd_set_remove
#define sylvan_set_removeall mtbdd_set_removeall
#define sylvan_set_first mtbdd_set_first
#define sylvan_set_next mtbdd_set_next
#define sylvan_set_fromarray mtbdd_set_fromarray
#define sylvan_set_toarray mtbdd_set_toarray
#define sylvan_set_in mtbdd_set_in
#define sylvan_set_count mtbdd_set_count
#define sylvan_test_isset mtbdd_test_isset
#define sylvan_var mtbdd_getvar
#define sylvan_low mtbdd_getlow
#define sylvan_high mtbdd_gethigh
#define sylvan_makenode mtbdd_makenode
#define sylvan_makemapnode mtbdd_makemapnode
#define sylvan_support mtbdd_support
#define sylvan_test_isbdd mtbdd_test_isvalid
#define sylvan_nodecount mtbdd_nodecount
#define sylvan_printdot mtbdd_printdot
#define sylvan_fprintdot mtbdd_fprintdot
#define sylvan_printsha mtbdd_printsha
#define sylvan_fprintsha mtbdd_fprintsha
#define sylvan_getsha mtbdd_getsha
/**
* Initialize MTBDD functionality.
* This initializes internal and external referencing datastructures,
* and registers them in the garbage collection framework.
*/
void sylvan_init_mtbdd();
void sylvan_init_mtbdd(void);
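/*
 * Sketch of a typical start-up sequence with the new initialization API
 * (sylvan_set_sizes + sylvan_init_package). The Lace calls and the concrete
 * sizes are assumptions; start_sylvan is a hypothetical helper.
 */
static inline void start_sylvan(void)
{
    lace_init(0, 1000000);       /* assumption: 0 = autodetect workers, 1M task deque entries */
    lace_startup(0, NULL, NULL); /* assumption: default stack size, no startup task */
    sylvan_set_sizes(1LL<<22, 1LL<<26, 1LL<<22, 1LL<<26); /* table min/max, cache min/max */
    sylvan_init_package();
    sylvan_init_mtbdd();
}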
/**
* Create a MTBDD terminal of type <type> and value <value>.
@ -74,8 +138,13 @@ MTBDD mtbdd_makeleaf(uint32_t type, uint64_t value);
/**
* Create an internal MTBDD node of Boolean variable <var>, with low edge <low> and high edge <high>.
* <var> is a 24-bit integer.
* Please note that this does NOT check variable ordering!
*/
MTBDD mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high);
MTBDD _mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high);
static inline MTBDD mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high)
{
return low == high ? low : _mtbdd_makenode(var, low, high);
}
/**
* Returns 1 if the MTBDD is a terminal, or 0 otherwise.
@ -120,11 +189,18 @@ double mtbdd_getdouble(MTBDD terminal);
#define mtbdd_getdenom(terminal) ((uint32_t)(mtbdd_getvalue(terminal)&0xffffffff))
/**
* Create the conjunction of variables in arr.
* I.e. arr[0] \and arr[1] \and ... \and arr[length-1]
* Create the conjunction of variables in arr,
* i.e. arr[0] \and arr[1] \and ... \and arr[length-1]
* The variables in arr must be ordered.
*/
MTBDD mtbdd_fromarray(uint32_t* arr, size_t length);
/**
* Given a cube of variables, write each variable to arr.
* WARNING: arr must be sufficiently long!
*/
void mtbdd_toarray(MTBDD set, uint32_t *arr);
/**
* Create a MTBDD cube representing the conjunction of variables in their positive or negative
* form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any).
@ -148,14 +224,20 @@ TASK_DECL_2(double, mtbdd_satcount, MTBDD, size_t);
#define mtbdd_satcount(dd, nvars) CALL(mtbdd_satcount, dd, nvars)
/**
* Count the number of MTBDD leaves (excluding mtbdd_false and mtbdd_true) in the MTBDD
* Count the number of MTBDD leaves (excluding mtbdd_false and mtbdd_true) in the given <count> MTBDDs
*/
size_t mtbdd_leafcount(MTBDD mtbdd);
size_t mtbdd_leafcount_more(const MTBDD *mtbdds, size_t count);
#define mtbdd_leafcount(dd) mtbdd_leafcount_more(&dd, 1)
/**
* Count the number of MTBDD nodes and terminals (excluding mtbdd_false and mtbdd_true) in a MTBDD
* Count the number of MTBDD nodes and terminals (excluding mtbdd_false and mtbdd_true) in the given <count> MTBDDs
*/
size_t mtbdd_nodecount(MTBDD mtbdd);
size_t mtbdd_nodecount_more(const MTBDD *mtbdds, size_t count);
static inline size_t
mtbdd_nodecount(const MTBDD dd) {
return mtbdd_nodecount_more(&dd, 1);
}
/**
* Callback function types for binary ("dyadic") and unary ("monadic") operations.
@ -307,14 +389,21 @@ TASK_DECL_3(MTBDD, mtbdd_abstract_op_max, MTBDD, MTBDD, int);
* <f> must be a Boolean MTBDD (or standard BDD).
*/
TASK_DECL_3(MTBDD, mtbdd_ite, MTBDD, MTBDD, MTBDD);
#define mtbdd_ite(f, g, h) CALL(mtbdd_ite, f, g, h)
#define mtbdd_ite(f, g, h) CALL(mtbdd_ite, f, g, h);
/**
* Multiply <a> and <b>, and abstract variables <vars> using summation.
* This is similar to the "and_exists" operation in BDDs.
*/
TASK_DECL_3(MTBDD, mtbdd_and_exists, MTBDD, MTBDD, MTBDD);
#define mtbdd_and_exists(a, b, vars) CALL(mtbdd_and_exists, a, b, vars)
TASK_DECL_3(MTBDD, mtbdd_and_abstract_plus, MTBDD, MTBDD, MTBDD);
#define mtbdd_and_abstract_plus(a, b, vars) CALL(mtbdd_and_abstract_plus, a, b, vars)
#define mtbdd_and_exists mtbdd_and_abstract_plus
/**
* Multiply <a> and <b>, and abstract variables <vars> by taking the maximum.
*/
TASK_DECL_3(MTBDD, mtbdd_and_abstract_max, MTBDD, MTBDD, MTBDD);
#define mtbdd_and_abstract_max(a, b, vars) CALL(mtbdd_and_abstract_max, a, b, vars)
/**
* Monad that converts double to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise;
@ -429,6 +518,60 @@ typedef int (*mtbdd_enum_filter_cb)(MTBDD);
MTBDD mtbdd_enum_first(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb);
MTBDD mtbdd_enum_next(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb);
/**
* Given an MTBDD <dd> and a cube of variables <variables> expected in <dd>,
* mtbdd_enum_all_first and mtbdd_enum_all_next enumerate all satisfying assignments in <dd> that lead
* to a non-False leaf.
*
* The functions return the leaf (or mtbdd_false if no new satisfying assignment is found) and encodes
* the assignment in the supplied array <arr>, 0 for False and 1 for True.
*
* The supplied array <arr> must be large enough for all variables in <variables>.
*
* Usage:
* MTBDD leaf = mtbdd_enum_all_first(dd, variables, arr, NULL);
* while (leaf != mtbdd_false) {
* .... // do something with arr/leaf
* leaf = mtbdd_enum_all_next(dd, variables, arr, NULL);
* }
*
* The callback is an optional function that returns 0 when the given terminal node should be skipped.
*/
MTBDD mtbdd_enum_all_first(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb);
MTBDD mtbdd_enum_all_next(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb);
/**
* Given a MTBDD <dd>, call <cb> with context <context> for every unique path in <dd> ending in leaf <leaf>.
*
* Usage:
* VOID_TASK_3(cb, mtbdd_enum_trace_t, trace, MTBDD, leaf, void*, context) { ... do something ... }
* mtbdd_enum_par(dd, cb, context);
*/
typedef struct mtbdd_enum_trace {
struct mtbdd_enum_trace *prev;
uint32_t var;
int val; // 0 or 1
} * mtbdd_enum_trace_t;
LACE_TYPEDEF_CB(void, mtbdd_enum_cb, mtbdd_enum_trace_t, MTBDD, void*)
VOID_TASK_DECL_3(mtbdd_enum_par, MTBDD, mtbdd_enum_cb, void*);
#define mtbdd_enum_par(dd, cb, context) CALL(mtbdd_enum_par, dd, cb, context)
/**
* Function composition after partial evaluation.
*
* Given a function F(X) = f, compute the composition F'(X) = g(f) for every assignment to X.
* All variables X in <vars> must appear before all variables in f and g(f).
*
* Usage:
* TASK_2(MTBDD, g, MTBDD, in) { ... return g of <in> ... }
* MTBDD x_vars = ...; // the cube of variables x
* MTBDD result = mtbdd_eval_compose(dd, x_vars, TASK(g));
*/
LACE_TYPEDEF_CB(MTBDD, mtbdd_eval_compose_cb, MTBDD);
TASK_DECL_3(MTBDD, mtbdd_eval_compose, MTBDD, MTBDD, mtbdd_eval_compose_cb);
#define mtbdd_eval_compose(dd, vars, cb) CALL(mtbdd_eval_compose, dd, vars, cb)
/**
* For debugging.
* Tests if all nodes in the MTBDD are correctly ``marked'' in the nodes table.
@ -440,16 +583,221 @@ TASK_DECL_1(int, mtbdd_test_isvalid, MTBDD);
#define mtbdd_test_isvalid(mtbdd) CALL(mtbdd_test_isvalid, mtbdd)
/**
* Write a DOT representation of a MTBDD
* Write a .dot representation of a given MTBDD
* The callback function is required for custom terminals.
*/
typedef void (*print_terminal_label_cb)(FILE *out, uint32_t type, uint64_t value);
void mtbdd_fprintdot(FILE *out, MTBDD mtbdd, print_terminal_label_cb cb);
#define mtbdd_printdot(mtbdd, cb) mtbdd_fprintdot(stdout, mtbdd, cb)
void mtbdd_fprintdot(FILE *out, MTBDD mtbdd);
#define mtbdd_printdot(mtbdd, cb) mtbdd_fprintdot(stdout, mtbdd)
/**
* Write a .dot representation of a given MTBDD, but without complement edges.
*/
void mtbdd_fprintdot_nc(FILE *out, MTBDD mtbdd);
#define mtbdd_printdot_nc(mtbdd, cb) mtbdd_fprintdot_nc(stdout, mtbdd)
/**
* Write a text representation of a leaf to the given file.
*/
void mtbdd_fprint_leaf(FILE *out, MTBDD leaf);
/**
* Write a text representation of a leaf to stdout.
*/
void mtbdd_print_leaf(MTBDD leaf);
/**
* Obtain the textual representation of a leaf.
* The returned result is either equal to the given <buf> (if the result fits)
* or to a newly allocated array (with malloc).
*/
char *mtbdd_leaf_to_str(MTBDD leaf, char *buf, size_t buflen);
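/*
 * Sketch of the intended calling convention: if the text does not fit in the
 * caller's buffer, a heap copy is returned and must be freed by the caller.
 * print_leaf_stdout is a hypothetical helper (free requires <stdlib.h>).
 */
static inline void print_leaf_stdout(MTBDD leaf)
{
    char buf[64];
    char *s = mtbdd_leaf_to_str(leaf, buf, sizeof(buf));
    if (s != NULL) {
        fputs(s, stdout);
        if (s != buf) free(s); /* only the heap-allocated copy is freed */
    }
}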
/**
* Some debugging functions that generate SHA2 hashes of MTBDDs.
* They are independent of where nodes are located in hash tables.
* Note that they are not "perfect", but they can be useful to run easy sanity checks.
*/
/**
* Print SHA2 hash to stdout.
*/
void mtbdd_printsha(MTBDD dd);
/**
* Print SHA2 hash to given file.
*/
void mtbdd_fprintsha(FILE *f, MTBDD dd);
/**
* Obtain SHA2 hash; target array must be at least 65 bytes long.
*/
void mtbdd_getsha(MTBDD dd, char *target);
/**
* Visitor functionality for MTBDDs.
* Visits internal nodes and leaves.
*/
/**
* pre_cb callback: given input MTBDD and context,
* return whether to visit children (if not leaf)
* post_cb callback: given input MTBDD and context
*/
LACE_TYPEDEF_CB(int, mtbdd_visit_pre_cb, MTBDD, void*);
LACE_TYPEDEF_CB(void, mtbdd_visit_post_cb, MTBDD, void*);
/**
* Sequential visit operation
*/
VOID_TASK_DECL_4(mtbdd_visit_seq, MTBDD, mtbdd_visit_pre_cb, mtbdd_visit_post_cb, void*);
#define mtbdd_visit_seq(...) CALL(mtbdd_visit_seq, __VA_ARGS__)
/**
* Parallel visit operation
*/
VOID_TASK_DECL_4(mtbdd_visit_par, MTBDD, mtbdd_visit_pre_cb, mtbdd_visit_post_cb, void*);
#define mtbdd_visit_par(...) CALL(mtbdd_visit_par, __VA_ARGS__)
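/*
 * Sketch: count node visits with the sequential visitor. The count_pre/count_post
 * names are hypothetical; callbacks are passed with the TASK(...) convention that
 * mtbdd_eval_compose uses above. Assumes a running Lace context.
 */
TASK_2(int, count_pre, MTBDD, dd, void*, context)
{
    (*(size_t*)context)++;  /* count this node */
    return 1;               /* and descend into its children */
    (void)dd;
}

VOID_TASK_2(count_post, MTBDD, dd, void*, context)
{
    /* nothing to do after the children were visited */
    (void)dd; (void)context;
}

/* usage:
 *   size_t visits = 0;
 *   mtbdd_visit_seq(dd, TASK(count_pre), TASK(count_post), &visits);
 */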
/**
* Writing MTBDDs to file.
*
* Every node that is to be written is assigned a number, starting from 1,
* such that reading the result in the future can be done in one pass.
*
* We use a skiplist to store the assignment.
*
* The functions mtbdd_writer_tobinary and mtbdd_writer_totext can be used to
* store an array of MTBDDs to binary format or text format.
*
* One could also do the procedure manually instead.
* - call mtbdd_writer_start to allocate the skiplist.
* - call mtbdd_writer_add to add a given MTBDD to the skiplist
* - call mtbdd_writer_writebinary to write all added nodes to a file
* - OR: mtbdd_writer_writetext to write all added nodes in text format
* - call mtbdd_writer_get to obtain the MTBDD identifier as stored in the skiplist
* - call mtbdd_writer_end to free the skiplist
*/
/**
* Write <count> decision diagrams given in <dds> in internal binary form to <file>.
*
* The internal binary format is as follows, to store <count> decision diagrams...
* uint64_t: nodecount -- number of nodes
* <nodecount> times uint128_t: each leaf/node
* uint64_t: count -- number of stored decision diagrams
* <count> times uint64_t: each stored decision diagram
*/
VOID_TASK_DECL_3(mtbdd_writer_tobinary, FILE *, MTBDD *, int);
#define mtbdd_writer_tobinary(file, dds, count) CALL(mtbdd_writer_tobinary, file, dds, count)
/**
* Write <count> decision diagrams given in <dds> in ASCII form to <file>.
* Also supports custom leaves using the leaf_to_str callback.
*
* The text format writes in the same order as the binary format, except...
* [
* node(id, var, low, high), -- for a normal node (no complement on high)
* node(id, var, low, ~high), -- for a normal node (complement on high)
* leaf(id, type, "value"), -- for a leaf (with value between "")
* ],[dd1, dd2, dd3, ...,] -- followed by each stored decision diagram.
*/
VOID_TASK_DECL_3(mtbdd_writer_totext, FILE *, MTBDD *, int);
#define mtbdd_writer_totext(file, dds, count) CALL(mtbdd_writer_totext, file, dds, count)
/**
* Skeleton typedef for the skiplist
*/
typedef struct sylvan_skiplist *sylvan_skiplist_t;
/**
* Allocate a skiplist for writing an MTBDD.
*/
sylvan_skiplist_t mtbdd_writer_start(void);
/**
* Add the given MTBDD to the skiplist.
*/
VOID_TASK_DECL_2(mtbdd_writer_add, sylvan_skiplist_t, MTBDD);
#define mtbdd_writer_add(sl, dd) CALL(mtbdd_writer_add, sl, dd)
/**
* Write all assigned MTBDD nodes in binary format to the file.
*/
void mtbdd_writer_writebinary(FILE *out, sylvan_skiplist_t sl);
/**
* Retrieve the identifier of the given stored MTBDD.
* This is useful if you want to be able to retrieve the stored MTBDD later.
*/
uint64_t mtbdd_writer_get(sylvan_skiplist_t sl, MTBDD dd);
/**
* Free the allocated skiplist.
*/
void mtbdd_writer_end(sylvan_skiplist_t sl);
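/*
 * Sketch of the manual writing procedure for a single decision diagram, using the
 * functions declared above. Assumes a running Lace context; save_one_dd is a
 * hypothetical helper and error handling is omitted.
 */
static inline void save_one_dd(FILE *out, MTBDD dd)
{
    LACE_ME;
    sylvan_skiplist_t sl = mtbdd_writer_start();
    mtbdd_writer_add(sl, dd);               /* assign numbers to the nodes of dd */
    mtbdd_writer_writebinary(out, sl);      /* write the numbered nodes */
    uint64_t id = mtbdd_writer_get(sl, dd); /* identifier of dd in the written data */
    fwrite(&id, sizeof(uint64_t), 1, out);  /* store it so the dd can be found when reading */
    mtbdd_writer_end(sl);
}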
/**
* Reading MTBDDs from file.
*
* The function mtbdd_reader_frombinary is basically the reverse of mtbdd_writer_tobinary.
*
* One can also perform the procedure manually.
* - call mtbdd_reader_readbinary to read the nodes from file
* - call mtbdd_reader_get to obtain the MTBDD for the given identifier as stored in the file.
* - call mtbdd_reader_end to free the array returned by mtbdd_reader_readbinary
*
* Returns 0 if successful, -1 otherwise.
*/
/*
* Read <count> decision diagrams to <dds> from <file> in internal binary form.
*/
TASK_DECL_3(int, mtbdd_reader_frombinary, FILE*, MTBDD*, int);
#define mtbdd_reader_frombinary(file, dds, count) CALL(mtbdd_reader_frombinary, file, dds, count)
/**
* Reading a file earlier written with mtbdd_writer_writebinary
* Returns an array with the conversion from stored identifier to MTBDD
* This array is allocated with malloc and must be freed afterwards.
* Returns NULL if there was an error.
*/
TASK_DECL_1(uint64_t*, mtbdd_reader_readbinary, FILE*);
#define mtbdd_reader_readbinary(file) CALL(mtbdd_reader_readbinary, file)
/**
* Retrieve the MTBDD of the given stored identifier.
*/
MTBDD mtbdd_reader_get(uint64_t* arr, uint64_t identifier);
/**
* Free the allocated translation array
*/
void mtbdd_reader_end(uint64_t *arr);
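/*
 * Sketch of the manual reading procedure, the counterpart of the writer sketch
 * above. Assumes a running Lace context; load_one_dd is a hypothetical helper.
 * Note: the returned MTBDD should be referenced/protected before the next
 * garbage collection.
 */
static inline MTBDD load_one_dd(FILE *in)
{
    LACE_ME;
    uint64_t *arr = mtbdd_reader_readbinary(in);     /* NULL on error */
    if (arr == NULL) return mtbdd_invalid;
    uint64_t id;
    if (fread(&id, sizeof(uint64_t), 1, in) != 1) {
        mtbdd_reader_end(arr);
        return mtbdd_invalid;
    }
    MTBDD dd = mtbdd_reader_get(arr, id);            /* translate identifier to MTBDD */
    mtbdd_reader_end(arr);                           /* free the translation array */
    return dd;
}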
/**
* MTBDDSET
* Just some convenience functions for handling sets of variables represented as a
* cube (conjunction) of positive literals
*/
#define mtbdd_set_empty() mtbdd_true
#define mtbdd_set_isempty(set) (set == mtbdd_true)
#define mtbdd_set_add(set, var) sylvan_and(set, sylvan_ithvar(var))
#define mtbdd_set_addall(set, set2) sylvan_and(set, set2)
#define mtbdd_set_remove(set, var) sylvan_exists(set, var)
#define mtbdd_set_removeall(set, set2) sylvan_exists(set, set2)
#define mtbdd_set_first(set) sylvan_var(set)
#define mtbdd_set_next(set) sylvan_high(set)
#define mtbdd_set_fromarray(arr, count) mtbdd_fromarray(arr, count)
#define mtbdd_set_toarray(set, arr) mtbdd_toarray(set, arr)
int mtbdd_set_in(BDDSET set, BDDVAR var);
size_t mtbdd_set_count(BDDSET set);
void mtbdd_test_isset(BDDSET set);
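/*
 * Sketch: iterate over a variable set (a cube of positive literals) with the
 * convenience macros above; print_set_vars is a hypothetical helper.
 */
static inline void print_set_vars(MTBDD set)
{
    while (!mtbdd_set_isempty(set)) {
        printf("x%u ", mtbdd_set_first(set)); /* variable label of the current literal */
        set = mtbdd_set_next(set);            /* rest of the cube */
    }
    printf("\n");
}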
/**
* MTBDDMAP, maps uint32_t variables to MTBDDs.
* A MTBDDMAP node has variable level, low edge going to the next MTBDDMAP, high edge to the mapped MTBDD
* A MTBDDMAP node has variable level, low edge going to the next MTBDDMAP, high edge to the mapped MTBDD.
*/
#define mtbdd_map_empty() mtbdd_false
#define mtbdd_map_isempty(map) (map == mtbdd_false ? 1 : 0)
@ -487,26 +835,6 @@ MTBDDMAP mtbdd_map_remove(MTBDDMAP map, uint32_t key);
*/
MTBDDMAP mtbdd_map_removeall(MTBDDMAP map, MTBDD variables);
/**
* Custom node types
* Overrides standard hash/equality/notify_on_dead behavior
* hash(value, seed) return hash version
* equals(value1, value2) return 1 if equal, 0 if not equal
* create(&value) replace value by new value for object allocation
* destroy(value)
* NOTE: equals(value1, value2) must imply: hash(value1, seed) == hash(value2,seed)
* NOTE: new value of create must imply: equals(old, new)
*/
typedef uint64_t (*mtbdd_hash_cb)(uint64_t, uint64_t);
typedef int (*mtbdd_equals_cb)(uint64_t, uint64_t);
typedef void (*mtbdd_create_cb)(uint64_t*);
typedef void (*mtbdd_destroy_cb)(uint64_t);
/**
* Registry callback handlers for <type>.
*/
uint32_t mtbdd_register_custom_leaf(mtbdd_hash_cb hash_cb, mtbdd_equals_cb equals_cb, mtbdd_create_cb create_cb, mtbdd_destroy_cb destroy_cb);
/**
* Garbage collection
* Sylvan supplies two default methods to handle references to nodes, but the user
@ -528,7 +856,7 @@ VOID_TASK_DECL_1(mtbdd_gc_mark_rec, MTBDD);
*/
MTBDD mtbdd_ref(MTBDD a);
void mtbdd_deref(MTBDD a);
size_t mtbdd_count_refs();
size_t mtbdd_count_refs(void);
/**
* Default external pointer referencing. During garbage collection, the pointers are followed and the MTBDD
@ -536,10 +864,10 @@ size_t mtbdd_count_refs();
*/
void mtbdd_protect(MTBDD* ptr);
void mtbdd_unprotect(MTBDD* ptr);
size_t mtbdd_count_protected();
size_t mtbdd_count_protected(void);
/**
* If sylvan_set_ondead is set to a callback, then this function marks MTBDDs (terminals).
* If mtbdd_set_ondead is set to a callback, then this function marks MTBDDs (terminals).
* When they are dead after the mark phase in garbage collection, the callback is called for marked MTBDDs.
* The ondead callback can either perform cleanup or resurrect dead terminals.
*/

74
resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -22,7 +23,7 @@
#define SYLVAN_MTBDD_INT_H
/**
* MTBDD node structure
* BDD/MTBDD node structure
*/
typedef struct __attribute__((packed)) mtbddnode {
uint64_t a, b;
@ -44,85 +45,124 @@ typedef struct __attribute__((packed)) mtbddnode {
// Node: a = L=0, C, M, high; b = variable, low
// Only complement edge on "high"
static inline int
static inline int __attribute__((unused))
mtbddnode_isleaf(mtbddnode_t n)
{
return n->a & 0x4000000000000000 ? 1 : 0;
}
static inline uint32_t
static inline uint32_t __attribute__((unused))
mtbddnode_gettype(mtbddnode_t n)
{
return n->a & 0x00000000ffffffff;
}
static inline uint64_t
static inline uint64_t __attribute__((unused))
mtbddnode_getvalue(mtbddnode_t n)
{
return n->b;
}
static inline int
static inline int __attribute__((unused))
mtbddnode_getcomp(mtbddnode_t n)
{
return n->a & 0x8000000000000000 ? 1 : 0;
}
static inline uint64_t
static inline uint64_t __attribute__((unused))
mtbddnode_getlow(mtbddnode_t n)
{
return n->b & 0x000000ffffffffff; // 40 bits
}
static inline uint64_t
static inline uint64_t __attribute__((unused))
mtbddnode_gethigh(mtbddnode_t n)
{
return n->a & 0x800000ffffffffff; // 40 bits plus high bit of first
}
static inline uint32_t
static inline uint32_t __attribute__((unused))
mtbddnode_getvariable(mtbddnode_t n)
{
return (uint32_t)(n->b >> 40);
}
static inline int
static inline int __attribute__((unused))
mtbddnode_getmark(mtbddnode_t n)
{
return n->a & 0x2000000000000000 ? 1 : 0;
}
static inline void
static inline void __attribute__((unused))
mtbddnode_setmark(mtbddnode_t n, int mark)
{
if (mark) n->a |= 0x2000000000000000;
else n->a &= 0xdfffffffffffffff;
}
static inline void
static inline void __attribute__((unused))
mtbddnode_makeleaf(mtbddnode_t n, uint32_t type, uint64_t value)
{
n->a = 0x4000000000000000 | (uint64_t)type;
n->b = value;
}
static inline void
static inline void __attribute__((unused))
mtbddnode_makenode(mtbddnode_t n, uint32_t var, uint64_t low, uint64_t high)
{
n->a = high;
n->b = ((uint64_t)var)<<40 | low;
}
static MTBDD
node_getlow(MTBDD mtbdd, mtbddnode_t node)
static inline void __attribute__((unused))
mtbddnode_makemapnode(mtbddnode_t n, uint32_t var, uint64_t low, uint64_t high)
{
n->a = high | 0x1000000000000000;
n->b = ((uint64_t)var)<<40 | low;
}
static inline int __attribute__((unused))
mtbddnode_ismapnode(mtbddnode_t n)
{
return n->a & 0x1000000000000000 ? 1 : 0;
}
static MTBDD __attribute__((unused))
mtbddnode_followlow(MTBDD mtbdd, mtbddnode_t node)
{
return MTBDD_TRANSFERMARK(mtbdd, mtbddnode_getlow(node));
}
static MTBDD
node_gethigh(MTBDD mtbdd, mtbddnode_t node)
static MTBDD __attribute__((unused))
mtbddnode_followhigh(MTBDD mtbdd, mtbddnode_t node)
{
return MTBDD_TRANSFERMARK(mtbdd, mtbddnode_gethigh(node));
}
/**
* Compatibility
*/
#define node_getlow mtbddnode_followlow
#define node_gethigh mtbddnode_followhigh
#define BDD_HASMARK MTBDD_HASMARK
#define BDD_TOGGLEMARK MTBDD_TOGGLEMARK
#define BDD_STRIPMARK MTBDD_STRIPMARK
#define BDD_TRANSFERMARK MTBDD_TRANSFERMARK
#define BDD_EQUALM MTBDD_EQUALM
#define bddnode mtbddnode
#define bddnode_t mtbddnode_t
#define bddnode_getcomp mtbddnode_getcomp
#define bddnode_getlow mtbddnode_getlow
#define bddnode_gethigh mtbddnode_gethigh
#define bddnode_getvariable mtbddnode_getvariable
#define bddnode_getmark mtbddnode_getmark
#define bddnode_setmark mtbddnode_setmark
#define bddnode_makenode mtbddnode_makenode
#define bddnode_makemapnode mtbddnode_makemapnode
#define bddnode_ismapnode mtbddnode_ismapnode
#define node_low node_getlow
#define node_high node_gethigh
#endif

134
resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c

@ -1,41 +1,9 @@
#include <sylvan_bdd_int.h>
#include <sylvan_mtbdd_int.h>
/**
* Generate SHA2 structural hashes.
* Hashes are independent of location.
* Mainly useful for debugging purposes.
*/
static void
mtbdd_sha2_rec(MTBDD mtbdd, SHA256_CTX *ctx)
{
if (mtbdd == sylvan_true || mtbdd == sylvan_false) {
SHA256_Update(ctx, (void*)&mtbdd, sizeof(MTBDD));
return;
}
mtbddnode_t node = MTBDD_GETNODE(mtbdd);
if (mtbddnode_isleaf(node)) {
uint64_t val = mtbddnode_getvalue(node);
SHA256_Update(ctx, (void*)&val, sizeof(uint64_t));
} else if (mtbddnode_getmark(node) == 0) {
mtbddnode_setmark(node, 1);
uint32_t level = mtbddnode_getvariable(node);
if (MTBDD_STRIPMARK(mtbddnode_gethigh(node))) level |= 0x80000000;
SHA256_Update(ctx, (void*)&level, sizeof(uint32_t));
mtbdd_sha2_rec(mtbddnode_gethigh(node), ctx);
mtbdd_sha2_rec(mtbddnode_getlow(node), ctx);
}
}
#include "storm_function_wrapper.h"
void
mtbdd_getsha(MTBDD mtbdd, char *target)
{
SHA256_CTX ctx;
SHA256_Init(&ctx);
mtbdd_sha2_rec(mtbdd, &ctx);
if (mtbdd != sylvan_true && mtbdd != sylvan_false) mtbdd_unmark_rec(mtbdd);
SHA256_End(&ctx, target);
}
// Import the srf_type created for rational functions.
extern uint32_t srf_type;
/**
* Binary operation Times (for MTBDDs of same type)
@ -99,12 +67,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_divide, MTBDD*, pa, MTBDD*, pb)
MTBDD result = mtbdd_fraction(nom_a, denom_a);
return result;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID && mtbddnode_gettype(nb) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_divide type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
return mtbdd_invalid;
@ -148,12 +110,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_equals, MTBDD*, pa, MTBDD*, pb)
if (nom_a == nom_b && denom_a == denom_b) return mtbdd_true;
return mtbdd_false;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID && mtbddnode_gettype(nb) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_equals type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
if (a < b) {
@ -201,12 +157,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_less, MTBDD*, pa, MTBDD*, pb)
uint64_t denom_b = val_b&0xffffffff;
return nom_a * denom_b < nom_b * denom_a ? mtbdd_true : mtbdd_false;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID && mtbddnode_gettype(nb) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_less type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
return mtbdd_invalid;
@ -250,12 +200,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_less_or_equal, MTBDD*, pa, MTBDD*, pb)
nom_b *= denom_a;
return nom_a <= nom_b ? mtbdd_true : mtbdd_false;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID && mtbddnode_gettype(nb) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_less_or_equal type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
return mtbdd_invalid;
@ -299,12 +243,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_greater_or_equal, MTBDD*, pa, MTBDD*, pb)
nom_b *= denom_a;
return nom_a >= nom_b ? mtbdd_true : mtbdd_false;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID && mtbddnode_gettype(nb) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_greater_or_equal type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
return mtbdd_invalid;
@ -336,12 +274,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_pow, MTBDD*, pa, MTBDD*, pb)
} else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) {
assert(0);
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID && mtbddnode_gettype(nb) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_pow type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
return mtbdd_invalid;
@ -373,12 +305,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_mod, MTBDD*, pa, MTBDD*, pb)
} else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) {
assert(0);
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID && mtbddnode_gettype(nb) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_mod type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
return mtbdd_invalid;
@ -410,12 +336,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_logxy, MTBDD*, pa, MTBDD*, pb)
} else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) {
assert(0);
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID && mtbddnode_gettype(nb) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_logxy type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
return mtbdd_invalid;
@ -439,10 +359,10 @@ TASK_IMPL_2(MTBDD, mtbdd_op_not_zero, MTBDD, a, size_t, v)
return mtbdd_getnumer(a) != 0 ? mtbdd_true : mtbdd_false;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
return storm_rational_function_is_zero((storm_rational_function_ptr)mtbdd_getvalue(a)) == 0 ? mtbdd_true : mtbdd_false;
}
#endif
else if (mtbddnode_gettype(na) == srf_type) {
return storm_rational_function_is_zero((storm_rational_function_ptr)mtbdd_getvalue(a)) == 0 ? mtbdd_true : mtbdd_false;
}
#endif
}
// Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter).
@ -475,12 +395,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_floor, MTBDD, a, size_t, v)
MTBDD result = mtbdd_fraction(mtbdd_getnumer(a) / mtbdd_getdenom(a), 1);
return result;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_floor type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
// Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter).
@ -513,12 +427,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_ceil, MTBDD, a, size_t, v)
MTBDD result = mtbdd_fraction(mtbdd_getnumer(a) / mtbdd_getdenom(a) + 1, 1);
return result;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
printf("ERROR mtbdd_op_ceil type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID");
assert(0);
}
#endif
}
// Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter).
@ -585,9 +493,9 @@ TASK_IMPL_2(double, mtbdd_non_zero_count, MTBDD, dd, size_t, nvars)
return mtbdd_getnumer(dd) != 0 ? powl(2.0L, nvars) : 0.0;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
return storm_rational_function_is_zero((storm_rational_function_ptr)mtbdd_getvalue(dd)) == 0 ? powl(2.0L, nvars) : 0.0;
}
else if (mtbddnode_gettype(na) == srf_type) {
return storm_rational_function_is_zero((storm_rational_function_ptr)mtbdd_getvalue(dd)) == 0 ? powl(2.0L, nvars) : 0.0;
}
#endif
}
@ -622,9 +530,9 @@ int mtbdd_iszero(MTBDD dd) {
return mtbdd_getnumer(dd) == 0;
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if (mtbdd_gettype(dd) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID) {
return storm_rational_function_is_zero((storm_rational_function_ptr)mtbdd_getvalue(dd)) == 1 ? 1 : 0;
}
else if (mtbdd_gettype(dd) == srf_type) {
return storm_rational_function_is_zero((storm_rational_function_ptr)mtbdd_getvalue(dd)) == 1 ? 1 : 0;
}
#endif
return 0;
}
@ -665,12 +573,6 @@ TASK_IMPL_2(MTBDD, mtbdd_op_complement, MTBDD, a, size_t, k)
printf("ERROR: mtbdd_op_complement type FRACTION.\n");
assert(0);
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
else if ((mtbddnode_gettype(na) == SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID)) {
printf("ERROR: mtbdd_op_complement type SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID.\n");
assert(0);
}
#endif
}
return mtbdd_invalid;
@ -694,7 +596,7 @@ TASK_IMPL_3(BDD, mtbdd_minExistsRepresentative, MTBDD, a, BDD, variables, BDDVAR
}
sylvan_ref(res);
BDD res1 = sylvan_not(sylvan_ite(sylvan_ithvar(bddnode_getvariable(BDD_GETNODE(variables))), sylvan_true, sylvan_not(res)));
BDD res1 = sylvan_not(sylvan_ite(sylvan_ithvar(bddnode_getvariable(MTBDD_GETNODE(variables))), sylvan_true, sylvan_not(res)));
if (res1 == sylvan_invalid) {
sylvan_deref(res);
return sylvan_invalid;
@ -705,7 +607,7 @@ TASK_IMPL_3(BDD, mtbdd_minExistsRepresentative, MTBDD, a, BDD, variables, BDDVAR
mtbddnode_t na = MTBDD_GETNODE(a);
uint32_t va = mtbddnode_getvariable(na);
bddnode_t nv = BDD_GETNODE(variables);
bddnode_t nv = MTBDD_GETNODE(variables);
BDDVAR vv = bddnode_getvariable(nv);
/* Abstract a variable that does not appear in a. */
@ -864,7 +766,7 @@ TASK_IMPL_3(BDD, mtbdd_maxExistsRepresentative, MTBDD, a, MTBDD, variables, uint
}
sylvan_ref(res);
BDD res1 = sylvan_not(sylvan_ite(sylvan_ithvar(bddnode_getvariable(BDD_GETNODE(variables))), sylvan_true, sylvan_not(res)));
BDD res1 = sylvan_not(sylvan_ite(sylvan_ithvar(bddnode_getvariable(MTBDD_GETNODE(variables))), sylvan_true, sylvan_not(res)));
if (res1 == sylvan_invalid) {
sylvan_deref(res);
return sylvan_invalid;
@ -875,7 +777,7 @@ TASK_IMPL_3(BDD, mtbdd_maxExistsRepresentative, MTBDD, a, MTBDD, variables, uint
mtbddnode_t na = MTBDD_GETNODE(a);
uint32_t va = mtbddnode_getvariable(na);
bddnode_t nv = BDD_GETNODE(variables);
bddnode_t nv = MTBDD_GETNODE(variables);
BDDVAR vv = bddnode_getvariable(nv);
/* Abstract a variable that does not appear in a. */

3
resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h

@ -1,5 +1,3 @@
void mtbdd_getsha(MTBDD mtbdd, char *target); // target must be at least 65 bytes...
/**
* Binary operation Divide (for MTBDDs of same type)
* Only for MTBDDs where all leaves are Integer or Double.
@ -120,7 +118,6 @@ int mtbdd_isnonzero(MTBDD);
#define mtbdd_regular(dd) (dd & ~mtbdd_complement)
#define GETNODE_BDD(bdd) ((bddnode_t)llmsset_index_to_ptr(nodes, bdd&0x000000ffffffffff))
#define mtbdd_set_next(set) (mtbdd_gethigh(set))
#define mtbdd_set_isempty(set) (set == mtbdd_true)
/* Create a MTBDD representing just <var> or the negation of <var> */

68
resources/3rdparty/sylvan/src/sylvan_obj.cpp

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,7 +16,6 @@
*/
#include <sylvan_obj.hpp>
#include <sylvan_storm_rational_function.h>
using namespace sylvan;
@ -35,7 +35,7 @@ Bdd::operator!=(const Bdd& other) const
return bdd != other.bdd;
}
Bdd
Bdd&
Bdd::operator=(const Bdd& right)
{
bdd = right.bdd;
@ -89,7 +89,7 @@ Bdd::operator*(const Bdd& other) const
return Bdd(sylvan_and(bdd, other.bdd));
}
Bdd
Bdd&
Bdd::operator*=(const Bdd& other)
{
LACE_ME;
@ -104,7 +104,7 @@ Bdd::operator&(const Bdd& other) const
return Bdd(sylvan_and(bdd, other.bdd));
}
Bdd
Bdd&
Bdd::operator&=(const Bdd& other)
{
LACE_ME;
@ -119,7 +119,7 @@ Bdd::operator+(const Bdd& other) const
return Bdd(sylvan_or(bdd, other.bdd));
}
Bdd
Bdd&
Bdd::operator+=(const Bdd& other)
{
LACE_ME;
@ -134,7 +134,7 @@ Bdd::operator|(const Bdd& other) const
return Bdd(sylvan_or(bdd, other.bdd));
}
Bdd
Bdd&
Bdd::operator|=(const Bdd& other)
{
LACE_ME;
@ -149,7 +149,7 @@ Bdd::operator^(const Bdd& other) const
return Bdd(sylvan_xor(bdd, other.bdd));
}
Bdd
Bdd&
Bdd::operator^=(const Bdd& other)
{
LACE_ME;
@ -164,7 +164,7 @@ Bdd::operator-(const Bdd& other) const
return Bdd(sylvan_and(bdd, sylvan_not(other.bdd)));
}
Bdd
Bdd&
Bdd::operator-=(const Bdd& other)
{
LACE_ME;
@ -373,7 +373,7 @@ Bdd::PickOneCube(const BddSet &variables) const
if (bdd == sylvan_false) return result;
for (; !sylvan_set_isempty(vars); vars = sylvan_set_next(vars)) {
uint32_t var = sylvan_set_var(vars);
uint32_t var = sylvan_set_first(vars);
if (bdd == sylvan_true) {
// pick 0
result.push_back(false);
@ -544,7 +544,7 @@ BddMap::operator+(const Bdd& other) const
return BddMap(sylvan_map_addall(bdd, other.bdd));
}
BddMap
BddMap&
BddMap::operator+=(const Bdd& other)
{
bdd = sylvan_map_addall(bdd, other.bdd);
@ -557,7 +557,7 @@ BddMap::operator-(const Bdd& other) const
return BddMap(sylvan_map_removeall(bdd, other.bdd));
}
BddMap
BddMap&
BddMap::operator-=(const Bdd& other)
{
bdd = sylvan_map_removeall(bdd, other.bdd);
@ -605,15 +605,6 @@ Mtbdd::doubleTerminal(double value)
return mtbdd_double(value);
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
Mtbdd
Mtbdd::stormRationalFunctionTerminal(storm::RationalFunction const& value)
{
storm_rational_function_ptr ptr = (storm_rational_function_ptr)(&value);
return mtbdd_storm_rational_function(ptr);
}
#endif
Mtbdd
Mtbdd::fractionTerminal(int64_t nominator, uint64_t denominator)
{
@ -811,7 +802,7 @@ Mtbdd::operator!=(const Mtbdd& other) const
return mtbdd != other.mtbdd;
}
Mtbdd
Mtbdd&
Mtbdd::operator=(const Mtbdd& right)
{
mtbdd = right.mtbdd;
@ -837,7 +828,7 @@ Mtbdd::operator*(const Mtbdd& other) const
return mtbdd_times(mtbdd, other.mtbdd);
}
Mtbdd
Mtbdd&
Mtbdd::operator*=(const Mtbdd& other)
{
LACE_ME;
@ -852,7 +843,7 @@ Mtbdd::operator+(const Mtbdd& other) const
return mtbdd_plus(mtbdd, other.mtbdd);
}
Mtbdd
Mtbdd&
Mtbdd::operator+=(const Mtbdd& other)
{
LACE_ME;
@ -867,7 +858,7 @@ Mtbdd::operator-(const Mtbdd& other) const
return mtbdd_minus(mtbdd, other.mtbdd);
}
Mtbdd
Mtbdd&
Mtbdd::operator-=(const Mtbdd& other)
{
LACE_ME;
@ -973,7 +964,7 @@ MtbddMap::operator+(const Mtbdd& other) const
return MtbddMap(mtbdd_map_addall(mtbdd, other.mtbdd));
}
MtbddMap
MtbddMap&
MtbddMap::operator+=(const Mtbdd& other)
{
mtbdd = mtbdd_map_addall(mtbdd, other.mtbdd);
@ -986,7 +977,7 @@ MtbddMap::operator-(const Mtbdd& other) const
return MtbddMap(mtbdd_map_removeall(mtbdd, other.mtbdd));
}
MtbddMap
MtbddMap&
MtbddMap::operator-=(const Mtbdd& other)
{
mtbdd = mtbdd_map_removeall(mtbdd, other.mtbdd);
@ -1025,20 +1016,32 @@ MtbddMap::isEmpty()
void
Sylvan::initPackage(size_t initialTableSize, size_t maxTableSize, size_t initialCacheSize, size_t maxCacheSize)
{
sylvan_init_package(initialTableSize, maxTableSize, initialCacheSize, maxCacheSize);
sylvan_set_sizes(initialTableSize, maxTableSize, initialCacheSize, maxCacheSize);
sylvan_init_package();
}
void
Sylvan::setGranularity(int granularity)
{
sylvan_set_granularity(granularity);
}
int
Sylvan::getGranularity()
{
return sylvan_get_granularity();
}
void
Sylvan::initBdd(int granularity)
Sylvan::initBdd()
{
sylvan_init_bdd(granularity);
sylvan_init_bdd();
}
void
Sylvan::initMtbdd()
{
sylvan_init_mtbdd();
sylvan_storm_rational_function_init();
}
void
@ -1047,4 +1050,5 @@ Sylvan::quitPackage()
sylvan_quit();
}
#include "sylvan_obj_storm.cpp"
#include <sylvan_obj_storm.cpp>

59
resources/3rdparty/sylvan/src/sylvan_obj.hpp

@ -27,9 +27,9 @@
namespace sylvan {
class Mtbdd;
class BddSet;
class BddMap;
class Mtbdd;
class Bdd {
friend class Sylvan;
@ -84,7 +84,7 @@ public:
int operator==(const Bdd& other) const;
int operator!=(const Bdd& other) const;
Bdd operator=(const Bdd& right);
Bdd& operator=(const Bdd& right);
int operator<=(const Bdd& other) const;
int operator>=(const Bdd& other) const;
int operator<(const Bdd& other) const;
@ -92,17 +92,17 @@ public:
Bdd operator!() const;
Bdd operator~() const;
Bdd operator*(const Bdd& other) const;
Bdd operator*=(const Bdd& other);
Bdd& operator*=(const Bdd& other);
Bdd operator&(const Bdd& other) const;
Bdd operator&=(const Bdd& other);
Bdd& operator&=(const Bdd& other);
Bdd operator+(const Bdd& other) const;
Bdd operator+=(const Bdd& other);
Bdd& operator+=(const Bdd& other);
Bdd operator|(const Bdd& other) const;
Bdd operator|=(const Bdd& other);
Bdd& operator|=(const Bdd& other);
Bdd operator^(const Bdd& other) const;
Bdd operator^=(const Bdd& other);
Bdd& operator^=(const Bdd& other);
Bdd operator-(const Bdd& other) const;
Bdd operator-=(const Bdd& other);
Bdd& operator-=(const Bdd& other);
/**
* @brief Returns non-zero if this Bdd is bddOne() or bddZero()
@ -328,7 +328,7 @@ public:
size_t NodeCount() const;
#include "sylvan_obj_bdd_storm.hpp"
private:
BDD bdd;
};
@ -496,9 +496,9 @@ public:
BddMap(uint32_t key_variable, const Bdd value);
BddMap operator+(const Bdd& other) const;
BddMap operator+=(const Bdd& other);
BddMap& operator+=(const Bdd& other);
BddMap operator-(const Bdd& other) const;
BddMap operator-=(const Bdd& other);
BddMap& operator-=(const Bdd& other);
/**
* @brief Adds a key-value pair to the map
@ -544,13 +544,6 @@ public:
*/
static Mtbdd doubleTerminal(double value);
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
/**
* @brief Creates a Mtbdd leaf representing the rational function value <value>
*/
static Mtbdd stormRationalFunctionTerminal(storm::RationalFunction const& value);
#endif
/**
* @brief Creates a Mtbdd leaf representing the fraction value <nominator>/<denominator>
* Internally, Sylvan uses 32-bit values and reports overflows to stderr.
@ -605,15 +598,15 @@ public:
int operator==(const Mtbdd& other) const;
int operator!=(const Mtbdd& other) const;
Mtbdd operator=(const Mtbdd& right);
Mtbdd& operator=(const Mtbdd& right);
Mtbdd operator!() const;
Mtbdd operator~() const;
Mtbdd operator*(const Mtbdd& other) const;
Mtbdd operator*=(const Mtbdd& other);
Mtbdd& operator*=(const Mtbdd& other);
Mtbdd operator+(const Mtbdd& other) const;
Mtbdd operator+=(const Mtbdd& other);
Mtbdd& operator+=(const Mtbdd& other);
Mtbdd operator-(const Mtbdd& other) const;
Mtbdd operator-=(const Mtbdd& other);
Mtbdd& operator-=(const Mtbdd& other);
// not implemented (compared to Bdd): <=, >=, <, >, &, &=, |, |=, ^, ^=
@ -785,7 +778,7 @@ public:
size_t NodeCount() const;
#include "sylvan_obj_mtbdd_storm.hpp"
private:
MTBDD mtbdd;
};
@ -803,9 +796,9 @@ public:
MtbddMap(uint32_t key_variable, Mtbdd value);
MtbddMap operator+(const Mtbdd& other) const;
MtbddMap operator+=(const Mtbdd& other);
MtbddMap& operator+=(const Mtbdd& other);
MtbddMap operator-(const Mtbdd& other) const;
MtbddMap operator-=(const Mtbdd& other);
MtbddMap& operator-=(const Mtbdd& other);
/**
* @brief Adds a key-value pair to the map
@ -840,18 +833,30 @@ public:
static void initPackage(size_t initialTableSize, size_t maxTableSize, size_t initialCacheSize, size_t maxCacheSize);
/**
* @brief Initializes the BDD module of the Sylvan framework.
* @brief Set the granularity for the BDD operations.
* @param granularity determines operation cache behavior; for higher values (2+) it will use the operation cache less often.
* Values of 3-7 may result in better performance, since occasionally not using the operation cache is fine in practice.
* A granularity of 1 means that every BDD operation will be cached at every variable level.
*/
static void initBdd(int granularity);
static void setGranularity(int granularity);
/**
* @brief Retrieve the granularity for the BDD operations.
*/
static int getGranularity();
/**
* @brief Initializes the BDD module of the Sylvan framework.
*/
static void initBdd();
/**
* @brief Initializes the MTBDD module of the Sylvan framework.
*/
static void initMtbdd();
#include "sylvan_obj_sylvan_storm.hpp"
/**
* @brief Frees all memory in use by Sylvan.
* Warning: if you have any Bdd objects which are not bddZero() or bddOne() after this, your program may crash!

6
resources/3rdparty/sylvan/src/sylvan_obj_mtbdd_storm.hpp

@ -1,3 +1,5 @@
/**
* @brief Computes f - g
*/
@ -9,6 +11,10 @@
Mtbdd Divide(const Mtbdd &other) const;
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
/**
* @brief Creates a Mtbdd leaf representing the rational function value <value>
*/
static Mtbdd stormRationalFunctionTerminal(storm::RationalFunction const& value);
Bdd EqualsRF(const Mtbdd& other) const;
Bdd LessRF(const Mtbdd& other) const;

19
resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp

@ -1,3 +1,13 @@
#include "storm_function_wrapper.h"
#include "sylvan_storm_rational_function.h"
void
Sylvan::initCustomMtbdd()
{
sylvan_init_mt();
sylvan_storm_rational_function_init();
}
Bdd
Bdd::ExistAbstractRepresentative(const BddSet& cube) const {
LACE_ME;
@ -17,6 +27,13 @@ Bdd::toInt64Mtbdd() const {
}
#if defined(SYLVAN_HAVE_CARL) || defined(STORM_HAVE_CARL)
Mtbdd
Mtbdd::stormRationalFunctionTerminal(storm::RationalFunction const& value)
{
storm_rational_function_ptr ptr = (storm_rational_function_ptr)(&value);
return mtbdd_storm_rational_function(ptr);
}
Mtbdd
Bdd::toStormRationalFunctionMtbdd() const {
LACE_ME;
@ -279,7 +296,7 @@ Mtbdd::Maximum() const {
void
Mtbdd::PrintDot(FILE *out) const {
mtbdd_fprintdot(out, mtbdd, NULL);
mtbdd_fprintdot(out, mtbdd);
}
std::string

1
resources/3rdparty/sylvan/src/sylvan_obj_sylvan_storm.hpp

@ -0,0 +1 @@
static void initCustomMtbdd();

5
resources/3rdparty/sylvan/src/refs.c → resources/3rdparty/sylvan/src/sylvan_refs.c

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -24,7 +25,7 @@
#include <string.h> // for strerror
#include <sys/mman.h> // for mmap
#include <refs.h>
#include <sylvan_refs.h>
#ifndef compiler_barrier
#define compiler_barrier() { asm volatile("" ::: "memory"); }

3
resources/3rdparty/sylvan/src/refs.h → resources/3rdparty/sylvan/src/sylvan_refs.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

172
resources/3rdparty/sylvan/src/sylvan_sl.c

@ -0,0 +1,172 @@
/*
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <stdio.h>
#include <sys/mman.h> // for mmap, munmap, etc
#include <sylvan.h>
#include <sylvan_sl.h>
/* A SL_DEPTH of 6 means 32 bytes per bucket, of 14 means 64 bytes per bucket.
However, there is a very large performance drop with only 6 levels. */
#define SL_DEPTH 14
typedef struct
{
BDD dd;
uint32_t next[SL_DEPTH];
} sl_bucket;
struct sylvan_skiplist
{
sl_bucket *buckets;
size_t size;
size_t next;
};
#ifndef cas
#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new)))
#endif
sylvan_skiplist_t
sylvan_skiplist_alloc(size_t size)
{
if (size >= 0x80000000) {
fprintf(stderr, "sylvan: Trying to allocate a skiplist >= 0x80000000 buckets!\n");
exit(1);
}
sylvan_skiplist_t l = malloc(sizeof(struct sylvan_skiplist));
l->buckets = (sl_bucket*)mmap(0, sizeof(sl_bucket)*size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
if (l->buckets == (sl_bucket*)-1) {
fprintf(stderr, "sylvan: Unable to allocate virtual memory (%'zu bytes) for the skiplist!\n", size*sizeof(sl_bucket));
exit(1);
}
l->size = size;
l->next = 1;
return l;
}
void
sylvan_skiplist_free(sylvan_skiplist_t l)
{
munmap(l->buckets, sizeof(sl_bucket)*l->size);
free(l);
}
/**
* Return the assigned number of the given dd,
* or 0 if not found.
*/
uint64_t
sylvan_skiplist_get(sylvan_skiplist_t l, MTBDD dd)
{
if (dd == mtbdd_false || dd == mtbdd_true) return 0;
uint32_t loc = 0, k = SL_DEPTH-1;
for (;;) {
/* invariant: [loc].dd < dd */
/* note: this is always true for loc==0 */
sl_bucket *e = l->buckets + loc;
uint32_t loc_next = (*(volatile uint32_t*)&e->next[k]) & 0x7fffffff;
if (loc_next != 0 && l->buckets[loc_next].dd == dd) {
/* found */
return loc_next;
} else if (loc_next != 0 && l->buckets[loc_next].dd < dd) {
/* go right */
loc = loc_next;
} else if (k > 0) {
/* go down */
k--;
} else {
return 0;
}
}
}
VOID_TASK_IMPL_2(sylvan_skiplist_assign_next, sylvan_skiplist_t, l, MTBDD, dd)
{
if (dd == mtbdd_false || dd == mtbdd_true) return;
uint32_t trace[SL_DEPTH];
uint32_t loc = 0, loc_next = 0, k = SL_DEPTH-1;
for (;;) {
/* invariant: [loc].dd < dd */
/* note: this is always true for loc==0 */
sl_bucket *e = l->buckets + loc;
loc_next = (*(volatile uint32_t*)&e->next[k]) & 0x7fffffff;
if (loc_next != 0 && l->buckets[loc_next].dd == dd) {
/* found */
return;
} else if (loc_next != 0 && l->buckets[loc_next].dd < dd) {
/* go right */
loc = loc_next;
} else if (k > 0) {
/* go down */
trace[k] = loc;
k--;
} else if (!(e->next[0] & 0x80000000) && cas(&e->next[0], loc_next, loc_next|0x80000000)) {
/* locked */
break;
}
}
/* claim next item */
const uint64_t next = __sync_fetch_and_add(&l->next, 1);
if (next >= l->size) {
fprintf(stderr, "Out of cheese exception, no more blocks available\n");
exit(1);
}
/* fill next item */
sl_bucket *a = l->buckets + next;
a->dd = dd;
a->next[0] = loc_next;
compiler_barrier();
l->buckets[loc].next[0] = next;
/* determine height */
uint64_t h = 1 + __builtin_clz(LACE_TRNG) / 2;
if (h > SL_DEPTH) h = SL_DEPTH;
/* go up and create links */
for (k=1;k<h;k++) {
loc = trace[k];
for (;;) {
sl_bucket *e = l->buckets + loc;
/* note, at k>0, no locks on edges */
uint32_t loc_next = *(volatile uint32_t*)&e->next[k];
if (loc_next != 0 && l->buckets[loc_next].dd < dd) {
loc = loc_next;
} else {
a->next[k] = loc_next;
if (cas(&e->next[k], loc_next, next)) break;
}
}
}
}
size_t
sylvan_skiplist_count(sylvan_skiplist_t l)
{
return l->next - 1;
}
MTBDD
sylvan_skiplist_getr(sylvan_skiplist_t l, uint64_t index)
{
return l->buckets[index].dd;
}

70
resources/3rdparty/sylvan/src/sylvan_sl.h

@ -0,0 +1,70 @@
/*
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SYLVAN_SKIPLIST_H
#define SYLVAN_SKIPLIST_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
* Implementation of a simple limited-depth skiplist.
* The skiplist is used by the serialization mechanism in Sylvan.
* Each stored MTBDD is assigned a number starting with 1.
* Each bucket takes 32 bytes.
*/
typedef struct sylvan_skiplist *sylvan_skiplist_t;
/**
* Allocate a new skiplist of maximum size <size>.
* Only supports at most 0x7fffffff (the maximum int32 value) buckets.
*/
sylvan_skiplist_t sylvan_skiplist_alloc(size_t size);
/**
* Free the given skiplist.
*/
void sylvan_skiplist_free(sylvan_skiplist_t sl);
/**
* Get the number assigned to the given node <dd>.
* Returns 0 if no number was assigned.
*/
uint64_t sylvan_skiplist_get(sylvan_skiplist_t sl, MTBDD dd);
/**
* Assign the next number (starting at 1) to the given node <dd>.
*/
VOID_TASK_DECL_2(sylvan_skiplist_assign_next, sylvan_skiplist_t, MTBDD);
#define sylvan_skiplist_assign_next(sl, dd) CALL(sylvan_skiplist_assign_next, sl, dd)
/**
* Return the number of assigned nodes (the numbers 1,2,...,N).
*/
size_t sylvan_skiplist_count(sylvan_skiplist_t sl);
/**
* Get the MTBDD assigned to the number <index>, where <index> ranges over 1,...,N (see sylvan_skiplist_count).
*/
MTBDD sylvan_skiplist_getr(sylvan_skiplist_t sl, uint64_t index);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif
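
A minimal usage sketch of this interface (not part of the header; it assumes Sylvan and Lace are already initialized, that this header is reachable, e.g. via sylvan_int.h, and the function name is illustrative):

#include <stdio.h>
#include <sylvan_int.h>

/* sketch: number one MTBDD the way the serialization code does */
void skiplist_example(MTBDD dd)
{
    LACE_ME;  /* sylvan_skiplist_assign_next expands to a Lace CALL */
    sylvan_skiplist_t sl = sylvan_skiplist_alloc(1ULL << 20);
    sylvan_skiplist_assign_next(sl, dd);         /* dd receives number 1         */
    uint64_t num = sylvan_skiplist_get(sl, dd);  /* 1; terminals always map to 0 */
    printf("%zu node(s) assigned, round-trip ok: %d\n",
           sylvan_skiplist_count(sl), sylvan_skiplist_getr(sl, num) == dd);
    sylvan_skiplist_free(sl);
}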

296
resources/3rdparty/sylvan/src/sylvan_stats.c

@ -0,0 +1,296 @@
/*
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h> // for errno
#include <string.h> // memset
#include <sylvan_stats.h>
#include <sys/mman.h>
#include <inttypes.h>
#include <sylvan_int.h>
#if SYLVAN_STATS
#ifdef __ELF__
__thread sylvan_stats_t sylvan_stats;
#else
pthread_key_t sylvan_stats_key;
#endif
#include <hwloc.h>
static hwloc_topology_t topo;
/**
* Instructions for sylvan_stats_report
*/
struct
{
int type; /* 0 for print line, 1 for simple counter, 2 for operation with CACHED and CACHEDPUT */
/* 3 for timer, 4 for report table data */
int id;
const char *key;
} sylvan_report_info[] =
{
{0, 0, "Tables"},
{1, BDD_NODES_CREATED, "MTBDD nodes created"},
{1, BDD_NODES_REUSED, "MTBDD nodes reused"},
{1, LDD_NODES_CREATED, "LDD nodes created"},
{1, LDD_NODES_REUSED, "LDD nodes reused"},
{1, LLMSSET_LOOKUP, "Lookup iterations"},
{4, 0, NULL}, /* trigger to report unique nodes and operation cache */
{0, 0, "Operation Count Cache get Cache put"},
{2, BDD_AND, "BDD and"},
{2, BDD_XOR, "BDD xor"},
{2, BDD_ITE, "BDD ite"},
{2, BDD_EXISTS, "BDD exists"},
{2, BDD_PROJECT, "BDD project"},
{2, BDD_AND_EXISTS, "BDD andexists"},
{2, BDD_AND_PROJECT, "BDD andproject"},
{2, BDD_RELNEXT, "BDD relnext"},
{2, BDD_RELPREV, "BDD relprev"},
{2, BDD_CLOSURE, "BDD closure"},
{2, BDD_COMPOSE, "BDD compose"},
{2, BDD_RESTRICT, "BDD restrict"},
{2, BDD_CONSTRAIN, "BDD constrain"},
{2, BDD_SUPPORT, "BDD support"},
{2, BDD_SATCOUNT, "BDD satcount"},
{2, BDD_PATHCOUNT, "BDD pathcount"},
{2, BDD_ISBDD, "BDD isbdd"},
{2, MTBDD_APPLY, "MTBDD binary apply"},
{2, MTBDD_UAPPLY, "MTBDD unary apply"},
{2, MTBDD_ABSTRACT, "MTBDD abstract"},
{2, MTBDD_ITE, "MTBDD ite"},
{2, MTBDD_EQUAL_NORM, "MTBDD eq norm"},
{2, MTBDD_EQUAL_NORM_REL, "MTBDD eq norm rel"},
{2, MTBDD_LEQ, "MTBDD leq"},
{2, MTBDD_LESS, "MTBDD less"},
{2, MTBDD_GEQ, "MTBDD geq"},
{2, MTBDD_GREATER, "MTBDD greater"},
{2, MTBDD_AND_ABSTRACT_PLUS, "MTBDD and_abs_plus"},
{2, MTBDD_AND_ABSTRACT_MAX, "MTBDD and_abs_max"},
{2, MTBDD_COMPOSE, "MTBDD compose"},
{2, MTBDD_MINIMUM, "MTBDD minimum"},
{2, MTBDD_MAXIMUM, "MTBDD maximum"},
{2, MTBDD_EVAL_COMPOSE, "MTBDD eval_compose"},
{2, LDD_UNION, "LDD union"},
{2, LDD_MINUS, "LDD minus"},
{2, LDD_INTERSECT, "LDD intersect"},
{2, LDD_RELPROD, "LDD relprod"},
{2, LDD_RELPREV, "LDD relprev"},
{2, LDD_PROJECT, "LDD project"},
{2, LDD_JOIN, "LDD join"},
{2, LDD_MATCH, "LDD match"},
{2, LDD_SATCOUNT, "LDD satcount"},
{2, LDD_SATCOUNTL, "LDD satcountl"},
{2, LDD_ZIP, "LDD zip"},
{2, LDD_RELPROD_UNION, "LDD relprod_union"},
{2, LDD_PROJECT_MINUS, "LDD project_minus"},
{0, 0, "Garbage collection"},
{1, SYLVAN_GC_COUNT, "GC executions"},
{3, SYLVAN_GC, "Total time spent"},
{-1, -1, NULL},
};
VOID_TASK_0(sylvan_stats_reset_perthread)
{
#ifdef __ELF__
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
sylvan_stats.counters[i] = 0;
}
for (int i=0; i<SYLVAN_TIMER_COUNTER; i++) {
sylvan_stats.timers[i] = 0;
}
#else
sylvan_stats_t *sylvan_stats = pthread_getspecific(sylvan_stats_key);
if (sylvan_stats == NULL) {
sylvan_stats = mmap(0, sizeof(sylvan_stats_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
if (sylvan_stats == (sylvan_stats_t *)-1) {
fprintf(stderr, "sylvan_stats: Unable to allocate memory: %s!\n", strerror(errno));
exit(1);
}
// Bind the stats object to the memory local to our processing unit (PU)
hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, LACE_WORKER_PU);
hwloc_set_area_membind(topo, sylvan_stats, sizeof(sylvan_stats_t), pu->cpuset, HWLOC_MEMBIND_BIND, 0);
pthread_setspecific(sylvan_stats_key, sylvan_stats);
}
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
sylvan_stats->counters[i] = 0;
}
for (int i=0; i<SYLVAN_TIMER_COUNTER; i++) {
sylvan_stats->timers[i] = 0;
}
#endif
}
VOID_TASK_IMPL_0(sylvan_stats_init)
{
#ifndef __ELF__
pthread_key_create(&sylvan_stats_key, NULL);
#endif
hwloc_topology_init(&topo);
hwloc_topology_load(topo);
TOGETHER(sylvan_stats_reset_perthread);
}
/**
* Reset all counters (for statistics)
*/
VOID_TASK_IMPL_0(sylvan_stats_reset)
{
TOGETHER(sylvan_stats_reset_perthread);
}
VOID_TASK_1(sylvan_stats_sum, sylvan_stats_t*, target)
{
#ifdef __ELF__
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
__sync_fetch_and_add(&target->counters[i], sylvan_stats.counters[i]);
}
for (int i=0; i<SYLVAN_TIMER_COUNTER; i++) {
__sync_fetch_and_add(&target->timers[i], sylvan_stats.timers[i]);
}
#else
sylvan_stats_t *sylvan_stats = pthread_getspecific(sylvan_stats_key);
if (sylvan_stats != NULL) {
for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
__sync_fetch_and_add(&target->counters[i], sylvan_stats->counters[i]);
}
for (int i=0; i<SYLVAN_TIMER_COUNTER; i++) {
__sync_fetch_and_add(&target->timers[i], sylvan_stats->timers[i]);
}
}
#endif
}
VOID_TASK_IMPL_1(sylvan_stats_snapshot, sylvan_stats_t*, target)
{
memset(target, 0, sizeof(sylvan_stats_t));
TOGETHER(sylvan_stats_sum, target);
}
#define BLACK "\33[22;30m"
#define GRAY "\33[1;30m"
#define RED "\33[22;31m"
#define LRED "\33[1;31m"
#define GREEN "\33[22;32m"
#define LGREEN "\33[1;32m"
#define BROWN "\33[22;33m"
#define YELLOW "\33[1;33m"
#define BLUE "\33[22;34m"
#define LBLUE "\33[1;34m"
#define MAGENTA "\33[22;35m"
#define LMAGENTA "\33[1;35m"
#define CYAN "\33[22;36m"
#define LCYAN "\33[1;36m"
#define LGRAY "\33[22;37m"
#define WHITE "\33[1;37m"
#define NC "\33[m"
#define BOLD "\33[1m"
#define ULINE "\33[4m"
#define PINK "\33[38;5;200m"
static char*
to_h(double size, char *buf)
{
const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
int i = 0;
for (;size>1024;size/=1024) i++;
sprintf(buf, "%.*f %s", i, size, units[i]);
return buf;
}
void
sylvan_stats_report(FILE *target)
{
LACE_ME;
sylvan_stats_t totals;
sylvan_stats_snapshot(&totals);
// fix timers for MACH
#ifdef __MACH__
mach_timebase_info_data_t timebase;
mach_timebase_info(&timebase);
uint64_t c = timebase.numer/timebase.denom;
for (int i=0;i<SYLVAN_TIMER_COUNTER;i++) totals.timers[i]*=c;
#endif
int color = isatty(fileno(target)) ? 1 : 0;
if (color) fprintf(target, ULINE WHITE "Sylvan statistics\n" NC);
else fprintf(target, "Sylvan statistics\n");
int i=0;
for (;;) {
if (sylvan_report_info[i].id == -1) break;
int id = sylvan_report_info[i].id;
int type = sylvan_report_info[i].type;
if (type == 0) {
if (color) fprintf(target, WHITE "\n%s\n" NC, sylvan_report_info[i].key);
else fprintf(target, "\n%s\n", sylvan_report_info[i].key);
} else if (type == 1) {
if (totals.counters[id] > 0) {
fprintf(target, "%-20s %'-16"PRIu64"\n", sylvan_report_info[i].key, totals.counters[id]);
}
} else if (type == 2) {
if (totals.counters[id] > 0) {
fprintf(target, "%-20s %'-16"PRIu64 " %'-16"PRIu64" %'-16"PRIu64 "\n", sylvan_report_info[i].key, totals.counters[id], totals.counters[id+1], totals.counters[id+2]);
}
} else if (type == 3) {
if (totals.timers[id] > 0) {
fprintf(target, "%-20s %'.6Lf sec.\n", sylvan_report_info[i].key, (long double)totals.timers[id]/1000000000);
}
} else if (type == 4) {
fprintf(target, "%-20s %'zu of %'zu buckets filled.\n", "Unique nodes table", llmsset_count_marked(nodes), llmsset_get_size(nodes));
fprintf(target, "%-20s %'zu of %'zu buckets filled.\n", "Operation cache", cache_getused(), cache_getsize());
char buf[64], buf2[64];
to_h(24ULL * llmsset_get_size(nodes), buf);
to_h(24ULL * llmsset_get_max_size(nodes), buf2);
fprintf(target, "%-20s %s (max real) of %s (allocated virtual memory).\n", "Memory (nodes)", buf, buf2);
to_h(36ULL * cache_getsize(), buf);
to_h(36ULL * cache_getmaxsize(), buf2);
fprintf(target, "%-20s %s (max real) of %s (allocated virtual memory).\n", "Memory (cache)", buf, buf2);
}
i++;
}
}
#else
VOID_TASK_IMPL_0(sylvan_stats_init)
{
}
VOID_TASK_IMPL_0(sylvan_stats_reset)
{
}
VOID_TASK_IMPL_1(sylvan_stats_snapshot, sylvan_stats_t*, target)
{
memset(target, 0, sizeof(sylvan_stats_t));
}
void
sylvan_stats_report(FILE* target)
{
(void)target;
}
#endif
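
A minimal sketch of how this module is typically driven (hypothetical function name; it assumes a build with SYLVAN_STATS enabled, that sylvan.h exposes the stats interface, and that the call happens on a Lace worker thread):

#include <stdio.h>
#include <sylvan.h>

static void report_example(void)
{
    LACE_ME;                      /* the reset macro expands to a Lace CALL     */
    sylvan_stats_reset();         /* optional: start counting from zero         */
    /* ... perform BDD/MTBDD operations here ... */
    sylvan_stats_report(stdout);  /* counters, cache gets/puts, GC time, memory */
}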

179
resources/3rdparty/sylvan/src/stats.h → resources/3rdparty/sylvan/src/sylvan_stats.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,8 +15,8 @@
* limitations under the License.
*/
#include <lace.h>
#include <sylvan_config.h>
#include <lace.h>
#ifndef SYLVAN_STATS_H
#define SYLVAN_STATS_H
@ -24,100 +25,71 @@
extern "C" {
#endif /* __cplusplus */
#define OPCOUNTER(NAME) NAME, NAME ## _CACHEDPUT, NAME ## _CACHED
typedef enum {
BDD_ITE,
BDD_AND,
BDD_XOR,
BDD_EXISTS,
BDD_AND_EXISTS,
BDD_RELNEXT,
BDD_RELPREV,
BDD_SATCOUNT,
BDD_COMPOSE,
BDD_RESTRICT,
BDD_CONSTRAIN,
BDD_CLOSURE,
BDD_ISBDD,
BDD_SUPPORT,
BDD_PATHCOUNT,
BDD_ITE_CACHEDPUT,
BDD_AND_CACHEDPUT,
BDD_XOR_CACHEDPUT,
BDD_EXISTS_CACHEDPUT,
BDD_AND_EXISTS_CACHEDPUT,
BDD_RELNEXT_CACHEDPUT,
BDD_RELPREV_CACHEDPUT,
BDD_SATCOUNT_CACHEDPUT,
BDD_COMPOSE_CACHEDPUT,
BDD_RESTRICT_CACHEDPUT,
BDD_CONSTRAIN_CACHEDPUT,
BDD_CLOSURE_CACHEDPUT,
BDD_ISBDD_CACHEDPUT,
BDD_SUPPORT_CACHEDPUT,
BDD_PATHCOUNT_CACHEDPUT,
BDD_ITE_CACHED,
BDD_AND_CACHED,
BDD_XOR_CACHED,
BDD_EXISTS_CACHED,
BDD_AND_EXISTS_CACHED,
BDD_RELNEXT_CACHED,
BDD_RELPREV_CACHED,
BDD_SATCOUNT_CACHED,
BDD_COMPOSE_CACHED,
BDD_RESTRICT_CACHED,
BDD_CONSTRAIN_CACHED,
BDD_CLOSURE_CACHED,
BDD_ISBDD_CACHED,
BDD_SUPPORT_CACHED,
BDD_PATHCOUNT_CACHED,
/* Creating nodes */
BDD_NODES_CREATED,
BDD_NODES_REUSED,
LDD_UNION,
LDD_MINUS,
LDD_INTERSECT,
LDD_RELPROD,
LDD_RELPREV,
LDD_PROJECT,
LDD_JOIN,
LDD_MATCH,
LDD_SATCOUNT,
LDD_SATCOUNTL,
LDD_ZIP,
LDD_RELPROD_UNION,
LDD_PROJECT_MINUS,
LDD_UNION_CACHEDPUT,
LDD_MINUS_CACHEDPUT,
LDD_INTERSECT_CACHEDPUT,
LDD_RELPROD_CACHEDPUT,
LDD_RELPREV_CACHEDPUT,
LDD_PROJECT_CACHEDPUT,
LDD_JOIN_CACHEDPUT,
LDD_MATCH_CACHEDPUT,
LDD_SATCOUNT_CACHEDPUT,
LDD_SATCOUNTL_CACHEDPUT,
LDD_ZIP_CACHEDPUT,
LDD_RELPROD_UNION_CACHEDPUT,
LDD_PROJECT_MINUS_CACHEDPUT,
LDD_UNION_CACHED,
LDD_MINUS_CACHED,
LDD_INTERSECT_CACHED,
LDD_RELPROD_CACHED,
LDD_RELPREV_CACHED,
LDD_PROJECT_CACHED,
LDD_JOIN_CACHED,
LDD_MATCH_CACHED,
LDD_SATCOUNT_CACHED,
LDD_SATCOUNTL_CACHED,
LDD_ZIP_CACHED,
LDD_RELPROD_UNION_CACHED,
LDD_PROJECT_MINUS_CACHED,
LDD_NODES_CREATED,
LDD_NODES_REUSED,
/* BDD operations */
OPCOUNTER(BDD_ITE),
OPCOUNTER(BDD_AND),
OPCOUNTER(BDD_XOR),
OPCOUNTER(BDD_EXISTS),
OPCOUNTER(BDD_PROJECT),
OPCOUNTER(BDD_AND_EXISTS),
OPCOUNTER(BDD_AND_PROJECT),
OPCOUNTER(BDD_RELNEXT),
OPCOUNTER(BDD_RELPREV),
OPCOUNTER(BDD_SATCOUNT),
OPCOUNTER(BDD_COMPOSE),
OPCOUNTER(BDD_RESTRICT),
OPCOUNTER(BDD_CONSTRAIN),
OPCOUNTER(BDD_CLOSURE),
OPCOUNTER(BDD_ISBDD),
OPCOUNTER(BDD_SUPPORT),
OPCOUNTER(BDD_PATHCOUNT),
/* MTBDD operations */
OPCOUNTER(MTBDD_APPLY),
OPCOUNTER(MTBDD_UAPPLY),
OPCOUNTER(MTBDD_ABSTRACT),
OPCOUNTER(MTBDD_ITE),
OPCOUNTER(MTBDD_EQUAL_NORM),
OPCOUNTER(MTBDD_EQUAL_NORM_REL),
OPCOUNTER(MTBDD_LEQ),
OPCOUNTER(MTBDD_LESS),
OPCOUNTER(MTBDD_GEQ),
OPCOUNTER(MTBDD_GREATER),
OPCOUNTER(MTBDD_AND_ABSTRACT_PLUS),
OPCOUNTER(MTBDD_AND_ABSTRACT_MAX),
OPCOUNTER(MTBDD_COMPOSE),
OPCOUNTER(MTBDD_MINIMUM),
OPCOUNTER(MTBDD_MAXIMUM),
OPCOUNTER(MTBDD_EVAL_COMPOSE),
/* LDD operations */
OPCOUNTER(LDD_UNION),
OPCOUNTER(LDD_MINUS),
OPCOUNTER(LDD_INTERSECT),
OPCOUNTER(LDD_RELPROD),
OPCOUNTER(LDD_RELPREV),
OPCOUNTER(LDD_PROJECT),
OPCOUNTER(LDD_JOIN),
OPCOUNTER(LDD_MATCH),
OPCOUNTER(LDD_SATCOUNT),
OPCOUNTER(LDD_SATCOUNTL),
OPCOUNTER(LDD_ZIP),
OPCOUNTER(LDD_RELPROD_UNION),
OPCOUNTER(LDD_PROJECT_MINUS),
/* Other counters */
SYLVAN_GC_COUNT,
LLMSSET_LOOKUP,
SYLVAN_GC_COUNT,
SYLVAN_COUNTER_COUNTER
} Sylvan_Counters;
@ -127,40 +99,47 @@ typedef enum
SYLVAN_TIMER_COUNTER
} Sylvan_Timers;
typedef struct
{
uint64_t counters[SYLVAN_COUNTER_COUNTER];
/* the timers are in ns */
uint64_t timers[SYLVAN_TIMER_COUNTER];
/* startstop is for internal use */
uint64_t timers_startstop[SYLVAN_TIMER_COUNTER];
} sylvan_stats_t;
/**
* Initialize stats system (done by sylvan_init_package)
*/
VOID_TASK_DECL_0(sylvan_stats_init);
#define sylvan_stats_init() CALL(sylvan_stats_init)
VOID_TASK_DECL_0(sylvan_stats_init)
/**
* Reset all counters (for statistics)
*/
VOID_TASK_DECL_0(sylvan_stats_reset);
#define sylvan_stats_reset() CALL(sylvan_stats_reset)
VOID_TASK_DECL_0(sylvan_stats_reset)
/**
* Obtain current counts (this stops the world during counting)
*/
VOID_TASK_DECL_1(sylvan_stats_snapshot, sylvan_stats_t*);
#define sylvan_stats_snapshot(target) CALL(sylvan_stats_snapshot, target)
/**
* Write statistic report to file (stdout, stderr, etc)
*/
void sylvan_stats_report(FILE* target, int color);
void sylvan_stats_report(FILE* target);
#if SYLVAN_STATS
/* Infrastructure for internal markings */
typedef struct
{
uint64_t counters[SYLVAN_COUNTER_COUNTER];
uint64_t timers[SYLVAN_TIMER_COUNTER];
uint64_t timers_startstop[SYLVAN_TIMER_COUNTER];
} sylvan_stats_t;
#ifdef __MACH__
#include <mach/mach_time.h>
#define getabstime() mach_absolute_time()
#else
#include <time.h>
static uint64_t
getabstime()
getabstime(void)
{
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
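
The OPCOUNTER macro above gives every operation three adjacent enum values (call count, cache put, cache hit), which is what lets the type-2 rows in sylvan_stats.c read counters[id], counters[id+1] and counters[id+2]. A small illustration with a hypothetical snapshot (not part of the header):

#include <inttypes.h>
#include <stdio.h>
#include <sylvan.h>

static void print_and_counters(const sylvan_stats_t *s)
{
    /* OPCOUNTER(BDD_AND) expands to BDD_AND, BDD_AND_CACHEDPUT, BDD_AND_CACHED */
    printf("BDD and: %" PRIu64 " calls, %" PRIu64 " cache puts, %" PRIu64 " cache hits\n",
           s->counters[BDD_AND],
           s->counters[BDD_AND + 1],   /* == BDD_AND_CACHEDPUT */
           s->counters[BDD_AND + 2]);  /* == BDD_AND_CACHED    */
}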

42
resources/3rdparty/sylvan/src/sylvan_storm_rational_function.c

@ -10,6 +10,8 @@
#include <sylvan.h>
#include <sylvan_common.h>
#include <sylvan_cache.h>
#include <sylvan_int.h>
#include <sylvan_mtbdd_int.h>
#include <sylvan_storm_rational_function.h>
@ -27,6 +29,8 @@ int depth = 0;
#define LOG_O(funcName)
#endif
uint32_t srf_type;
/**
* helper function for hash
*/
@ -115,11 +119,13 @@ sylvan_storm_rational_function_destroy(uint64_t val)
LOG_O("i-destroy")
}
static uint32_t sylvan_storm_rational_function_type;
static char*
sylvan_storm_rational_function_to_str(int comp, uint64_t val, char *buf, size_t buflen)
{
(void)comp;
return storm_rational_function_to_str((storm_rational_function_ptr)(size_t)val, buf, buflen);
}
static uint64_t CACHE_MTBDD_AND_EXISTS_RF;
static uint64_t CACHE_MTBDD_MINIMUM_RF;
static uint64_t CACHE_MTBDD_MAXIMUM_RF;
/**
* Initialize storm::RationalFunction custom leaves
@ -127,21 +133,19 @@ static uint64_t CACHE_MTBDD_MAXIMUM_RF;
void
sylvan_storm_rational_function_init()
{
/* Register custom leaf 3 */
sylvan_storm_rational_function_type = mtbdd_register_custom_leaf(sylvan_storm_rational_function_hash, sylvan_storm_rational_function_equals, sylvan_storm_rational_function_create, sylvan_storm_rational_function_destroy);
if (SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID != sylvan_storm_rational_function_type) {
printf("ERROR - ERROR - ERROR\nThe Sylvan Type ID is NOT correct.\nIt was assumed to be %u, but it is actually %u!\nYou NEED to fix this by changing the macro \"SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID\" and recompiling Storm!\n\n", SYLVAN_STORM_RATIONAL_FUNCTION_TYPE_ID, sylvan_storm_rational_function_type);
assert(0);
}
CACHE_MTBDD_AND_EXISTS_RF = cache_next_opid();
CACHE_MTBDD_MINIMUM_RF = cache_next_opid();
CACHE_MTBDD_MAXIMUM_RF = cache_next_opid();
/* Register custom leaf */
srf_type = sylvan_mt_create_type();
sylvan_mt_set_hash(srf_type, sylvan_storm_rational_function_hash);
sylvan_mt_set_equals(srf_type, sylvan_storm_rational_function_equals);
sylvan_mt_set_create(srf_type, sylvan_storm_rational_function_create);
sylvan_mt_set_destroy(srf_type, sylvan_storm_rational_function_destroy);
sylvan_mt_set_to_str(srf_type, sylvan_storm_rational_function_to_str);
// sylvan_mt_set_write_binary(srf_type, gmp_write_binary);
// sylvan_mt_set_read_binary(srf_type, gmp_read_binary);
}
uint32_t sylvan_storm_rational_function_get_type() {
return sylvan_storm_rational_function_type;
return srf_type;
}
/**
@ -159,7 +163,7 @@ mtbdd_storm_rational_function(storm_rational_function_ptr val)
printf(")\n");
#endif
MTBDD result = mtbdd_makeleaf(sylvan_storm_rational_function_type, terminalValue);
MTBDD result = mtbdd_makeleaf(srf_type, terminalValue);
LOG_O("i-mtbdd_")
return result;
@ -685,7 +689,7 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_function_op_replace_leaves, MTBDD, dd,
/* Compute result for leaf */
if (mtbdd_isleaf(dd)) {
if (mtbdd_gettype(dd) != sylvan_storm_rational_function_type) {
if (mtbdd_gettype(dd) != srf_type) {
assert(0);
}
@ -706,7 +710,7 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_function_op_to_double, MTBDD, dd, size_
/* Compute result for leaf */
if (mtbdd_isleaf(dd)) {
if (mtbdd_gettype(dd) != sylvan_storm_rational_function_type) {
if (mtbdd_gettype(dd) != srf_type) {
printf("Can not convert to double, this has type %u!\n", mtbdd_gettype(dd));
assert(0);
}
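
The registration above replaces the old mtbdd_register_custom_leaf call with the new sylvan_mt_* API. A sketch of how a leaf of the registered type is then created and inspected (assuming sylvan_storm_rational_function_init() has run; the wrapped pointer and function name are illustrative):

#include <assert.h>
#include <sylvan.h>
#include <sylvan_storm_rational_function.h>

MTBDD wrap_rf_pointer(void *rf_ptr)
{
    uint32_t type = sylvan_storm_rational_function_get_type();
    MTBDD leaf = mtbdd_makeleaf(type, (uint64_t)(size_t)rf_ptr);
    assert(mtbdd_isleaf(leaf) && mtbdd_gettype(leaf) == type);
    return leaf;  /* mtbdd_getvalue(leaf) yields the stored (possibly cloned) payload */
}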

87
resources/3rdparty/sylvan/src/llmsset.c → resources/3rdparty/sylvan/src/sylvan_table.c

@ -1,5 +1,6 @@
/*
* Copyright 2011-2015 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,19 +24,13 @@
#include <string.h> // memset
#include <sys/mman.h> // for mmap
#include <llmsset.h>
#include <stats.h>
#include <tls.h>
#include <sylvan_table.h>
#include <sylvan_stats.h>
#include <sylvan_tls.h>
#ifndef USE_HWLOC
#define USE_HWLOC 0
#endif
#if USE_HWLOC
#include <hwloc.h>
static hwloc_topology_t topo;
#endif
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
@ -118,7 +113,7 @@ set_custom_bucket(const llmsset_t dbs, uint64_t index, int on)
}
static int
get_custom_bucket(const llmsset_t dbs, uint64_t index)
is_custom_bucket(const llmsset_t dbs, uint64_t index)
{
uint64_t *ptr = dbs->bitmapc + (index/64);
uint64_t mask = 0x8000000000000000LL >> (index&63);
@ -203,7 +198,7 @@ llmsset_lookup2(const llmsset_t dbs, uint64_t a, uint64_t b, int* created, const
if (hash == (v & MASK_HASH)) {
uint64_t d_idx = v & MASK_INDEX;
uint64_t *d_ptr = ((uint64_t*)dbs->data) + 2*d_idx;
if (custom) {
if (custom && is_custom_bucket(dbs, d_idx)) {
if (dbs->equals_cb(a, b, d_ptr[0], d_ptr[1])) {
if (cidx != 0) {
dbs->destroy_cb(a, b);
@ -253,7 +248,7 @@ llmsset_lookupc(const llmsset_t dbs, const uint64_t a, const uint64_t b, int* cr
return llmsset_lookup2(dbs, a, b, created, 1);
}
static inline int
int
llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
{
const uint64_t * const d_ptr = ((uint64_t*)dbs->data) + 2*d_idx;
@ -261,7 +256,7 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
const uint64_t b = d_ptr[1];
uint64_t hash_rehash = 14695981039346656037LLU;
const int custom = get_custom_bucket(dbs, d_idx) ? 1 : 0;
const int custom = is_custom_bucket(dbs, d_idx) ? 1 : 0;
if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash);
else hash_rehash = llmsset_hash(a, b, hash_rehash);
const uint64_t new_v = (hash_rehash & MASK_HASH) | d_idx;
@ -281,7 +276,11 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
// find next idx on probe sequence
idx = (idx & CL_MASK) | ((idx+1) & CL_MASK_R);
if (idx == last) {
if (++i == dbs->threshold) return 0; // failed to find empty spot in probe sequence
if (++i == *(volatile int16_t*)&dbs->threshold) {
// failed to find empty spot in probe sequence
// solution: increase probe sequence length...
__sync_fetch_and_add(&dbs->threshold, 1);
}
// go to next cache line in probe sequence
if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash);
@ -299,10 +298,8 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
llmsset_t
llmsset_create(size_t initial_size, size_t max_size)
{
#if USE_HWLOC
hwloc_topology_init(&topo);
hwloc_topology_load(topo);
#endif
llmsset_t dbs = NULL;
if (posix_memalign((void**)&dbs, LINE_SIZE, sizeof(struct llmsset)) != 0) {
@ -362,13 +359,11 @@ llmsset_create(size_t initial_size, size_t max_size)
madvise(dbs->table, dbs->max_size * 8, MADV_RANDOM);
#endif
#if USE_HWLOC
hwloc_set_area_membind(topo, dbs->table, dbs->max_size * 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
hwloc_set_area_membind(topo, dbs->data, dbs->max_size * 16, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
hwloc_set_area_membind(topo, dbs->bitmapc, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
#endif
// forbid first two positions (index 0 and 1)
dbs->bitmap2[0] = 0xc000000000000000LL;
@ -402,31 +397,20 @@ llmsset_free(llmsset_t dbs)
VOID_TASK_IMPL_1(llmsset_clear, llmsset_t, dbs)
{
// just reallocate...
if (mmap(dbs->table, dbs->max_size * 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
#if defined(madvise) && defined(MADV_RANDOM)
madvise(dbs->table, sizeof(uint64_t[dbs->max_size]), MADV_RANDOM);
#endif
#if USE_HWLOC
hwloc_set_area_membind(topo, dbs->table, sizeof(uint64_t[dbs->max_size]), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
#endif
} else {
// reallocate failed... expensive fallback
memset(dbs->table, 0, dbs->max_size * 8);
}
CALL(llmsset_clear_data, dbs);
CALL(llmsset_clear_hashes, dbs);
}
VOID_TASK_IMPL_1(llmsset_clear_data, llmsset_t, dbs)
{
if (mmap(dbs->bitmap1, dbs->max_size / (512*8), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
#if USE_HWLOC
hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
#endif
} else {
memset(dbs->bitmap1, 0, dbs->max_size / (512*8));
}
if (mmap(dbs->bitmap2, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
#if USE_HWLOC
hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
#endif
} else {
memset(dbs->bitmap2, 0, dbs->max_size / 8);
}
@ -437,6 +421,20 @@ VOID_TASK_IMPL_1(llmsset_clear, llmsset_t, dbs)
TOGETHER(llmsset_reset_region);
}
VOID_TASK_IMPL_1(llmsset_clear_hashes, llmsset_t, dbs)
{
// just reallocate...
if (mmap(dbs->table, dbs->max_size * 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
#if defined(madvise) && defined(MADV_RANDOM)
madvise(dbs->table, sizeof(uint64_t[dbs->max_size]), MADV_RANDOM);
#endif
hwloc_set_area_membind(topo, dbs->table, sizeof(uint64_t[dbs->max_size]), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
} else {
// reallocate failed... expensive fallback
memset(dbs->table, 0, dbs->max_size * 8);
}
}
int
llmsset_is_marked(const llmsset_t dbs, uint64_t index)
{
@ -457,30 +455,33 @@ llmsset_mark(const llmsset_t dbs, uint64_t index)
}
}
VOID_TASK_3(llmsset_rehash_par, llmsset_t, dbs, size_t, first, size_t, count)
TASK_3(int, llmsset_rehash_par, llmsset_t, dbs, size_t, first, size_t, count)
{
if (count > 512) {
size_t split = count/2;
SPAWN(llmsset_rehash_par, dbs, first, split);
CALL(llmsset_rehash_par, dbs, first + split, count - split);
SYNC(llmsset_rehash_par);
SPAWN(llmsset_rehash_par, dbs, first, count/2);
int bad = CALL(llmsset_rehash_par, dbs, first + count/2, count - count/2);
return bad + SYNC(llmsset_rehash_par);
} else {
int bad = 0;
uint64_t *ptr = dbs->bitmap2 + (first / 64);
uint64_t mask = 0x8000000000000000LL >> (first & 63);
for (size_t k=0; k<count; k++) {
if (*ptr & mask) llmsset_rehash_bucket(dbs, first+k);
if (*ptr & mask) {
if (llmsset_rehash_bucket(dbs, first+k) == 0) bad++;
}
mask >>= 1;
if (mask == 0) {
ptr++;
mask = 0x8000000000000000LL;
}
}
return bad;
}
}
VOID_TASK_IMPL_1(llmsset_rehash, llmsset_t, dbs)
TASK_IMPL_1(int, llmsset_rehash, llmsset_t, dbs)
{
CALL(llmsset_rehash_par, dbs, 0, dbs->table_size);
return CALL(llmsset_rehash_par, dbs, 0, dbs->table_size);
}
TASK_3(size_t, llmsset_count_marked_par, llmsset_t, dbs, size_t, first, size_t, count)

23
resources/3rdparty/sylvan/src/llmsset.h → resources/3rdparty/sylvan/src/sylvan_table.h

@ -1,5 +1,6 @@
/*
* Copyright 2011-2014 Formal Methods and Tools, University of Twente
* Copyright 2011-2016 Formal Methods and Tools, University of Twente
* Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,6 +16,7 @@
*/
#include <sylvan_config.h>
#include <stdint.h>
#include <unistd.h>
@ -38,6 +40,10 @@ extern "C" {
* The set has support for stop-the-world garbage collection.
* Methods llmsset_clear, llmsset_mark and llmsset_rehash implement garbage collection.
* During their execution, llmsset_lookup is not allowed.
*
* WARNING: this table was originally designed so that multiple instances could coexist.
* However, that is currently not compatible with the thread-local storage used here,
* so do not create more than one table.
*/
/**
@ -153,6 +159,12 @@ uint64_t llmsset_lookupc(const llmsset_t dbs, const uint64_t a, const uint64_t b
VOID_TASK_DECL_1(llmsset_clear, llmsset_t);
#define llmsset_clear(dbs) CALL(llmsset_clear, dbs)
VOID_TASK_DECL_1(llmsset_clear_data, llmsset_t);
#define llmsset_clear_data(dbs) CALL(llmsset_clear_data, dbs)
VOID_TASK_DECL_1(llmsset_clear_hashes, llmsset_t);
#define llmsset_clear_hashes(dbs) CALL(llmsset_clear_hashes, dbs)
/**
* Check if a certain data bucket is marked (in use).
*/
@ -167,10 +179,17 @@ int llmsset_mark(const llmsset_t dbs, uint64_t index);
/**
* Rehash all marked buckets.
* Returns 0 if successful, or the number of buckets not rehashed if not.
*/
VOID_TASK_DECL_1(llmsset_rehash, llmsset_t);
TASK_DECL_1(int, llmsset_rehash, llmsset_t);
#define llmsset_rehash(dbs) CALL(llmsset_rehash, dbs)
/**
* Rehash a single bucket.
* Returns 1 if the bucket was rehashed successfully, or 0 if not.
*/
int llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx);
/**
* Retrieve number of marked buckets.
*/
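
A condensed sketch of the clear/mark/rehash protocol described at the top of this header (hypothetical task name; the real sequence is driven by Sylvan's garbage collector, and sylvan_int.h is assumed to pull in this table interface):

#include <stdio.h>
#include <sylvan_int.h>

VOID_TASK_1(gc_sketch, llmsset_t, dbs)
{
    llmsset_clear(dbs);                /* drop hashes and data marks               */
    /* ... mark every live bucket with llmsset_mark(dbs, index) ... */
    int failed = llmsset_rehash(dbs);  /* now returns the number of failed buckets */
    if (failed != 0) {
        fprintf(stderr, "gc: %d bucket(s) could not be rehashed\n", failed);
    }
}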

0
resources/3rdparty/sylvan/src/tls.h → resources/3rdparty/sylvan/src/sylvan_tls.h

10
resources/3rdparty/sylvan/sylvan.pc.cmake.in

@ -0,0 +1,10 @@
libdir=@CMAKE_INSTALL_FULL_LIBDIR@
includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
Name: Sylvan
Description: @PROJECT_DESCRIPTION@
URL: @PROJECT_URL@
Version: @PROJECT_VERSION@
Cflags: -I${includedir}
Libs: -L${libdir} -lsylvan -lgmp -lpthread -lm
Requires: hwloc

5
resources/3rdparty/sylvan/test/.gitignore

@ -1,5 +0,0 @@
test
cmake_install.cmake
CMakeFiles
*.o
.libs

7
resources/3rdparty/sylvan/test/CMakeLists.txt

@ -1,10 +1,3 @@
cmake_minimum_required(VERSION 2.6)
project(sylvan C CXX)
enable_testing()
add_executable(sylvan_test main.c)
target_link_libraries(sylvan_test sylvan)
add_executable(test_basic test_basic.c)
target_link_libraries(test_basic sylvan)

350
resources/3rdparty/sylvan/test/main.c

@ -1,350 +0,0 @@
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <sys/types.h>
#include <sys/time.h>
#include <inttypes.h>
#include <assert.h>
#include "test_assert.h"
#include "llmsset.h"
#include "sylvan.h"
#define BLACK "\33[22;30m"
#define GRAY "\33[01;30m"
#define RED "\33[22;31m"
#define LRED "\33[01;31m"
#define GREEN "\33[22;32m"
#define LGREEN "\33[01;32m"
#define BLUE "\33[22;34m"
#define LBLUE "\33[01;34m"
#define BROWN "\33[22;33m"
#define YELLOW "\33[01;33m"
#define CYAN "\33[22;36m"
#define LCYAN "\33[22;36m"
#define MAGENTA "\33[22;35m"
#define LMAGENTA "\33[01;35m"
#define NC "\33[0m"
#define BOLD "\33[1m"
#define ULINE "\33[4m" //underline
#define BLINK "\33[5m"
#define INVERT "\33[7m"
__thread uint64_t seed = 1;
uint64_t
xorshift_rand(void)
{
uint64_t x = seed;
if (seed == 0) seed = rand();
x ^= x >> 12;
x ^= x << 25;
x ^= x >> 27;
seed = x;
return x * 2685821657736338717LL;
}
double
uniform_deviate(uint64_t seed)
{
return seed * (1.0 / (0xffffffffffffffffL + 1.0));
}
int
rng(int low, int high)
{
return low + uniform_deviate(xorshift_rand()) * (high-low);
}
static inline BDD
make_random(int i, int j)
{
if (i == j) return rng(0, 2) ? sylvan_true : sylvan_false;
BDD yes = make_random(i+1, j);
BDD no = make_random(i+1, j);
BDD result = sylvan_invalid;
switch(rng(0, 4)) {
case 0:
result = no;
sylvan_deref(yes);
break;
case 1:
result = yes;
sylvan_deref(no);
break;
case 2:
result = sylvan_ref(sylvan_makenode(i, yes, no));
sylvan_deref(no);
sylvan_deref(yes);
break;
case 3:
default:
result = sylvan_ref(sylvan_makenode(i, no, yes));
sylvan_deref(no);
sylvan_deref(yes);
break;
}
return result;
}
/** GC testing */
VOID_TASK_2(gctest_fill, int, levels, int, width)
{
if (levels > 1) {
int i;
for (i=0; i<width; i++) { SPAWN(gctest_fill, levels-1, width); }
for (i=0; i<width; i++) { SYNC(gctest_fill); }
} else {
sylvan_deref(make_random(0, 10));
}
}
void report_table()
{
llmsset_t __sylvan_get_internal_data();
llmsset_t tbl = __sylvan_get_internal_data();
LACE_ME;
size_t filled = llmsset_count_marked(tbl);
size_t total = llmsset_get_size(tbl);
printf("done, table: %0.1f%% full (%zu nodes).\n", 100.0*(double)filled/total, filled);
}
int test_gc(int threads)
{
LACE_ME;
int N_canaries = 16;
BDD canaries[N_canaries];
char* hashes[N_canaries];
char* hashes2[N_canaries];
int i,j;
for (i=0;i<N_canaries;i++) {
canaries[i] = make_random(0, 10);
hashes[i] = (char*)malloc(80);
hashes2[i] = (char*)malloc(80);
sylvan_getsha(canaries[i], hashes[i]);
sylvan_test_isbdd(canaries[i]);
}
test_assert(sylvan_count_refs() == (size_t)N_canaries);
for (j=0;j<10*threads;j++) {
CALL(gctest_fill, 6, 5);
for (i=0;i<N_canaries;i++) {
sylvan_test_isbdd(canaries[i]);
sylvan_getsha(canaries[i], hashes2[i]);
test_assert(strcmp(hashes[i], hashes2[i]) == 0);
}
}
test_assert(sylvan_count_refs() == (size_t)N_canaries);
return 0;
}
TASK_2(MDD, random_ldd, int, depth, int, count)
{
uint32_t n[depth];
MDD result = lddmc_false;
int i, j;
for (i=0; i<count; i++) {
for (j=0; j<depth; j++) {
n[j] = rng(0, 10);
}
//MDD old = result;
result = lddmc_union_cube(result, n, depth);
//assert(lddmc_cube(n, depth) != lddmc_true);
//assert(result == lddmc_union(old, lddmc_cube(n, depth)));
//assert(result != lddmc_true);
}
return result;
}
VOID_TASK_3(enumer, uint32_t*, values, size_t, count, void*, context)
{
return;
(void)values;
(void)count;
(void)context;
}
int
test_lddmc()
{
LACE_ME;
sylvan_init_package(1LL<<24, 1LL<<24, 1LL<<24, 1LL<<24);
sylvan_init_ldd();
sylvan_gc_disable();
MDD a, b, c;
// Test union, union_cube, member_cube, satcount
a = lddmc_cube((uint32_t[]){1,2,3,5,4,3}, 6);
a = lddmc_union(a,lddmc_cube((uint32_t[]){2,2,3,5,4,3}, 6));
c = b = a = lddmc_union_cube(a, (uint32_t[]){2,2,3,5,4,2}, 6);
a = lddmc_union_cube(a, (uint32_t[]){2,3,3,5,4,3}, 6);
a = lddmc_union(a, lddmc_cube((uint32_t[]){2,3,4,4,4,3}, 6));
test_assert(lddmc_member_cube(a, (uint32_t[]){2,3,3,5,4,3}, 6));
test_assert(lddmc_member_cube(a, (uint32_t[]){1,2,3,5,4,3}, 6));
test_assert(lddmc_member_cube(a, (uint32_t[]){2,2,3,5,4,3}, 6));
test_assert(lddmc_member_cube(a, (uint32_t[]){2,2,3,5,4,2}, 6));
test_assert(lddmc_satcount(a) == 5);
lddmc_sat_all_par(a, TASK(enumer), NULL);
// Test minus, member_cube, satcount
a = lddmc_minus(a, b);
test_assert(lddmc_member_cube(a, (uint32_t[]){2,3,3,5,4,3}, 6));
test_assert(!lddmc_member_cube(a, (uint32_t[]){1,2,3,5,4,3}, 6));
test_assert(!lddmc_member_cube(a, (uint32_t[]){2,2,3,5,4,3}, 6));
test_assert(!lddmc_member_cube(a, (uint32_t[]){2,2,3,5,4,2}, 6));
test_assert(lddmc_member_cube(a, (uint32_t[]){2,3,4,4,4,3}, 6));
test_assert(lddmc_satcount(a) == 2);
// Test intersect
test_assert(lddmc_satcount(lddmc_intersect(a,b)) == 0);
test_assert(lddmc_intersect(b,c)==lddmc_intersect(c,b));
test_assert(lddmc_intersect(b,c)==c);
// Test project, project_minus
a = lddmc_cube((uint32_t[]){1,2,3,5,4,3}, 6);
a = lddmc_union_cube(a, (uint32_t[]){2,2,3,5,4,3}, 6);
a = lddmc_union_cube(a, (uint32_t[]){2,2,3,5,4,2}, 6);
a = lddmc_union_cube(a, (uint32_t[]){2,3,3,5,4,3}, 6);
a = lddmc_union_cube(a, (uint32_t[]){2,3,4,4,4,3}, 6);
// a = {<1,2,3,5,4,3>,<2,2,3,5,4,3>,<2,2,3,5,4,2>,<2,3,3,5,4,3>,<2,3,4,4,4,3>}
MDD proj = lddmc_cube((uint32_t[]){1,1,-2},3);
b = lddmc_cube((uint32_t[]){1,2}, 2);
b = lddmc_union_cube(b, (uint32_t[]){2,2}, 2);
b = lddmc_union_cube(b, (uint32_t[]){2,3}, 2);
test_assert(lddmc_project(a, proj)==b);
test_assert(lddmc_project_minus(a, proj, lddmc_false)==b);
test_assert(lddmc_project_minus(a, proj, b)==lddmc_false);
// Test relprod
a = lddmc_cube((uint32_t[]){1},1);
b = lddmc_cube((uint32_t[]){1,2},2);
proj = lddmc_cube((uint32_t[]){1,2,-1}, 3);
test_assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj));
test_assert(lddmc_cube((uint32_t[]){3},1) == lddmc_relprod(a, lddmc_cube((uint32_t[]){1,3},2), proj));
a = lddmc_union_cube(a, (uint32_t[]){2},1);
test_assert(lddmc_satcount(a) == 2);
test_assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj));
b = lddmc_union_cube(b, (uint32_t[]){2,2},2);
test_assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj));
b = lddmc_union_cube(b, (uint32_t[]){2,3},2);
test_assert(lddmc_satcount(lddmc_relprod(a, b, proj)) == 2);
test_assert(lddmc_union(lddmc_cube((uint32_t[]){2},1),lddmc_cube((uint32_t[]){3},1)) == lddmc_relprod(a, b, proj));
// Test relprev
MDD universe = lddmc_union(lddmc_cube((uint32_t[]){1},1), lddmc_cube((uint32_t[]){2},1));
a = lddmc_cube((uint32_t[]){2},1);
b = lddmc_cube((uint32_t[]){1,2},2);
test_assert(lddmc_cube((uint32_t[]){1},1) == lddmc_relprev(a, b, proj, universe));
test_assert(lddmc_cube((uint32_t[]){1},1) == lddmc_relprev(a, b, proj, lddmc_cube((uint32_t[]){1},1)));
a = lddmc_cube((uint32_t[]){1},1);
MDD next = lddmc_relprod(a, b, proj);
test_assert(lddmc_relprev(next, b, proj, a) == a);
// Random tests
MDD rnd1, rnd2;
int i;
for (i=0; i<200; i++) {
int depth = rng(1, 20);
rnd1 = CALL(random_ldd, depth, rng(0, 30));
rnd2 = CALL(random_ldd, depth, rng(0, 30));
test_assert(rnd1 != lddmc_true);
test_assert(rnd2 != lddmc_true);
test_assert(lddmc_intersect(rnd1,rnd2) == lddmc_intersect(rnd2,rnd1));
test_assert(lddmc_union(rnd1,rnd2) == lddmc_union(rnd2,rnd1));
MDD tmp = lddmc_union(lddmc_minus(rnd1, rnd2), lddmc_minus(rnd2, rnd1));
test_assert(lddmc_intersect(tmp, lddmc_intersect(rnd1, rnd2)) == lddmc_false);
test_assert(lddmc_union(tmp, lddmc_intersect(rnd1, rnd2)) == lddmc_union(rnd1, rnd2));
test_assert(lddmc_minus(rnd1,rnd2) == lddmc_minus(rnd1, lddmc_intersect(rnd1,rnd2)));
}
// Test file stuff
for (i=0; i<10; i++) {
FILE *f = fopen("__lddmc_test_bdd", "w+");
int N = 20;
MDD rnd[N];
size_t a[N];
char sha[N][65];
int j;
for (j=0;j<N;j++) rnd[j] = CALL(random_ldd, 5, 500);
for (j=0;j<N;j++) lddmc_getsha(rnd[j], sha[j]);
for (j=0;j<N;j++) { a[j] = lddmc_serialize_add(rnd[j]); lddmc_serialize_tofile(f); }
for (j=0;j<N;j++) test_assert(a[j] == lddmc_serialize_get(rnd[j]));
for (j=0;j<N;j++) test_assert(rnd[j] == lddmc_serialize_get_reversed(a[j]));
fseek(f, 0, SEEK_SET);
lddmc_serialize_reset();
sylvan_quit();
sylvan_init_package(1LL<<24, 1LL<<24, 1LL<<24, 1LL<<24);
sylvan_init_ldd();
sylvan_gc_disable();
for (j=0;j<N;j++) lddmc_serialize_fromfile(f);
fclose(f);
unlink("__lddmc_test_bdd");
for (j=0;j<N;j++) rnd[j] = lddmc_serialize_get_reversed(a[j]);
char sha2[N][65];
for (j=0;j<N;j++) lddmc_getsha(rnd[j], sha2[j]);
for (j=0;j<N;j++) test_assert(memcmp(sha[j], sha2[j], 64)==0);
lddmc_serialize_reset();
}
sylvan_quit();
return 0;
}
int runtests(int threads)
{
lace_init(threads, 100000);
lace_startup(0, NULL, NULL);
printf(BOLD "Testing LDDMC... ");
fflush(stdout);
if (test_lddmc()) return 1;
printf(LGREEN "success" NC "!\n");
printf(NC "Testing garbage collection... ");
fflush(stdout);
sylvan_init_package(1LL<<14, 1LL<<14, 1LL<<20, 1LL<<20);
sylvan_init_bdd(1);
sylvan_gc_enable();
if (test_gc(threads)) return 1;
sylvan_quit();
printf(LGREEN "success" NC "!\n");
lace_exit();
return 0;
}
int main(int argc, char **argv)
{
int threads = 2;
if (argc > 1) sscanf(argv[1], "%d", &threads);
if (runtests(threads)) exit(1);
printf(NC);
exit(0);
}

0
resources/3rdparty/sylvan/test/test_assert.h

260
resources/3rdparty/sylvan/test/test_basic.c

@ -9,9 +9,9 @@
#include <sys/time.h>
#include <inttypes.h>
#include "llmsset.h"
#include "sylvan.h"
#include "test_assert.h"
#include "sylvan_int.h"
__thread uint64_t seed = 1;
@ -39,6 +39,69 @@ rng(int low, int high)
return low + uniform_deviate(xorshift_rand()) * (high-low);
}
static int
test_cache()
{
test_assert(cache_getused() == 0);
/**
* Test cache for large number of random entries
*/
size_t number_add = 4000000;
uint64_t *arr = (uint64_t*)malloc(sizeof(uint64_t)*4*number_add);
for (size_t i=0; i<number_add*4; i++) arr[i] = xorshift_rand();
for (size_t i=0; i<number_add; i++) {
test_assert(cache_put(arr[4*i], arr[4*i+1], arr[4*i+2], arr[4*i+3]));
uint64_t val;
int res = cache_get(arr[4*i], arr[4*i+1], arr[4*i+2], &val);
test_assert(res == 1);
test_assert(val == arr[4*i+3]);
}
size_t count = 0;
for (size_t i=0; i<number_add; i++) {
uint64_t val;
int res = cache_get(arr[4*i], arr[4*i+1], arr[4*i+2], &val);
test_assert(res == 0 || val == arr[4*i+3]);
if (res) count++;
}
test_assert(count == cache_getused());
/**
* Now also test for double entries
*/
for (size_t i=0; i<number_add/2; i++) {
test_assert(cache_put6(arr[8*i], arr[8*i+1], arr[8*i+2], arr[8*i+3], arr[8*i+4], arr[8*i+5], arr[8*i+6], arr[8*i+7]));
uint64_t val1, val2;
int res = cache_get6(arr[8*i], arr[8*i+1], arr[8*i+2], arr[8*i+3], arr[8*i+4], arr[8*i+5], &val1, &val2);
test_assert(res == 1);
test_assert(val1 == arr[8*i+6]);
test_assert(val2 == arr[8*i+7]);
}
for (size_t i=0; i<number_add/2; i++) {
uint64_t val1, val2;
int res = cache_get6(arr[8*i], arr[8*i+1], arr[8*i+2], arr[8*i+3], arr[8*i+4], arr[8*i+5], &val1, &val2);
test_assert(res == 0 || (val1 == arr[8*i+6] && val2 == arr[8*i+7]));
}
/**
* And test that single entries are not corrupted
*/
for (size_t i=0; i<number_add; i++) {
uint64_t val;
int res = cache_get(arr[4*i], arr[4*i+1], arr[4*i+2], &val);
test_assert(res == 0 || val == arr[4*i+3]);
}
/**
* TODO: multithreaded test
*/
free(arr);
return 0;
}
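/* A minimal sketch of the lookup/insert pattern the test above exercises
 * (key1 conventionally also encodes the operation id; `compute` is hypothetical):
 *
 *     uint64_t result;
 *     if (cache_get(key1, key2, key3, &result) == 0) {
 *         result = compute(key1, key2, key3);
 *         cache_put(key1, key2, key3, result);
 *     }
 *
 * cache_put6/cache_get6 follow the same shape with six keys and two result
 * words (the "double entries" tested above). */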
static inline BDD
make_random(int i, int j)
{
@ -73,26 +136,42 @@ make_random(int i, int j)
return result;
}
static MDD
make_random_ldd_set(int depth, int maxvalue, int elements)
{
uint32_t values[depth];
MDD result = lddmc_false; // empty set
for (int i=0; i<elements; i++) {
lddmc_refs_push(result);
for (int j=0; j<depth; j++) {
values[j] = rng(0, maxvalue);
}
result = lddmc_union_cube(result, values, depth);
lddmc_refs_pop(1);
}
return result;
}
int testEqual(BDD a, BDD b)
{
if (a == b) return 1;
if (a == sylvan_invalid) {
fprintf(stderr, "a is invalid!\n");
return 0;
}
if (b == sylvan_invalid) {
fprintf(stderr, "b is invalid!\n");
return 0;
}
fprintf(stderr, "a and b are not equal!\n");
sylvan_fprint(stderr, a);fprintf(stderr, "\n");
sylvan_fprint(stderr, b);fprintf(stderr, "\n");
return 0;
}
int
@ -110,7 +189,7 @@ int
test_cube()
{
LACE_ME;
BDDSET vars = sylvan_set_fromarray(((BDDVAR[]){1,2,3,4,6,8}), 6);
const BDDSET vars = sylvan_set_fromarray(((BDDVAR[]){1,2,3,4,6,8}), 6);
uint8_t cube[6], check[6];
int i, j;
@ -120,6 +199,10 @@ test_cube()
sylvan_sat_one(bdd, vars, check);
for (i=0; i<6;i++) test_assert(cube[i] == check[i] || (cube[i] == 2 && check[i] == 0));
BDD picked_single = sylvan_pick_single_cube(bdd, vars);
test_assert(testEqual(sylvan_and(picked_single, bdd), picked_single));
assert(sylvan_satcount(picked_single, vars)==1);
BDD picked = sylvan_pick_cube(bdd);
test_assert(testEqual(sylvan_and(picked, bdd), picked));
@ -142,6 +225,34 @@ test_cube()
picked = sylvan_pick_cube(bdd);
test_assert(testEqual(sylvan_and(picked, bdd), picked));
}
// simple test for mtbdd_enum_all
uint8_t arr[6];
MTBDD leaf = mtbdd_enum_all_first(mtbdd_true, vars, arr, NULL);
test_assert(leaf == mtbdd_true);
test_assert(mtbdd_enum_all_first(mtbdd_true, vars, arr, NULL) == mtbdd_true);
test_assert(arr[0] == 0 && arr[1] == 0 && arr[2] == 0 && arr[3] == 0 && arr[4] == 0 && arr[5] == 0);
test_assert(mtbdd_enum_all_next(mtbdd_true, vars, arr, NULL) == mtbdd_true);
test_assert(arr[0] == 0 && arr[1] == 0 && arr[2] == 0 && arr[3] == 0 && arr[4] == 0 && arr[5] == 1);
test_assert(mtbdd_enum_all_next(mtbdd_true, vars, arr, NULL) == mtbdd_true);
test_assert(arr[0] == 0 && arr[1] == 0 && arr[2] == 0 && arr[3] == 0 && arr[4] == 1 && arr[5] == 0);
test_assert(mtbdd_enum_all_next(mtbdd_true, vars, arr, NULL) == mtbdd_true);
test_assert(arr[0] == 0 && arr[1] == 0 && arr[2] == 0 && arr[3] == 0 && arr[4] == 1 && arr[5] == 1);
test_assert(mtbdd_enum_all_next(mtbdd_true, vars, arr, NULL) == mtbdd_true);
test_assert(arr[0] == 0 && arr[1] == 0 && arr[2] == 0 && arr[3] == 1 && arr[4] == 0 && arr[5] == 0);
test_assert(mtbdd_enum_all_next(mtbdd_true, vars, arr, NULL) == mtbdd_true);
test_assert(arr[0] == 0 && arr[1] == 0 && arr[2] == 0 && arr[3] == 1 && arr[4] == 0 && arr[5] == 1);
test_assert(mtbdd_enum_all_next(mtbdd_true, vars, arr, NULL) == mtbdd_true);
test_assert(arr[0] == 0 && arr[1] == 0 && arr[2] == 0 && arr[3] == 1 && arr[4] == 1 && arr[5] == 0);
mtbdd_enum_all_first(mtbdd_true, vars, arr, NULL);
size_t count = 1;
while (mtbdd_enum_all_next(mtbdd_true, vars, arr, NULL) != mtbdd_false) {
test_assert(count < 64);
count++;
}
test_assert(count == 64);
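/* The pattern above: mtbdd_enum_all_first fills arr with the lexicographically
 * smallest assignment of vars and mtbdd_enum_all_next advances it like a binary
 * counter, so the constant-true BDD over 6 variables yields 2^6 = 64 assignments. */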
return 0;
}
@ -296,6 +407,112 @@ test_compose()
test_assert(testEqual(sylvan_or(one, two), sylvan_compose(a_or_b, map)));
test_assert(testEqual(sylvan_and(one, two), sylvan_compose(sylvan_and(a, b), map)));
// test that composing [0:=true] on "0" yields true
map = sylvan_map_add(sylvan_map_empty(), 1, sylvan_true);
test_assert(testEqual(sylvan_compose(a, map), sylvan_true));
// test that composing [0:=false] on "0" yields false
map = sylvan_map_add(sylvan_map_empty(), 1, sylvan_false);
test_assert(testEqual(sylvan_compose(a, map), sylvan_false));
return 0;
}
int
test_ldd()
{
// very basic testing of makenode
for (int i=0; i<10; i++) {
uint32_t value = rng(0, 100);
MDD m = lddmc_makenode(value, lddmc_true, lddmc_false);
test_assert(lddmc_getvalue(m) == value);
test_assert(lddmc_getdown(m) == lddmc_true);
test_assert(lddmc_getright(m) == lddmc_false);
test_assert(lddmc_iscopy(m) == 0);
test_assert(lddmc_follow(m, value) == lddmc_true);
for (int j=0; j<100; j++) {
uint32_t other_value = rng(0, 100);
if (value != other_value) test_assert(lddmc_follow(m, other_value) == lddmc_false);
}
}
// test handling of the copy node by primitives
MDD m = lddmc_make_copynode(lddmc_true, lddmc_false);
test_assert(lddmc_iscopy(m) == 1);
test_assert(lddmc_getvalue(m) == 0);
test_assert(lddmc_getdown(m) == lddmc_true);
test_assert(lddmc_getright(m) == lddmc_false);
m = lddmc_extendnode(m, 0, lddmc_true);
test_assert(lddmc_iscopy(m) == 1);
test_assert(lddmc_getvalue(m) == 0);
test_assert(lddmc_getdown(m) == lddmc_true);
test_assert(lddmc_getright(m) != lddmc_false);
test_assert(lddmc_follow(m, 0) == lddmc_true);
test_assert(lddmc_getvalue(lddmc_getright(m)) == 0);
test_assert(lddmc_iscopy(lddmc_getright(m)) == 0);
test_assert(lddmc_makenode(0, lddmc_true, lddmc_false) == lddmc_getright(m));
LACE_ME;
// test union_cube
for (int i=0; i<100; i++) {
int depth = rng(1, 6);
int elements = rng(1, 30);
m = make_random_ldd_set(depth, 10, elements);
assert(m != lddmc_true);
assert(m != lddmc_false);
assert(lddmc_satcount(m) <= elements);
assert(lddmc_satcount(m) >= 1);
}
// test a simple transition relation
{
MDD states, rel, meta, expected;
// relation: (0,0) to (1,1)
rel = lddmc_cube((uint32_t[]){0,1,0,1}, 4);
test_assert(lddmc_satcount(rel) == 1);
// relation: (0,0) to (2,2)
rel = lddmc_union_cube(rel, (uint32_t[]){0,2,0,2}, 4);
test_assert(lddmc_satcount(rel) == 2);
// meta: read write read write
meta = lddmc_cube((uint32_t[]){1,2,1,2}, 4);
test_assert(lddmc_satcount(meta) == 1);
// initial state: (0,0)
states = lddmc_cube((uint32_t[]){0,0}, 2);
test_assert(lddmc_satcount(states) == 1);
// relprod should give two states
states = lddmc_relprod(states, rel, meta);
test_assert(lddmc_satcount(states) == 2);
// relprod should give states (1,1) and (2,2)
expected = lddmc_cube((uint32_t[]){1,1}, 2);
expected = lddmc_union_cube(expected, (uint32_t[]){2,2}, 2);
test_assert(states == expected);
// now test relprod union on the simple example
states = lddmc_cube((uint32_t[]){0,0}, 2);
states = lddmc_relprod_union(states, rel, meta, states);
test_assert(lddmc_satcount(states) == 3);
test_assert(states == lddmc_union(states, expected));
// now create transition (1,1) --> (1,1) (using copy nodes)
rel = lddmc_cube_copy((uint32_t[]){1,0,1,0}, (int[]){0,1,0,1}, 4);
states = lddmc_relprod(states, rel, meta);
// the result should be just state (1,1)
test_assert(states == lddmc_cube((uint32_t[]){1,1}, 2));
MDD statezero = lddmc_cube((uint32_t[]){0,0}, 2);
states = lddmc_union_cube(statezero, (uint32_t[]){1,1}, 2);
test_assert(lddmc_relprod_union(states, rel, meta, statezero) == states);
// now create transition (*,*) --> (*,*) (copy nodes)
rel = lddmc_cube_copy((uint32_t[]){0,0}, (int[]){1,1}, 2);
meta = lddmc_cube((uint32_t[]){4,4}, 2);
states = make_random_ldd_set(2, 10, 10);
MDD states2 = make_random_ldd_set(2, 10, 10);
test_assert(lddmc_union(states, states2) == lddmc_relprod_union(states, rel, meta, states2));
}
return 0;
}
@ -304,23 +521,30 @@ int runtests()
// we are not testing garbage collection
sylvan_gc_disable();
if (test_cache()) return 1;
if (test_bdd()) return 1;
for (int j=0;j<10;j++) if (test_cube()) return 1;
for (int j=0;j<10;j++) if (test_relprod()) return 1;
for (int j=0;j<10;j++) if (test_compose()) return 1;
for (int j=0;j<10;j++) if (test_operators()) return 1;
if (test_ldd()) return 1;
return 0;
}
int main()
{
// Standard Lace initialization with 1 worker
lace_init(1, 0);
lace_startup(0, NULL, NULL);
// Simple Sylvan initialization, also initialize BDD support
sylvan_init_package(1LL<<20, 1LL<<20, 1LL<<16, 1LL<<16);
sylvan_init_bdd(1);
// Simple Sylvan initialization, also initialize BDD, MTBDD and LDD support
sylvan_set_sizes(1LL<<20, 1LL<<20, 1LL<<16, 1LL<<16);
sylvan_init_package();
sylvan_init_bdd();
sylvan_init_mtbdd();
sylvan_init_ldd();
int res = runtests();
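
The updated main above captures the initialization change of this commit: the old four-argument sylvan_init_package call is split into sylvan_set_sizes plus argument-less init calls. Distilled into a sketch (hypothetical function names, sizes copied from the test):

#include <sylvan.h>

static void init_sylvan_sketch(void)
{
    lace_init(1, 0);                /* 1 worker, default task queue size */
    lace_startup(0, NULL, NULL);
    sylvan_set_sizes(1LL<<20, 1LL<<20, 1LL<<16, 1LL<<16);
    sylvan_init_package();
    sylvan_init_bdd();
    sylvan_init_mtbdd();
    sylvan_init_ldd();
}

static void quit_sylvan_sketch(void)
{
    sylvan_quit();
    lace_exit();
}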

13
resources/3rdparty/sylvan/test/test_cxx.cpp

@ -22,6 +22,10 @@ int runtest()
Bdd v2 = Bdd::bddVar(2);
Bdd t = v1 + v2;
Bdd u = v1;
u += v2;
test_assert(t == u);
BddMap map;
map.put(2, t);
@ -35,12 +39,13 @@ int runtest()
int main()
{
// Standard Lace initialization with 1 worker
lace_init(1, 0);
lace_startup(0, NULL, NULL);
// Simple Sylvan initialization, also initialize BDD support
sylvan_init_package(1LL<<16, 1LL<<16, 1LL<<16, 1LL<<16);
sylvan_init_bdd(1);
sylvan_set_sizes(1LL<<16, 1LL<<16, 1LL<<16, 1LL<<16);
sylvan_init_package();
sylvan_init_bdd();
int res = runtest();

2
src/storm/modelchecker/results/ExplicitQualitativeCheckResult.cpp

@ -126,7 +126,7 @@ namespace storm {
return boost::get<vector_type>(truthValues);
}
ExplicitQualitativeCheckResult::map_type const& ExplicitQualitativeCheckResult::getTruthValuesVectorMap() const {
ExplicitQualitativeCheckResult::map_type const& ExplicitQualitativeCheckResult::getTruthValuesMap() const {
return boost::get<map_type>(truthValues);
}

2
src/storm/modelchecker/results/ExplicitQualitativeCheckResult.h

@ -45,7 +45,7 @@ namespace storm {
virtual void complement() override;
vector_type const& getTruthValuesVector() const;
map_type const& getTruthValuesVectorMap() const;
map_type const& getTruthValuesMap() const;
virtual bool existsTrue() const override;

2
src/storm/solver/SymbolicEliminationLinearEquationSolver.cpp

@ -104,7 +104,7 @@ namespace storm {
matrix = matrix.swapVariables(columnHelperMetaVariablePairs);
++iterations;
std::cout << "iteration: " << iterations << std::endl;
STORM_LOG_TRACE("Completed iteration " << iterations << " of elimination process.");
}
STORM_LOG_DEBUG("Elimination completed in " << iterations << " iterations.");

1
src/storm/storage/bisimulation/BisimulationDecomposition.h

@ -135,7 +135,6 @@ namespace storm {
bool buildQuotient;
private:
boost::optional<OptimizationDirection> optimalityType;
/// A flag that indicates whether or not the state-rewards of the model are to be respected (and should

24
src/storm/storage/dd/Bdd.cpp

@ -32,20 +32,16 @@ namespace storm {
static Bdd<LibraryType> fromVector(DdManager<LibraryType> const& ddManager, std::vector<ValueType> const& explicitValues, storm::dd::Odd const& odd, std::set<storm::expressions::Variable> const& metaVariables, storm::logic::ComparisonType comparisonType, ValueType value) {
switch (comparisonType) {
case storm::logic::ComparisonType::Less:
return fromVector(ddManager, explicitValues, odd, metaVariables, std::bind(std::greater<ValueType>(), value, std::placeholders::_1));
return Bdd<LibraryType>(ddManager, InternalBdd<LibraryType>::fromVector(&ddManager.getInternalDdManager(), odd, ddManager.getSortedVariableIndices(metaVariables), [&value, &explicitValues] (uint64_t offset) { return explicitValues[offset] < value; }), metaVariables);
case storm::logic::ComparisonType::LessEqual:
return fromVector(ddManager, explicitValues, odd, metaVariables, std::bind(std::greater_equal<ValueType>(), value, std::placeholders::_1));
return Bdd<LibraryType>(ddManager, InternalBdd<LibraryType>::fromVector(&ddManager.getInternalDdManager(), odd, ddManager.getSortedVariableIndices(metaVariables), [&value, &explicitValues] (uint64_t offset) { return explicitValues[offset] <= value; }), metaVariables);
case storm::logic::ComparisonType::Greater:
return fromVector(ddManager, explicitValues, odd, metaVariables, std::bind(std::less<ValueType>(), value, std::placeholders::_1));
return Bdd<LibraryType>(ddManager, InternalBdd<LibraryType>::fromVector(&ddManager.getInternalDdManager(), odd, ddManager.getSortedVariableIndices(metaVariables), [&value, &explicitValues] (uint64_t offset) { return explicitValues[offset] > value; }), metaVariables);
case storm::logic::ComparisonType::GreaterEqual:
return fromVector(ddManager, explicitValues, odd, metaVariables, std::bind(std::less_equal<ValueType>(), value, std::placeholders::_1));
return Bdd<LibraryType>(ddManager, InternalBdd<LibraryType>::fromVector(&ddManager.getInternalDdManager(), odd, ddManager.getSortedVariableIndices(metaVariables), [&value, &explicitValues] (uint64_t offset) { return explicitValues[offset] >= value; }), metaVariables);
}
return Bdd<LibraryType>();
}
static Bdd<LibraryType> fromVector(DdManager<LibraryType> const& ddManager, std::vector<ValueType> const& values, Odd const& odd, std::set<storm::expressions::Variable> const& metaVariables, std::function<bool (ValueType const&)> const& filter) {
return Bdd<LibraryType>(ddManager, InternalBdd<LibraryType>::fromVector(&ddManager.getInternalDdManager(), values, odd, ddManager.getSortedVariableIndices(metaVariables), filter), metaVariables);
}
};
template<DdType LibraryType>
@ -62,6 +58,11 @@ namespace storm {
return FromVectorHelper<LibraryType, ValueType>::fromVector(ddManager, explicitValues, odd, metaVariables, comparisonType, value);
}
template<DdType LibraryType>
Bdd<LibraryType> Bdd<LibraryType>::fromVector(DdManager<LibraryType> const& ddManager, storm::storage::BitVector const& truthValues, storm::dd::Odd const& odd, std::set<storm::expressions::Variable> const& metaVariables) {
return Bdd<LibraryType>(ddManager, InternalBdd<LibraryType>::fromVector(&ddManager.getInternalDdManager(), odd, ddManager.getSortedVariableIndices(metaVariables), [&truthValues] (uint64_t offset) { return truthValues[offset]; } ), metaVariables);
}
template<DdType LibraryType>
bool Bdd<LibraryType>::operator==(Bdd<LibraryType> const& other) const {
return internalBdd == other.internalBdd;
@ -363,6 +364,13 @@ namespace storm {
return result;
}
template<DdType LibraryType>
storm::storage::BitVector Bdd<LibraryType>::filterExplicitVector(Odd const& odd, storm::storage::BitVector const& values) const {
storm::storage::BitVector result(this->getNonZeroCount());
internalBdd.filterExplicitVector(odd, this->getSortedVariableIndices(), values, result);
return result;
}
template<DdType LibraryType>
Bdd<LibraryType>::operator InternalBdd<LibraryType>() const {
return internalBdd;

21
src/storm/storage/dd/Bdd.h

@ -36,6 +36,16 @@ namespace storm {
Bdd(Bdd<LibraryType>&& other) = default;
Bdd& operator=(Bdd<LibraryType>&& other) = default;
/*!
* Constructs a BDD representation of all encodings whose value is true in the given list of truth values.
*
* @param ddManager The DD manager responsible for the resulting BDD.
* @param truthValues The truth values.
* @param odd The ODD used for the translation from the explicit representation to a symbolic one.
* @param metaVariables The meta variables to use for the symbolic encoding.
*/
static Bdd<LibraryType> fromVector(DdManager<LibraryType> const& ddManager, storm::storage::BitVector const& truthValues, storm::dd::Odd const& odd, std::set<storm::expressions::Variable> const& metaVariables);
/*!
* Constructs a BDD representation of all encodings that are in the requested relation with the given value.
*
@ -327,13 +337,22 @@ namespace storm {
/*!
* Filters the given explicit vector using the symbolic representation of which values to select.
*
* @param selectedValues A symbolic representation of which values to select.
* @param odd The ODD used for the translation from symbolic to explicit.
* @param values The value vector from which to select the values.
* @return The resulting vector.
*/
template<typename ValueType>
std::vector<ValueType> filterExplicitVector(Odd const& odd, std::vector<ValueType> const& values) const;
/*!
* Filters the given explicit vector using the symbolic representation of which values to select.
*
* @param odd The ODD used for the translation from symbolic to explicit.
* @param values The value vector from which to select the values.
* @return The resulting vector.
*/
storm::storage::BitVector filterExplicitVector(Odd const& odd, storm::storage::BitVector const& values) const;
friend struct std::hash<storm::dd::Bdd<LibraryType>>;
template<DdType LibraryTypePrime, typename ValueType>

53
src/storm/storage/dd/cudd/InternalCuddBdd.cpp

@ -16,10 +16,9 @@ namespace storm {
// Intentionally left empty.
}
template<typename ValueType>
InternalBdd<DdType::CUDD> InternalBdd<DdType::CUDD>::fromVector(InternalDdManager<DdType::CUDD> const* ddManager, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (ValueType const&)> const& filter) {
InternalBdd<DdType::CUDD> InternalBdd<DdType::CUDD>::fromVector(InternalDdManager<DdType::CUDD> const* ddManager, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (uint64_t)> const& filter) {
uint_fast64_t offset = 0;
return InternalBdd<DdType::CUDD>(ddManager, cudd::BDD(ddManager->getCuddManager(), fromVectorRec(ddManager->getCuddManager().getManager(), offset, 0, sortedDdVariableIndices.size(), values, odd, sortedDdVariableIndices, filter)));
return InternalBdd<DdType::CUDD>(ddManager, cudd::BDD(ddManager->getCuddManager(), fromVectorRec(ddManager->getCuddManager().getManager(), offset, 0, sortedDdVariableIndices.size(), odd, sortedDdVariableIndices, filter)));
}
bool InternalBdd<DdType::CUDD>::operator==(InternalBdd<DdType::CUDD> const& other) const {
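The changed signature is the core of this refactoring: fromVector no longer receives the value vector, only a predicate over the running vector offset, so value access moves to the caller. A minimal sketch of the new calling convention with toy data (the commented call assumes the ddManager, odd and sortedDdVariableIndices from this file):
// Sketch only: the caller owns the explicit values and wraps them into an
// index-based filter; fromVector itself never touches the values anymore.
std::vector<double> values = {0.3, 0.0, 0.7, 1.0};  // toy data, one entry per ODD encoding
auto filter = [&values] (uint64_t offset) { return values[offset] > 0.5; };
// InternalBdd<DdType::CUDD>::fromVector(ddManager, odd, sortedDdVariableIndices, filter);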
@@ -222,14 +221,13 @@ namespace storm {
return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddBdd().Add());
}
template<typename ValueType>
DdNode* InternalBdd<DdType::CUDD>::fromVectorRec(::DdManager* manager, uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (ValueType const&)> const& filter) {
DdNode* InternalBdd<DdType::CUDD>::fromVectorRec(::DdManager* manager, uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (uint64_t)> const& filter) {
if (currentLevel == maxLevel) {
// If we are in a terminal node of the ODD, we need to check whether the then-offset of the ODD is one
// (meaning the encoding is a valid one) or zero (meaning the encoding is not valid). Consequently, we
// need to consume the next vector offset (and hand it to the filter) iff the then-offset is greater than zero.
if (odd.getThenOffset() > 0) {
if (filter(values[currentOffset++])) {
if (filter(currentOffset++)) {
return Cudd_ReadOne(manager);
} else {
return Cudd_ReadLogicZero(manager);
@@ -246,7 +244,7 @@ namespace storm {
// Determine the new else-successor.
DdNode* elseSuccessor = nullptr;
if (odd.getElseOffset() > 0) {
elseSuccessor = fromVectorRec(manager, currentOffset, currentLevel + 1, maxLevel, values, odd.getElseSuccessor(), ddVariableIndices, filter);
elseSuccessor = fromVectorRec(manager, currentOffset, currentLevel + 1, maxLevel, odd.getElseSuccessor(), ddVariableIndices, filter);
} else {
elseSuccessor = Cudd_ReadLogicZero(manager);
}
@@ -255,7 +253,7 @@ namespace storm {
// Determine the new then-successor.
DdNode* thenSuccessor = nullptr;
if (odd.getThenOffset() > 0) {
thenSuccessor = fromVectorRec(manager, currentOffset, currentLevel + 1, maxLevel, values, odd.getThenSuccessor(), ddVariableIndices, filter);
thenSuccessor = fromVectorRec(manager, currentOffset, currentLevel + 1, maxLevel, odd.getThenSuccessor(), ddVariableIndices, filter);
} else {
thenSuccessor = Cudd_ReadLogicZero(manager);
}
@@ -423,6 +421,40 @@ namespace storm {
}
}
void InternalBdd<DdType::CUDD>::filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, storm::storage::BitVector const& sourceValues, storm::storage::BitVector& targetValues) const {
uint_fast64_t currentIndex = 0;
filterExplicitVectorRec(Cudd_Regular(this->getCuddDdNode()), ddManager->getCuddManager(), 0, Cudd_IsComplement(this->getCuddDdNode()), ddVariableIndices.size(), ddVariableIndices, 0, odd, targetValues, currentIndex, sourceValues);
}
void InternalBdd<DdType::CUDD>::filterExplicitVectorRec(DdNode const* dd, cudd::Cudd const& manager, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, uint_fast64_t currentOffset, storm::dd::Odd const& odd, storm::storage::BitVector& result, uint_fast64_t& currentIndex, storm::storage::BitVector const& values) {
// If there are no more values to select, we can directly return.
if (dd == Cudd_ReadLogicZero(manager.getManager()) && !complement) {
return;
} else if (dd == Cudd_ReadOne(manager.getManager()) && complement) {
return;
}
if (currentLevel == maxLevel) {
result.set(currentIndex++, values.get(currentOffset));
} else if (ddVariableIndices[currentLevel] < Cudd_NodeReadIndex(dd)) {
// If we skipped a level, we need to enumerate the explicit entries for the case in which the bit is set
// and for the one in which it is not set.
filterExplicitVectorRec(dd, manager, currentLevel + 1, complement, maxLevel, ddVariableIndices, currentOffset, odd.getElseSuccessor(), result, currentIndex, values);
filterExplicitVectorRec(dd, manager, currentLevel + 1, complement, maxLevel, ddVariableIndices, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), result, currentIndex, values);
} else {
// Otherwise, we compute the ODDs for both the then- and else successors.
DdNode const* thenDdNode = Cudd_T_const(dd);
DdNode const* elseDdNode = Cudd_E_const(dd);
// Determine whether we have to evaluate the successors as if they were complemented.
bool elseComplemented = Cudd_IsComplement(elseDdNode) ^ complement;
bool thenComplemented = Cudd_IsComplement(thenDdNode) ^ complement;
filterExplicitVectorRec(Cudd_Regular(elseDdNode), manager, currentLevel + 1, elseComplemented, maxLevel, ddVariableIndices, currentOffset, odd.getElseSuccessor(), result, currentIndex, values);
filterExplicitVectorRec(Cudd_Regular(thenDdNode), manager, currentLevel + 1, thenComplemented, maxLevel, ddVariableIndices, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), result, currentIndex, values);
}
}
std::pair<std::vector<storm::expressions::Expression>, std::unordered_map<uint_fast64_t, storm::expressions::Variable>> InternalBdd<DdType::CUDD>::toExpression(storm::expressions::ExpressionManager& manager) const {
std::pair<std::vector<storm::expressions::Expression>, std::unordered_map<uint_fast64_t, storm::expressions::Variable>> result;
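The new filterExplicitVectorRec relies on the ODD's offsets to map each symbolic encoding to a position in the explicit bit vector: taking the then-branch advances the running offset by the size of the else-successor, and at the bottom the selected bit is appended via result.set(currentIndex++, values.get(currentOffset)). The following standalone toy (not storm's Odd class; a plain binary node with precomputed else-offsets, and a simple predicate standing in for the selecting BDD) illustrates that bookkeeping and the resulting compaction:
#include <cstdint>
#include <iostream>
#include <vector>

// Toy stand-in for an ODD node: elseOffset is the number of valid encodings in the
// else-subtree; leaves are represented by nullptr successors.
struct ToyOdd {
    uint64_t elseOffset;
    ToyOdd const* elseSucc;
    ToyOdd const* thenSucc;
};

// Walks all encodings in ODD order; whenever 'selected' holds for the explicit offset,
// the corresponding source value is appended to 'result' (mimicking
// result.set(currentIndex++, values.get(currentOffset)) in the real code).
void filterRec(ToyOdd const* odd, uint64_t currentOffset,
               std::vector<bool> const& values, std::vector<bool>& result,
               bool (*selected)(uint64_t)) {
    if (odd == nullptr) {
        if (selected(currentOffset)) {
            result.push_back(values[currentOffset]);
        }
        return;
    }
    filterRec(odd->elseSucc, currentOffset, values, result, selected);
    filterRec(odd->thenSucc, currentOffset + odd->elseOffset, values, result, selected);
}

int main() {
    // Depth-2 ODD over two bits in which every encoding is valid: the explicit indices
    // of the encodings 00, 01, 10, 11 are 0, 1, 2, 3.
    ToyOdd leafLevel{1, nullptr, nullptr};
    ToyOdd root{2, &leafLevel, &leafLevel};
    std::vector<bool> values = {true, false, true, true};
    std::vector<bool> result;
    // Select the odd-numbered encodings only.
    filterRec(&root, 0, values, result, [](uint64_t offset) { return offset % 2 == 1; });
    for (bool b : result) {
        std::cout << b << ' ';   // prints: 0 1
    }
    std::cout << '\n';
    return 0;
}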
@@ -502,10 +534,7 @@ namespace storm {
// Return the variable for this node.
return newNodeVariable;
}
template InternalBdd<DdType::CUDD> InternalBdd<DdType::CUDD>::fromVector(InternalDdManager<DdType::CUDD> const* ddManager, std::vector<double> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (double const&)> const& filter);
template InternalBdd<DdType::CUDD> InternalBdd<DdType::CUDD>::fromVector(InternalDdManager<DdType::CUDD> const* ddManager, std::vector<uint_fast64_t> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (uint_fast64_t const&)> const& filter);
template InternalAdd<DdType::CUDD, double> InternalBdd<DdType::CUDD>::toAdd() const;
template InternalAdd<DdType::CUDD, uint_fast64_t> InternalBdd<DdType::CUDD>::toAdd() const;

36
src/storm/storage/dd/cudd/InternalCuddBdd.h

@@ -53,17 +53,15 @@ namespace storm {
InternalBdd& operator=(InternalBdd<DdType::CUDD>&& other) = default;
/*!
* Builds a BDD representing the values that make the given filter function evaluate to true.
* Builds a BDD representing the indices that make the given filter function evaluate to true.
*
* @param ddManager The manager responsible for the BDD.
* @param values The values that are to be checked against the filter function.
* @param odd The ODD used for the translation.
* @param metaVariables The meta variables used for the translation.
* @param filter The filter that evaluates whether an encoding is to be mapped to 0 or 1.
* @return The resulting BDD.
*/
template<typename ValueType>
static InternalBdd<storm::dd::DdType::CUDD> fromVector(InternalDdManager<DdType::CUDD> const* ddManager, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (ValueType const&)> const& filter);
static InternalBdd<storm::dd::DdType::CUDD> fromVector(InternalDdManager<DdType::CUDD> const* ddManager, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (uint64_t)> const& filter);
/*!
* Retrieves whether the two BDDs represent the same function.
@@ -376,6 +374,16 @@ namespace storm {
*/
template<typename ValueType>
void filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType> const& sourceValues, std::vector<ValueType>& targetValues) const;
/*!
* Uses the current BDD to filter values from the explicit vector.
*
* @param odd The ODD used to determine which entries to select.
* @param ddVariableIndices The indices of the DD variables contained in this BDD.
* @param sourceValues The source vector.
* @param targetValues The vector to which to write the selected values.
*/
void filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, storm::storage::BitVector const& sourceValues, storm::storage::BitVector& targetValues) const;
friend struct std::hash<storm::dd::InternalBdd<storm::dd::DdType::CUDD>>;
@@ -401,14 +409,12 @@ namespace storm {
* @param currentOffset The current offset in the vector.
* @param currentLevel The current level in the DD.
* @param maxLevel The maximal level in the DD.
* @param values The values that are to be checked against the filter function.
* @param odd The ODD used for the translation.
* @param ddVariableIndices The (sorted) list of DD variable indices to use.
* @param filter A function that determines which encodings are to be mapped to true.
* @return The resulting (CUDD) BDD node.
*/
template<typename ValueType>
static DdNode* fromVectorRec(::DdManager* manager, uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (ValueType const&)> const& filter);
static DdNode* fromVectorRec(::DdManager* manager, uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (uint64_t)> const& filter);
/*!
* Helper function to convert the DD into a bit vector.
@@ -463,6 +469,22 @@ namespace storm {
template<typename ValueType>
static void filterExplicitVectorRec(DdNode const* dd, cudd::Cudd const& manager, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, uint_fast64_t currentOffset, storm::dd::Odd const& odd, std::vector<ValueType>& result, uint_fast64_t& currentIndex, std::vector<ValueType> const& values);
/*!
* Adds the selected values to the target vector.
*
* @param dd The current node of the DD representing the selected values.
* @param manager The manager responsible for the DD.
* @param currentLevel The currently considered level in the DD.
* @param maxLevel The number of levels that need to be considered.
* @param ddVariableIndices The sorted list of variable indices to use.
* @param currentOffset The offset along the path taken in the DD representing the selected values.
* @param odd The current ODD node.
* @param result The target vector to which to write the values.
* @param currentIndex The index at which the next element is to be written.
* @param values The value vector from which to select the values.
*/
static void filterExplicitVectorRec(DdNode const* dd, cudd::Cudd const& manager, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, uint_fast64_t currentOffset, storm::dd::Odd const& odd, storm::storage::BitVector& result, uint_fast64_t& currentIndex, storm::storage::BitVector const& values);
/*!
* Creates a vector of expressions that represent the function of the given BDD node.
*

52
src/storm/storage/dd/sylvan/InternalSylvanBdd.cpp

@@ -26,20 +26,18 @@ namespace storm {
// Intentionally left empty.
}
template<typename ValueType>
InternalBdd<DdType::Sylvan> InternalBdd<DdType::Sylvan>::fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (ValueType const&)> const& filter) {
InternalBdd<DdType::Sylvan> InternalBdd<DdType::Sylvan>::fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (uint64_t)> const& filter) {
uint_fast64_t offset = 0;
return InternalBdd<DdType::Sylvan>(ddManager, sylvan::Bdd(fromVectorRec(offset, 0, sortedDdVariableIndices.size(), values, odd, sortedDdVariableIndices, filter)));
return InternalBdd<DdType::Sylvan>(ddManager, sylvan::Bdd(fromVectorRec(offset, 0, sortedDdVariableIndices.size(), odd, sortedDdVariableIndices, filter)));
}
template<typename ValueType>
BDD InternalBdd<DdType::Sylvan>::fromVectorRec(uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (ValueType const&)> const& filter) {
BDD InternalBdd<DdType::Sylvan>::fromVectorRec(uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (uint64_t)> const& filter) {
if (currentLevel == maxLevel) {
// If we are in a terminal node of the ODD, we need to check whether the then-offset of the ODD is one
// (meaning the encoding is a valid one) or zero (meaning the encoding is not valid). Consequently, we
// need to consume the next vector offset (and hand it to the filter) iff the then-offset is greater than zero.
if (odd.getThenOffset() > 0) {
if (filter(values[currentOffset++])) {
if (filter(currentOffset++)) {
return sylvan_true;
} else {
return sylvan_false;
@@ -56,7 +54,7 @@ namespace storm {
// Determine the new else-successor.
BDD elseSuccessor;
if (odd.getElseOffset() > 0) {
elseSuccessor = fromVectorRec(currentOffset, currentLevel + 1, maxLevel, values, odd.getElseSuccessor(), ddVariableIndices, filter);
elseSuccessor = fromVectorRec(currentOffset, currentLevel + 1, maxLevel, odd.getElseSuccessor(), ddVariableIndices, filter);
} else {
elseSuccessor = sylvan_false;
}
@@ -65,7 +63,7 @@ namespace storm {
// Determine the new then-successor.
BDD thenSuccessor;
if (odd.getThenOffset() > 0) {
thenSuccessor = fromVectorRec(currentOffset, currentLevel + 1, maxLevel, values, odd.getThenSuccessor(), ddVariableIndices, filter);
thenSuccessor = fromVectorRec(currentOffset, currentLevel + 1, maxLevel, odd.getThenSuccessor(), ddVariableIndices, filter);
} else {
thenSuccessor = sylvan_false;
}
@@ -408,6 +406,40 @@ namespace storm {
}
}
void InternalBdd<DdType::Sylvan>::filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, storm::storage::BitVector const& sourceValues, storm::storage::BitVector& targetValues) const {
uint_fast64_t currentIndex = 0;
filterExplicitVectorRec(bdd_regular(this->getSylvanBdd().GetBDD()), 0, bdd_isnegated(this->getSylvanBdd().GetBDD()), ddVariableIndices.size(), ddVariableIndices, 0, odd, targetValues, currentIndex, sourceValues);
}
void InternalBdd<DdType::Sylvan>::filterExplicitVectorRec(BDD dd, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, uint_fast64_t currentOffset, storm::dd::Odd const& odd, storm::storage::BitVector& result, uint_fast64_t& currentIndex, storm::storage::BitVector const& values) {
// If there are no more values to select, we can directly return.
if (dd == sylvan_false && !complement) {
return;
} else if (dd == sylvan_true && complement) {
return;
}
if (currentLevel == maxLevel) {
result.set(currentIndex++, values.get(currentOffset));
} else if (ddVariableIndices[currentLevel] < sylvan_var(dd)) {
// If we skipped a level, we need to enumerate the explicit entries for the case in which the bit is set
// and for the one in which it is not set.
filterExplicitVectorRec(dd, currentLevel + 1, complement, maxLevel, ddVariableIndices, currentOffset, odd.getElseSuccessor(), result, currentIndex, values);
filterExplicitVectorRec(dd, currentLevel + 1, complement, maxLevel, ddVariableIndices, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), result, currentIndex, values);
} else {
// Otherwise, we compute the ODDs for both the then- and else successors.
BDD thenDdNode = sylvan_high(dd);
BDD elseDdNode = sylvan_low(dd);
// Determine whether we have to evaluate the successors as if they were complemented.
bool elseComplemented = bdd_isnegated(elseDdNode) ^ complement;
bool thenComplemented = bdd_isnegated(thenDdNode) ^ complement;
filterExplicitVectorRec(bdd_regular(elseDdNode), currentLevel + 1, elseComplemented, maxLevel, ddVariableIndices, currentOffset, odd.getElseSuccessor(), result, currentIndex, values);
filterExplicitVectorRec(bdd_regular(thenDdNode), currentLevel + 1, thenComplemented, maxLevel, ddVariableIndices, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), result, currentIndex, values);
}
}
std::pair<std::vector<storm::expressions::Expression>, std::unordered_map<uint_fast64_t, storm::expressions::Variable>> InternalBdd<DdType::Sylvan>::toExpression(storm::expressions::ExpressionManager& manager) const {
std::pair<std::vector<storm::expressions::Expression>, std::unordered_map<uint_fast64_t, storm::expressions::Variable>> result;
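Both backends thread a complement flag through the recursion (Cudd_IsComplement/Cudd_Regular on the CUDD side, bdd_isnegated/bdd_regular here): successors are visited in regular form and an edge's negation is XOR-ed into the flag, so the truth value of a path is the terminal flipped by the parity of negated edges along it. A standalone toy illustration of that bookkeeping (not Sylvan's actual node representation):
#include <iostream>

// Toy BDD node with complement edges: the single terminal (nullptr) represents 'true',
// and each edge carries a flag saying whether it is negated.
struct ToyNode {
    ToyNode const* lo;   // else-successor
    ToyNode const* hi;   // then-successor
    bool loNegated;      // complement flag on the else edge
    bool hiNegated;      // complement flag on the then edge
};

// Evaluates the function for a single decision bit, threading the complement flag the
// same way filterExplicitVectorRec does: successor flag XOR current flag.
bool evaluate(ToyNode const* node, bool complement, bool bit) {
    if (node == nullptr) {
        return !complement;  // terminal 'true', flipped if the path parity is odd
    }
    ToyNode const* next = bit ? node->hi : node->lo;
    bool nextNegated = bit ? node->hiNegated : node->loNegated;
    return evaluate(next, complement ^ nextNegated, bit);
}

int main() {
    // A single decision node whose else edge is negated: the represented function is f(x) = x.
    ToyNode x{nullptr, nullptr, true, false};
    std::cout << evaluate(&x, false, false) << ' '   // f(0) = 0
              << evaluate(&x, false, true) << '\n';  // f(1) = 1
    return 0;
}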
@@ -492,10 +524,6 @@ namespace storm {
return newNodeVariable;
}
template InternalBdd<DdType::Sylvan> InternalBdd<DdType::Sylvan>::fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, std::vector<double> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (double const&)> const& filter);
template InternalBdd<DdType::Sylvan> InternalBdd<DdType::Sylvan>::fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, std::vector<uint_fast64_t> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (uint_fast64_t const&)> const& filter);
template InternalBdd<DdType::Sylvan> InternalBdd<DdType::Sylvan>::fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, std::vector<storm::RationalFunction> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool(storm::RationalFunction const&)> const& filter);
template InternalAdd<DdType::Sylvan, double> InternalBdd<DdType::Sylvan>::toAdd() const;
template InternalAdd<DdType::Sylvan, uint_fast64_t> InternalBdd<DdType::Sylvan>::toAdd() const;
template InternalAdd<DdType::Sylvan, storm::RationalFunction> InternalBdd<DdType::Sylvan>::toAdd() const;

35
src/storm/storage/dd/sylvan/InternalSylvanBdd.h

@@ -45,14 +45,12 @@ namespace storm {
* Builds a BDD representing the indices that make the given filter function evaluate to true.
*
* @param ddManager The manager responsible for the BDD.
* @param values The values that are to be checked against the filter function.
* @param odd The ODD used for the translation.
* @param metaVariables The meta variables used for the translation.
* @param filter The filter that evaluates whether an encoding is to be mapped to 0 or 1.
* @return The resulting BDD.
*/
template<typename ValueType>
static InternalBdd<storm::dd::DdType::Sylvan> fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (ValueType const&)> const& filter);
static InternalBdd<storm::dd::DdType::Sylvan> fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (uint64_t)> const& filter);
/*!
* Retrieves whether the two BDDs represent the same function.
@@ -365,7 +363,17 @@ namespace storm {
*/
template<typename ValueType>
void filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType> const& sourceValues, std::vector<ValueType>& targetValues) const;
/*!
* Uses the current BDD to filter values from the explicit vector.
*
* @param odd The ODD used to determine which entries to select.
* @param ddVariableIndices The indices of the DD variables contained in this BDD.
* @param sourceValues The source vector.
* @param targetValues The vector to which to write the selected values.
*/
void filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, storm::storage::BitVector const& sourceValues, storm::storage::BitVector& targetValues) const;
friend struct std::hash<storm::dd::InternalBdd<storm::dd::DdType::Sylvan>>;
private:
@@ -375,14 +383,12 @@ namespace storm {
* @param currentOffset The current offset in the vector.
* @param currentLevel The current level in the DD.
* @param maxLevel The maximal level in the DD.
* @param values The values that are to be checked against the filter function.
* @param odd The ODD used for the translation.
* @param ddVariableIndices The (sorted) list of DD variable indices to use.
* @param filter A function that determines which encodings are to be mapped to true.
* @return The resulting (Sylvan) BDD node.
*/
template<typename ValueType>
static BDD fromVectorRec(uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (ValueType const&)> const& filter);
static BDD fromVectorRec(uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (uint64_t)> const& filter);
// Declare a hash functor that is used for the unique tables in the construction process of ODDs.
class HashFunctor {
@@ -434,6 +440,21 @@ namespace storm {
template<typename ValueType>
static void filterExplicitVectorRec(BDD dd, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, uint_fast64_t currentOffset, storm::dd::Odd const& odd, std::vector<ValueType>& result, uint_fast64_t& currentIndex, std::vector<ValueType> const& values);
/*!
* Adds the selected values to the target vector.
*
* @param dd The current node of the DD representing the selected values.
* @param currentLevel The currently considered level in the DD.
* @param maxLevel The number of levels that need to be considered.
* @param ddVariableIndices The sorted list of variable indices to use.
* @param currentOffset The offset along the path taken in the DD representing the selected values.
* @param odd The current ODD node.
* @param result The target vector to which to write the values.
* @param currentIndex The index at which the next element is to be written.
* @param values The value vector from which to select the values.
*/
static void filterExplicitVectorRec(BDD dd, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, uint_fast64_t currentOffset, storm::dd::Odd const& odd, storm::storage::BitVector& result, uint_fast64_t& currentIndex, storm::storage::BitVector const& values);
/*!
* Creates a vector of expressions that represent the function of the given BDD node.
*

Some files were not shown because too many files changed in this diff
