From 818760e0ad8b6dcd44fb3b237f74d58e641262c4 Mon Sep 17 00:00:00 2001 From: dehnert Date: Fri, 6 May 2016 19:17:24 +0200 Subject: [PATCH 1/3] removed sylvan version from resources Former-commit-id: a81c5792d59e36fe94f0c1490ba3c4154e562689 --- resources/3rdparty/sylvan/.gitignore | 35 - resources/3rdparty/sylvan/CMakeLists.txt | 22 - resources/3rdparty/sylvan/LICENSE | 202 -- resources/3rdparty/sylvan/Makefile.am | 5 - resources/3rdparty/sylvan/README.md | 127 - resources/3rdparty/sylvan/cmake/FindGMP.cmake | 20 - resources/3rdparty/sylvan/configure.ac | 20 - .../3rdparty/sylvan/examples/CMakeLists.txt | 31 - resources/3rdparty/sylvan/examples/getrss.c | 68 - resources/3rdparty/sylvan/examples/getrss.h | 26 - resources/3rdparty/sylvan/examples/lddmc.c | 499 --- resources/3rdparty/sylvan/examples/mc.c | 616 ---- resources/3rdparty/sylvan/m4/.gitignore | 4 - .../3rdparty/sylvan/models/at.5.8-rgs.bdd | Bin 50448 -> 0 bytes .../3rdparty/sylvan/models/at.6.8-rgs.bdd | Bin 50128 -> 0 bytes .../3rdparty/sylvan/models/at.7.8-rgs.bdd | Bin 59144 -> 0 bytes resources/3rdparty/sylvan/models/blocks.2.ldd | Bin 14616 -> 0 bytes resources/3rdparty/sylvan/models/blocks.4.ldd | Bin 41000 -> 0 bytes .../sylvan/models/collision.4.9-rgs.bdd | Bin 73696 -> 0 bytes .../sylvan/models/collision.5.9-rgs.bdd | Bin 73760 -> 0 bytes .../sylvan/models/schedule_world.2.8-rgs.bdd | Bin 73472 -> 0 bytes .../sylvan/models/schedule_world.3.8-rgs.bdd | Bin 97456 -> 0 bytes resources/3rdparty/sylvan/src/CMakeLists.txt | 71 - resources/3rdparty/sylvan/src/Makefile.am | 38 - resources/3rdparty/sylvan/src/avl.h | 398 --- resources/3rdparty/sylvan/src/lace.c | 1040 ------ resources/3rdparty/sylvan/src/lace.h | 2741 ---------------- resources/3rdparty/sylvan/src/llmsset.c | 563 ---- resources/3rdparty/sylvan/src/llmsset.h | 205 -- resources/3rdparty/sylvan/src/refs.c | 595 ---- resources/3rdparty/sylvan/src/refs.h | 77 - resources/3rdparty/sylvan/src/sha2.c | 1067 ------- resources/3rdparty/sylvan/src/sha2.h | 197 -- resources/3rdparty/sylvan/src/stats.c | 200 -- resources/3rdparty/sylvan/src/stats.h | 259 -- resources/3rdparty/sylvan/src/sylvan.h | 182 -- resources/3rdparty/sylvan/src/sylvan_bdd.c | 2820 ----------------- resources/3rdparty/sylvan/src/sylvan_bdd.h | 423 --- .../3rdparty/sylvan/src/sylvan_bdd_storm.h | 3 - resources/3rdparty/sylvan/src/sylvan_cache.c | 218 -- resources/3rdparty/sylvan/src/sylvan_cache.h | 113 - resources/3rdparty/sylvan/src/sylvan_common.c | 304 -- resources/3rdparty/sylvan/src/sylvan_common.h | 85 - resources/3rdparty/sylvan/src/sylvan_config.h | 30 - resources/3rdparty/sylvan/src/sylvan_gmp.c | 595 ---- resources/3rdparty/sylvan/src/sylvan_gmp.h | 182 -- resources/3rdparty/sylvan/src/sylvan_ldd.c | 2558 --------------- resources/3rdparty/sylvan/src/sylvan_ldd.h | 288 -- resources/3rdparty/sylvan/src/sylvan_mtbdd.c | 2543 --------------- resources/3rdparty/sylvan/src/sylvan_mtbdd.h | 608 ---- .../3rdparty/sylvan/src/sylvan_mtbdd_int.h | 128 - .../3rdparty/sylvan/src/sylvan_mtbdd_storm.c | 514 --- .../3rdparty/sylvan/src/sylvan_mtbdd_storm.h | 111 - resources/3rdparty/sylvan/src/sylvan_obj.cpp | 1040 ------ resources/3rdparty/sylvan/src/sylvan_obj.hpp | 855 ----- .../sylvan/src/sylvan_obj_bdd_storm.hpp | 3 - .../sylvan/src/sylvan_obj_mtbdd_storm.hpp | 51 - .../3rdparty/sylvan/src/sylvan_obj_storm.cpp | 141 - resources/3rdparty/sylvan/src/tls.h | 35 - resources/3rdparty/sylvan/test/.gitignore | 5 - resources/3rdparty/sylvan/test/CMakeLists.txt | 8 - resources/3rdparty/sylvan/test/main.c | 654 
---- resources/3rdparty/sylvan/test/test_cxx.cpp | 46 - 63 files changed, 23669 deletions(-) delete mode 100644 resources/3rdparty/sylvan/.gitignore delete mode 100644 resources/3rdparty/sylvan/CMakeLists.txt delete mode 100644 resources/3rdparty/sylvan/LICENSE delete mode 100644 resources/3rdparty/sylvan/Makefile.am delete mode 100644 resources/3rdparty/sylvan/README.md delete mode 100644 resources/3rdparty/sylvan/cmake/FindGMP.cmake delete mode 100644 resources/3rdparty/sylvan/configure.ac delete mode 100644 resources/3rdparty/sylvan/examples/CMakeLists.txt delete mode 100644 resources/3rdparty/sylvan/examples/getrss.c delete mode 100644 resources/3rdparty/sylvan/examples/getrss.h delete mode 100644 resources/3rdparty/sylvan/examples/lddmc.c delete mode 100644 resources/3rdparty/sylvan/examples/mc.c delete mode 100644 resources/3rdparty/sylvan/m4/.gitignore delete mode 100644 resources/3rdparty/sylvan/models/at.5.8-rgs.bdd delete mode 100644 resources/3rdparty/sylvan/models/at.6.8-rgs.bdd delete mode 100644 resources/3rdparty/sylvan/models/at.7.8-rgs.bdd delete mode 100644 resources/3rdparty/sylvan/models/blocks.2.ldd delete mode 100644 resources/3rdparty/sylvan/models/blocks.4.ldd delete mode 100644 resources/3rdparty/sylvan/models/collision.4.9-rgs.bdd delete mode 100644 resources/3rdparty/sylvan/models/collision.5.9-rgs.bdd delete mode 100644 resources/3rdparty/sylvan/models/schedule_world.2.8-rgs.bdd delete mode 100644 resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd delete mode 100644 resources/3rdparty/sylvan/src/CMakeLists.txt delete mode 100644 resources/3rdparty/sylvan/src/Makefile.am delete mode 100644 resources/3rdparty/sylvan/src/avl.h delete mode 100644 resources/3rdparty/sylvan/src/lace.c delete mode 100644 resources/3rdparty/sylvan/src/lace.h delete mode 100644 resources/3rdparty/sylvan/src/llmsset.c delete mode 100644 resources/3rdparty/sylvan/src/llmsset.h delete mode 100644 resources/3rdparty/sylvan/src/refs.c delete mode 100644 resources/3rdparty/sylvan/src/refs.h delete mode 100644 resources/3rdparty/sylvan/src/sha2.c delete mode 100644 resources/3rdparty/sylvan/src/sha2.h delete mode 100644 resources/3rdparty/sylvan/src/stats.c delete mode 100644 resources/3rdparty/sylvan/src/stats.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_bdd.c delete mode 100644 resources/3rdparty/sylvan/src/sylvan_bdd.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_bdd_storm.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_cache.c delete mode 100644 resources/3rdparty/sylvan/src/sylvan_cache.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_common.c delete mode 100644 resources/3rdparty/sylvan/src/sylvan_common.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_config.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_gmp.c delete mode 100644 resources/3rdparty/sylvan/src/sylvan_gmp.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_ldd.c delete mode 100644 resources/3rdparty/sylvan/src/sylvan_ldd.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_mtbdd.c delete mode 100644 resources/3rdparty/sylvan/src/sylvan_mtbdd.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c delete mode 100644 resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h delete mode 100644 resources/3rdparty/sylvan/src/sylvan_obj.cpp delete mode 100644 resources/3rdparty/sylvan/src/sylvan_obj.hpp delete mode 
100644 resources/3rdparty/sylvan/src/sylvan_obj_bdd_storm.hpp delete mode 100644 resources/3rdparty/sylvan/src/sylvan_obj_mtbdd_storm.hpp delete mode 100644 resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp delete mode 100644 resources/3rdparty/sylvan/src/tls.h delete mode 100644 resources/3rdparty/sylvan/test/.gitignore delete mode 100644 resources/3rdparty/sylvan/test/CMakeLists.txt delete mode 100644 resources/3rdparty/sylvan/test/main.c delete mode 100644 resources/3rdparty/sylvan/test/test_cxx.cpp diff --git a/resources/3rdparty/sylvan/.gitignore b/resources/3rdparty/sylvan/.gitignore deleted file mode 100644 index 4c6b4e09b..000000000 --- a/resources/3rdparty/sylvan/.gitignore +++ /dev/null @@ -1,35 +0,0 @@ -# autotools -**/Makefile -/autom4te.cache/ -config.* -.dirstamp -aclocal.m4 -configure -m4/* -tools -Makefile.in - -# cmake -**/CMakeCache.txt -**/CMakeFiles -**/cmake_install.cmake - -# libtool -.deps/ -.libs/ -/libtool - -# object files -*.lo -*.o -*.la - -# output files -examples/mc -examples/lddmc -test/sylvan_test -test/test_cxx -src/libsylvan.a - -# MacOS file -.DS_Store diff --git a/resources/3rdparty/sylvan/CMakeLists.txt b/resources/3rdparty/sylvan/CMakeLists.txt deleted file mode 100644 index 7cfa8b636..000000000 --- a/resources/3rdparty/sylvan/CMakeLists.txt +++ /dev/null @@ -1,22 +0,0 @@ -cmake_minimum_required(VERSION 2.6) -project(sylvan C CXX) - -set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) -find_package(GMP REQUIRED) -include_directories(${GMP_INCLUDE_DIR}) -#message(STATUS "Include directory ${GMP_INCLUDE_DIR}") -include_directories(src) - -add_subdirectory(src) - -option(SYLVAN_BUILD_TEST "Build test programs" ON) - -if(SYLVAN_BUILD_TEST) - add_subdirectory(test) -endif() - -option(SYLVAN_BUILD_EXAMPLES "Build example tools" ON) - -if(SYLVAN_BUILD_EXAMPLES) - add_subdirectory(examples) -endif() diff --git a/resources/3rdparty/sylvan/LICENSE b/resources/3rdparty/sylvan/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/resources/3rdparty/sylvan/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/resources/3rdparty/sylvan/Makefile.am b/resources/3rdparty/sylvan/Makefile.am deleted file mode 100644 index 6e1cf8acc..000000000 --- a/resources/3rdparty/sylvan/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -ACLOCAL_AMFLAGS = -I m4 - -AM_CFLAGS = -g -O2 -Wall -Wextra -Werror -std=gnu11 - -SUBDIRS = src diff --git a/resources/3rdparty/sylvan/README.md b/resources/3rdparty/sylvan/README.md deleted file mode 100644 index 421e9e659..000000000 --- a/resources/3rdparty/sylvan/README.md +++ /dev/null @@ -1,127 +0,0 @@ -Sylvan -====== -Sylvan is a parallel (multi-core) BDD library in C. Sylvan allows both sequential and parallel BDD-based algorithms to benefit from parallelism. Sylvan uses the work-stealing framework Lace and a scalable lockless hashtable to implement scalable multi-core BDD operations. - -Sylvan is developed (© 2011-2014) by the [Formal Methods and Tools](http://fmt.ewi.utwente.nl/) group at the University of Twente as part of the MaDriD project, which is funded by NWO. 
Sylvan is licensed with the Apache 2.0 license. - -You can contact the main author of Sylvan at . Please let us know if you use Sylvan in your projects. - -Sylvan is available at: https://github.com/utwente-fmt/sylvan -Java/JNI bindings: https://github.com/trolando/jsylvan -Haskell bindings: https://github.com/adamwalker/sylvan-haskell - -Publications ------------ -Dijk, T. van (2012) [The parallelization of binary decision diagram operations for model checking](http://essay.utwente.nl/61650/). Master’s Thesis, University of Twente, 27 April 2012. - -Dijk, T. van and Laarman, A.W. and Pol, J.C. van de (2012) [Multi-Core BDD Operations for Symbolic Reachability](http://eprints.eemcs.utwente.nl/22166/). In: 11th International Workshop on Parallel and Distributed Methods in verifiCation, PDMC 2012, 17 Sept. 2012, London, UK. Electronic Notes in Theoretical Computer Science. Elsevier. - -Usage ----- -For a quick demo, you can find an example of a simple BDD-based reachability algorithm in `examples/mc.c`. This demo features both a sequential (bfs) and a parallel (par) symbolic reachability algorithm, demonstrating how both sequential programs and parallel programs can benefit from parallelized BDD operations. - -To use Sylvan, include header file `sylvan.h`. - -Sylvan depends on the [work-stealing framework Lace](http://fmt.ewi.utwente.nl/tools/lace) for its implementation. Currently, Lace is embedded in the Sylvan distribution, but this may change in the future. To use the BDD operations of Sylvan, you must first start Lace and run all workers. - -Sylvan must be initialized after `lace_init` with a call to `sylvan_init`. This function takes three parameters: the log2 size of the BDD nodes table, the log2 size of the operation cache and the caching granularity. For example, `sylvan_init(16, 15, 4)` will initialize Sylvan with a BDD nodes table of size 65536, an operation cache of size 32768 and granularity 4. See below for detailed information about caching granularity. - -Example C code for initialization: -``` -#include <sylvan.h> - -// initialize with queue size of 1000000 -// autodetect number of workers -lace_init(0, 1000000); -// startup with defaults (create N-1 workers) -lace_startup(0, NULL, NULL); -// initialize with unique table size = 2^25 -// cache table size = 2^24 -// cache granularity = 4 -sylvan_init(25, 24, 4); -``` - -Sylvan may require a larger than normal program stack. You may need to increase the program stack size on your system using `ulimit -s`. Segmentation faults on large computations typically indicate a program stack overflow. - -### Basic functionality - -To create new BDDs, you can use: -- `sylvan_true`: representation of constant `true`. -- `sylvan_false`: representation of constant `false`. -- `sylvan_ithvar(var)`: representation of literal <var>. -- `sylvan_nithvar(var)`: representation of literal <var> negated. -- `sylvan_cube(variables, count, vector)`: create conjunction of variables in <variables> according to the corresponding value in <vector>. - -To walk the BDD edges and obtain the variable at the root of a BDD, you can use: -- `sylvan_var(bdd)`: obtain variable of the root node of <bdd> - requires that <bdd> is not constant `true` or `false`. -- `sylvan_high(bdd)`: follow high edge of <bdd>. -- `sylvan_low(bdd)`: follow low edge of <bdd>. - -You need to manually reference BDDs that you want to keep during garbage collection: -- `sylvan_ref(bdd)`: add reference to <bdd>. -- `sylvan_deref(bdd)`: remove reference to <bdd>.
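
The calls listed above can be combined into a small self-contained program. The following is an illustrative sketch only: it reuses the `sylvan_init(25, 24, 4)` setup from the initialization example, the variable indices 0 and 1 are arbitrary, and `<assert.h>` is included merely to spell out the edge structure that `sylvan_ithvar(0)` produces.

```
#include <assert.h>
#include <sylvan.h>

int main()
{
    // Start Lace and Sylvan as in the initialization example above.
    lace_init(0, 1000000);        // autodetect workers, task queue of size 1000000
    lace_startup(0, NULL, NULL);  // default program stack, no startup callback
    sylvan_init(25, 24, 4);       // 2^25 nodes, 2^24 cache entries, granularity 4

    // Create the literals x0 and (not x1); the indices are arbitrary here.
    BDD x0  = sylvan_ithvar(0);
    BDD nx1 = sylvan_nithvar(1);

    // Keep both BDDs alive across garbage collection.
    sylvan_ref(x0);
    sylvan_ref(nx1);

    // Walk the edges of x0: its root variable is 0, its high edge is
    // constant true and its low edge is constant false.
    assert(sylvan_var(x0) == 0);
    assert(sylvan_high(x0) == sylvan_true);
    assert(sylvan_low(x0) == sylvan_false);

    // Drop the references once the BDDs are no longer needed.
    sylvan_deref(x0);
    sylvan_deref(nx1);
    return 0;
}
```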
- -See below for more detailed information about garbage collection. Note that garbage collection does not proceed outside BDD operations, e.g., you can safely call `sylvan_ref` on the result of a BDD operation. - -The following 'primitives' are implemented: -- `sylvan_not(bdd)`: negation of <bdd>. -- `sylvan_ite(a,b,c)`: calculate 'if <a> then <b> else <c>'. -- `sylvan_exists(bdd, vars)`: existential quantification of <bdd> with respect to variables <vars>. Here, <vars> is a disjunction of literals. - -Sylvan uses complement edges to implement negation, therefore negation is performed in constant time. - -These primitives are used to implement the following operations: -- `sylvan_and(a, b)` -- `sylvan_or(a, b)` -- `sylvan_nand(a, b)` -- `sylvan_nor(a, b)` -- `sylvan_imp(a, b)` (a implies b) -- `sylvan_invimp(a, b)` (b implies a) -- `sylvan_xor(a, b)` -- `sylvan_equiv(a, b)` (alias: `sylvan_biimp`) -- `sylvan_diff(a, b)` (a and not b) -- `sylvan_less(a, b)` (b and not a) -- `sylvan_forall(bdd, vars)` - -### Other BDD operations - -We also implemented the following BDD operations: -- `sylvan_restrict(f, c)`: calculate the restrict algorithm on f,c. -- `sylvan_constrain(f, c)`: calculate the constrain f@c, also called generalized co-factor (gcf). -- `sylvan_relprod_paired(a, t, vars)`: calculate the relational product by applying transition relation <t> on <a>. Assumes variables in <a> are even and primed variables (only in <t>) are odd and paired (i.e., x' = x+1). Assumes <t> is defined on variables in <vars>, which is a disjunction of literals, including non-primed and primed variables. -- `sylvan_relprod_paired_prev(a, t, vars)`: calculate the relational product backwards, i.e., calculate previous states instead of next states. -- `sylvan_support(bdd)`: calculate the support of <bdd>, i.e., all variables that occur in the BDD; returns a disjunction of literals. -- `sylvan_satcount(bdd, vars)`: calculate the number of assignments that satisfy <bdd> with respect to the variables in <vars>, which is a disjunction of literals. -- `sylvan_pathcount(bdd)`: calculate the number of distinct paths from the root node of <bdd> to constant 'true'. -- `sylvan_nodecount(bdd)`: calculate the number of nodes in <bdd> (not thread-safe). -- `sylvan_sat_one_bdd(bdd)`: calculate a cube that satisfies <bdd>. -- `sylvan_sat_one(bdd, vars, count, vector)`: reverse of `sylvan_cube` on the result of `sylvan_sat_one_bdd`. Alias: `sylvan_pick_cube`. - -### Garbage collection - -Garbage collection is triggered when trying to insert a new node and no new bucket can be found within a reasonable upper bound. This upper bound is typically 8*(4+tablesize) buckets, where tablesize is the log2 size of the table. In practice, garbage collection occurs when the table is about 90% full. Garbage collection occurs by rehashing all BDD nodes that must stay in the table. It is designed such that all workers must cooperate on garbage collection. This condition is checked in `sylvan_gc_test()` which is called from every BDD operation and from a callback in the Lace framework that is called when there is no work. - -- `sylvan_gc()`: manually trigger garbage collection. -- `sylvan_gc_enable()`: enable garbage collection. -- `sylvan_gc_disable()`: disable garbage collection. - -### Caching granularity - -Caching granularity works as follows: with granularity G, each variable x is in a variable group x/G, e.g. the first G variables are in variable group 0, the next G variables are in variable group 1, etc. 
-BDD operations only consult the operation cache when they change variable groups. - -Higher granularity values result in less cache use. In practice, values between 4 and 8 tend to work well, but this depends on many factors, such as the structure and size of the BDDs and the characteristics of the computer architecture. - -### Dynamic reordering - -Dynamic reordering is currently not supported. -We are interested in examples where this is necessary for good performance and would like to perform research into parallel dynamic reordering in future work. - -For now, we suggest users find a good static variable ordering. - -### Resizing tables - -Resizing of nodes table and operation cache is currently not supported. In theory stop-the-world resizing can be implemented to grow the nodes table, but not to shrink the nodes table, since shrinking requires recreating all BDDs. Operation cache resizing is trivial to implement. - -Please let us know if you need this functionality. - diff --git a/resources/3rdparty/sylvan/cmake/FindGMP.cmake b/resources/3rdparty/sylvan/cmake/FindGMP.cmake deleted file mode 100644 index 62c75c034..000000000 --- a/resources/3rdparty/sylvan/cmake/FindGMP.cmake +++ /dev/null @@ -1,20 +0,0 @@ -FIND_PATH(GMP_INCLUDE_DIR - gmp.h ) - -FIND_LIBRARY(GMP_LIBRARIES - NAMES gmp - HINTS /usr/local/lib ) - -IF (GMP_INCLUDE_DIR AND GMP_LIBRARIES) - SET(GMP_FOUND TRUE) -ENDIF (GMP_INCLUDE_DIR AND GMP_LIBRARIES) - -IF (GMP_FOUND) - IF (NOT GMP_FIND_QUIETLY) - MESSAGE(STATUS "Found GMP: ${GMP_LIBRARIES}") - ENDIF (NOT GMP_FIND_QUIETLY) -ELSE (GMP_FOUND) - IF (GMP_FIND_REQUIRED) - MESSAGE(FATAL_ERROR "Could not find GMP") - ENDIF (GMP_FIND_REQUIRED) -ENDIF (GMP_FOUND) diff --git a/resources/3rdparty/sylvan/configure.ac b/resources/3rdparty/sylvan/configure.ac deleted file mode 100644 index 4a9102b9d..000000000 --- a/resources/3rdparty/sylvan/configure.ac +++ /dev/null @@ -1,20 +0,0 @@ -AC_PREREQ([2.69]) -AC_INIT([sylvan], [1.0]) -AC_CONFIG_MACRO_DIR([m4]) -AC_CONFIG_AUX_DIR([tools]) -AM_INIT_AUTOMAKE([foreign]) - -AC_PROG_CC -AC_PROG_CXX -LT_INIT - -AC_CHECKING([for any suitable hwloc installation]) -AC_CHECK_LIB([hwloc], [hwloc_topology_init], [AC_CHECK_HEADER([hwloc.h], [hwloc=yes])]) -AM_CONDITIONAL([HAVE_LIBHWLOC], [test "$hwloc" = "yes"]) - -AC_CANONICAL_HOST -AM_CONDITIONAL([DARWIN], [case $host_os in darwin*) true;; *) false;; esac]) -# test x$(uname) == "xDarwin"]) - -AC_CONFIG_FILES([Makefile src/Makefile]) -AC_OUTPUT diff --git a/resources/3rdparty/sylvan/examples/CMakeLists.txt b/resources/3rdparty/sylvan/examples/CMakeLists.txt deleted file mode 100644 index 1029e0288..000000000 --- a/resources/3rdparty/sylvan/examples/CMakeLists.txt +++ /dev/null @@ -1,31 +0,0 @@ -cmake_minimum_required(VERSION 2.6) -project(sylvan C) - -set(CMAKE_C_FLAGS "-g -O3 -Wextra -Wall -Werror -fno-strict-aliasing -std=gnu11") - -include_directories(.)
- -add_executable(mc mc.c getrss.h getrss.c) -target_link_libraries(mc sylvan) - -add_executable(lddmc lddmc.c getrss.h getrss.c) -target_link_libraries(lddmc sylvan) - -include(CheckIncludeFiles) -check_include_files("gperftools/profiler.h" HAVE_PROFILER) - -if(HAVE_PROFILER) - set_target_properties(mc PROPERTIES COMPILE_DEFINITIONS "HAVE_PROFILER") - target_link_libraries(mc profiler) - - set_target_properties(lddmc PROPERTIES COMPILE_DEFINITIONS "HAVE_PROFILER") - target_link_libraries(lddmc profiler) -endif() - -if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") - # add argp library for OSX - target_link_libraries(mc argp) - target_link_libraries(lddmc argp) -endif() - - diff --git a/resources/3rdparty/sylvan/examples/getrss.c b/resources/3rdparty/sylvan/examples/getrss.c deleted file mode 100644 index f3aa5e381..000000000 --- a/resources/3rdparty/sylvan/examples/getrss.c +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Author: David Robert Nadeau - * Site: http://NadeauSoftware.com/ - * License: Creative Commons Attribution 3.0 Unported License - * http://creativecommons.org/licenses/by/3.0/deed.en_US - */ - -/* - * Modified by Tom van Dijk to remove WIN32 and solaris code - */ - -#if defined(__APPLE__) && defined(__MACH__) -#include -#include -#include -#elif defined(__linux__) || defined(__linux) || defined(linux) || defined(__gnu_linux__) -#include -#include -#include -#else -#error "Cannot define getPeakRSS( ) or getCurrentRSS( ) for an unknown OS." -#endif - -/** - * Returns the peak (maximum so far) resident set size (physical - * memory use) measured in bytes, or zero if the value cannot be - * determined on this OS. - */ -size_t -getPeakRSS() -{ - struct rusage rusage; - getrusage(RUSAGE_SELF, &rusage); -#if defined(__APPLE__) && defined(__MACH__) - return (size_t)rusage.ru_maxrss; -#else - return (size_t)(rusage.ru_maxrss * 1024L); -#endif -} - -/** - * Returns the current resident set size (physical memory use) measured - * in bytes, or zero if the value cannot be determined on this OS. - */ -size_t -getCurrentRSS() -{ -#if defined(__APPLE__) && defined(__MACH__) - /* OSX ------------------------------------------------------ */ - struct mach_task_basic_info info; - mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT; - if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) != KERN_SUCCESS) - return (size_t)0L; /* Can't access? */ - return (size_t)info.resident_size; -#else - /* Linux ---------------------------------------------------- */ - long rss = 0L; - FILE *fp = NULL; - if ((fp = fopen("/proc/self/statm", "r")) == NULL) - return (size_t)0L; /* Can't open? */ - if (fscanf(fp, "%*s%ld", &rss) != 1) { - fclose(fp); - return (size_t)0L; /* Can't read? */ - } - fclose(fp); - return (size_t)rss * (size_t)sysconf(_SC_PAGESIZE); -#endif -} diff --git a/resources/3rdparty/sylvan/examples/getrss.h b/resources/3rdparty/sylvan/examples/getrss.h deleted file mode 100644 index 653e78e76..000000000 --- a/resources/3rdparty/sylvan/examples/getrss.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef GETRSS_H -#define GETRSS_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/** - * Returns the peak (maximum so far) resident set size (physical - * memory use) measured in bytes, or zero if the value cannot be - * determined on this OS. - */ -size_t getPeakRSS(); - -/** - * Returns the current resident set size (physical memory use) measured - * in bytes, or zero if the value cannot be determined on this OS. 
- */ -size_t getCurrentRSS(); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - - -#endif diff --git a/resources/3rdparty/sylvan/examples/lddmc.c b/resources/3rdparty/sylvan/examples/lddmc.c deleted file mode 100644 index 9e9c9c008..000000000 --- a/resources/3rdparty/sylvan/examples/lddmc.c +++ /dev/null @@ -1,499 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#ifdef HAVE_PROFILER -#include -#endif - -#include -#include -#include - -/* Configuration */ -static int report_levels = 0; // report states at start of every level -static int report_table = 0; // report table size at end of every level -static int strategy = 1; // set to 1 = use PAR strategy; set to 0 = use BFS strategy -static int check_deadlocks = 0; // set to 1 to check for deadlocks -static int print_transition_matrix = 1; // print transition relation matrix -static int workers = 0; // autodetect -static char* model_filename = NULL; // filename of model -#ifdef HAVE_PROFILER -static char* profile_filename = NULL; // filename for profiling -#endif - -/* argp configuration */ -static struct argp_option options[] = -{ - {"workers", 'w', "", 0, "Number of workers (default=0: autodetect)", 0}, - {"strategy", 's', "", 0, "Strategy for reachability (default=par)", 0}, -#ifdef HAVE_PROFILER - {"profiler", 'p', "", 0, "Filename for profiling", 0}, -#endif - {"deadlocks", 3, 0, 0, "Check for deadlocks", 1}, - {"count-states", 1, 0, 0, "Report #states at each level", 1}, - {"count-table", 2, 0, 0, "Report table usage at each level", 1}, - {0, 0, 0, 0, 0, 0} -}; -static error_t -parse_opt(int key, char *arg, struct argp_state *state) -{ - switch (key) { - case 'w': - workers = atoi(arg); - break; - case 's': - if (strcmp(arg, "bfs")==0) strategy = 0; - else if (strcmp(arg, "par")==0) strategy = 1; - else if (strcmp(arg, "sat")==0) strategy = 2; - else argp_usage(state); - break; - case 3: - check_deadlocks = 1; - break; - case 1: - report_levels = 1; - break; - case 2: - report_table = 1; - break; -#ifdef HAVE_PROFILER - case 'p': - profile_filename = arg; - break; -#endif - case ARGP_KEY_ARG: - if (state->arg_num >= 1) argp_usage(state); - model_filename = arg; - break; - case ARGP_KEY_END: - if (state->arg_num < 1) argp_usage(state); - break; - default: - return ARGP_ERR_UNKNOWN; - } - return 0; -} -static struct argp argp = { options, parse_opt, "", 0, 0, 0, 0 }; - -/* Globals */ -typedef struct set -{ - MDD mdd; - MDD proj; - int size; -} *set_t; - -typedef struct relation -{ - MDD mdd; - MDD meta; - int size; -} *rel_t; - -static size_t vector_size; // size of vector -static int next_count; // number of partitions of the transition relation -static rel_t *next; // each partition of the transition relation - -#define Abort(...) 
{ fprintf(stderr, __VA_ARGS__); exit(-1); } - -/* Load a set from file */ -static set_t -set_load(FILE* f) -{ - lddmc_serialize_fromfile(f); - - size_t mdd; - size_t proj; - int size; - - if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); - if (fread(&proj, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); - if (fread(&size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n"); - - LACE_ME; - - set_t set = (set_t)malloc(sizeof(struct set)); - set->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd)); - set->proj = lddmc_ref(lddmc_serialize_get_reversed(proj)); - set->size = size; - - return set; -} - -static int -calculate_size(MDD meta) -{ - int result = 0; - uint32_t val = lddmc_getvalue(meta); - while (val != (uint32_t)-1) { - if (val != 0) result += 1; - meta = lddmc_follow(meta, val); - assert(meta != lddmc_true && meta != lddmc_false); - val = lddmc_getvalue(meta); - } - return result; -} - -/* Load a relation from file */ -static rel_t -rel_load(FILE* f) -{ - lddmc_serialize_fromfile(f); - - size_t mdd; - size_t meta; - - if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); - if (fread(&meta, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); - - LACE_ME; - - rel_t rel = (rel_t)malloc(sizeof(struct relation)); - rel->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd)); - rel->meta = lddmc_ref(lddmc_serialize_get_reversed(meta)); - rel->size = calculate_size(rel->meta); - - return rel; -} - -static void -print_example(MDD example) -{ - if (example != lddmc_false) { - LACE_ME; - uint32_t vec[vector_size]; - lddmc_sat_one(example, vec, vector_size); - - size_t i; - printf("["); - for (i=0; i0) printf(","); - printf("%" PRIu32, vec[i]); - } - printf("]"); - } -} - -static void -print_matrix(size_t size, MDD meta) -{ - if (size == 0) return; - uint32_t val = lddmc_getvalue(meta); - if (val == 1) { - printf("+"); - print_matrix(size-1, lddmc_follow(lddmc_follow(meta, 1), 2)); - } else { - if (val == (uint32_t)-1) printf("-"); - else if (val == 0) printf("-"); - else if (val == 3) printf("r"); - else if (val == 4) printf("w"); - print_matrix(size-1, lddmc_follow(meta, val)); - } -} - -static char* -to_h(double size, char *buf) -{ - const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}; - int i = 0; - for (;size>1024;size/=1024) i++; - sprintf(buf, "%.*f %s", i, size, units[i]); - return buf; -} - -static int -get_first(MDD meta) -{ - uint32_t val = lddmc_getvalue(meta); - if (val != 0) return 0; - return 1+get_first(lddmc_follow(meta, val)); -} - -/* Straight-forward implementation of parallel reduction */ -TASK_5(MDD, go_par, MDD, cur, MDD, visited, size_t, from, size_t, len, MDD*, deadlocks) -{ - if (len == 1) { - // Calculate NEW successors (not in visited) - MDD succ = lddmc_ref(lddmc_relprod(cur, next[from]->mdd, next[from]->meta)); - if (deadlocks) { - // check which MDDs in deadlocks do not have a successor in this relation - MDD anc = lddmc_ref(lddmc_relprev(succ, next[from]->mdd, next[from]->meta, cur)); - *deadlocks = lddmc_ref(lddmc_minus(*deadlocks, anc)); - lddmc_deref(anc); - } - MDD result = lddmc_ref(lddmc_minus(succ, visited)); - lddmc_deref(succ); - return result; - } else { - MDD deadlocks_left; - MDD deadlocks_right; - if (deadlocks) { - deadlocks_left = *deadlocks; - deadlocks_right = *deadlocks; - } - - // Recursively calculate left+right - SPAWN(go_par, cur, visited, from, (len+1)/2, deadlocks ? 
&deadlocks_left: NULL); - MDD right = CALL(go_par, cur, visited, from+(len+1)/2, len/2, deadlocks ? &deadlocks_right : NULL); - MDD left = SYNC(go_par); - - // Merge results of left+right - MDD result = lddmc_ref(lddmc_union(left, right)); - lddmc_deref(left); - lddmc_deref(right); - - if (deadlocks) { - *deadlocks = lddmc_ref(lddmc_intersect(deadlocks_left, deadlocks_right)); - lddmc_deref(deadlocks_left); - lddmc_deref(deadlocks_right); - } - - return result; - } -} - -/* PAR strategy, parallel strategy (operations called in parallel *and* parallelized by Sylvan) */ -VOID_TASK_1(par, set_t, set) -{ - MDD visited = set->mdd; - MDD new = lddmc_ref(visited); - size_t counter = 1; - do { - char buf[32]; - to_h(getCurrentRSS(), buf); - printf("Memory usage: %s\n", buf); - printf("Level %zu... ", counter++); - if (report_levels) { - printf("%zu states... ", (size_t)lddmc_satcount_cached(visited)); - } - fflush(stdout); - - // calculate successors in parallel - MDD cur = new; - MDD deadlocks = cur; - new = CALL(go_par, cur, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL); - lddmc_deref(cur); - - if (check_deadlocks) { - printf("found %zu deadlock states... ", (size_t)lddmc_satcount_cached(deadlocks)); - if (deadlocks != lddmc_false) { - printf("example: "); - print_example(deadlocks); - printf("... "); - check_deadlocks = 0; - } - } - - // visited = visited + new - MDD old_visited = visited; - visited = lddmc_ref(lddmc_union(visited, new)); - lddmc_deref(old_visited); - - if (report_table) { - size_t filled, total; - sylvan_table_usage(&filled, &total); - printf("done, table: %0.1f%% full (%zu nodes).\n", 100.0*(double)filled/total, filled); - } else { - printf("done.\n"); - } - } while (new != lddmc_false); - lddmc_deref(new); - set->mdd = visited; -} - -/* Sequential version of merge-reduction */ -TASK_5(MDD, go_bfs, MDD, cur, MDD, visited, size_t, from, size_t, len, MDD*, deadlocks) -{ - if (len == 1) { - // Calculate NEW successors (not in visited) - MDD succ = lddmc_ref(lddmc_relprod(cur, next[from]->mdd, next[from]->meta)); - if (deadlocks) { - // check which MDDs in deadlocks do not have a successor in this relation - MDD anc = lddmc_ref(lddmc_relprev(succ, next[from]->mdd, next[from]->meta, cur)); - *deadlocks = lddmc_ref(lddmc_minus(*deadlocks, anc)); - lddmc_deref(anc); - } - MDD result = lddmc_ref(lddmc_minus(succ, visited)); - lddmc_deref(succ); - return result; - } else { - MDD deadlocks_left; - MDD deadlocks_right; - if (deadlocks) { - deadlocks_left = *deadlocks; - deadlocks_right = *deadlocks; - } - - // Recursively calculate left+right - MDD left = CALL(go_bfs, cur, visited, from, (len+1)/2, deadlocks ? &deadlocks_left : NULL); - MDD right = CALL(go_bfs, cur, visited, from+(len+1)/2, len/2, deadlocks ? &deadlocks_right : NULL); - - // Merge results of left+right - MDD result = lddmc_ref(lddmc_union(left, right)); - lddmc_deref(left); - lddmc_deref(right); - - if (deadlocks) { - *deadlocks = lddmc_ref(lddmc_intersect(deadlocks_left, deadlocks_right)); - lddmc_deref(deadlocks_left); - lddmc_deref(deadlocks_right); - } - - return result; - } -} - -/* BFS strategy, sequential strategy (but operations are parallelized by Sylvan) */ -VOID_TASK_1(bfs, set_t, set) -{ - MDD visited = set->mdd; - MDD new = lddmc_ref(visited); - size_t counter = 1; - do { - char buf[32]; - to_h(getCurrentRSS(), buf); - printf("Memory usage: %s\n", buf); - printf("Level %zu... ", counter++); - if (report_levels) { - printf("%zu states... 
", (size_t)lddmc_satcount_cached(visited)); - } - fflush(stdout); - - MDD cur = new; - MDD deadlocks = cur; - new = CALL(go_bfs, cur, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL); - lddmc_deref(cur); - - if (check_deadlocks) { - printf("found %zu deadlock states... ", (size_t)lddmc_satcount_cached(deadlocks)); - if (deadlocks != lddmc_false) { - printf("example: "); - print_example(deadlocks); - printf("... "); - check_deadlocks = 0; - } - } - - // visited = visited + new - MDD old_visited = visited; - visited = lddmc_ref(lddmc_union(visited, new)); - lddmc_deref(old_visited); - - if (report_table) { - size_t filled, total; - sylvan_table_usage(&filled, &total); - printf("done, table: %0.1f%% full (%zu nodes).\n", 100.0*(double)filled/total, filled); - } else { - printf("done.\n"); - } - } while (new != lddmc_false); - lddmc_deref(new); - set->mdd = visited; -} - -/* Obtain current wallclock time */ -static double -wctime() -{ - struct timeval tv; - gettimeofday(&tv, NULL); - return (tv.tv_sec + 1E-6 * tv.tv_usec); -} - -int -main(int argc, char **argv) -{ - argp_parse(&argp, argc, argv, 0, 0, 0); - - FILE *f = fopen(model_filename, "r"); - if (f == NULL) { - fprintf(stderr, "Cannot open file '%s'!\n", model_filename); - return -1; - } - - // Init Lace - lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue - lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup - - // Init Sylvan LDDmc - // Nodes table size: 24 bytes * 2**N_nodes - // Cache table size: 36 bytes * 2**N_cache - // With: N_nodes=25, N_cache=24: 1.3 GB memory - sylvan_init_package(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26); - sylvan_init_ldd(); - - // Read and report domain info (integers per vector and bits per integer) - if (fread(&vector_size, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); - - printf("Vector size: %zu\n", vector_size); - - // Read initial state - printf("Loading initial state... "); - fflush(stdout); - set_t states = set_load(f); - printf("done.\n"); - - // Read transitions - if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n"); - next = (rel_t*)malloc(sizeof(rel_t) * next_count); - - printf("Loading transition relations... 
"); - fflush(stdout); - int i; - for (i=0; imdd)); - for (i=0; imdd)); - } - - if (print_transition_matrix) { - for (i=0; imeta); - printf(" (%d)\n", get_first(next[i]->meta)); - } - } - - LACE_ME; - -#ifdef HAVE_PROFILER - if (profile_filename != NULL) ProfilerStart(profile_filename); -#endif - if (strategy == 1) { - double t1 = wctime(); - CALL(par, states); - double t2 = wctime(); - printf("PAR Time: %f\n", t2-t1); - } else { - double t1 = wctime(); - CALL(bfs, states); - double t2 = wctime(); - printf("BFS Time: %f\n", t2-t1); - } -#ifdef HAVE_PROFILER - if (profile_filename != NULL) ProfilerStop(); -#endif - - // Now we just have states - printf("Final states: %zu states\n", (size_t)lddmc_satcount_cached(states->mdd)); - printf("Final states: %zu MDD nodes\n", lddmc_nodecount(states->mdd)); - - sylvan_stats_report(stdout, 1); - - return 0; -} diff --git a/resources/3rdparty/sylvan/examples/mc.c b/resources/3rdparty/sylvan/examples/mc.c deleted file mode 100644 index 4dd8b3554..000000000 --- a/resources/3rdparty/sylvan/examples/mc.c +++ /dev/null @@ -1,616 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include - -#ifdef HAVE_PROFILER -#include -#endif - -#include -#include - -/* Configuration */ -static int report_levels = 0; // report states at end of every level -static int report_table = 0; // report table size at end of every level -static int report_nodes = 0; // report number of nodes of BDDs -static int strategy = 1; // set to 1 = use PAR strategy; set to 0 = use BFS strategy -static int check_deadlocks = 0; // set to 1 to check for deadlocks -static int merge_relations = 0; // merge relations to 1 relation -static int print_transition_matrix = 0; // print transition relation matrix -static int workers = 0; // autodetect -static char* model_filename = NULL; // filename of model -#ifdef HAVE_PROFILER -static char* profile_filename = NULL; // filename for profiling -#endif - -/* argp configuration */ -static struct argp_option options[] = -{ - {"workers", 'w', "", 0, "Number of workers (default=0: autodetect)", 0}, - {"strategy", 's', "", 0, "Strategy for reachability (default=par)", 0}, -#ifdef HAVE_PROFILER - {"profiler", 'p', "", 0, "Filename for profiling", 0}, -#endif - {"deadlocks", 3, 0, 0, "Check for deadlocks", 1}, - {"count-nodes", 5, 0, 0, "Report #nodes for BDDs", 1}, - {"count-states", 1, 0, 0, "Report #states at each level", 1}, - {"count-table", 2, 0, 0, "Report table usage at each level", 1}, - {"merge-relations", 6, 0, 0, "Merge transition relations into one transition relation", 1}, - {"print-matrix", 4, 0, 0, "Print transition matrix", 1}, - {0, 0, 0, 0, 0, 0} -}; -static error_t -parse_opt(int key, char *arg, struct argp_state *state) -{ - switch (key) { - case 'w': - workers = atoi(arg); - break; - case 's': - if (strcmp(arg, "bfs")==0) strategy = 0; - else if (strcmp(arg, "par")==0) strategy = 1; - else if (strcmp(arg, "sat")==0) strategy = 2; - else argp_usage(state); - break; - case 4: - print_transition_matrix = 1; - break; - case 3: - check_deadlocks = 1; - break; - case 1: - report_levels = 1; - break; - case 2: - report_table = 1; - break; - case 6: - merge_relations = 1; - break; -#ifdef HAVE_PROFILER - case 'p': - profile_filename = arg; - break; -#endif - case ARGP_KEY_ARG: - if (state->arg_num >= 1) argp_usage(state); - model_filename = arg; - break; - case ARGP_KEY_END: - if (state->arg_num < 1) argp_usage(state); - break; - default: - return ARGP_ERR_UNKNOWN; - } - return 0; -} -static struct argp argp = { options, 
parse_opt, "", 0, 0, 0, 0 }; - -/* Globals */ -typedef struct set -{ - BDD bdd; - BDD variables; // all variables in the set (used by satcount) -} *set_t; - -typedef struct relation -{ - BDD bdd; - BDD variables; // all variables in the relation (used by relprod) -} *rel_t; - -static int vector_size; // size of vector -static int statebits, actionbits; // number of bits for state, number of bits for action -static int bits_per_integer; // number of bits per integer in the vector -static int next_count; // number of partitions of the transition relation -static rel_t *next; // each partition of the transition relation - -/* Obtain current wallclock time */ -static double -wctime() -{ - struct timeval tv; - gettimeofday(&tv, NULL); - return (tv.tv_sec + 1E-6 * tv.tv_usec); -} - -static double t_start; -#define INFO(s, ...) fprintf(stdout, "[% 8.2f] " s, wctime()-t_start, ##__VA_ARGS__) -#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); } - -/* Load a set from file */ -#define set_load(f) CALL(set_load, f) -TASK_1(set_t, set_load, FILE*, f) -{ - sylvan_serialize_fromfile(f); - - size_t set_bdd, set_vector_size, set_state_vars; - if ((fread(&set_bdd, sizeof(size_t), 1, f) != 1) || - (fread(&set_vector_size, sizeof(size_t), 1, f) != 1) || - (fread(&set_state_vars, sizeof(size_t), 1, f) != 1)) { - Abort("Invalid input file!\n"); - } - - set_t set = (set_t)malloc(sizeof(struct set)); - set->bdd = sylvan_serialize_get_reversed(set_bdd); - set->variables = sylvan_support(sylvan_serialize_get_reversed(set_state_vars)); - - sylvan_protect(&set->bdd); - sylvan_protect(&set->variables); - - return set; -} - -/* Load a relation from file */ -#define rel_load(f) CALL(rel_load, f) -TASK_1(rel_t, rel_load, FILE*, f) -{ - sylvan_serialize_fromfile(f); - - size_t rel_bdd, rel_vars; - if ((fread(&rel_bdd, sizeof(size_t), 1, f) != 1) || - (fread(&rel_vars, sizeof(size_t), 1, f) != 1)) { - Abort("Invalid input file!\n"); - } - - rel_t rel = (rel_t)malloc(sizeof(struct relation)); - rel->bdd = sylvan_serialize_get_reversed(rel_bdd); - rel->variables = sylvan_support(sylvan_serialize_get_reversed(rel_vars)); - - sylvan_protect(&rel->bdd); - sylvan_protect(&rel->variables); - - return rel; -} - -#define print_example(example, variables) CALL(print_example, example, variables) -VOID_TASK_2(print_example, BDD, example, BDDSET, variables) -{ - uint8_t str[vector_size * bits_per_integer]; - - if (example != sylvan_false) { - sylvan_sat_one(example, variables, str); - printf("["); - for (int i=0; i0) printf(","); - printf("%" PRIu32, res); - } - printf("]"); - } -} - -/* Straight-forward implementation of parallel reduction */ -TASK_5(BDD, go_par, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, deadlocks) -{ - if (len == 1) { - // Calculate NEW successors (not in visited) - BDD succ = sylvan_relnext(cur, next[from]->bdd, next[from]->variables); - bdd_refs_push(succ); - if (deadlocks) { - // check which BDDs in deadlocks do not have a successor in this relation - BDD anc = sylvan_relprev(next[from]->bdd, succ, next[from]->variables); - bdd_refs_push(anc); - *deadlocks = sylvan_diff(*deadlocks, anc); - bdd_refs_pop(1); - } - BDD result = sylvan_diff(succ, visited); - bdd_refs_pop(1); - return result; - } else { - BDD deadlocks_left; - BDD deadlocks_right; - if (deadlocks) { - deadlocks_left = *deadlocks; - deadlocks_right = *deadlocks; - sylvan_protect(&deadlocks_left); - sylvan_protect(&deadlocks_right); - } - - // Recursively calculate left+right - bdd_refs_spawn(SPAWN(go_par, cur, visited, from, 
(len+1)/2, deadlocks ? &deadlocks_left: NULL)); - BDD right = bdd_refs_push(CALL(go_par, cur, visited, from+(len+1)/2, len/2, deadlocks ? &deadlocks_right : NULL)); - BDD left = bdd_refs_push(bdd_refs_sync(SYNC(go_par))); - - // Merge results of left+right - BDD result = sylvan_or(left, right); - bdd_refs_pop(2); - - if (deadlocks) { - bdd_refs_push(result); - *deadlocks = sylvan_and(deadlocks_left, deadlocks_right); - sylvan_unprotect(&deadlocks_left); - sylvan_unprotect(&deadlocks_right); - bdd_refs_pop(1); - } - - return result; - } -} - -/* PAR strategy, parallel strategy (operations called in parallel *and* parallelized by Sylvan) */ -VOID_TASK_1(par, set_t, set) -{ - BDD visited = set->bdd; - BDD next_level = visited; - BDD cur_level = sylvan_false; - BDD deadlocks = sylvan_false; - - sylvan_protect(&visited); - sylvan_protect(&next_level); - sylvan_protect(&cur_level); - sylvan_protect(&deadlocks); - - int iteration = 1; - do { - // calculate successors in parallel - cur_level = next_level; - deadlocks = cur_level; - - next_level = CALL(go_par, cur_level, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL); - - if (check_deadlocks && deadlocks != sylvan_false) { - INFO("Found %'0.0f deadlock states... ", sylvan_satcount(deadlocks, set->variables)); - if (deadlocks != sylvan_false) { - printf("example: "); - print_example(deadlocks, set->variables); - check_deadlocks = 0; - } - printf("\n"); - } - - // visited = visited + new - visited = sylvan_or(visited, next_level); - - if (report_table && report_levels) { - size_t filled, total; - sylvan_table_usage(&filled, &total); - INFO("Level %d done, %'0.0f states explored, table: %0.1f%% full (%'zu nodes)\n", - iteration, sylvan_satcount_cached(visited, set->variables), - 100.0*(double)filled/total, filled); - } else if (report_table) { - size_t filled, total; - sylvan_table_usage(&filled, &total); - INFO("Level %d done, table: %0.1f%% full (%'zu nodes)\n", - iteration, - 100.0*(double)filled/total, filled); - } else if (report_levels) { - INFO("Level %d done, %'0.0f states explored\n", iteration, sylvan_satcount(visited, set->variables)); - } else { - INFO("Level %d done\n", iteration); - } - iteration++; - } while (next_level != sylvan_false); - - set->bdd = visited; - - sylvan_unprotect(&visited); - sylvan_unprotect(&next_level); - sylvan_unprotect(&cur_level); - sylvan_unprotect(&deadlocks); -} - -/* Sequential version of merge-reduction */ -TASK_5(BDD, go_bfs, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, deadlocks) -{ - if (len == 1) { - // Calculate NEW successors (not in visited) - BDD succ = sylvan_relnext(cur, next[from]->bdd, next[from]->variables); - bdd_refs_push(succ); - if (deadlocks) { - // check which BDDs in deadlocks do not have a successor in this relation - BDD anc = sylvan_relprev(next[from]->bdd, succ, next[from]->variables); - bdd_refs_push(anc); - *deadlocks = sylvan_diff(*deadlocks, anc); - bdd_refs_pop(1); - } - BDD result = sylvan_diff(succ, visited); - bdd_refs_pop(1); - return result; - } else { - BDD deadlocks_left; - BDD deadlocks_right; - if (deadlocks) { - deadlocks_left = *deadlocks; - deadlocks_right = *deadlocks; - sylvan_protect(&deadlocks_left); - sylvan_protect(&deadlocks_right); - } - - // Recursively calculate left+right - BDD left = CALL(go_bfs, cur, visited, from, (len+1)/2, deadlocks ? &deadlocks_left : NULL); - bdd_refs_push(left); - BDD right = CALL(go_bfs, cur, visited, from+(len+1)/2, len/2, deadlocks ? 
&deadlocks_right : NULL); - bdd_refs_push(right); - - // Merge results of left+right - BDD result = sylvan_or(left, right); - bdd_refs_pop(2); - - if (deadlocks) { - bdd_refs_push(result); - *deadlocks = sylvan_and(deadlocks_left, deadlocks_right); - sylvan_unprotect(&deadlocks_left); - sylvan_unprotect(&deadlocks_right); - bdd_refs_pop(1); - } - - return result; - } -} - -/* BFS strategy, sequential strategy (but operations are parallelized by Sylvan) */ -VOID_TASK_1(bfs, set_t, set) -{ - BDD visited = set->bdd; - BDD next_level = visited; - BDD cur_level = sylvan_false; - BDD deadlocks = sylvan_false; - - sylvan_protect(&visited); - sylvan_protect(&next_level); - sylvan_protect(&cur_level); - sylvan_protect(&deadlocks); - - int iteration = 1; - do { - // calculate successors in parallel - cur_level = next_level; - deadlocks = cur_level; - - next_level = CALL(go_bfs, cur_level, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL); - - if (check_deadlocks && deadlocks != sylvan_false) { - INFO("Found %'0.0f deadlock states... ", sylvan_satcount(deadlocks, set->variables)); - if (deadlocks != sylvan_false) { - printf("example: "); - print_example(deadlocks, set->variables); - check_deadlocks = 0; - } - printf("\n"); - } - - // visited = visited + new - visited = sylvan_or(visited, next_level); - - if (report_table && report_levels) { - size_t filled, total; - sylvan_table_usage(&filled, &total); - INFO("Level %d done, %'0.0f states explored, table: %0.1f%% full (%'zu nodes)\n", - iteration, sylvan_satcount_cached(visited, set->variables), - 100.0*(double)filled/total, filled); - } else if (report_table) { - size_t filled, total; - sylvan_table_usage(&filled, &total); - INFO("Level %d done, table: %0.1f%% full (%'zu nodes)\n", - iteration, - 100.0*(double)filled/total, filled); - } else if (report_levels) { - INFO("Level %d done, %'0.0f states explored\n", iteration, sylvan_satcount(visited, set->variables)); - } else { - INFO("Level %d done\n", iteration); - } - iteration++; - } while (next_level != sylvan_false); - - set->bdd = visited; - - sylvan_unprotect(&visited); - sylvan_unprotect(&next_level); - sylvan_unprotect(&cur_level); - sylvan_unprotect(&deadlocks); -} - -/** - * Extend a transition relation to a larger domain (using s=s') - */ -#define extend_relation(rel, vars) CALL(extend_relation, rel, vars) -TASK_2(BDD, extend_relation, BDD, relation, BDDSET, variables) -{ - /* first determine which state BDD variables are in rel */ - int has[statebits]; - for (int i=0; i= (unsigned)statebits) break; // action labels - has[v/2] = 1; - s = sylvan_set_next(s); - } - - /* create "s=s'" for all variables not in rel */ - BDD eq = sylvan_true; - for (int i=statebits-1; i>=0; i--) { - if (has[i]) continue; - BDD low = sylvan_makenode(2*i+1, eq, sylvan_false); - bdd_refs_push(low); - BDD high = sylvan_makenode(2*i+1, sylvan_false, eq); - bdd_refs_pop(1); - eq = sylvan_makenode(2*i, low, high); - } - - bdd_refs_push(eq); - BDD result = sylvan_and(relation, eq); - bdd_refs_pop(1); - - return result; -} - -/** - * Compute \BigUnion ( sets[i] ) - */ -#define big_union(first, count) CALL(big_union, first, count) -TASK_2(BDD, big_union, int, first, int, count) -{ - if (count == 1) return next[first]->bdd; - - bdd_refs_spawn(SPAWN(big_union, first, count/2)); - BDD right = bdd_refs_push(CALL(big_union, first+count/2, count-count/2)); - BDD left = bdd_refs_push(bdd_refs_sync(SYNC(big_union))); - BDD result = sylvan_or(left, right); - bdd_refs_pop(2); - return result; -} - -static void 
-print_matrix(BDD vars)
-{
-    for (int i=0; i<vector_size; i++) {
-        if (sylvan_set_isempty(vars)) {
-            fprintf(stdout, "-");
-        } else {
-            BDDVAR next_s = 2*((i+1)*bits_per_integer);
-            if (sylvan_set_first(vars) < next_s) {
-                fprintf(stdout, "+");
-                for (;;) {
-                    vars = sylvan_set_next(vars);
-                    if (sylvan_set_isempty(vars)) break;
-                    if (sylvan_set_first(vars) >= next_s) break;
-                }
-            } else {
-                fprintf(stdout, "-");
-            }
-        }
-    }
-}
-
-VOID_TASK_0(gc_start)
-{
-    INFO("(GC) Starting garbage collection...\n");
-}
-
-VOID_TASK_0(gc_end)
-{
-    INFO("(GC) Garbage collection done.\n");
-}
-
-int
-main(int argc, char **argv)
-{
-    argp_parse(&argp, argc, argv, 0, 0, 0);
-    setlocale(LC_NUMERIC, "en_US.utf-8");
-    t_start = wctime();
-
-    FILE *f = fopen(model_filename, "r");
-    if (f == NULL) {
-        fprintf(stderr, "Cannot open file '%s'!\n", model_filename);
-        return -1;
-    }
-
-    // Init Lace
-    lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue
-    lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup
-
-    LACE_ME;
-
-    // Init Sylvan
-    // Nodes table size: 24 bytes * 2**N_nodes
-    // Cache table size: 36 bytes * 2**N_cache
-    // With: N_nodes=25, N_cache=24: 1.3 GB memory
-    sylvan_init_package(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
-    sylvan_init_bdd(6); // granularity 6 is decent default value - 1 means "use cache for every operation"
-    sylvan_gc_add_mark(0, TASK(gc_start));
-    sylvan_gc_add_mark(40, TASK(gc_end));
-
-    /* Load domain information */
-    if ((fread(&vector_size, sizeof(int), 1, f) != 1) ||
-        (fread(&statebits, sizeof(int), 1, f) != 1) ||
-        (fread(&actionbits, sizeof(int), 1, f) != 1)) {
-        Abort("Invalid input file!\n");
-    }
-
-    bits_per_integer = statebits;
-    statebits *= vector_size;
-
-    // Read initial state
-    set_t states = set_load(f);
-
-    // Read transitions
-    if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
-    next = (rel_t*)malloc(sizeof(rel_t) * next_count);
-
-    int i;
-    for (i=0; ivariables);
-            fprintf(stdout, "\n");
-        }
-    }
-
-    // Report statistics
-    INFO("Read file '%s'\n", model_filename);
-    INFO("%d integers per state, %d bits per integer, %d transition groups\n", vector_size, bits_per_integer, next_count);
-
-    if (merge_relations) {
-        BDD prime_variables = sylvan_set_empty();
-        for (int i=statebits-1; i>=0; i--) {
-            bdd_refs_push(prime_variables);
-            prime_variables = sylvan_set_add(prime_variables, i*2+1);
-            bdd_refs_pop(1);
-        }
-
-        bdd_refs_push(prime_variables);
-
-        INFO("Extending transition relations to full domain.\n");
-        for (int i=0; i<next_count; i++) {
-            next[i]->bdd = extend_relation(next[i]->bdd, next[i]->variables);
-            next[i]->variables = prime_variables;
-        }
-
-        INFO("Taking union of all transition relations.\n");
-        next[0]->bdd = big_union(0, next_count);
-        next_count = 1;
-    }
-
-    if (report_nodes) {
-        INFO("BDD nodes:\n");
-        INFO("Initial states: %zu BDD nodes\n", sylvan_nodecount(states->bdd));
-        for (i=0; ibdd));
-        }
-    }
-
-#ifdef HAVE_PROFILER
-    if (profile_filename != NULL) ProfilerStart(profile_filename);
-#endif
-    if (strategy == 1) {
-        double t1 = wctime();
-        CALL(par, states);
-        double t2 = wctime();
-        INFO("PAR Time: %f\n", t2-t1);
-    } else {
-        double t1 = wctime();
-        CALL(bfs, states);
-        double t2 = wctime();
-        INFO("BFS Time: %f\n", t2-t1);
-    }
-#ifdef HAVE_PROFILER
-    if (profile_filename != NULL) ProfilerStop();
-#endif
-
-    // Now we just have states
-    INFO("Final states: %'0.0f states\n", sylvan_satcount_cached(states->bdd, states->variables));
-    if (report_nodes) {
-        INFO("Final states: %'zu BDD nodes\n", sylvan_nodecount(states->bdd));
-    }
-
-    sylvan_stats_report(stdout, 1);
-
-    return 0;
-}
diff --git a/resources/3rdparty/sylvan/m4/.gitignore b/resources/3rdparty/sylvan/m4/.gitignore
deleted file mode 100644
index 5e7d2734c..000000000
--- a/resources/3rdparty/sylvan/m4/.gitignore
+++
/dev/null @@ -1,4 +0,0 @@ -# Ignore everything in this directory -* -# Except this file -!.gitignore diff --git a/resources/3rdparty/sylvan/models/at.5.8-rgs.bdd b/resources/3rdparty/sylvan/models/at.5.8-rgs.bdd deleted file mode 100644 index 8a0c1950069e5977d02e225f5bfb06cd7902c26a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 50448 zcmZwQ1-Mm3+Xmo~?ot8CBcLJ{A}SzpK)SoTyFpOUO?P*L3J6F_NOuX6(js;%pWXUD z&)PG*4_yCT-(l80v-UY>=6%=9+KY`}2IpKVd}YGd4SY$7MaLi9YKh&&!}rZeczD&E zl!uqi$#{6toScX6nN#raoH->A&%}Qp9K}d?%DL3hf&b&Ab7|D?IG0v^%(-;xBhIB) zA95~(`has8)%)UeajR*MbD3`e2fLihqTb=$-RifUyGOm%xqH={oy)4;rfUucYio$z0kRQ>iN#) zSI>2>fO@ub52$B4_n>;Za}TMfI#*CV*|~?+6P+uh9`D>E>aor}svhm!W9pI4J+2<^ z+!N}Up)e0$Hb2S3A?6}He9`+gyx?JhfU4T^RThGJP#Y1EAX(s zxgrnink(_Jwz)D7YnrR@u)4V_52HWgZPi#sFN!-lHTofO*I*U>QgKJ$IvPaWwOA$I z_A5AtNvad?j{k{Cm0#I81fibxBWN;2=Mdxu+K;4=DRB--)JXfIq>DF1Qz^;Xgw2xN z(bN~R7dPW!L30$U2hA;bSil^GHlMi_4-;-n0q62K*Tzn9F4t`-<6I8s+G&6G+fpoA z>^f-wJ-4M~w3v3%{!E4_l{1*T@GzY@O8qqEZahq7?#{y$<{msuX70(uB<5cJ@GfhL z_PajntG8XU(SFxY`!C*hiAVe00PR0_+maCNcQ0uFsoRzi=T15|So`0(ZOL)&m~$^_ z|B>64DCZ72H&pu%+_t1Sx6ip(w0}?7czxq8=Z0zj4(Eoe-*#?5^{dWJRxfpKih8kgQ`HNdo2H)c z+;sI^=Vqv9J2z7e%u?gUIK>Kn*c?;z%i&vE!C>Z@qQT==u!6~2V2UOezp@nsW05He z9)@To=G+oflpIUdL!4Wte(|@| ztujUX9a6wbXXnW+PU?nXum^BT50awMpLxkA%(3pcJ3`x zw74VXt<-mJiz(VXaEVx{?c6p~wBO;9vQpi-?WSnI!zE~?vU594&Q*+VQLUxCbGuE> zm2(c4xRpfb_L`#oZlAiObNkgLoWl}grI>REP0@aLNL|#q!|Ec=9Z?r{?x_0lXi2k{ zN1Z!vif;a}1X?NR+zC^3b9qu-z`1wT`J%TJ){@}dX;ZY{ol)mT%l4yQXussxvH>DcmO{K>8QmZk(G-`}5ts3J?r^fiwt1-R|YK$+V8sp2P#`rR; zF}^J7{dh*;ot*W)Ffc3+G#21{eSg#=3v^btV*!uO3onxeJ3ISvesmrV@AK#9@ZT>R z>s{f6&gG=BVMOPJX4xQe-*J9;eO~|l^7z*$c;{j*8`Ax>Y*_hd+0gRSvf&k=WkY;` zmJRbkdNaO!d5D$`w;;VK^x|0ojSV}Rm(Yy)d4%nuV7!m2F+Y!~F+Y#1F}^3%7++yE z#`mNe<13=Z_?}W@d_~n5-_vT0?-@15_pBP@E9PB@^@cDo=Ze!)@U~RXpKSflv3-(1 zFEr!)P)e~M=SSz^u(UtFjQ@TmSx*dSV`-!(`13+DzJDm??l?cZzP$f_&->R`@Gi@G ze0VjMc6yw-5M3pgt=V46pBI{Oekg6(kMpDRaM;eD-`;<}HmpmAvz_ZeKj+U2&G`PI zbh_jG@cJ(P`*rrO@9N!=b&2q5=ep6w&E4r@<{tF3=AQI3=3eyE=H7Hsb07LCb6>iM zxgY%`-iELa1@kjNjqwgtV}4#xV}1syF}}fSjPFG?#`lsM;~S#J_=c)6zL(V)-z#d2 zFQ&%$hN&^W;okjO7Y+l(rAFt)C8+1;vHnqP&+X3(%{V`lG3=M~e`W{n561cL_aiO$ zL*v&~pDXk(T?PNwnPmOs3`jX9|t`gHWc@a{n`p#{EGk(`mW?nL*3_&rEv1 z-5<=N_u3?{@`_5?hlsJa(}RbmivR1wA>%O zL1X_6Wfd*=2dio9pP{UwW&eDWmi==rjs0*P>rk-XulEl7;Rf%ppM_I5vK{Mv^nF63 z?5}UJAK!1Ye|>a{?3Y{EF8gcrbESWq?|<9Bek*JIyzSnbSj+yogO>esCoTKuE?V}_ z-L&kVduZ7|_tLU|?xSV@+)vB?d4L{f`^!N(hA&?Z(PeN6u@0p)E-~-0UmW!g>wP%& z7~8QPM&Bni;`@d24*T)_PWabH?WIC5mI}7Z{uBLN=|AQBPy5%u%NjrLjQ4TYvcH_A zWq&zG%l>knmi^^DTK1Ore%M*Ld*Jlm6rAQ8l4x<2v~=b2hRw+ z$Fk0iX9V7(S?3BT;_ry*oOtHo`=cIwzfeA6JHFq?+K=xS$|r1>?-%`C>Hp04f9_xZ zDQo<^FT6iwog;iV=f0%xGk-;AH-AlMGk-&8HGfOrYyOVD$NW8gxA`WW#r!`yGcF<4 zpquFFd5i{X#)C?iU_b<9?x#8uts2sBypWs2cYRkEwCL z@VFZH3s0zVzff3>{ryQb_V*%c?B7qRvA-8pWB-0yjs5!>HTLgk)!4s_sj+_-S7ZM! 
zp~n9GoO&d-JT><3QtIK@GS$PdMXQnZiE8Y(W!0teNL5`5k6hIy@kmzv93I)Kv7c5{ zV?V8=#(r8^js3KW8vAKgHTKhLYV4=g)!0vKsIi~cRAWD_rN(|*TaEp%jvDKKT{YJK zdTOly_0?Ga8>q4VH&kQ&Z=}Zh-&l?Hzlj>_e^WKq|7L2e|IO7{|68cB{wjA{*8g^DtpDxRSpPeyvHo{dWBu=>#`@n`jrG5a8tZ>oHP-)bYOMd= z)mZ;~sBzp=jrG5m8jF4=@7}(y9c=`6Joj(q8&UP)c`eL+>1O7BbQ5!bx{-MR-M~DM zu4jIMu45iVKVcqBKaS@DtV4M$9N_r?+aEO#p&v01r3;x~rXM!HLKig0=!eY1=m*Wi z=?Bas=mO@Ebbj+FI-hwoeZP4Ooq)GCtV6+dIZlo1aJ(AV-2^qRvx#b4SCiDZjwY*d z-AqyAI`JhI)rkKz?Qeye33!yCM*eM7V_(>$Mn1l!#=fvw zjr`oA#=fvsjeOmv#=h{j8u`0jjeTK<8u`3ajeTL48u`6jjeTK{8u`9gjeTLN_dZ|u z@)v}!VmEnUjj{bauZQo&{qzC4oB1H!#e9hFWIjxHFdw1YnUB(K%*SZt<#E>G{vCPw zPTWzWeqi(j+htumNz1zUE{(iArLRX`o>t4cc!uq=E}o@jT|7t2x_F+Jb@4q~*2N2S zK79Fdk(PDw5}n{XQC+5ymsePaf;m;vftwEKB6t&?qk~G?LMI`-tJS{;_W`8E#B^P8u9*ubts5;C||N2@ebuHTK2)O zX^HnYw8Z;c8u9*)bts7U_iBmvO}0zC|3^!_f1oAaKhhHKpJ<8q&vZU~`SJ@b@&1)g z!1Dsup&;JBs}b)%)QI<=YQ+05HRAoZ8u9)|jd=g-eS`I#`yk_=H2gBCd*Us%8u^e! z-5sZ^ksry_UGbJ)jeJR=?uP$QqxsN3U_gt{FbS*Y9Mk%k)imOS{ zc^OJ+TGqufw5*GXw5*F|X;~M`(XuW+Pv^&%FXd_EWhfPBSr;qP385Fy6KLdRW!9m{ zK3IkAvJY0JWgo0Y%RX40mVK}Wjl7BCio>WNzO~pM3gTN^jri74BffRjh;KbL;#*%W z@om6%#J8dLOMDx#9r10f{j%>hVY|e)DJ}7BMkBt>_4N|p7HpUJwxlJ#t!RmFYg*#l zhR%;KU)s_V-*&Xbw>_PJTLadiAif>dh;Jt~;@eq`_;yhvzFpObZ#OmK+uge+>pS;7 zT;DzQ{KdHSRU;pIs~6%qfg1VIS3Tdkern`PfAw5^jzEq48K|C(&k?ARPlMDm@i_uD z^6N$QbUY_eBj1Lor{Z%2YUJO`YUJN5YUE!`jr<#?M*a;~BmYLIk$)rA$iGo)HMYyX^Exf-(sEkXr4_WSODkzvm)@Xd zU0Oxw$CodwX<3)n(6TPQNhjdTm$fwVB9wJB@?t&fP>>g)Y+yU`B9x6Z@*R(8!BWcG9*Ex?ME#!k1W7 zBmR5X9tz^WSB?1ZQzQQS)rkKAHR69zjrbo@BmRfgi2o5a;(t_)_#aav{>Rn%@w`Bd z_@7WC{wLK5_=u7k@jsUElmAUC;a#UDy0IUB~Cs%0JgiS4ou{!Gg{_zNxT;IFi-gTK+T4*pK($CodE(6SEx zNy|F;7oC7FU;d_X9sI*O6kGS=`$VhI;bQ;sJj;)mO9mr9eE!_(Pr~-_+zI_jz3*ae z`4dYY4aFf_er%kD#88&m*X(;W>hODn5^( zmOL!McI4r6+K)Uesg`xF6x)%9zQossC4nwhhW(gNUt)xMSjd%oCp%IVZsx;yeT#ZINf~(VrM{o@q z@d&O-BObxEXv8D9Hf`~U)u9m&Ut)D>i$|;;ZR(*GiAO`WBOZ<7{jM}? zSr;19vMxkrfoi58`|d6wWV!7 zT|3(5)3v8ufJt*4f^)th0S+S!esw1@PrdKU&t={+B%bq2vw+ zc%H!aoaPs4S!Z9OWt|;D%Q`!h#yT6#6Aq(-`Fe%zp-BG4*e>}O<*D=!_x&UM>xZ$H z`5Z~he2$`JK1b6spJV6(`0`~eE%P~!miZh{%Y06t6Yv~?btst6NovgJWHsh7d zRgL+arpA0uS7SbBc)!dV`_@eF@V;HPPX1Q$zo+dPenA(T#q)cZXVcxybLej7xpY_a zJi3c{KHb^8fbL{oNOv?ZqC1!u)9uYm=yv9%bX)T>x{di&x-~wJz#8L?{={|tdfX9Z zopj6Dj_Z1b_RDp>lI_UTH?$vlxJoT~x|;2hhihob!#8Qk!?m>J;X1kizI<6vOCD~Z zB@Z{!l82k<1biNWHA>#3@w!aB3V+7$vs(nZ*k+#J%)EteV%|zOGH;_BnBS)BnYYt* z%sc2>=ACp+d`^LNh*OQ|IQnBZ+pC-R(ACU)>8j>^bQSY{y0ZBIUCDfqu4q0)S1=!@ z%bSnT&zq0Z<;=(EvgYG-BAzd>4h2Ctp)Q5z3+m_ad_i3t&ll9s;`xI5X*^$0KZWND z>L>5`91rIIy!K=M-&14$FQ_s97uA^mOKQyjWi{siiW>8ORgL+-rpElgug3geSC_}X zUr=NIZ>TZXcczmWV>D=dP#ODij3H-YSHRAP^x|nlc zs}a9%)XzBgts3$CPF)nAM^GcaH`PV(-!D)j-an`dJkG{4`FiOf;^f?+d{uJ+z zMXkt-pY{95b@>b1<+}Wpmh1928hP=%z8-n;hgz=7KiMwV4#(d{dW4`mMG2aQ^ zsafN?$e1+zaMYOpe0o0SKffCDUqFrde?X1-e^8D2e@Kn_FQ~@+Kdi?57gA&XA5mAp za|AW!|1mY@|8X_u{|Pnbzpy$HQ=rED7g1v!d`gXY6jfs#d|Hk8Jfp@s_^cZ7DyGId zSX_vGYVrQ)B(4M1)v^xMU_0_K zl$td1FqB%ftOK=aSqJLSvJTXxE8xqQdbF$q^=VlL8ql&1G^Ax6XhbLC%a_J9@~{c( zP-NY0%63_Io6)lFHm7CXZ9&Vr+mc3}gwlZ5KjynN+e4B0Zo_uWcU$e3b+;YcWxm_f zGT$9&%y&n9z07wfw#$5Xre(gn&=v6IOIKRvyBjU@-JO>C?m^3Z_oNf?IR)0CV7_~+ zG2eaEnD4%7%y&OE=DWWd^F2U~`5x%qiZ!mop`7~C*pGoYRvywHP+p6 zYQ$r_8td)^HR3Z-jdgdD8u6N}#=1L2jrdJfW8IymMm(phvF^@LBfc}$Sa)Zs5%1Y* zth+CG&+#>;L`(cRF!Ewv9p>WgT2h%XPVg zmg{mUE!X8Tx&pp@d6kyy@-&e)tfgh$Tt_1>LRnACI=X>IUWBrdmUVR#jl2luEn3#u%{1~Nlr1#! 
zB9yH(^1_!`R3rXxvpp2Vf4ds--=RkQcd8NpU24RCw;J)^qelGqsuBNvYQ%rPx&l6@ zpho-;suBM~YQ+Dr8u34(PQ>RF)QJBvHR690~pI0OP@2L_03u?swq8jnv=6%W6b$#rMCksh{8hH`QWuAw;xT4<& zd2v-O`}H-pBQKu!->)g_h2gv5IRpC_;L`W~AJFry{{}tJ{2@Kp{1H9J{4qV-{0TkF z{3$)t{24vN{5d_{`~^MD{yy$YdaCt*Ma%qrO=EsS`G&^)e9Jl%nV;|2j`{iC_rK3t z*3Fx=t(&omoF~keFR>rkZ}S!Vk+%7Y{Y2Y*#eSx3zGA=7Heaz{X`8RuZ?w%Dq8lL)U@m~X=vGJ($b0e@+BRO zbtXOQP-I`pz;@Y3GSaedWTIuC$V|(=kcH+xz&aGn*F9>Cfi-v-{U) zWi9i0A1(8lgRY1#UvknipSft6&)l@kXC7MSGcPUknLsDveI)BpFrWFWKI|Rtr(_8Htzs#7oi^iPRAPmAep4^>KSDP#KT0DuO}X}eBi&(OB6#h#^co%#|h zM%#57D^4TtO0W*sJ@V?gxT8kRyOL~|yedUYUX`XR#$U!i%g~ZniL~TZSz3Zpj+VT7 zo|e2SPbcEbmkKoUsv_%9kXK0=SCl(HKUbOOA>Ju{e-+x|?W)oiZ&!`Bc)RMf#oN`O zE#9ssZSi)sXp6V2Oe3c(SC2-#>$48U;_Vu+-Qw*U(iU&mh(^2{>+2Ek zCTdwnnzCKinP#-ayE!fKZb4VXmoF`8iFYen;@z5-c(r}H`@`HKH87C^i@k-`mtT&(w~;N44^CG%a?()#N`EA;xdSqxD2Kx zE-%s&mzU^7JYQgq@?`RO9hRn>amJ!*3Z6Uo^CC~ea|iVVJaf9%PV7v2bibO{moP9e&%U(U-NXjk9h{&+dPwQgXhOOpW|LuEu`-jvD!X zLXAamq4!B&|G(?vUOacu*X_o02lY-ocTjJ~a|iV{Ja+}lq2lR6D4f=KShxBXa zkLXv;AJfaspU_M3`32UYATK{t%eweE+htw+f|hmhOIp^&uV`5pzounf{D!`BT}1r9 zV|yrj@O(jye7LFJh0iajksm*(ci{d+jePk@{WdsF8pFsF8pFs*!*HshNK%!(^dG{@tZU{v}Z(|B|Yaf63G< z@!wNWufW@JHS#Z|8u^z>jr>ckM*gKyBmdH>m*T&tpho_sS7TkwphiAsRAXJtq(**b zR%2bvqDH>nt;V`|j~e-VuNv!ORyFcDn;Pq4b~W<*J~h_G9BSlyPBj+6Z+$#-`S!3b z`l!bLe&fci7{_w+ybtjCh4}fgsOS2vewUZ+@0%0oYv%jutLA+46?1<2vbg|#$@~C) z(flBN!Tb>Yp1B}>-uy6q&RmE-Ykq`2gU>Ip4&^jHzo32>pI=a)z~>j#$MIJL>Z7FAYUtNv(*HE9rTYNS0p_ck29ucUKA9d94;1Pow`BG1P49^$TM-ejh5j^5h zBcB?n58?TO8u`^keE^SG)X2AH>V5bpYHH+P3pLiomTKfc@|)@tNu8#UI&wrb>S zJ2lqD_G;vB2Q}8kj%ws{CpFf^&T8a$7d6(!u4?3aH#HW)lHT2YofvXIUdPwPDR{mR z?~g^zlkB9PY@cB6MUON0rpK83(4)+K=@I6B^e}UO`W3uYWgW`Pcq^+O8anWnm+eE$ zgXovcgXtIXKYjgRoT46N=MQ1~3+AEpK=aG=0P`z!e{+oPXC6lPH4mrzm`BjP@%aV4 z|G@Y|8O45#Ka|n5jDHL*;~z`Q_{Y&0|9IA+VEhx*GX9Bdm+?=cW&D$A8UGYo#y^#o z@lT^={L^U}{|p-ApUE0!B zk=|n7M89Qzi{5D7Os~gFScii9_^s--p#wjP?eaeIZF-IMZ>Mn|ze8WY3PVuK`^a5v zUum!3O|LNTp_iNY((*oXAN`v3@2BN`#?t{-d;v z{}?UfKTc!(@30O9<3FL6@ttX3BQ@5AkJb2nKT%^{_*C5q&k@vE7d}^GJYT4>E_|uR_`XtOUHDp!@qVLj zjki4B-}<_epNrga$0Fjs`8(eq`MN&|)%R@2eRC)`>1#oF&Oqb7`3HUd6+B;1$lujo;@%HP)er)cE}hs<93|tj6zKNZlF# z4nmFJ|4}v8p~uu1&*N&WLr2 zjJK-39^-{p+i+miI+X=oj$iOH*3j7d4{?gkD^)w7f5BLHD!%mbAPtYDM?4{?;_! 
z7v1(beawGbwudqqONAQoXs@1#rA3YSbX1SWQlv(_I;+QG=~5$pUDczp)Tt5A?&^_P z8r6tzPxWvtrE0{xw>pL?Q6v6+)rfyTHR9i2jrb2xBmM)`i2n;}#D9<)@gJ;4{9jZf z{x7K!{~>C`f2bPqe_4(AzoJI`V`{{|jrTBL=MAxs*RdWcaezD+9`DDDp~yNkg6+tI zP)5?SE{&p*2ce9nWt|#Bj}5&zq-EV2MZ%Q~=#mUUn;E$hG%8hNmkHA?vVZfx1NmFV-?SRa=8^P*14gIC!uaes}L zIKNIyT$j@l#}%~1Z6%F3y}>#Z#3htfY)4#HYd_+$MlI{}n{2oBIaZGQ`l4I^-KN+& z_AkU&yu{Yi3$SnTJY>iQdVX{qc_Tf~yosJ`ev7vKKem~kZT(wlyB~;crDt0IHrnnF zVsF#at$#a>`-Niu_uD~Fwf>zn;(}>p*)}bI?iGDm$6XJ7B`z=21INK4QP~M>}KJEl<@o^_j_4W%b*W0hOTyMY8a=rad%k}mLjq&}-Iuse-Uu?(t{?>ks z?;o{{?_ajd`2M3gzSQ_a`H@CE@ACeXmVGM;+hyNMO3S{LjFx>XIW7BE3R?E9l(g(y zsc6}^Qq$PC(y&Ge_b;X6y!GeM6gWg2()#nF4v9lLwvW4Yx=T+>95T=nhm5quArp-_ zWM&{q?x$Pf-%GF#1IJ!>mKWd=*m5d_BT;nXgA_nXkuanXkubnXe~knXkgM%-55&%vTXw=Ibe1=Bp?z z^Yb)~`FVzQD43sTy&quRJal0Hr<H6mKbUkwgx~{n*UB_IBu8q$ruto`=OH7P($8$_vhgJM} zQMcq(RkkCqs%gLERdu!_uWD#N@~Wm<@~RfwC9i7Jl2>(T$*a1w9dDVcH zylO~GUNxd6uNu?6@a0Pr8qe`UX-fA9LJHA%ju%REx?2!Zh?X}6E$J@S--_;RZcTSG zx1sSIFO;@)hafy>pxc|<)9uV1XgtRYr6b)Y2tSH$eao@v2=VX2_D~T2u4=@;n;P-& zu15TOs1g63YQ(>n8u9O~M*RDz5&ynw#J`^!@$auj{0FEJ|AA`6{{=PTKS+)E4^|`o zFRBs$m(+;=5H;dIRE_w*tnPwaDmCICQzQPv)QJCZHR3-)jrfmLBmSe*i2rCc;y*^+ z8Y$>K*4HIN?0p^meePOp`TDvy@rXjb#$Ggm?W@fb=~d=Q^c&{M^h)y-dWCr^z1%#F ze%(Bse$70Ce$_maUS^&}FU4CI)}buHTO0MF(1Eu=Y+qoWN6$0Qr{|a#(6h`7=^1z* z!~3dG)`bInj)DD{-zBU=!Tg4@l+z{vC+-AzxPsvG8^D_rurm?<4ee@8kJ``kKAyDBG`^ zkI`4m$LY)Fcj!yz6ZA#%N&15MUHU!qDf+zmG=0u|hCXXPOP?{Hqfg^ouny(jaDX4h z_7mm{^l|e=`l$I5eb{`NK4`u|?>Aqik$0h7qmgItvkpbpmFsMmb>#zE)|DHytScYV z$cs>-xJkS|X1~Pi6B_aQlyxW)ug};n@%o&Wczr=jyuPF*USH7?udiu|*Eh7p>suP} z`i^xdh}ZXO#OtOS@%o?oI6kMKM!bGhBVIqL5wD-sh}SP_#Oqfz;`N&v@%mkjc>SS9 zy#7=pUVo_(ufM%NVvXzew!cTl&;M7?$2#?&8o!TA6DA8a)~UPH`2CWou}&pbaW1afPk1xG%FD=94Vf62$cH`ei z=<9acNg3I`-JFTuX3k7+F=wIQGT%*a#OD`Shq68#;5h@^Wxvi!W4{h18;$)syS^U# z^?mBq;cR@4f$gizIceFibJ4P2=cZ-9&O^(7otKvVI)Rq``hHsW>wL8A*ZJwC`0}Ly z-3recScd{X==}iemZ1ZmW1w4@qt64%d_By5nXf{$+}}JxW4<0`9m;BaenBns^*GyQ zzMi0Ez6#SaUr*99Uqxt{ucv64ucEZf*VDAj*E94|JYQfP3g)MncbK2z+K>4u;a!k* z^YB7^UV(0A4zCX_rPwd?TiUX}ONN&`a@rfi+6_TxUqtXq||6-L_tI^6k6gjm6mu+qa_~GX^F=STH-O2UW!|D)}dhjXM2bFpQHVl|GC~V z*0_%gWgfjC2+xyfiQfVmuMcIRfBhoXq2Tq4z2~#WePAd{=y^f-)oI)ZhO&&tePAfB z(zp){!NJ}{J(H0}e#^&8?F`d6`EuD8{+#CHuX@qLq) z_^zcTzUyd-?|NF|yMdPYZloo?o9Lxj+F6H!_-W7@wOZ^_lji%{NSyX3_QTJqu~EqU=SEqQT@mb^Gk zOJ1CzB`?m>k{9RbrTFsYJl!h@Z;k1m<_mNW^F_M5`4ZjDe3|ZQzCw2~U!^;nuhE^% z@6#R4*Xa)C59s#h8+1GKhjd%>M|2zW$8_skjzvd^|EFvZ1@Zq(jrf1AM*P1}BmQ5i zSL5>wYQ+C*HRAt`8u9;Djrf13M*P25BmOtli2wi8i2o02#Q#V2QaoQ!BmO_D5&vJ* zi2tu@#Q!%n;{Ut4E1oZ?5&u8ci2q+|#Q$$K;{T5t@&8wi`2VM7{L_Zup+@}gQX~FJ z)UEOFWBlur`u6eiava9;q{N@s@!TQaAB(!*kN%AQNY3_ac9r_Y-+(C5q<>9gic^cizz`ZWIg3#>zVHyq&KNwEEd`5yYX z`Cj^{IV*kGoQ*zcj^;_`=RWphesZu5Mdl|b+hu-o(K0`|X_=oqw9HRlTIMH#mif7# zmifs?V}A0p4h8d5K#lo%Kz#zw7u1-aht!y#f@;jq!)nY=A@A(0_lF;hezmDqA z3_s@2f0VV{Up-D^|BmW#K3v$J{{(BfUwe|qex5v4I2kqa{V9Kb)GGO2l}#PeCT#IqRNC7#7;iDwB~;`tmc@hnM8JWJ6M&(gHSvkWcyo=78}Wm$(J`Cg9g zCqf7QJp?WJUY?eGuRu$_SELcoXr6Ew70g# zS4~>xs}?QuRhyRiszb|s)ul0C^;n03`KqtRd^J#?z#|tm=Btq!^VL|5`D&uZd^Pp1 z#9H#d8I61or8zD6-hxJchtiTpez#&B3i7+PcM;a}LkDgN=y~S0^jvd0dXBk0J=@%Y zo@MSx&opnWVkjs21z-D$~>9<<~~Pg?S$7cKeGo0k0OLrZ@2 zr6oW5(UKqiX}O*T(8!O0tV5CO=>@i*2p#ym0xj3mU|O!H7iqbkUZQ1v96}>MhO!O? 
z`SG$E`SFSx`4LkiKZdE1AH&tij}dC*$4E8uW0V^CF%-I=W4N7kJwY?r*6N=sf%qb0AV(~?&+XvwRY zwB*$+TJmZ(EqOJEmb{uvBd_MM4h4A?%6zsXuNG+kiO`Enp6!xXi)hKK#kAzr5*m3G z%2Ha^on>?@yk%k?3jCV)tE^jw4!jkkTlhLO!z=vx%UL%M9eArpH@oFnGTQdp*eV+F zTg^Iro*MC66L-|8@q3f)62G;y#BUuf@mo(z{5H@Mzm2rSZxb!?dyAI%ZKh>i*g_+I zTUm!9>%um+p9mdzo79kj%6CoS>YMN9m4(}-6nduWN*UK;TUWgm_B>}MSc z;&Z_J4c1LV2Yxi&#C(WuY(7jkG9RHEnvc>A%*W{Z=Hqld^E-51^9j0+`6OK%mk4W= z?kVGSu6X65e<$ycnOCR$c~O(B3#ZvG>%tjY^6D%td3BDKygE-yUcE<4UR|IiuP)M( zSC?qXtIM?H)fHOSg{$-_Tq>+Xkvw~!?I%MAZf)rk<`3w1%r|Js!w>0W*8dSLdHFGY z#QHy>B~L%44_W_bwB+sQ^a1Ptf|fk~lHP~s2&_@U-?u*zr%R^m{Rh4g)z>`#I-W21 z*F`;H%DYW|n*V*ZcrYaeg7K$OWBh5=-SPKSYK%X< z8spEP#`rU;G5$p4Xko` diff --git a/resources/3rdparty/sylvan/models/at.6.8-rgs.bdd b/resources/3rdparty/sylvan/models/at.6.8-rgs.bdd deleted file mode 100644 index 71ef84a77c48033d3a1dd2db70756bf85eea2a2f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 50128 zcmZwQ1-Mnk+s5G$5EL>0wx|f$-Gz8mz*f4uOA))>pooe=H;RRV-Gzaz2-u3<-Q9lA zv(6gdA6(yD*EzH9nY9ml=6%=9-iv);wUp9I`dUL@@9GPsQXIeYUrS{LAHEf?;lnq= zHGTMcxRwuJ4X^0Km&3Jv_+q$@51+4oK02zHX>m#`7YF6H=TfRmKbz7j^wTNTqn}J^ zRr>LiR-+$HX?6OM>bdx@>7kU?{0};qpVC_N{VDyAzBi?{>AO=}hrTnVb?MtvT93Xp zu)Ys(4sYPY8^as=a9(&LA6_4>@54FajeU4ccoQF972ec`vr}r|I+x2++AJTto|#fZ zdU{HY=&30+rYEPgIXy9@E$HznZAp(!X)F5Dl(wcXPH7wZqLj9!FGy)S`uvo(r_W1i z2YO^mJJOZFPCh(4yt5C_4DaH@)5E*^@YL{bK0Ghb_a0`0%Lkp*}n!e3%c5Z`EywyDA=3?c&tp52^N%u8JpB?czrl zgQ)h=uBw)K3MpxlT2*&ff7hhqDW{|$v|+!3h9Rb;Ah%<`k^)nbl9H$c`-`NjZYZV_ z$=b=yNbX|lw}}UL@!?kCB2-(1yZNwjxCm{-a1S3gSSAHh+9ai3aY{<{mr0qFHcY7x z``2G4#frtQAN$u{CMAo-bO8I;2o$NjT6mBT>xGNduNxlX!es==zB{G! z=sQyyMcq!3lrE&_rF0Q}eM)2KIVoLCUz5@$^i?TcO3zN|GWznA z#?muW8b?o0X*@kOr3v)plqS-`Bw7#FDN*!?O$ilGPCqh=1~V;G3|>zmiY9ADsF++m z3a?GaBOzBGc^krT$6v=uu`%hlxB}Q>ET+9CBmw8E2?046( zUl*l98f9oob3?^`rxb`XFr^zp#p0rriPATvn?l9nr4)D zrxcFTF{L{~#p14%kJ2`!yF$h0p-Uu6%ara375kkosVGOJbYH00?{o=9IV7bALMgQ< zUZO@zvy>hTrF39Qy2PVYrSx#9*zX>po20aW-X|q3AyM{9>9J6;-#t$6meLdSE-5`p z@08M0^bW<67A@PQ^h~ID@uwv)%2p{Y3>7afpQ9V6w1{q4yrqbi1}QBG75m-ubp4cG zpf}`O7Imzj(o5`Lmv3>@v35$YuzxMSB~r&4DZR%2)%X@l9raRL%Ko~1%cYK$QhJm9 zwfPoJ9ko(=oBcIXdWY8Wai#S~7v(CnSO0>qeGOh8|r8T})XpOHPt?{i&YkaHG z8sF-)#@Rw-K(Fg|E#Sp@`C(YF*LT0p zFV5592Icu1mOpPj*YooOQ`*SZhEbfCo3TM`yxjTu@tc-EZ^t>UmMv{;xLet`=U&|_u(e?q^OBo2Kij%J7mas2TJy6# zt@+u3*7$a$HNKr_jc;dKe`)mZ>-I~^^K!G!&t*UN>-^$89X2h`-@p8MOr;N(cwew^TUVPqr!*V=Y@~3&kY}Gj|?AW zk4UMQm)xxRY3cS{G~QOU=BG8S`DsIId~IosuN|%NwWl?{4z$MCk=FP+(HdW8TI1_N zYkXbl5h-;meYERJ9++MZ+xw@~qwFtwnnr(5x9?Y;mz#BdF1_8a^NaIz*rz0Z+TvB*3Zvnz;fs3#}6uh-oWzlgG={!y-$90N<-|u!$a-8!pGQqhL5%P z2oJM&44U268Gx&(RtCec62?HiZp&t8`aOUQxQfYwY*4ZS40~+Su=}vb7)Paq`S{z*^KW*o_5Y622e`)mcBhT~?JgVp+ub(yw|i{tZ}-~R-|n-qzuj+Re|x~j z{x;tp5$pehc10=XI+y+PfnKV*y{S@^{aUYbS>Sf8=f%&_;~(SsdVDUAyB&{zqI~?5 z<@t}g)_V6;>He;-i!Z>-l0E>-iEJ>-qCG*1H#M zt#>cF&SlelpnC;w-$eHa*uQb~zv6bRU&YVS<6q%0 zjqg)h^Ya<4@qJEfd|%KS-G*5r9XDP zMjoi%*W0Uyf3{Z(|6;Ei{?)D*{>@$`{JUK@{D-}A_)mMK@LzVF@ZWar@IUs7;eYK~ z;o|R4YKB*+p<{oQrG}6Fn6lJFpPDK~^Ai0LY+tOWKrhl$q@U9e=!F_0t^K_&t^Iu! 
zTKjuFTKoH|wD$MaXzlN-(~m2JwD$Kk=|`2~^a5Q1^dq`N=!bO)(GTeoqaV~INNayz zpVt1o0j>RcLt6XuMzr?l`n2}vjcM)Ao6y>yH>I^dH=wmYZ$@i>Zb)l?ZbWN;ZcJ-` z-JI6`x&^KMbxT_N^H#L>*R5&ouiMbtU$>>Tzivlsf8Cze{<;IL{dGtBJT39G_Sc>1 zk=i2Y5!ym%W&Lin_Q&1nrrM(D{j`PA+F$pgwLk7nYk%B_*8aFJt^KhHt^IL7TKi*D zTKnVvwD!j;TKnSxwD!jXY3+{((b^xI(c0gd(^~&q&|3cwrnUYbLTmj$l-Bxx7_IgH za9ZpC5wzC-BWbPwN6}jUkEXT$x1_cHx1zQFx2CoJx1qKEx23iIx1+WGx2LuKcc8WY zcciucccQiacc!)eccHcZccr!dccZobcc-=f_n@`@_oTJ{_o8*&o7Vc@ht{HBt90M8 zZdq&u%iTBcQ8udT=kvOS``cZ@1ME)Wfp&-RAiG_7u-zs+#BLQHYVR06#@<06$8nv@ z_W2;CVQ$|pe4M>)c(}by_;`Ek@Co)-;S=pG!zbBWgip3N51(Q;4xef_3ZG^-44-aq z7Cysnp!))@bJ2Bq7Om^>Y+BddIkc{`3a#sE1g-06B(3Y_Tw2#jSt>=X_>W?LcfIwY zl@Fuou6pZ4D?cuzJL|0%t$Z0nchp-yTKRJc-Cl1UY30*pbX$FPfL4Buqg(5<1GMsO z0UpTMyon<|&ydcV|@~sb|v2vHsJErW_<#)F| zBz%uOD15IyAbg+QFMPk*5nO*2O1ntcy?CSQnqRu`WJiH`G^Ip0%+qF0>oyuUK8@qP$#0 zD-Uy7>~`f{E=z3XSuW4p%Bx&nu$4!-yl5+Lia6)_jd-V*-5>Ezuh*e=B#s4Rsf2=+)Kr0`9p@-`4`Dx|HZ}eb&E`V0P{6P=Y=LKlx z&tG)^l>VlbPyf(;Q~H;7e$~vAsk*mr3DC;78gx&cPAmUv(aOISY2{ySTKQLpR{pI- zEB{uem49_<<=-l_@~3#v-P$8z3e{0bVv;@$~zqM)YgX_@B z$8~A#gX_`C&-H2TgB#Gw*9~dygB#Jx-}jdgJk8|&hp zb|Zb2WiMNKnakcb*2R772D*>nIv3?-6B_&Aes0G;*wn^8xWA2ku*$|hcz~_EDdMWb zqA0!xxjh%fw;8SYHm4Qe7PR7fFs=9=LL)H=|n4k2Gf)EQj1nT4W%dQr5UaKI+h-9q3i3|jejCawHCi*BU*1+?<-99sETp&RIa0j>NS zNo(IbmsUQWM{D03MJqqgr?u~mrj@T3(AxJdq?NxH(c1UM(8}kFY3+NL(8}*iY3+NL z(aQI+wD!GTrN@+1J^%8T*U{gt9wc`<=MM|qLUM7Jw1Cb3_6F`34?G{x=M zcc$7{m!{cRm!{iTmuA>lmuA{nmuA_G^i`J2ZLCXI*jSfl+YNLd!F4Xmi(Ia9yYk{{ z_A4)PxyJ3vi(Ia?l^40pv6UCOTxTmUa=G4CUgR>@R$k;X&sJXKa)YhB$mK>`d6COa zw(=sEn{DMqF1OgR4yIdeHerxpJPXvKd%t@uAkH`4tATJe9FR{S5K8|Wi4wBr9Lt@uAiEB=quivJU|;{PPA z_&-G}{!i12|1-4W|17QeFQgU!=V--$5v}+yrWOAswBr9ft@z(o`h~JSvW$Jd8aWA6 zcaP&2eO|ZlOLo`r%XXLWD|YAbt9GaGYj(%*>vo6mQoDWl4ZB_VO}lOQExS$lZM${& z9lKTdUAtxYJzLj7F7MlTefEKkb?`$Qug^ZRbsc={Iu~6BpU_wbKXp6S!Ov{0gP+@2 z2fwhf4t{B49sJ5}q_47kZDSq$#>P7Mt=&NP5nSh@>)?Aj*1hU|qDMY4Retn&kspS7k+8`BmF>zVC+os^fO#*Ge|> zYh_#cRk!-_f{OBM6&m|&J+~vjR<)5|tJ%n})otY08aDE4O}nwa%CeT-C`((8jr>~M zMt-ehH_%q*Iv4D->$zR|wSL+EgX5ufG?xwBj(u}O8~fx&HulB(Huk}dZLNEoxK_#c zzXw-y+4q5$>;6J{UeP^EPo18rq0uumbb7j`hMuOWqo-Ahda1kdAK9{m4`dgSm${;!&1L3p?UbIoOVMp>l|= zcpU0FuOo=ZVQyDE4zKP{yST=>aDY{Kju3fZO444mUhf%YGucKrq*`M zXKG`|e5ST`%x7w6$9$&tcFbq$V8?u>j&{sv>SSv^bLnhrKD)TiCFV1Ab-U)X8~Zh% z-D%8c54T5tr=GUvvlq|TeD*GVq-(6ReQd0=eQm6>{cNnW{q4s3D$4*H>+C=q>+B#K z>+E2=f$k%?&PD6&PnjdgaIjdk`o8|&i%+ z<~kS6=LlN!Ig-|Vo=a;!&!aV;qiD_N`LyP9bm`+=Yu~z{bbjBCt<%q{`tNCv%}+2@ zF7)}wgfFs(hR4`L!WY|v!+EissdBx~?-HJC zcM8w5JA`kr+l6nm+k|hjTZM17j}G5rAEnPJxXy7pvN$fj-RAZq!nfOphwrct3*TuU z8otXuBz(7haQGg(MfhI3dH6oNS@?eYpzs6sf#Lb~0pSPjD&1djor{9-FukAdFVOqy z{sO(X?k~`L>iz<~yY4U0yXyV|z4LOPJaX>2`OX(*1 zHx6jU=S_Ma{oVqtc)d;UrGMjqR{Y+j_ekkITJe0J-c6rJpcUT_>0MI#h*rEmrgzf) zrP802bzFi)QdWx3(Wvp$>i$a6s=WA&KL^+4=WfS!`Gt+^@=IHJ@f9Dhy!e{Nb@`3k zab13E!{V@|Uf+E%P}R1mPd|Yrb>&*T#Gozw?0kUSUP`W4>$Hn(vyfbJ2X) zqA}kqx*hXf+s1s?v76@~q|YnZ&B80&nD4qa=6e+z^Igxb(&rRh=c4&ujn;gxPHVo` zpf%rX(wgtJXwCQkXwCQ9rT_MP>bj^|GymbLHUI1Ke9ixQwB~<(TJygFt@+=O*8Fco zYyRugn*WVy&HpB}=6_SVxwddx^S>Fb`EN*T{u|Mn|HgEcUSiOi|1D^(gIm&y$5yn~ z!L4bW>aC=(u+kw_PxFfB2?nG-H+?iHDdD~CN#T9$3E_S1ap5Mm@^C-b`SlOhfu?R( z9`0Y=U!3H1w~EF(aDdyDhq)YRD-Uxy$i_O*%*Hy<+{QZ4!fvjwvK(w<9XQ0sI&i3s zb>J`?>%if5m9`GoxhM~hq_OTE<#w#QN84C;TiRH6TiIB5TieQ$Tn_d1ula84_FORE z?cA>UZqI(KyB*w)`R-_AzB}2P@6LQY=DUmAG2dNn%y&1txxUKM-Ntf&t-GUX z#rFbQ>+XfL;(ZaVb$3YVF=eeOVX3|kth~6my1!C%DlabK&rx3Fa;e*u7niYLc`=s8 zbve%MSO>@3xGpEyxGpE!xGpEz&Gl85$u_RbDK@UlsWz_5X*RCQ>2{U27T38bFLIgb zcICw^_G4YV-0jMXT&}ROPR_QK7r9(%W8J*UR$kjwPU_2pV={AmCx;%ugVv8 
z%va@0JLaqMl^yd{`Pz>8s(fQ>zRFVh*4BJ|=Q_W>#(aJ6cForh)&0dup06Ki%-2tD z*L>yjv#t61h39L&el7ixYwR<>+1O`(x3SOsVK>)TS^l(}W$E`GZ0s|C+t_FRv9Ztm zYgg&3w!U)FIjZd^LQp286PtSZIlUb^Hbm@1q2 z{LbM`?M~qacE|8$c8737yM4Hk-7ehNZX4d*ZWG?Z&WpRA-$I|s z9qU?UTU*y@St{Gvah+DSx0QE0xX#zT@@mIw7mdujo!pMR+Sx{4?P9m6ewe@SY9p_9 zvyoT3+X%`YHu7pu8+o;tU8PISbuP-QeQ4!X`T3``{O9MI_&mkC{Crc|&yIMfrgp?T z?QciCQ0&7%r z*}pecTzZw~72SwSZ?`Kheb^t@Ve0F4#ibwn6_@@r;xfSPh|545aT#Q{&{tUo+lb2$ z8*v$GBQD3-h|94y;xf#x()|V3Dx0rZUFW6g+iF&d>N4GTD9sLi8{$1N~ZqMaXy%eRD4-@E%^-`8reoUe-(o11l z`7(vRKrf|f<l}|J1^Yqq-R({Q*N9v<@wDRo=x{}gtTKRV+t^B)+R{mX0 zEB~&cm4DaL%D*|Z^6xrY`FB07{F_TF|K`!kzZ+=f-;K2L?r z0qXvP+iwX!W8bX%4m|&+=wIme8^h1pH-s12^TLbmx#1=D_2K93>%uSCbHXp$*M?uR zuL-|wUmbqMzAF5xeP#GHd$vBm;5rxO*Cus*2Q;htc&m3 z%dd-y|NCywF@3*YpU}#ePw9L0IR#qz^ErLD{{2N-`Sc}y zr#`1ZE5E*`Z`Z%4Kr7$ArEk^!1zP#{J+1uvfmZ(gNGtz-qLqI?)5^bJXyxCpwDRva zTKV@oJxBkZ0vH(8|Am>DdZ&og8e{%D)w8t&25itZch z`MDykb+IcGE7gwW|@2k^V1m7#?V~w&s zuZ!iZRR8_PyZ_BmWlf*=jy}IoeSW3rdHcWq^gp-16<*tZGrW%dMtEI&X?Q*R_3--k zYvB#-SHm0HuY@#N;UM}sy_KVtAI<4!^;VEpz8p+Ht+$f&Q@RB} zKdD={wDRe2`f=S~pp{=o(vRvE1+9ELntnvLIB4ZxD_ZMfYg+l(hSs{+mR5eYqqQ!! zrT<3DOKEFVpl{@q~2DhIX9&Dcx9%7%a z&oA)tr|I(x^r>w;; z6!&ZVxtwZa{HNI%|LHcye}=8`pXoXmjsGkf<3HQ&82>po#$T~9{t-6DKhnnd&$TiB z^K6WNl&$ff?^@+#*Tag#;@fEZnD7Pmknn}}pzuZZfbbZ*U-)9XPxunM*K+R@_2*ya z_FRtD`zTt^XB<6L_Yr74zX|kU-AADHd?(QZbsvEqp!*1Pf89r*HJ)j7U)@KbHNF{i zZ{0_rHQrftPyIU+r7th*<*)nV{on(6LUfF}YWueEHTErf ztK~Wuy$+v4-;_J_md)*WA9%feL-fzJ^*VeWA3s;8(0CtsquZ~G$KPbn3EymA8@|QH z`@mc6tE2xm8}9>ex37%;JM7uz``_HG@#k`v`!)Vt?zS=hdu)vVUK``T&(`?wcb$vI z{{W5g&v*NE`9$4kurdCJY>fY58{>b(#`qW582_U-#{ZZ-TlW!MtK8`t*ZmVVuKOo# zT=!4exbB~}2k5IT&)EI4bU(qyb-&Qob&y==qCbBTt?Pa?3Z)^-0pt>^awt?T|p zTF>_-TG#!{^Z>o2r*+-GN^3l?(Yo$mr!~H%w66O%XpQ$xx~F~*r1V>5eQ-G!YgW5b z@E!erMA=_huX}TO$L)IEo6EcQn^`HnXX|zE`+WRT-Cv;fx;K{(-TqpZ?lahW-J8qD z_A6Ps&tU6yZ!VwOFJ#Z|rBn z-`Y=wzq6kRe{Vk){=r_b+`reU_Z_+X(Ht;)}hsGtV65Y1N2pvHEgUyYuZ?c z*0Ob7w)CHyi~jtz-JXlqp>=3IpLJ=iL+jCce(Td(hc=+~d^e;A>hlY>(Iuu z#!s4j<$zKe@k2MdzSf}z2?8Q z+jF^8_ZMizqb+^0?k~`aPkZ_z-Cv*;ua5Kuy1zgxex2#_b$@|YJiF58>HY$(_;#m9 z>iz<)c=x0$TKZ_kzc;P;_n{U4zO>@sk5>Hq(~AE9TJax9EB=FM#eXoZ_z$5K|Dm+v ze+;eoA4@C#!)V3-I9l;>ayik)x^$AQJjmr_ z8|&05_650DmxPUV>oi+=kjv>d*0D2el?S<;>vrYAdF)pnjH0m)obPt!K`x_htOFO=SO+e&u?}2hV;vY{V;#8I#yW6` zjdkEs8|%Pjw(?-CYnA->)!K54vdrhIwLXk1&nr5S2jkt2xKFSV=ZQArI>|;HC)N*$2C6{S#S6rsEUvZg1V|||K_E?`Qdw5@;rPm3r752;BKeIS4e1$zj`%l?F z+nyf%SK8CUSJ_j;SKG1wSFW)qNB^~UydJ2`u_s3Vb#}Zys9bN4kN&x~UN3A{KEHYP zSiJ>zoikeTy0O|tBmbYro7|3g-E1RXx7di+tv2Fyn~iwgZsWST!$!RBw6UMwWh-8H zyUs=N+N~T<(WLm?%l?Q@y3g&3PcHY{5ufyc9q~!??TAl$(2n?|hwO+?df1Nmq(|(C zPg-C{eA1(K#3wywD?YhAZYw@dxXwlKd6L%k^c1c5JWVS;&(Mm`v$W!~kXC%2qZOY; zG~%<^?TXJ5_A5Tm(~8dvrSEaA>*>YPvs`1Hd&$;1SLB%vi=y$oQl4K}<9U_VcwVD5 zp4VxOXDN+*d&BJ*&zm;#?JZm5d7F>dcyf8i?Xft|8vD*S zHujxwZR|VW+1Pi!x3Ta1U}N9;(Z;^>lZ}1nXIuNuFRoRFmvz%>{`b9j{h{mP*RsFp zKpcK^JL2%WjX3;aBMyJsh{IpD;_$cYTo8wU+>Si@*H#?k7F1(}NHoTMFCcLxVI=qYB zD!i-RQv1AXmHfHDszS@%htzerdwE{bjl9~!?aHe?*^j*1%k9dmz1gq4+J{D7?dx{r zRTCR|wV#c=YHA~|_P3E&RW|bK02_IAppCpb$VOf@vxn)cEX{4*hs&jfeN2`<$6)I| zTrP*$L$Y+A#nyeeTn@7bW$AMa_Q3EF_JHt_c79W!!=vne(SNkvH{8=D5qWHI`75@&j;@^>0{5#Q#e`i|p??Nm7U1`O?8?E?vrxpJm zwBp~BR{VR>ihplf@$W+`{(Wi1zaOpm_oo&A0kq;jkXHN$(Te|IdXQ3tR{V$3ivKaR z;(siy_z$BM|Kn)Ie>ko9A5Sa(C(u2$w3I%vtefQ6mv!;?u{Y_`JrozA`+*o~`=}u5-CUFHz`O zxkL9E+&&{b%AOWJ-<}d4ZBGhcU{BEdh}zM{zvsQk{hHq~u5;1+=5n#yF~67CnBPn7 zxw@6>Iv33EShr(-$Jv z8MNkiCaw9MMQeU9r!~J`B(D%%QXaA&*y4-rlx?_^ShRwt|_GT ze6ORYD#d9%|GD&Jg^<>GZlEXXJ_4=r-9(SqM+In&_ZE7rLQwkFvOW~k;ltwZci+}W zGWfW+^!pL?oAIDK-2O)RPJ3ziF8lTH-S%tYd+b-k_u8+7@3UVH-*3MZe!zY)Jl}pH 
z{Gk23em}x>E=%5Qr)=ff z)2?&Dy7G+Mv93I8V_jKjV_kX9R$k;%#0~LU?0&>+iLH1&?>ZO6>jk$XUN72+*Go3y z^|Fn4y<#I?uiA*$Yc}Hbx~+IEb)Adi^#-kYy-6!xZ_&>vk7>p09a`~vmsY&qqZP0B zX~pXUTJidjR=hr<6|awJ#p@GV@%ofjygn-`kb@6*j59;#^eB1+Z(w}a>FZ`E%Pxx>9uJAwh9pQiN+w@g8+W7lPE4cmU=&NCC zzs{wmt^K+dAFus-MS5O7Tc2Za``mCH8~gQ2Hume4ZS2=|ZS2>p*x0Y@+1RgFwXt8X zW@Eoz-JY$lvaDhE(0vBixyWmkUek5=+@Zh6x4VU}^}dVwTF3pEuXSy_zFE)Ke68;~ zmwEd90*(3F(CwJ7jcm+UeH-(&v5ooK#KwGWYGb|{*qE=)Y|K|fd$#T`xXwlM)3|h= zpUv5?`PriM+OE6i2kQ4B>@MN__}sF!`!T=Ul#k!mbuN1RcBQv+-8pyY-*dD(g?F$! zhIh0(gm<#rhj+Hyg?F*rhIh5wgm<%Bhj+JIh4-*q>OO*Nl{K6%Dw|eU|9vm6`-k-V z5#@PBht`2y_ICUHEZt|Yu`cXu-yi)=Y^)Re+4n|&Qyc5X{`TF`Uu9z*Il#U%`VX|R zt{i0F9{tU1uQO$7Zr_@v-;=O!(R~Hix#&7NgvNDrsM~cN9malLM~Bn%@(1meqImPDxFtkcy`r4Scem3T8GSpJVuV&Cjv4=4Ti^FaI>%e{j3z=NkSz<#8^@yI*;H0w1qG??f8$ILYmZ$I13Q z-5TWi`twdLJ=`_oahi>IoNgl?XV{3xnKt5amW_CvZ6hA%*oa5Po~^&qaGi_he`M)A z|L3w_^M79Hp|15hE|*dEj4a(xvJt=0wjQ6$1?A%}be)SHe^Kf4UF&sVE@SLzS^6l6 zt=ECMTw?2WU@n*1dL5X{Wwu@i<}%jS>%d&b*?JwA%XnL_19O>R>vdqhesg?t|0MV0 zdYfz`zEf<(cdCu}PO}l;={Dj!!$y2(+KBHg8}Yr|o~=vFbuNnUY+CWTl2&}Kq7~n( zX~pLnTJgD-R($5riqCbl;&VN%_{^mhpLw+6a|5mT+(;`vH_?jE&7~*0*6Z|JO6U6t zRpnfKTU}R*O4t3Z{5g0Xc$?dG-QUiBT=#dlUDy4c?4PGCmsVcna<|)&7x&o6i+gS4 z#eFvN;(i->@qmrIm~SI59<-4c581QzRhEbCVOhGrU>_S^U>_5H)E*js%pMYc+#Vc$ z!X6ZU(jFLo${rAY+U_5I#_kt>*6tf#X!i*}XZH>-vU`OW+dcntr8rXjpLcsMivJ6= z;{PJ8_`gIe{x8$>bjyKO{9mON|JP{6|8-jNUrHF-ivMS{;{Q3V_3XKh;ZvktBKS+{g8 z*9-C=tlyKcwOi076z;<=TLcy4VYp4-@n=eD-u zxt;4=6wmExW+?>pPb_g!qovzR9x7De;5o7;0i{_pN~tPgwG zn6Eu;%-3Ev=4)>o^RDuIv3^l5v4bAJw12m-&?S! zg^#kQhL5(Vgj?E^!>#N|;nw!Va2tC@);f6kNoIpBR@LX$dAr8 z@}rB5{OD>UKf2k-kM1_|qlbSyD6>TlzE z8en669B3;)2D#2f`7xMQehi_NA46&7$1$|><5*hxF^pDz97iiZhSSQA<7wr`3AFO# zL|XZA60Q6=nO1(BLMuN`E#2PrSbZe0bpCsXs&X!h->1y;qIKtV{v513XSf}Cb*7EH zI?G00ooyqp&ashK6&raq!bV<=w2@cm+Q_T(Y~|G`*SRRKayj4a%B#`rUzmIKD{T>bb;ai-!orP|dGMEs_@9r2rHBYx9u#BYX;_|3Etzgafo zce#!DU11}Bvu(xiO4qq4epk_07p`{u!hE9M%G!wEwKn26$430Fvk|}RZNzV`t$5`! 
z&qlm%uoa(NZnPDjn_TCj_}pB2qU+ANL-!NxPT^baj^W$v4&mGF_TfA1cHukiw&A<% zHsQPN*5P~XR^fZ?mij9K*D5{hRM+*Zt5p1+dwJ}&k^uJ&uZ(p<@jsBNxhs@LsLIC`r^Nb_UU$3lU@7~R2XD~GgE!rdJb24S9=vTM58knn z2k+X*gZFIY!TUDy-~$_Z@S&|d$mJt@SeEWH*tlQyiG57;e`@1?)o1pQ=>Oav9R9){ z6#ml2{i?6*0nz`pt@{7M%hM)~tg<*Zv!cQt;{D z=9GN;r#Tg$UNxuY)8EW#`1BWZT0XsEPRFM|CVn0~i%56bxeW0W|Hnn=GOEuzmr4Dd zbD7oWoXes<>s(g#Y3FWGpG?H!zo8S(-S{8y!ty4{?YPq&)$@#&Z5{CxU_`4&F?++2W9H<}Cb>3Zi1u@7ynbGL@) z>_2htHuXo&6;`iyu84Z2bGNIPJ9meAsdGiu9~$oD)5Yey_;jJU7@xjxF3zX(%q95r zJ@egs`i}V?K7Gr4FQ3jam*mqo%%%8rrnxkqzGl9UPp6y9@aa_Z{d_vvT$WEKn#=L& zcyoC^ebxK`pN=s<$fu*s75H?7xgwtqGgso%A?C__I>=mwPb24&*oQX2xrf4Y_Whiz zs_x_5!|GnnRa5tHuDZILb2ZdmoO?vw$xxF|UozL?(-+LO`Sf{n9X{<~ew0t2H9y9u zPn+xV>67Ni`LvyL^=!zwHqO;ow{ot5x`lHM)yKo*WDRfD)RgQm$6GlU z|B#6dnzM^{Dq;N9&c#7UxFx$dXbIy-O&09dv?K?+mMocV>7+OWe3ob^k|)A*_9!X| znR6(@r*s}gD3cjVJM&2g#R zVSbTMiO;P`w$LFbPoTe6)yrlbvFnTkhPuI=?j8DcQLXotvxki<6zgsDL>wLfzH2|>=GgV zVHWB9Tgfgd;+_3K=ii8bBpaFO+!9k9eC&d?>CP=P#mU7kTbu0M3R7G>2$8k%&aE=V z#fgww8{^y>Q(XK=fwd9NeQb&ku20lMocmNg$T_6gTIAe1lXC;&lCTjhA{$K3^+|Tg z=v*)7KGXRg$u2SD#c-3(cS&|h8ZWk+b^fJfm$32qev8gOpX`!1KHqQC`Dc?|BF87A z9XkJHvPYjkI!ZkIA_f&fyTY*37wmrugKJL*7~==k}Z8iw7(b*6KNT z&=jBV52+t@?y$PHb4S!QojajbJx^(9(m(OiQB)vFOL3r3Xen= z#ogmoZ}6j$<9uCn3i>f~O8QZAD!Pt2HC@}BhOT8!OV>1~qaQJ+r)!uq(ACWu>1yUo z^uy-NbX9W}`XO^xI>~$kje^O>K7>vR>PC*E;NtDVycF0?oX7U@c6ge@Z=W;qel9!v zi+K2!i$;OR+l4_X=seeLA6}o=zu(RN_4&MWvzG%XKP?B?E%bNcFT&+7EeB{pS`Oes zv>f2K(r5AT?KWB#h{CiiAVug?xQD_%G{ol)HTJit8u7VPjriQ9#{LykWB-b)v417h z*uT5g*uQ(!*uQ(#*uRo$>|ZH0_OG=1lymoa-_HJI*iq-o&{%-)_w(_LEYM{+js-m4 zF1$<@?DCw)_VIRj`heg5LH~Z`*dGfobglx86GpsU7?u-6rR%m2udm|Yud;uAl6OV+ zazc8DmJ?Q0T25#W({jSAM#~AYIxQ#68nm2HAED)hTa%U(axEGs>^LrX8aKqJ4#z{o z{ywTkd>&IHK6TaDzsJ?szj|uyUwt+9uYnr-*HDf9Yox~hHCAK)ny9gVP1V@HX5O{g z?+!cWTyuIIE>m>-wKm_9w(;Ay_3zh;{U>2-Tt3ks`|ZLo z-aoV_uiHMn{%QYyPx;qBZ7v44Hl*uQ>i>|cNHm)U<9b_^c}=(f(iqT9Ez`H17K{dQp(+lMxY^VmM#4o?UB z?T7gH8_2#@*xI?FbW6Wo7{>dDHvGEn!|O--_Z#6~KgxR;`xfEVxK}_oH;1{hOi2{=KHg z{=Kfo{>@Zl|7NMNe{ZOQAkevAC;Kk#0_ z{=x8S94hn!<|TA_^HRE;c^O^Syqvz@yn-%cUP<3)UPYHSuck}k9t!)=5TB3K*x!%U zh|ec##OG5r_HV5k`?pSw{adfb{%ufW|2C?zf1jzbf1j(df1A|Uzc19-zs=rj*q01D zihBigLFcyU_62NyE5~o~+l66lAKG@#%l0{FTyO65@0XpuT#xUf*+(ZTu&dSPucb65&9$^z8$5%#w8*9&`#Lr zyazab9G8@Sz8w2wVMDx>K8pYO`FIBJ7uqR~prr-e>-S-fRAm-ecEeKhbhM zc7MP9LM|pq4RjZ(EjAO zykGqH%KYDc{vZGPzu4pNyXO5n`;WrA;X4BK8gu;ngVp8~xWAy+V=3vCHlK=KVNOji zH>aVOnbXos&FSbRcD<6Gmg|)abX)vdgnejj@Erl~aQ%_lJ6u16O|x*kCBAc@ugCj^ zb_2)pe%W-sb(qEHdXBd;$A7QP-{j|W_}6D=kH0UccUJZ-?De_m=H}dVGjkpq=kw5R zrke!eb`{;&oR4l~&QCWq-$FMq7oh9g`Me-4=kr2z6?{j4eQ1?&o6S3%p9_13^K00& z2*)cp7r##!#QTMI2j}sAMg8mJ@k(J9_W?Lw(H#H1GGENk7x%Bfi#`6n65hA7$N4U_ zyJ?*7Lc52?`7X43X*r*lq;b9rtrU&(U1+6gobN)rkH+~fv@$f#ccI-+%lWP>E$6#( zbV2+`>_aPnAK7~ckI!4MOWxb){9!}vI>+;27xeXbztAdi9Pd|I=kb1_RpGe2U;OvV z{6l`es(*bFd;EP5dsk$iH@q8e&CxfTtJ8VRHR#;tN9bJUnsiQcEjov}Hhq)14xQcn zD19S-XTUgxhW)S0@zAh;kE^l&_0-tE`fBW712y)qp&I+wNR9n#tj7K|QDgs_sdI9a=j&QxLu@K;s^D zXiw4^g794d8uzS2dzwxcgzpN_xQ89uvvisud{=-@ZSFv)GCxQ2;oI|kF1hU}bC`+Z z&2WGAMIFce$CuQ&9(-Ai>%mTHTn~0u<9e`*8rOqe)wmw)rpEPPcQvjDd#G_e*i((` z!Cq=y5B64{#^-O@36MRQN{jqZs)F0v3yy`Xhj(~bKz9XPsh0p)$mH7OxUV+d5>gD+SuU>}F|LUdq z{I6bu&;RNV@%dlf7N7ssZSeVD-5Q_&)vfUPU)>U)|J5yUy`XN6&-3bL`24SKiqG@v zCisqkx-mZAsvF_+yt<)t^VJRTxnEu1xdrNa_>O?OigSz9mGK<`btUIMP*=nyr@8_z zN!1VHl2!cxE@{=}amlMLhf890SzI!!@5d#zx(sfCs_(-kxw1bK>*AItM<_ zt8c>Re|2_zM?ig}bKBI}aK2aL{I^4m^WRQ2&VReqIREWd@30!rkEn6}JF3P>?*;E;zJDn`5nlIu%UAp$y5rog zzxf2+*Zeiz+kBGlX+A}FH=m}vn$OUk&1dO*aVvy_HnlI8t&6nsq%$Mog%|FmZ%si>F zG#rPa{l;+|ccJ}G<2VcLDvjeRv_EJZN1^>mM8)6tUO^fdCDL0>QV&B$@dZzfvu 
zo0*pUW}zj&S?R)f_;v$*TM#a>Y02-6bRm57U>_Rtdy^XZ&7nqqbE=WwTx#Suw;K7) zqegyj_Wp}`!aC?%6!$p3^Xc{j@X=0l`KG;Py>g85-FN8>q`YEjL zfe@=vPq(YPAw_D`S5b8r9KveU+g<8TC=oU4ueciZS3-^YyIYO=yGM=syH}0+E2&2P zl~SYrN~=+S_o-2TWz?v@`_-twvg*S49IrtZun&U?*iSr=Q-vM#oyWnFAV%evT_mUXcWT^JAF z+S0Nvwxdy(p*=wt!oMkC9~$cNDK+ZwX*KHZ88zzcSvBgay&84YL5;e3PJR8li2Qfd z`B!nhq((iws2+p+1!~mC%j!|MU!X?4bXJeR{Q@=Wr>lAx?iZ+0PudV=>W3@%lLXzrpBr zZa2$3lYZSii=JVAgPvxdO;0h;p(mN&q$il)qEVM`vk&*}P?zr{91rq(F|>C%F6-iZ zw5*GBY1HLBeLd=OzFOAB_c<=>;sRRM#f7x2i;HMk7Z=lo@$l^fTGqu6X;~MS(1q~u zZ7GeqT*f{$)M04LIgYvuZ3T@w3vDHhx(aO-jXDZ#HI2GSb{@2RyN@_;`F0=EmT&h7 zZTWVe(w1+xmbQGmb+qN%t*4Ri4eUchzC+u{apXI+&uGc_=d|Q|6D|4vf=0eKvkwjV z{!%UZ-okOo_f}f+y^WT9Z>J^SJLtlA__mXleD9(q-@EBTxR1a-G~|1)8u{L*M!vsN zBj5Yg$oBy?@_kT^d>`^&!~Xj7Ao71iw|@io5!9%MW9pgsJgG)~oKU}p?+>U^FDKR0 z@zn)2>gTk2D!%%lMm?QXPsaTMHR|h}dLr%@s8Mgt+uOr=IWr&i-Um`08I zPOHXwFr6Cpo?eaf;9>vsW$@!k^5fxY;yj4D$e5Uq;%U@HCVd}Smojr4b&*BqQ5RX& zsEg2U;5h0cv~0AjOE=Q8E@h`>UAl>ubtwm31P|YG(uIR?KY^BYDK{95n5py zbrD(-8g&uc?KJ8lv^!|jMQBB7TL;~pH0r{)DDIK}VjK?*`7f?U{!6Hl|GU-5|2=Bt z|6VonUs8?ymr^7DrPav)eQM;tjJgQ!BdC%8vTEeNoErHruP%i92x{d2K{fJUL5=)Z zR3rbD)X0BjHS%9Yjr=F6k^hI($bVHe^8c_J`LCu%{;R8z{~BuK{}DCvUsH|z-{oD) z_jP^l>*#v+YA6W&lO}FA)?9}kV}6t#ZGMa%Wv)w)G(S#{FxR7po9olV%nj(F=7#hT zb0d1NxiLM++=L!zZc0byX0%+NHK%bLgw}$Vb+9Fk;~=zFG>(JT>_fwG&_*rmU|WvM zI@peub?^yV*1;!fSqGn@i{Rng)3mIE&(N|CK1<6w*q$zghi@Hd90$*_56#xS#Cc*| zm~c@?ZfEroy+ETreEs}4|5D=pqWG1Vf7$y*_EtYpC)(;KieG2-7IpF4clEE2-w)e& z^X|;v>MiO{TfId+XsfrVCvEi>^`fobqTaOCThxahf=8l7eQB$=s2^?h7WJpC-l73? zg!>5WL*+@mMI4uU8%RsN4WdzRgA=a@&`@tf)Tp=6hH_l$Z5S=}Hk_7v8$nCGjijaC zM$tv^@NG0L^)`lVk(Tr5BwEg!lW93m zPNC(zIF-gaIE_78xG&H#!Q{U;8i4x?e!FL?Pyc(m|K>kPynhsLhxre^7qdsaeT$aRh<9-O zI&2@j%x}Nkzdjzv_A9)XvPXV=i&oOePw*-l`3YW4BR|1wXyhmOBO3V${+LF7fKR5 zTRh!98u1M6D;n|K&ptGZr#rxL#PgueBc6xU63@dNw|aL+XvFiVZjX2#^WMx}*4^W@ zth*;@S$DstW!*hV7s11~Q?#tRr)gPt&(N~&o}~-n;oCPf*4=aLL%StB!Tkh|=QDpt z%ewnLE$i-iTGrhQG}hfXPIwwO#Oo5rLzDWu%yFr|xK3sMM?e3QfBg^aC7xGkiRaI> z#Pb(g;`u9G1P|YSqa~id(-P0Cw8Zldx)AOoun!IK{7a2^{;ftl|4}2J*VKsTziP%a zO?Y$kh-V7#i`)+9v6SAmQzf>OQz-`$_p{$f%+aa1{Va28dZsxI{kl0V{hB!)J;R)y zo^H-SPcvtvruEdBy(1JqWK1Tf;k&K9-&|#);%28*%OWjWu0_4aU92W z4xN|dIw!|br@3?pDq%FOCMV5umCM}Sdf-F zEJPQ=5PP(6pQ}lNE+OVKsVrRnPC`{-)sGW5gd z`{}CYvh+jda&(fpJY5C%7ubhZDLlb_298%SSD+s-SES3CE7A9xE7SLxQ}XynypuQ{ zn#B7djw9Yxbsq5!?O~2fysOa?@9H$-T|-|l@qUEk67QO{#Jd(<9S`4X(-QAGw8Z;S zTH^f}E%B~PCt;V_hlY69QzPE>)rfZkHR9b+jd(XwBi@bGh<6k3D(p+4jv4pR5dUVn zJ>uV7jrg}vBmOPbh<__J;@?`0__t9b{%zHWe>*ke|Ae|azK@_r{GU=I{!gnB|7X;Q z|Fh~Od>=uL_;*lO#(e}e^6|X7BEFBHMt)vUKZx%msFAOi)a7v>L5=)%QkTV&q((lw zsLNmpQzO6K)TOcHsgds<>XP{PE8abQj}XMIW9qnz;y=g6f5w0G;&!9Wz3GwWKJ;*N zUwWvyA3fOIpB`u)K%)*rdxb_FM(jgF9fmfL%eGQ)`2m!tOH}|B;0~x9~zP{PK~-7uST6sP@}FUs!>Oi z)To=uYShUTHR__zb?d-1ok#qqs}cVhYQ+CFHRAue8u6d0M*L@~5&t*Ti2rOg;y*`S z9rqE`i2qw^#Q$wI;{T2s@qbsHg!>3;#DA_D>%crU@-bhHb>Mw9^0PpVbzq?y`C6pL zI3cZ@S(VXStPc#XO3X(b zcb~>FRj+wt<%8aw9Fr ze> zTH<|#mUthfCEmyAB-~G99~$C)LXCKTtwy|0suAx~YQ+1r8u31(M!e5@?_iJPBH8y0 zQ19n-d&K`+HRAuB8u9;LjrgBeBmNiEi2p@3;(tkv_+M5d{y(Uz<9iBf#Q!HX;(tYr z`2Va%{C`m=;d=^d#Q!%n*1_M^$j4PR*124k*~kiSO@=6BY)S_SO@=A z^Y}>{iUmE^!4zubH>DcuU@A58om!1`@EiaBY5X`sqAhV>7LRqvv>caNXgMyk(lx@?_?{SD-JFe<J|M#eC;MTet`7fzP{!6Km|I%vY|2{SHUq+pTkgAdYvTEeN zoErHruSWhKP$T~js*(Q+YUID78u_oJM*b_Sk^d@cFAo zf4g^e-`DWDe<5}JB#QqWi$hUgH^y&-?h%fUHrJ#_nQPG_&9&(f<~sCn^P}`I^JDZ- zb6t9f`Eh!%xgI^pT%R6jZa_!ohBWFvv_`aCk2I!b-D^Ul?n7%zqwbrr4-IwSTrKNf z3y#aW*OHcXuN7S*yb7dzD!&GqfWHtKk7_d{-Z8*g!>EZLnTQ5yK!9d-<_8H_n?vgo{853Xvlvr zHS!-?Z;ng;`_PjAzO>}OA6)|v-}=*%{{eKhFpKXi(31a%mi!N-CI5ryB!rB8XmZ{g 
z!g1t(sGom^y`0yE(Q+OePRn^~1TE*Ok+htbM$uS@Mzco?pUWC0nEdzNl9w^u9(ft- zUl(tVyu7NG<8mCwk(U|1F2nDg{a+JJ;Cx^Ez4Ju6kDoYrNlKFYCZZw5$Uk(>3t$?Gswoflp~!2iDTE4y>bP9avAxIBZQzM?o)rjW_?=9@Hu72&^Gfm=$$mV~R8xxLVqRvn1``EhSPH`M{ zep=_H&d+chc{{7~$jdis$=f-OOJ2UEB`@F6HSqB5ds^~xo|e2^pd~LCY01kaTJmz4 zPQv{K_GsZgK$8T+pNa1|VSV#0`jOk)dKUde+xit

4g2ex_}GihiMOJ&Jy%ZT*RU zqhH2-2liooeJOq(|8bS$FXFyKVm>kP0`5DgJKFYtar}An-}H0lf9MY8Yjk__zx1=_ z_`i|$j5$R*Jb&7pl77mZihk0ZntsBZhHht0OSi@S1@@t}4o~p!BskvEoPlm`&PX>k zXQCUMGt&*tDO2G&;+>V_p-H@N;5g!)P3IBs&~D_o#5+4J@xF;hymRR5CEhtX-X0I% za?uj++_c0y4=wS&nU;9xr6u0^Xo+`zx*fi^z&H6+avx()rkL{YQ+C8HR4}PjrbQ=BmO1Si2vQ{_PD>GM*Qzp zBmO1Th<_g_}`~S{L84@;S{Jw{L8A_;FPIGKFX_G;S{SzejZe}z$sacd{tC8 z!==0$`KzqPB3MO@d?u-JQT~t``K_vMfTdWCd{b}P*QCs4B=wtsIifVAX z(dI|!k>;B8aC0qssJS*h*j$GmXnvGN9r_kMMxzevvJdMp>M*p&IWFr!JzCa*`n0SA z4QN?+8`824G@{$%;ag)`)`2FptOHGHSqGZYvJNz-WgTci%R11KZinwLun!G&*jkOc zYokV;wN<08+Nn`TPpDBhPpVNTPpMHC$$k%u_&=lbi2t){#J{~7@$aBU{GU@J{?Drs z|BhEcI%{|0Olz|FRnK@1#cjJF5}@E^5TTtGXRNs;CkF?rN+9J=DlYPc_zo zUTWm0w;Jm}A2ssTSB-U`pBnk=uf{qsK#hF9qQ*K9sgd7-YODi;)X4W>HP(Try@&Yz z@lcD2KJj~4)Wy)md=w9(E{5s*$ZFZG!qt$X;j^TKFJbW8V z%W?TCEyv|JT8_){v>cZcXgMw?(sEo*qTAu&+hiJbF@=3-sDsd^avXUNZ5oX{hc=x? zUPGHfBaflIMk8;@eh+Kwpqt5g#5=TEw8Z-jTH-yMmUz#h5$`wIhlY5+rIvWV&GGhO zLwt{cmUzEQOT6EsCEjysiT6BO;ys_1c)w4#!}k~1hlY4BR3qMt)QI8BUuTcwBH8a@5&z}7J>tJYjrgxrBmS$@i2rIe;=e|X_?~_6;Ci=whVNn-f zCFY}e7{9Y z9G73ya$KII<+waW%W-*{Zik0&XK2(#XlH5E#W(CjlXdYN$59udeM`$a`5leA2<>}X z*3I)Y>LRoYw5+2SX}MW&iI#QsGL5LRqCXw*e$S7_9QZ&BPM|G#iN zH01wRHS+(P8u|ZSjr?C#BmaM>k^eu{$p2sJ_W1sS8u|Z6jr?CzBme)Zng8_Rhewb6 zr%)sRDb?-p{RK7hpIVLlr%@ySY1PPoIyLg2UXA=`P$T~t)yRJ)HS(WXjr?a(BmY^| z$o~y$rsN%-(IdNDQr9P{5`l!M!iHs_>AnRC%2&AI6j z<~;Op^Ud@yb6$F=IUha5oSz)2MUb zq9Qcv{C4)?ol)m^BpeUwI#-nA9TLO*^G>?G`7T=4xni`ebH(YWZTk|mtaEqMvd-N@ z%Q|;2-3|}mO46wFQtU%RUBApcqt#(=`F8hlJLKE{p4FA1E#K~b+Vbtn(j)NjtsHIn zcI9cyw|juLe7gtfA$a&!fwp|RinQh1RiZ85t}-3r{sQ~ZEZ;7P!i<+vYB%W*%3Zik0& zV`&`sud)wKu3yJ-T#ozkv>f*nXgTgD(sJBSqH$2ial+HMAzo8B9-7qKRE}G{CF)Y< zr~COC{`J$?cL;BUe-A-RJYT0Jo-=8Q=PX*{`35cVoJ~tS=g<<*H|ch`kH9`O#Pe-6 z;`xpm@qAZ}c)q7bJm;zr&v|OZbH4Xv_E<;X_pa{O(QN)_N%nhH)cFE^A6X|BavXKO zNav-_7jqnW`#|TBmk-sFw|GsoXIe@V|bZ=vUzx6*UX+vxYq+v#`B zJLq@JJL$L0yXd#fyXiO0d+0g1@4!B^SMcv6)cwN*?lW-Q&M$61ZRZnrfVT68J4oC4 z!X2VmT=IR6mVAFpBj4ZY>*s}6 z`$u{X{(S`d(2(ydYUKN8HS+z78u|WJjeP&6M!tVnBi~oO53`r^fBYjL|9|TCYjJ-; zje7W7{R!?bs8Ju+)F0vgf|~0?hOo2fSL6PI8ugP>y%P5q)TpP_>gBkpv7 zIn=1XoNClxE;Z^ew;J`AM?DAs2@*BxFRyw4{*xtY)MI{iKYUL?jruI0#`SJNHR`pH z8rQqGs!_kUsd2qqSdDrvqQ>>^?P}Eb9coG=6mS<=6mUV=92Utb18ZkzQ4dew4LDzzQ@4v9r*r& z&Tq%}7u4Hq`?4J0YA#1_F_)*mG(SLZHa|#zVXi=LGFPNOH&>!RGgqcJnyb(o%t`cm z^FuW1GPJ5R>hNLqp`q?VtHyEES!mU1)KzFTXw*??kI<-_WamNTyB5boL%wUPk?%Tc zqrMcN<#r-IiXDOH%fsA>U7^k?$wf$oErfW7e7ELd8*tVX@OtUiQ~ z-fGlOXY~PmbXTLEx~jjzM}IZytGjwHZXKvmZ#~t!aqB^i`s=Mm{q<3!{`#s>fBn>` zzy4~}-vIR%9MWpkU!+F;4OFB42B}ehgVm_NA!^j$P&Mjrm>TsrT)iH*T-B(*k!qX= zN2yVdqt!SMj!~mN$EtB2d{vEl9jC^5aJ(AzJ3)=};6yd*d6F9E!O3dW_Y^hGgHzS0 z_i1XJ2RnFA_kBlyK=>~4dGKHSyNtwq6pvrCjb7vUKltw~_}9lXf8)Qip#ICYpT+S% z&2P|um}k>h&2#AA&2Q4bnct#+HNQ>&Vt$AI+59ej#rz)qlX))vqj?_v13p5q5A9NT zf{z*;zhGWKe{Wt$e`{Vue`8)upD}+xpTd0v_MxFJLtDae)ZtQ{N8N?CjN_=Y(3aDv ztI$@^sH4zU(x{uz;=D<|R&!qRwT4E%K4Kr5`P!;RzP726ukC8&Ylj;7 z+NnmqcBzrC-D>1(j~e;f>%EFSj%(kdxX1DOm2QvYb-xj5>6*Mn;0|BxE_KdeUn zkEoIVqiW>;m>T&%u15Y(sFDA#)yV%zHS&K-jr^ZhBmZaA$p2aOWgK#9)WbRTMI54P z)W>(~^Ejl{-{Zfhp#Ba^fEx94QGE{oJq0!D>9YDPmKZha>qqrzl)M`Cc13*>hm0Ea z_lp|mv0v4w$KTXAkNvJjeO^`LJobkg_4=n8=dr)ksNcWUIFJ3KMm=9s<2?4Sn(I-< zxRMip;5?Q>je1Y1#(8X?e|;)HUQ}Mrr}6!z#kh~4uUljrrQ!HOb6R?VIUW7JIXyk! 
zoPnNa&PdNSXQJOTXQtmZXQAIQXQkgZ-$1`*&PKm!zLB1T`v~kq8;AP{>ak%0_Y*ii z+MJUfY0gCtH|M5@n)A?u%{S8nue+~+zds+xLwgnX5!Cp3Zc&fHeFQarzJlsexR0R5 z&wHzS1nwiK@$(l}55s)~HTLIr^$^@gP-Fj!st4gdf*SjKmpa0I6z^ibzrGIZvrchc zU0h$cBLoQl9)jbzt`6;PdRq|wJp{egd@sGlT$09hb!esN%|W=IK;ybPwEO5yLAakl zNYoYpL<`*H)tr z>!`6mkE&6JkEyYLb=9cD$JN;1dg=)O4#c~@@9z!!p6H|adLGvW4HENs1sbjkLTkuz zTo;7ah?eWZ#6)o3=t!cR~Y(vX+ zVOtv41?|{}_56xWAzO2KN`#XK;T(eF~qi)N&j=$8pr% z^ExlbK}U|uaqt2y$H9xV90xDaavZ!&qb`zN7vS&j%<<509CT6R=jp1(anMbTpRc=m z6z(sm@$>dn<2dN0#?RkdjpLw?8vD~%jpLx78vEB@jpJZ|8vFZ-I>P-G@5uLM{l3S) zM+|XT6rLbt+-{+H5WT=Wn0_Dk9oUC9AE{8!3lq5i!11}};q-gv5%jy}k@P#}QS{s9 z(ezv9G4z|}vGg2VFQ>OL{(ddAahzWqg!>P)?EeH>_J1NR`#*_(ANL*Dhlc&1qL%%i z%5mBMX|(MBbXxX*1}*#l8ZG<(IxYJ@la~FTMbE*1$B{kStL$gTPvbvk)3eNT=-16} z(lg9&(bLRt(^Jgv(38yX(i5(`4#M9*m*b(mflEa-exCX2nFz5OKi>lNYe%P@g|C&BT}=1=J_%xmdQ=5_Sv=JoVv<_+{l^G14u z`7?Sw&cnKYyKH_F=W*Q^+84C!|7KeD|4Ul-e+#`8|6?B-_J5mN_J2FaW&d~3vj012 z+5cU%?Eh|B_J0p8`@ff#{ohBg$9)I(XrHr}<8D7K$K3&1j=O`j9CwFkIqnYAa@-xE z<+wXaqYjf@_u=n9&hgN2+?`P4=lNQVIPT7>@$-M9#&LH} zjs5vnjpOb+HTLg&HIBRUYV7X?HIBPu-WPpe!|!{1pEktdpYR0tCAghj=Ut}%viToq zT<87h-!Go|1D9HAxz4-7@!#$BKhwXNf1!Uh|4Pes-f#5JHvc;<*LhdzpKSgQ8rOND z{Yn3TU0@&DrSJs1%JB>4f9UVc*XVD}|I*)>T_!v~V@^Szy6*Rg|Jr;i&f_{ewA8f3 zKMgJMPfJVu)6s~3diJ3q{u$H~|BM`$_-CRe{+Vfse->KepOu#Q-#|k^a^u1dYQRAy#)XL1@@uIb?<{5m+RgN^rEmK{yPjbu6rx#>lfg^ zzo35Kwy(nR`Q{{Au6rM%=h}Q#`aSc*^tG53RRhRP;ug7WmoKTNmfbTD`4-N5Zpq6+wR{5(C_+>st+ zet{lnevyvw{RQ@D#dsb>E12lQpHcih;kEc)Lt?uqp1^S)S|^Tw5`_B?v>f+c=#Ol^ zD=q6lH+r?rcc*1t=s~Zv`JS|_6TRr=Hs70;b)yfx)aLurSVuzZM}HWE`wukMl>zKS z!*TM8T8@*5<2X(R>ihzPUM=g&V2;aiGK7}nWGF4i$uL@uli{=+CnIP%PDavloQ$I7 zI2ldLaWaOUgQbBz+RYgfebYqe;&s43*Y)64ZodFakbhmgiR|Zij>~?gW_=-U6Lou8 zUnX&ULHsW9ACqZ$zbQ1{Zz_AV3G5|q(`bp?bXwvzgO<3xMoZjYrzLJPX^Gn`TH^Kw zJqHipX49|W(u93z@SEOq*bfL3cx$@9zdsX(5ubPT^@z{A>IK*(wZvyG#}S|S`}N`d zP{*Oo=RE58eSJOtz6EN@$3l)T2(L!!X#9PPb$k4MA9&AWFZuY8mV7LsB_B&^$;UEU z^0Azje5{})A1i6e$0~XbmJ;@%A^vN;L;OF|dBp!?@3-0a3opcyM)x&``L+6b%&+tQ zlzkuDem&jWyn*gz-bnW}e@6E(e@=HdZ=$=Izo5ICH`86rU(%iJ_zd|ATWsaL98cS5 z$=`Nb^0$MQ{OzPAf4gYO-)>s+w}+Pe?WHAu`{+5izra2;7k*`yMNf>u>aZi{c*nPs8!hkpHx5>pG%GW=T;;CdDO`N&1&R7uNwK!r$+ws ztC9a()X0AUHS%9jjrj{g?66 z_>YqGW^*a}3v+3DlleaSb8{K`GxPoQMsrzugSi~N-dvu>`6lkeF#I6L(H>wg*KZYQ zoPXk;8?eEZ{Pq>u%k@NM8t0?<_X6CO4e?C!+sEFH-|MRp&#G#P=ffOFJWDa}q3sG! z@cjnPqaJGL>+$zJqTX)X*W|e5uNIBJueNTFzpsvWHTIIfM`_95W3=S2E-m?coR<96 zqa}a!X~|y$TJqPBUXO=wjcCbNV_NdnghqZsYf2+O&De*A{51Ej!XDQPp|zmpdaflc z*K@6Cxt?oH%k^9vTCV5X(sDi5j+X1WCuq5zdy>ZWLO5PSelQxhr#Uaj*E6)_=UH0v z)1H?6bf6_a&(V^f=V{4LM_Tgp0xkJ@kzSAc3+zKfeqL51Kb_RbPiHmq(?yN^bX6li z-PFiWcQx|ULyi3OR3ksV)W}b7HS*I(jr{agBR~Dr$WMRor`Y3qZh&|A{(XfI3*Xn` z@p~MltM7BvHi|fY*gTLvXdXoGHxH)wnTODOaO;SDXtGWW& zp<$gErIvMRG{;egp^c%X4#(0`hp*C7hvR6e!|}A#;RIUha3U>rIEj`zoJ_CB!?!8) zI9#H!4-KB?J(c~dVFH(a^jP1AVfZz_{S5YF!UQfY>Cxsp=Z9XmWdY_hfEuayf&=%5T z@R5XlXo$~Z@0sjJg$dkGphucNq(_*S(8JA3>0#z&^icD1dWd-iJ=naG9%Noc4>Yf) zBiu(|k5)fpqA!@}!k*tet241(6i?u|4((%(9}B|$1zL{tPw68zzm}HcejRil;9Cg_fF9C10W^N7m{wZ!FXj!Rrl z(h`?bw8Z5!Epa(ROI*&<5|?jiiOV@!;_@xM9uMEXqh|-6zDU1r zzC^!fzD&Lc2mw3BvDnX?#8l?H77d5biI~_h- zDmC(-T8;dtQ6v9p)yRK3^?KZ2P$T~t)X0BEHS(WHjr?a;BmY^{$bVKf@_&OG`Ol_C z{%=$x|Jl{Z|4nM-KZhFmZKbE%R4+-l_is?SFrKmL$UN7_u1m%nh|L0|W$&E@6z zALe}YRdas&ck?auZ{`B@ujYdEFXlq@&*od{E9TqipUj2nAMw2g_M!a{p5Xfp9KVG7 z3p#(n=8JOtd-I+2x8}R(Z_LH$Gv?y-DRT*W9KPqkJ~a3q@4MN{b;!N+Sln;$^YKgI zQr;!mkBLX)KT6Z1|8vxU=lOrkWjO!0`FplFur%u5A9NTf`323ami;jTJl+)mVDNr zC7+MblFynn@>z?0Xvkk}@9XndhvQ?zhWPgrH1hSB-#(r}zUq44$9|M;|2RF;T#uIP z$olkfn{Pl5GdHA%nj6tW%#G>6<|edUM>eGg+I%xQ!uU_wxL6)>R9? 
zT|6Y~L`#m#I?;-jx@t{JUA3X5uG-R4SM6x2t0!owt0!rxtEXtGtEXwHt7mA`RcO!B zm+|i=*oSs0Ji))8;P^%JbF|dm^YnR}??_7>zCeFx^Doj;moL%hZ2o0h>a-Jm*5*6Y zQny{`(>C9gmUXEceG>N_*rSE-iG|XO*Nw0~*!tmmaC_vrr+;1iV##waj!T|<(~{>t zwB)%jEqU%oOP>4FlIH=mvw zW*-{jGevy~m!fLKXPO%EnXX2BW~dRL*VKs5>)s>TOI^;Salb9@!_Nc1;kTc~9{1lu zn@#t}rMsVx$Kf}<=dkY=w#KjQ=)Sma%w%IcU%sQSmwJDfH|=litea{(>+Tu4hk7txZ>#kAz}16uO=AuaX3ghoD>vJdT2c!IASaa`(sIW6_R zf|hz;NlU%2qLI%yPIwwO#A^-5LzDXdh~u(8d`wHcKA|OEpVAVqwY0=*9WC)%PfNTu z&=RkWw8ZN(8u9v^eQ1c+CiNwJ6jdW$o7ITdmukdoiyHCTsz$uFd9P-V`roeesP`S- z*Vp?_j-$Rq+eM?kckAm>-+R2@X5S}ljawyjZ}UF7m-#EYr+Gi!!+e15ZazqNGasV6 znh(=m%tz?XcK!&*Q`q7d=cPW5(^4NNXsM5{X{nEswA9BbTI%C8E%kARmijnLOMQGp z%kgxMMtywCKD0~W2~O!8m*eSsT8^jlv>Z;G>r^VW)M09lOC6@6 zr4G~5Qithisl)WN)L{l%>M$cMb(o2kI?PN<9cH0tHiO!JNO z>*nnAYv!Bi8Ri`HbaPI6nmHFe)tsB2V$MTPHs4H7GUufyn)A^U%=zi@I5aZY827&b z$3w&Z7gS^a3#qaHx2m!Kx2duJh1J;qB5Lga?P~1*9ct`T8m1HtUGRag{l2kJ3y-YI4kxC|+VlxHj-Jpu}u9YGGSAh{Wp_2m*(;} zd8?dX^}MUAvny7*OrtB|;i2cm;FE$+4sIi}|F$yWX)^itGS{brOxQ8Flg#|HWx^i? zpBsFB@C7pG-c6={JDKnznf>;Zxz@d8!rn6V`pASo4ZbwkcZmHBP@a0NWx^|D@`Gf; ztAnqRIiDdi@5iAs;V_x{H^_uH$?SK8%=lI^;Vr?p%8b8VCcG>79+~}*mXFBeF*4y; znfl{o_B%ef^88df89z~Z!UuvM3Z5)8e~L`_NbsXF=QCYq{tTILrc8cT@`SSYu@sFg zJ-3W1!Q<&=VA=co6m>8CWEoXH8(Rb~3|?@P+ClmIU zsXriiVDJ^egMzP0u727-n^&Y=TzS#a-)}Hwi0Uxxy5Q?&!W(4n+i;orBZ5cDgrj7} z-xhpF@Lj?8$XvJ4GT|_p`uEA~cbv?9880(_f=oD3CjUV2L%|Qr>~D%pI8>(IqrtzC z+3yUQ&%K#4^JmF~vt{yggCCdK-+YM~ODfc{m13ssQyo(O3I1>J*D~QDnfn}nu~5rzygXf7W@;Uo>2-sT z4n8Khfy{H#P^SJt?T`A$D$jnK$b?O0#y68G)m$e3gWw+qpD44xRx5|U&$ae4<2%Td?kJN#EBG9l{dJa^zd!8neC4TkLGXn#`|U3CKJOtjzo*Pny=3wi z%j~aDu$VEwpF48*m&p&1b7q$ZUm5)K;HzcMZ?H`LAu{!c%H)U1 zk)qb6Z%$F8(zldR<qk0M2*{sQIM z-$I$^e38tw#WM4k$mE|5{!{Ss;G-x~G{Tk2v!7Km_i?q%v^6sG*UIEy2woT5JgmP# zdDh=3^Et3drv7G`Qd?y5TZ2u;zv7PEuLr*w{C4m=!8>H?@02;8T{8LIa?WgzOnz_j zwzBvA6s<4)VTx9i{y0TXl>W4gsGen?%Y?mIq-ccuLVmwY{(ww)P$qv!=KYfKCN3JT z6GG*Gy)N3!3KO^`|nf-N=8Gj^2ibmKu z5!_LxUMHFPXUpU}%jCPr-{d||=6%yuW_+LW4rl4-{q7;(L#AHOk{hq z3HiP<_4+0EPDiHpSDt#UL%o3^e}zoFLCFKsk(+gu@?5Xt2If(vU8_7j*9Bi6d}Hu% znffDS>W!4ikCMsXDzpFFlLx0GaD zQ*SXvibi;X@^rV5sn;^Oc{*|`t(2$U^iZ#j@_9OID^sssa_e;DqP16^@x#LP>KO88 z1)n3cpUyH{>mpPCJemD;P41A6jPIsA^~Q$t>!Ey}&U(t!>y_NS?Co7fjIQ~2K-^b( zu17zarTfc_9}qk+x%%7U>TiK-K98J7d>#jdI#T$xaf8fx z43~3eBZ5cD%pWB)|5lm#w+G*uT>UNgbCk$i{IAjS_g{E_jShA04Zbh%j|Q4 z%nl~XT-Ql5;e#^ux)nRm!}R^_6y@ogDpPM-^5k?(&!h6xYa8m#RGxaXWa`aMo{^4B zoufSChlcAnKjfbb{zLFnGNl&DT))LK>n)Ml-_qoH>B#tH%2Tg#INue@=jm*vOubdf z%gf%?WyI+G{;D3~TIE^)d71I+g4YLcNUr{Mp!!??n$Iie5uevhq0VNR^VlNuyl$0q z)_6Xt|BCXQ*K0E8^@f}?drM~gcA4=zWXA6d-W9w%`QOFWt^PIQM5dOFA^Uh!7maoH zhC1)bT!#;2&hsOgaA&CZneur$`$DGPzT{8Rk*WKYC)^b39aKI~XNP3!RsP3{1F4sj zuT_W6Z`%+yT{O3PwhjU4l?`fDDymZl5^I0 ze%Nwn<+-n2WUkwJGWo7D`EGK~tb1^e;GV&~lB<7xI;pr0XHlwboLBL{m3@6eoxU>r z>L)Ylvf!TuUmkp=Oua!edx`50R-W-gf)~iFzfflVMRLw;aqyDhrNPUB zmnUCbJYR!~*Y8ofYKkKvB*=Mcobk4bjqSWKk^vBePv+;*4 znVyNFY~q~pIER?|xx~5SaUL`S-`zs-4 zSW--0O3bmA7ISXOia9ss#98BUdGWdUkvhK_=RKo)j;cibzbP_1HKc|{2MH<;^~ZxnMr>WLZNEavmmK+LeA`BpLYnur?kc8UH!;Hp%{|S%&3(=N#q>Ww%xlm4A0#>b3>GsSYJSW- zLd^P+VuoYPW6k5m^gBV!Yv22wBsu$=EN1w$`5E)G=I6xJdtS_NwwV6rh`HW*fAb`# z-hA@{F~e8QubG#amx<};b#vLeuf_DUL(Fied6$^^d&CU)iRpiT@aC}P{&*maQXBfO zB4+rL`DgPl=3mA1^P8CAaWVNxF~dK^^m{t^VA$q}w||9EYD2#@#0<})s64Wovzv2> z8RjzQ5z|lJ;Im;5+)wjK&Xf7Y3=4?KFEL+gzRXp-hFCsKOur+{qr?nHiLXFBd6p%o!flAIj@+VvDI_`l`TLxrlE-m$xw*KQeoBa$f2Ekbl$d@>2Nw=o?vrIDj~SH{v%b8T z{Qu0?nk$>DnyUwIp>%j$7rZ9%^}!1h-w-@Cah>47iR%WZYBALJ9Zae(d3ax7ZYXAW ztC;mo#0;B?$(xC(-`w27+|u02+}hm6+}7OA+}_+FIQ8x^iYlki+hcL3nICmCGwLjL zc-?dnGqsz!yO^Ey5Hr8Gn7ogeyuW#XnE3<6%zwoEsClUQG4lv9!;xb4=g0TB<>SQk 
z^Mv_HG4&>inLk-fK2=QqjQLsfbLN?1hO@-n$NhNbO3wc0iDO1Dn_n@%YF;F!-eU7o zG43tQe-6qlTS{C&lhlE-m$mAQ=hYBBwk6SM#F!6m|$`(6df zV@4Iltgj>{uWGJtzRp}zOubrS>eUI}n7j(=2Cqnblg+<5cutbv5Gv;i z6UkZBR7`)(#N^Gz8!`3TimBJm+}_;5+|k_0+$A{UUz5w(W&9YG z`XJj{GG0?=+2$9_FPdKx)Bk)iuS4&Dq2%Q^x`22lDRxXzE$ZpPQ&LyVbJm$P&%3f%`NKC&4QVv_*R}_?-e*JyLWs=8n zRMcEdOn=42%)dfRUQ$ecm6-lY2Nw!k-e;7Roc{cM#y=&G)dog(jF~g4LPUbG=ZszXh9_C)=KIVSr z0m0?cm!%uhAN{8hqNrCoi4P&-xCTibJXp+}q2|ZTBg~`BW6WdCeuw){Qw`{obC^tWD2{g1@#|6{Z4p6StNlHk)_-mO z#=O(KTg-KJk2rk4K+N&{&-V)?Xa7H%e=`3prvD>i*8gfgWarGn%!ql#kIR}zy~ z5tCOFlV4}P-h6|(j=65|#^hCTQ}C+9HwVv6d`s}e#EpXKBy}HencU%1_n#(`v#6=L znYp>Sg}J4Jm(SD}KLw4c1PwFu7e)9w7&gQOSe%hv+n0ixe ze?2YlZSHICFQ)zgG4)?bK4U2U?+BJZD&}=IM9loh%p=UB#O!~xnDwv8@lk)AXFilXj-!vvpO`-t)9)rR>pwSt zA*R2r!Rx}7`^+}U>Cf*o-$)+E(N6Pj^LJwU+bd@M_hR}v5WGEX*&LLde*8Z3v*d9c z{bK&rd`wI~$HnabWbmP|CBkCQ5otmf?IoWawQD!GFvB|bm+ z@x&Jd4@`VvaQDO)1*aO|c=Y?exS-{Q%!SQG%*D*bgL9|vUis4>N6Q&V4Qt9XoX3(< zhv+JE8S~ZVYs7pW%ZsU3!CX5!3JO=6{K)*Idl}|CsME-)X+fe7BhX+lsjbdH?Mtr=Jeun9&2~&gQOS)^`)L z{y}q3b8j*I_7U?U_I?LQ&i)39V@8jdA2km(KPIN$a542piRo{&nD^h_-#E#sH^Dqn z9LLd9<|*c<&CiJGXS&(%!!spkf3wXmnCFV=f1Y{1n0l|6Up2ob=K8Q$%zb&OnB(#L z@|%*!arBmXrI>zKiJAYdn0$?x{C)EWV)|Vl{Ce23*&sRn`hEIS$>TWMBxe7c#pGX@ zzZ5fno0$1ui|KDi@W)}x&F5Rm>Cf-mdnBj+UUAImd-D(GAH}RcB&MIk!Mnni``i)9 zV@5~Ctp80+e!~2_`497-<}<-Fl4s9_(c_8F<3$(sOPn>hUE=J)4HM@KW=-nfhx+sL z^q)s^jyJD4pEbvGuIHazNVP@H;Bn=i^=PnZ!+I(zQx?g+}M1Z zn9oyFG5z@QG`GBkn11gt-zlbEYccchHs52u*LX8I3THGLJEj6;p4Vn0il$>2IPqe4kOw z{K;bKO*KywGyhrhbLN@m*<$*cW0o&Z%119rj$byvVt!Rj|BJ+|Ut(Tne#5+6OuZFi zt`{rC+y_^QIX-`%^q%B#9KCP;!2F?@{cjMn{uA@3=1pSOZx+-4mf+Q4%Vw+O^zZMJ zzLq?Wqi@VR&AY|ye~*~;`^@{r%s(Ke--E&1!j_xoA<602-zWVdc^pT-nva=Jh}r*1 zG3!r>$xn;v@2}v)VaxlZvy#Wm^oIyopM@8VJhGc}nsb}aH(wAuH`(ij!IKhS6g)Wb z#lf8tUlQCb@uk732Kn05pKGK4B9<2ulNT4WzJ!>(q?pfNDRXIaS#vpad2*?^@8rpMWK!SHuPJp{af7*zxvu#pG53-BV(Rs<{WY|_vH3Rh z?PBUT6H|Yf)o)?>9p*d5)N3uK-ejxSR&wUIGy8que#xo#fVs1|tC-hWH!2_RaDbTO@$3F0lE-oMsClUQF){rQ7qfnpd5n3in106v4-8wrxF<+XzkZ#6 zO7b|4rkI~LKO?5!>0;JDXPzmhzgfW(!>wk=2y(Gis^5WnDtA< z^s_X0ZrJkDeO+?;@qh2WC3zf2Z=2sSzbmGnHDdO^Hh6j1a?x2QdCX|NnDraPcw#&F70*pI6NKg?9Y;Ex%aI`hsHCUn(XqEGA!T^@~~l5Azjb>XkH0 zIm$<6B*#~quQC5qO#KRC)?aI`Y_4jqF6QgFhL}%nO)>lP`$Qed<2b5ozR7&En0gJw ztZ!s)Y`#rQzfFT{g)R4qW|C9i?-T!#JdUF~%y*ja5>u~@nDzIV?-kQu`{3qb%YCAQ z1aWVOL^AqNYV%AR*vwn*C>EKeyv(JQ4k;Ko2QNF~_B_pcE zzmJe|^6cznl)6vo@43*=T**29dFJ`%1?Gk3MdrokrNJ3LjkhCxA9;Y*Gd(8Cg^50w zOC7vI%)+DGO{@HxQEahn@I%fF^^Y7+A#2nvgG4;-v&zaA&KVX4B%Rpv5c z`YkJ_{x#-*is`RHaPhF^zEe?h*86>@s^!(q*O{*u(_bwy_3Mb~=f>blVau0xJ;|Bx z_nliTZ)9$4zD-O&O~vfLS#bTZPp8IbEu+QcV<}#&=?8rempVK>QcONtOg`2;-u#64Ts(iGn3Fq6OuY)p8Bcwm zY^vqc%+H!1L?IrX|& zzuPU}Vcu!pC8pm!V(RZRe{cRlOuq+%w}mZVT!$p5-aza3h~-Dk$IQpY^m|fF{Xfiq zn$L*o_iXUtunliU&!@`Yj}bDTXR}Gp{&R@QbDPf>Ge57G{pSnL61H3-^GnYBJmQ$q zC1TbW3N8?~TxSbQ&iY*9n9=3te~779BDhG{vc9C`)YspKBQGsE=dY}}oVmO>o*q>& zS2R~LR}s@sH8K6v2+sI<(2SqHF37Hu2lIZD*C9bIse@~anSZ00^HfjF&)wD+)0h7J zN7gr#JZ5yOnE6e_%x@}Yelv4(a|?4zb1QRea~m=Jv=!4&yWotUPS5zM@wT*Ar=H(GCrBR0(M0o8<|*c>V(L#5Q}0>xb7J~^K6qT%a{rtq zIraShIal&Hj^>$PHoszCD5m}*G4+<1mx<~3_24;S%l&h?w~ML2LrlG$=G|iY-4nbi zY`OmLm7MjR%|DnAirN1mG3$R8lOGYY|D(bC!35)*`j3d|cW`jGu;qR^L~{D|`{i)S<2V{= z9%CMB9&er?rr(KT>OUo>-^szl!j>2DRLSYr@0ZghkK<^Dd8T=``33V_G5yXHQ~zZ# z{VoWe7Pj0k7fMdQe!pBSc^pSe&2O0BG{0qDDW>05V(Pyurr$Nei^7)c`C7@De~bA; z^GD{7#q58hd6Ss^Zw_7;w%jkbNY45V;+WAkG5K~e`8Q(fe;d3tY?;4Ha=cy~Gumg~ zFJ}G$G4p>Ev%f>Zdy;1lC!_e^>T#}re}(l&CC9&sdA%GLv;UJ~=ASa34(^}6-VaWH z%+J*1L7hz$m&aMF6J79k$owo~jx(E>`(q9g5$PKc6|jxqz5@1;x}W zB&J?rbCKYT-_pwX?W%T6O&+}d{NIbktxgFs`z2_U8M|512cfyPCU+>9@O>etQJB4O_mr 
zdr3~een0Fdc^pRr%nzF%F+XY^Y91!0-{E5V9U0swY`GtfmYjb5emG9@IF2TmpEN&Z zo??F5JWWi$)5Y{VBlz*K<$C?Rl?=w0)B=CxwhuM@Ms^}(x>XE!9H)csKZUKz)` zQF7{S632`-i1ol=5I4^H}43}`0d4v-#WaIrOAW39k`guW0%$0BWC_yG3Q~w znET}cF=g|JIj%!u>Kzs{|A?6RN5#zlP0ak`=9A`A!N=3@7gE0s8UOYdOOprpAHU!H zWp&P)Bl$mGq^~Su3THRxH0L&-FXp`F74s>|C#GM&-xQEMj-!I+LSp(YEM|UD^X2A$ zn6D7iZ^_{NVaxrdl;rg5_nWek$8l87TwYAS6~xTH)?C?K)m&XnzcqqOhb=FTnv&D6 z-*0M59>>v*=6YiKtuJQ&E#^k%#^&3^^xHJJR@idCX(l=S`u(Pb66KzSn%8n0`A1HxFCxHytIXUw=Q^Me;a~x|zF+>9>cN`Mu43&Hcr!A0VdRfx(@^ zmizZ0$(i3z95Wg!X8o|>!C}jFYq;dB?<0;GjWLfEQ*T`G$gpMo1j%tPam?r`^Az*b zV(L!|o|rs4JsG8beo_BEC;NL|az3B4%yZ0h&GW>ZpZQ|yFA&F!7Md5C7n_#`XZ#j% z#&7!;pj7f;KbvS?9?PxH3NiCniaBqq#N6*zi#aa+?~SZqD|yUloq4@^gZX1I^*4&c z@rbFv*}TQP)x0gZ9hdC*k?~u}y;4kXId1ly@!;!Yht=6>-fjNQywAK}%yAtM^C>$h zCO;&O867tNV*b^9%zQ#j{gYyj-H-QA$=UxI^EvZ*ncsRvS;Xuwo0#LxAtuiyjv3`K zUtqq_e36)X1c`0$ssI>WN^EG1Dmlw0%kLOy; zE1RpDtD9?x*Yn)FFzT52kz^Dvjvs@?)Egq^d=C?o4;RObMh3S@o*ffL4HAzHqY8<~ zhf%@APlVB_g^X(ub7zp zpqTt8G5sADGyjN~`A5yi%qPUGKPhH?H8J^VG5HxW{hSpuKg#@{FrqBx>|*BU5VO3g z)yre~1!DG}PfWf1V(JwTlNS{8b*R@#=3gc`E^5BqTwF~366TU(>J75vD;@Gu$*ilB zQR;8RmJ?IIyqJ0wf(s?jUK>Wa5?2nRQ^{Vd1|LXVJ$Osv>ynjJ|KICoEy+25waqt* zxenG7(|>(2>l>IGidlcFnDtG>tZ!;=W^Qh7VQwjo%SElst<7!BZO!e1FHe6L^4j#r zJ1L#%vB$2X9jwj+=FVc?PjnG;Ub>0dQFn7sb8mBBbAK`Q2Z*VE*7_f0`J?8cV)`E@ zrv7j-^+%e=n8%vOi>Wt3OuZt>D?D}Inq>JD^V4GbohGLKbTRd3m}i=2i&;NM%=#+U z-#p7-HoqdKzlCDzFA`IKv3Z%8`LBza-^BV^VfowUcf|CwT1@>lV(PCIldlt#7Za0j z5R-o*Cf_I~Z*To?mYn%p#LORP^S6b3YclKWFxr&(n`9Kyu~q zh0*fF--prM#6N`5#Kb>_(ZIw%B_pcG|6UVwT|8nwD(1TSo0xu&i&=lte9C-UOufIv z)H^Gtp8n_`<ewh_jhKHV|0`yGcQNyCwf=fp-q+mU{E(Rb28!9=ATj$JEG8c!CNCn_4f5fZ zk1~%DGyich_1f9-O|bk)G3zIZS>MIhPnDec)5OdlVDo2$e0nl#W*ALQJUfiWC4M0p z#eY*Kt~XcA{^p6<-~8ZV$+NG7QIEv0hEbcuuZ2;A#7n}cLgHn~i0c1)A6YIr*Zmb@ z`dcaHy1vT1+PucRR!seMV(PCKQ-6c`WAjGyCi7--TrS#T-fG@v-frFzyd!--IgER7pAb|3q?r1r?f6bxe#U&x zd|u}N)D>kFb9~vv>_3N?{pT{DZ@$2Mp_qF4#ndZgU)KdKztnu0xv2SaF~?I}%>GM= z*?&p%Rpv5c)|VBtzM}PC-tzx5Uu&*xt}3SgYGU?ZL(Kkbnr{#@zqXk94XxjLmfvi? z#oWl;SWLf7#O%MRnEf{slQ$QW7qbJGyZ*Tei<_E;o>m;UL zC#%=Z@(0DN?;&P=4_n_ya_097?v*_IP#ASd{BRhxOZ-SOO8sfp!D8wS5wrhc!7Y+! 
zM}*OhiARM|xx{0_s6gVeVRS0F%8XAEs>eTnmn5nGo-k2z`k5r=zBAc8)jZ8S-8@6g zeeQWN`@3b>I#{{ zajq1H_X}e3)#8}Z8ZqZh&pY$iNzVNB<_+eL%^S^|%$vD1Sw2IvaM8fV4Yz6FYEILJOk?v9u zTi@$?_Kf%M|IX*LJLfxdp51-s+;h)4!>(?XW!d`r*hC*Umh>q~UOv3xzs9_LG&Eev zCkzg+XAcTjwg-l**cD%;d{ot+uZjL@_Eq8P_7!3OoY`gJ4eU$88`>9zYuNq6HSP1m z8`=HB8{2)t9+RwBcvHJacr&|ucyqgJcniBrcuTu;cq_Y8cx(HN@HTeG@V0h`@OF0l z@b-4Q@DBD#;T`Q0!aLb*!#mq;!nN$y;a%)j;o5e~a2>mOxUStSysO`;eG5q!~5C|!u#3v!~5Iy!Ux!O!w1^6 z!w1>5!VT@6!Ux+sgd5r0g%7c}2{*R43Lk235kAb`EPS}VN%#nRqwtY-jc^ltgK$&3 zdbpWgHGGs^Io#Z?6mDT>;g;q<{!}jc(XRfgfIjD~eE3KB7$5!?ZtcT$;bVQcHr&RC zKZlR=;p%W(AFd1^@5ANc6MVQde4-B*hfnh1!tlvHoF8uI!@1#8d^kJY-iNcor~2^w za0efL8$Qj4Uxz#T@XPS&KKvqlh7UgvpXtMC;Z8oB5SI3awt566YO_;5`4 z93Orf?&`yj!`*y1I^5ldAB4~K;d|j8K71$K(}!<`d-?EWJct#((N%fSD_njn z<%?9|n_ZPJYK6;hy&MD;zSUJZXewMDs&bH3_;y$2psa9tXv;xd;X7S*Eg$<+UcMAv z!gu?ybGUrzI)(4`;ThrbrS2HM--jK-<Q!sSppDg3YxPY9PouWk5IAGQe( z_hIYsV?JyZE{Ar@@Do049)8k?&B9Oluu1r7A082Y#)pT6pY>tm@N+(F6dvKjhT)Mu zJTN@Uhx>=0_u;chJKrJ%grQ@i}R{ONUn zu2nuPe|p28cM8Ah!yUqJ`Ea}N+dkYT{EiQ|3cu^aEyC~laI^6HKHMbyfe$wdf9S&+ z;n6>v>`_t>=j;w4NuX(t4hlM(cTEI<4o4&uKkR%%Jr=@dd5tiJ7#XC%&Y6 zl(Mhr?jgZa^zF~it5^f39(Ycg;$9_Fed{66n;s;vK6SHVNPy9&hd15xL=ZQJA zo+swgdY+g^>v>{6t>=jaw4Nsx(ydC_BD!S>w^-_EUdooRzgY>lWa?;A%9gQT&lAgO zJx{El^*phX*7L+FTF(=!X+2N;MC*CtXIjq_YiK=Btflol@e8fziFI`SQuZrduY_A% zb<{0ozq7w~3Ae=Rs8!1TWWSy#{-X6f@i(pKiGOH4Py9>!d4dyE&l4qD&l8nsJx{Dh z>v^Iwt>=jrQmpqD+!H)E|eB{(=UuW6Q>{oJ9NJUXH+l5y0QwT;;vaCZZxhiC%C>ifc zPv9f7P8*kHyRm-^AF5Q2$F=b^!Zd;1-1XM`hVT>>t4`3-v#nWe2nWX>M_-|H&*ng#C|m zOGN#{v+PjzKf)~(^*@wlhqM0yZn>!czAQVE{r7N-M*VkXSyT4k!7Uy2-1vg{c459F4V`tvM1mi^ao3rqc1W!Z7;zk*v{ z>c1?@j%WWR+#*x|MOk(t`}=cCP5tL**~#qh$1OPZ_sOzT*x!p=cIxkuWv8;gJGc1M z-!;olV}BQJ397$ymYvT2PTWFN{~1|!Ci^>b%TfIuvg|DOx91k6`rBpM+3Y`wTbk-W zA&gCR+>%v)lPv4a{v)`BtNz2X ztS|c;bIVu#jk4@K_BZ4fvHB0pvJ2S1Kev?Czi*aZ$o{>#1+D%)v+QE_H{h1F`s-)e zrR=ZAEpGMK&9ckcUz=Ov>aUe$SF(R6ZlSAxhb+6A{o8TNUH#i+*|qH7s(eeWj-qUl zWdqp18MpLx&L&xQ9s4)pKLMz}MwVUA{tfs~2I{Y#Wkc9swfsm>d~;QmvuqgqD``uD z)+u9(v-P8w%Z)|9hT?y7XE|G_@%UyNSDcyUH~HMQ;hXK_!nfFM!nfMThHtZ5hi|u! 
z3EyG23g2lT9lpzM8NSaEPTJ+H2i?wq}j#~x=!yOH;fOt{f}~A z`P0KTZa5#Ye~bP{ZQQU9x7S7gV>WJhAGg;={}VQDn4h$Nj{c`?+;Bf_ua5p_Y}~Lv zYp;y{=WN{YkFb|V|417T45RF&(f_=S2aXr)#aZ^E>s0i>@)Er;b!6GgZpQ=9EB5^8 zf7QkV(`)wJ=zraw6Mn;<9e&fs1KV5ntmuE+#sl9w_V>~Mu8jxA_v~+@|9u+|oFCX< zNB@U59#}`)UuM}yu2azi@5l5PsUyoiaXTKEKea!P{?BYYaF4O4W!YHQspx@y96cq= z#?yM>pFmH_vWc`d3?|VN(ihIM$!^Dn!xVd5^iQ?NhNsy|K+|2Pa**BiT&_A!(8)n%ep1;1P zBwbBYmadXzhWDKon7Qr#OKyN zPw}}8U$6MwmR5XjM=L(Jrxl;6?BI6AXDU0|h|isD#OKbo;Nr5&)sdrX9IgbeH3L68}YfPjriQlMttsVBR==B z5uf|oh|m3O#OMAt;`0C-@p+()_&mr)d^WTZp9kB>r;TjH=OMP@v$5+`6rYFEiqBLI zb35Yma9iarl`xTF=Gtz5@imP?s8$a@$4 zW$SsT53T2&zOdX@GF z=#|_6X>OdXS_SXpexNuRQ`<&pX%A zbG1i6&(R(MJzIMOw4Qf{(0bk(O6z%N7_H}>8)!Z6+(_$r=O$XuJ2%sM-noU={o}2) z?jLWXl@D*Hl@ITrl@ITvl@ITtl@ITxl@ITsl@ITwl@ITul@ITyl@A}Fl@A}Jl@A}H zl@A}Ll@A}Gl@A}Kl@Ev0%7>59%7>5B%7;(T%7;(V%7;(U%7;(W%7@R;%7@R=%7@R< z%7-Ip<-?J*^5H02`S5vK`S1l=`S3+r`S2xL`S4|0`S2B5`S4YG6aBLSt^D{py^(&c ziB`URldhp(gQAr`-=;UvuT9a)r|;6$_0J--^6UF_RsC8Pt$h0-U0J`zMJxY)MCEJ{Aft`eSX#}AoLvDb_KsdlCCG`kd@ZpXrue{RP=yyi3Pe|%24%>lea^qMmA8wMe`PCgCx2}#Zzq3aD{m)%Yb$Rje`hOiCx34%Zzum?&rQtoS@xXp zkM`{FY&-U;@;P?wQ{{8**r&?p*|ATR&$nZrDqmp7K2^TZj(w_pksbR~`C|K3eN>cu ziTz4qmM^tm4llD`3NN=`46m?X2(PrC53jOEg;(1n!#~+0!av*3h1b~6hS%E9gnzN0 z4zIJH3jb<98UD?FBK*7kc=!+d*6^S9E#bfHo5O$GH--PPZw&uy-w-Z8!ww6Vlq2}N zgO%(d(Z8NOI9%DjK3v5f6s~Gt7p`Ux3|F@Ygx9z8@CNp^;SKF;!ZqyP;hJ`@@J4pe z@WytJ@Fw=T;Z5!C;mz!B;mz%?;VtZQ!du#1!duyAhqtynhqtlM3U6z73U6nh8Q$JL zBfNusdU!{>V|XW9>;BHJ)5zjEsFvHc?x(Vgt#v<@+P2pHRO;AT_fx5BYu!&}S6l0T zD)nrw`>E_^Yu!(!zP(419(n8r;Rg2Z;XUm7;XUo$!h6~E!h74hhWD}ShWEAW=qO%=ysid5c_q0!@>u+u9eO%zOMQk@qG0kQutukJIDFuudDM@IrRU1 zy~B$04=-Nd*!527Z0!%&JBE+6cL+DJw+}b9w+lD3w+$aDP&t^14PXx(45rFDOCJgxhS6KLIEoJi~b;v`!4 z7bnxYzi3D6{^Ar`_ZRJH-Cvwa>;9qx{jYv^fc{5QnAZKp>9p=I&Y+hlMbNsx=tM74 zilG-O1vRi557aFZJwUfmbgq2? 
z`daM^(AQ{RfY$xRowV*R?xK5YUx4nReF6Gh?F-P|wJ$(-)4l-RRr><;IocPXyJ%m4 zK3n?&bZ6}g&}V61fbOJy0b2JLkI}lnc%0V##S^sdFP@}zfAJKp`-`V(-CsOI>;B?d zTK5;v(Yn7FLF@ivB(3|4QMB$ao~Lzx@dB;;ix+9#U%W)`u15^Iz8*p7-Smh;*VDcL zy{q;G=(^e$pzCN~fUd250b1+(+jK4M3(z}jUx408`vSDq_xEY7?;p@w-#?_azK^E0 zzJEk(egBx&`u+*6_5D*?>-%T4*7q^A*7vcr*7w$h#}&0wJWIvC!s7p$^6@@zQFwyA zFg(#-5T0bu4^Ot|g{Rna!&7aoV@1iQ*|QU~e7dc5tSI^Cw$`!a8MfB3 zxSku@xSlm^T+f;|uIENJuII)!uIDCpU40Z~Q@f6)Fvm;RXLGlwqU*B-jq9_e+i`ui zvT=R3wsC#7v2}g6b)5>X&vtId_1WIW_1VG3_1V$J_1VeB_1W3R^{Hj!`s`xk`qZ{@ zed^e_K6PzepIvQSpL#Z~&u%uZPkkHLXLlRdr-5Bp-_ms|x;}dr-psY;+g|L~eA~P5 zufC48(hK$0-L7@ED0z7v>iX_iaeiKI*Y!u5V)-*Y{8x*Y_|R*Y|K6*Y^k;*Y`*p*SCp{>)X`E^=)S3`W|KD z`Zl+5eOuVLzAbHB-=l3@-&S^AeH7&wTh}+0);99#v3BIstc_huzc1iA6 z`>0fow{;(t$_cjaqf$B1)_qheC)v7>O66o5`y}mb?30{gW1pnGjeU|+?Jf0Dln(Y5 zN!lNBy>V(kmii?lC5FVwyOy&%gv)AO}2K+n^@0ImC{bLctR z7ocZrUx3#AQ+HbTPv_FQf9gT&{;4Oe`=?&C?w@+ox_|0J>;9=Pt^22bwElhgJbI?~ z1?VrdFF-3F^rt`9z5uQKa1lLC`vSD`#U=Cb_KabJMKabPOKc^Rd zqNs?_Kj%b%Wc`{m(h?aRW?*_Vb# z*q3O(z;!AYYrlZLNc#o!h1xHm`)j{|zCim0^!e!vYu~`_=Y?Of`-NY%`-We$`-ES& zduzYIbt=8mf%XmD-ZT7`-6Q@&lk+Gm76vri9?u{-M4$aN~Gr33v+s@pq+$J?idC)n-76YW#Nlk9fk z$@asrHHCY4!=>>GtvA&+WG18TN7EFYGqqnf9^aFYVUhuk2&OU)!y;Rp2_6 zqtk)*2i)Ew{GEMN_YnAUt>LTkP+r8VD|(VFkeY0dW)wC4LtTJwDst@*y1)_nho)_nh& z)_h+>Yre0gHQ#@sHQ(3Kn(x2Tn(x2Sn(x2Un(u$mn(u$oUG*y|wC4NYwC4LiwC4N2 zbZ1@2D(PaX)_gC~n(vip&G+?a&G*W*=6e-d^Svsq`Cg6Ie6LPxzOPSfzHdNlzHdlt zzSp2N-)qvE?;FvY?;F#a@0-w?@0-$^@0-z@@0-(_?_1EC?_1KE?_1HD?_1NF@7vIt z@7vNX^(aEO(4z|7T#que^1+UDGd&8?$`3o!P4p;5D_`tFAE8GvTKS_6eV88QXyub# z>Bf2#q?KQGqZ{c_l2*Rioo=Xo$s%4G6zzHmOJ2U;P~O?2qCYQpD(|GSr`wfxQrXK^ z-brO|TX|=niq}^(EAN!=Ptr+v{eJ)N_4^mEKj8np{=mZfx>nvwVJb z+sa$1G_sYqQaQxNeM(~+_bG?kxKBCE#(m1+Htti7u$8xtbe)RwRudZcDNWsu`;=xj z?o*Dkai7xM?xv5Tw6MD->F*3|<*ihXwsD`*%Eo=lF?MHt6s5I|`;=pC+^4j$mA6tk z&c=O8TU&W6mE&#YtyE61mA6tk(N^9{0skN_bF%ExKHV1<38mq8}})lZQQ4vZ7XkeajlZ_mzr2A zo)0u`=Tw}Rmpd_TUEPjx>tdGC7`O9njN1h^#;w1Nal6pgAYJ4-73KSjY32J=E^&K{B&8@@`977)Y~}k@F1MBM zQ@O&%{os{$lhmt6H5>PbSKG?>sa#_#-=}h|t$d$KZY$rXGQd{8Pi3HeNRsvwY$WeN zw(@-{*W1eXMaj#x;%A84Q&Id3r4>KJXvNPBwBqMRTJduet@yc_R{Y#TD}HXJ6+gGp zil5tQ#m^nI;^$6U@pBig__>=_{M=LuTz^CYeKd5Tv2JWVTpo}m>#&(eyY=V-;x;KCz{ zdU=YSin<~%{i%I~ivGOZ{D<}t_;r4d7d`Lx-@-50zlLA5*M(oQe+j>AuMNLquL-|u z{~UhJ{we&ry*m7cy(;{sy)yiky(0X!jX`+FUKaiD+DpUl*-OIj+l%#k2d-0Dq~ANB z7p4yV{(;*Ugg>(9hd;LGg+H<9hCj9Egg>)qhsW4IhR52o!sF~8!sG4l!xQZ9!V~Rp z!;|c9!jtW*5>d>EJx_w4?hW)wzq?qSV*Pq1G)8hOu z-99z^l|3c=wLLlfjXf#+tvxaPojoD^y*)nsgFP-h%N`s4(H;|?ZC|ZR=sJ}v(}6Cv z+b>rLu>aENFOQ?fFO>y8Pvf_c=WF~H(Hg&07Q0>Jm&y`b7Eers%u-&$Mam&z};#&4bLR5X6&I7-%d{KkHb$M3Yp z;}2Tn@h7eE_>0DP{Oxv($3Hg4<6j%&QU33dVmwM!)sOM0WMe$mvoRi(?J@c&N)=n< zQPp)S8jotU#-n=SU;Xnap4Kls-@Z7#u!tkfzYTq!#;XRup2n*tjrq5c+cEz(wl!Xx z@O+KerZmQDGq+>BHn%ZeTi6({Ep3d~RyM|KYkQ17in5K3_}SLR{M*jfcx~@G70kaK z+^+H3vFP8x^@ZugB2Qs{?d*PyPp#to@|hZ+U1*I@ZCc|~ht~Mir8Pdg(ioq5ZpZlS zW@CKn+ZdnS?J@c&N&{Qtvxn|1yz*B7K$>!0rJ z^R$FI4pY(i9l+Nkz7BLd;_DzA^Rc0g`FOCc@oVHd6^-8^G{+cAEJ+8V#Zc)rH( zaC%Jo!g@q-JL2m|TjSS+=WG0$(ul8SZpVB)%GUTb=lL4H@;a-oa`{U81nk#(do(>$ zk5)9++hg30`QF;rd_R`w>w2`Ibv=%wbv@eBx*o^Vx*jLcnC~aL9rOJpTi4@cp0DfC zj@I=!h1T_GPmj@~Gp*~bHm;2?&0osxA3_(_LF+p=R|)`yGyv2 zjs2wFcIW8tV`D$5uZ{hre)gGh{&_a`lg_tKkNyknj(TpY6m6bA7rH%_tF$jb>-t?x zU!i~cq;)+nr7zQ#CavpxIem$Kzkt^DzLLI3`vJ7B|J8JV{eA(hc(|56Kg)7j@iBnz zr{6E26))G(ef0YUbZ`BB0j>BMOe=nd(2Ad-wBlzN-Cg?vwBqMRTJduet@yc_R{Y#T zD}HXJJ8NHnR{Y#fD}L^v6+d^{lLpn8rHth})H?QhC%?oToC}R-8Y^*DKB+rxoW< z(2DaXX~lUePq|%jp32j<;`|w&uQ-2}R-C8uoZFG-M%aq;R7Tp0^HfIJit|*Sw-x89 zykINNQ+d%=oTu`VtvFBRWm|Ec$}2YVXF9F; 
z`J7h#%%BxNU(kx5nY7~POIq>s6|MOBnpXULLo0s1rMqiugjW1~Pb+?YpcOx}XvNQu zwBlzr-C6qrwBlzjt@xQoD}LtFik}6v;%6bP`1z#pqM|-I#ZEhuij&0^{du`r zak7M82YF_x+Z8AK`E{wNe>u-r|BAxPTq}-NvR`quibft_bboW_xr8zPp*;oez%eL{;-kv{lMl~*>P6)&l5==QiiSq)p)rzX$W``w7v``wt<``v`r``wh*``wJj z_1WC*xISChdcRxpe7)bT3RiQj>$5fcb$zxe{Ex2-)}?LTuDr4x&&Rs7z1x*nijtS( zQtvl;NB76|&3Cfn`sO>^aeebzc3j_l7dzf>UfYiMo7b`9`sQ`*h{t?aJK`~~XU966 z?`FsQ&FkB-4(Geu@qY6Lw%+d^t`$E!xW+oXr+u!wD#~7V_ayBb*jR`6v9S*CYhxYW z&&E2uzm0YH0K2n3igKWhb@(6~>u^IG>+r!g*5O7r*5N~J<oUAB_I;IBNV-xzOin{4V18 z8o!HajbAF4xLxCy%B8l(?=qgR@w=SH_+8<4jNg?u#_uW{<9D@<@w>*x_+4vb{BnD& zK8iBH#`q1iHGZjFXKVZhxlTpnSB|4(jmKd2YdnU~8jqp0#$y<*@wkD;c--iAjK@tj z#^Yui<8h0P@wnB-c-&@VJZ`tg>Z2%k*cgvHZH>oWu2a!?+)Zmd?kRk|YsJ&Oh5Ng{ zKfSPsBh0`1-LLU_pg6yLrpD_*8uRZVw`2Z2Y-_w8;rSY`M`?`LaJOT;9h1U4IN@ILpb34Z8bsOXJhCR0YQHGYkc10`5K@1X^qbZg-5x*Tel+Y*LpFU)_Rf3M{Y;H``A{#`-JB! zUO%N3uboJAK^_c4RvHB>= zG+WnWI?qSEe(rW%j~VRO_4tC;^_WRxet+q9U5~HWuj}!3;SXKkssDZi`!)XG(pay* zb35|I_qN9W2cEC-pG9l@f21}3vuTb099rW)mqxyr=XT_a`L@P?0ngX?FQhg8i|Dbs zg`+k8OK6S%QX2DNncFq~%h|8-UqNg9SJKEAtK6>fU(J4v|4)U#aecdPf!Tkn+KRHq z?YAW9R@%Nf{EK~4c%6M?_*eUe@Nf38@bC7}@E`V&@SpbJ@L%@z;lJ%c;eYJw!vER> z!{z_a>40#lx{mX3CHva&diFK{@84N;y{fo9mEn4xp>_SL(T`|ffY$X~pMFUD0<^C0 zhV%paJpx+SyC!{~_62BN|BdN;v@bv_9yX=#(!Kz#_}HAjLw}DzD_*vwZ_~a2t@zoR zR{U&3D}J`66+he2il6Oi#m^43;%7&CsQw;-9-_ZTpcOy0XvNPiwBn~Wt@x=!D}L(I zil1HSTz`*1D}HvP6+e{=*DvZzQtVXJ6~8~YUn#ZbyZgL-!wu|x!h6_zhxfGi3h!m_ z8Q$C8BfO8@AiS@=dw4&)et3U-x9|aWz3_qduHl31y5WX)o$$eS?QkP|m+&EWt#D)e zF@02&{7`#%A;kM>oQSu{f&ATIArx~w#mr4`2Yu=^O)YiO9rJ1dHcT~me zE1EU$%0E~7dhzpREqvbaiqrhLr5!(CcC;NoU)IXj&v%ULRPgh)b~}E)V{QC=ZEXE~ z$MN<0`Pvq4?i%aD@wW0pDks>=3#puFD=(ySlC8Xu%E`9!LMrWS<%LvEv6UB6X>Thp zoa#Ci<%JG3)|J!Tj&-G@jdkU88|%s$HrACh?Vv~%AYcQ?(HH6mu z8cJ(^4Wl)`ZlE>4ZlpE8ZlX26Zl*QAZlN{5ZlyK9Zlg87Zl^WB?w~cl?xcq*MbMgG zchj0*_t2VO_tKhQ_tBbP_tTnR573%l57N1QkAT+vdYIPy>QVTSqHbHvt@3{#E6ua% z=|Cyk=S>X{wTqvtgX0B zJI<5HmoL2nIpcOw~(2Ad#wBqMWTJiH0t@!zxR{VTJ zD}KJE6+hq6il6Uk#m^74;%64E`1z4m{LH2mKXd4z+Ap9LKl5nC&wN_(vw&9oETk1b zi)h8qVp{RDgwC~JKr4Qh(TbmOg_jpKLflbQ@w{&OPS^1y~XUwNPgtvpbZRvy@hRvy@x zRvy@dRvt)YQ@7*!Vl!KLU~`_YJg^0=Jg_CLJg^mwb!2O|V;$MX#yYaCtvs+DUypTU zd$%hOq_TsJb!0~y>&Q+v){&jwx@^Y>DzNgz$X;#YiqBZ~brkj+qeQ4!_ed!}g*?zS0 z!~XPPrR)G&`QkvjaVa~9R{m&6H!5WZ)5<4}=!T{25L)@AF@0buJCs(wIgH-FlpRhh z{~SRp{~Spx|1_bMf11+DKh0?6pQC8ypXRjkPYYW4rzNfYb2P2|(~4I9IfhpLX-zBt z97`+zw4s%Mj-!=-+S1BD$J5F`C(y(7r|`7$&q=iM&&jm%Pdi%q=M-A`r#-Fwb1JR; z(}7n0IgM8S=}0U8oK7qMoIxx9oJlMHbfT4i&Z3onI@8KOXVc0*U1;T>b77p?r$n^ykmLo5IErImmB(aJyP(aJyP)46_+fL8wL zPb>fIQTW25zB=Wein`)HL~&J={34$>T)$sXaeiKIS6p3EyuNJ3RVtUdU2&DlWwzq# za-Ofax`M{@(UopjT$R60`Xc&zSMz**y=!QFy=!TGy`0w98&LQv*H|Y9+ObaN<@c%g zSCo8^`{VuP*W2;_^1-%#o*}N&3ZS27Xobs-eEu8ecKtl5++geHxsm7V=eeoyb*}OJ zce9P>zw)^1`=xTL`}O{BD_&o2*Y~@f*7v)E*7v)U*7v)M*7v)+@GY+Kym^m}=goU< z+$Y>;Yo7kk?`0~!9&mdq!_`k~{2!uY{IiGM9^;=qVr%>#<@p+q;k3r%F|JRkQ>&$~VDoAPqp z#r4l$bbrjJ{3Sc$Eq~dL_|IRl5&y5+@qY8y?0CQV>vmlK{0%#jIv5ou0 zPi)*Lern@B@iQCuiDPWsCyupopE%A|o=#=FohNDEz*e44WumP-U6j0BYraf&dn&`V zUqEZVOrA!t_zs9ap(fF0)C|Toi82dFIhtnF5BWR7sk+jC6361e+>UNArGaKV^l#TIdZcorh zQCip-kCryZ<7gY>(aOen9Aj%dTDwj~<8dsl@n}={P}hp5;|kYz{eF635l5JR$GczS zbwY7|`Am)1i8SWlNp8pdJK5HFwd463uTyA@S9`Z(yiTd}wHpZ)?jqy6& z#(14!BYw`bG5m2uMe7Y9rm(SGrbfYys z-D!=_xwOWo2d(kxNl%FH-^=Y7pWZgcr;m;C>1$(r`q>(v^IWHb@j2h^8lO}yur)sY zdA`QyLR#Z&^4;Y;U-5bct$4kX*7dlG*7dlW z*7dlC#{9n4?U>)Wt?My>=j(b5q$lXsp4RmkMB{o~?{-{|!M3i)5T1{C9qM*nk74ZB z^|*o7^|+D7{JzQUx*j*PU)ST7!WX-KQ~%_`evRL4H0Jy5ZpVDT!^ZgCX=D8EvNe8p zyG}*pcMm-wb?6b(?TF|5Y>nUjJYVDY0F8Kl(Crw%hir}C!#rQ(_Xv&o{;1nAe#32z 
z-(x&qKkM;jeq;);trAO=U4rpE9_vsJxcL%ht_lNX*`nv;K*Z(8>9sPe3 zX~n}Q^jrG716uL%8U2Q)Jgs;cOTVU&rxicrX~oY3TJbZHR{Tt&6+e?{#m^L4@iUcH z{7j=2Khx&lj}fXC|%q`I1)rd_^mMzNQsF-_VMmHwu4S)csTJRMZvw z)2EbDYyO?jYZw0BJ~{k@eNuRqePZ}W`-Jdp`}pu2yKQ)`eO!2+-6lNWJ~q6-ZXI4| z9}`|=w+b(|j}9-fTZWg~EyBy}=Hcab8$w`<<5VZY{GDr?=YdG`zZHSfw_H=U%f|10}P>%UV#>+Anc%s;$)`bmitP3^lk@_e~O?yO=exJa`y0Ed0bzu`5 z>%yis)`iV%tP7jlSQoaiu`Xel?^u zzYeA~zZ%h+Ux(0|UyW(auS03gufu4~ufu80uOn#9uOn&AuO_tSS5sQ^s~J5~e|JD@ zel@2xzgp0mUoC0PucK+ruU53?*DsVUzYpcR-iu#0NZk7N0U}>JsOb1%R zeBKN#ZTvdZwe-(l7b`uzf0aeX?Cb>j@TE3Q*H(^gz};`xf}vuLavo!yRg<7^x2Mi(3F#yK|D zjjlGvZLAv?*jP9E+gLX)w6ShnWMkd9*v7hXiH&vRQd@ETKfiaY__^HesVIJ~pcOw? z(u$v}XvNRf^l0rD(2AdHX~j=YD}Dyhil2eB;^#VA@iT~4{9I2feg@NupCPp3XDF@s z8AdC9ZlD!EH`0oqn`p()&9vg@7FzLhE3NpsjaK~JPAh)yphxPTf@#IiU9{roZd&nk z53TsQmsb4TM=O5rrxiaB(2Ac2X~oZFg&!*FAw}%`&+j9gTgo2h>$;Uvgk_Jo{haWl z_Sxa#_F3V_>@&lU+oy-0uuls=X`foko^qW^Jl|wbyFH$7vS;jgzR8}om48;! z%0H`U<)78G^3PAS^3Tt-^3NJt`DZPy{PPQ~{IiZ${`r+w{`rko{`s9&{`rGe{`r$u z{`rem{`s3${`rSi{`r@7{;83IN44@#iB|rpL@WQSM=SqSrblbPfL8vgN-O_Vqm_TE z)5<^V)5<>^(8@m>(#k(IXyu=pwDQkJwDQl!wDQj;wDQlUwDQkpwDQm9wDQjuwDQlE zwDQkZwDQl^wDQk3^eFuv0Xoy>_993>_{vB>_jX7>`W{F)S{JtcA=Gj zYSYR;-xcvwr)a+t;iJQfea%?c^141xakXp3`FZ(t#Z^6iUBy)@ySZI)l}dekwDu8r zzT&C@jdg7gx5v7cm%p#RUs3YC+^_Gqcg5@Ta=X6YKKy#4%dO>4`?_7+v9p>t!-V;V|l*Dw+%g7w}Q0ZZ(CaL_jp?4djgH?d7|6n zdS)ltdcP<0e7)axwBGM2wBB#~!biKt^Yy7Vp07LDxGy@*R$l7pIu+a(o$mIyFUret z71uXE)BO=oc_%yKDL>1O_nUXN|^6Tu&<5#z7yv;+sebK zTwvoqu)mG_zzc2M2VP|3KJa22_kow#%EPH#YAX+?a+$3>oXX|4@^De|a;^DtrQ1`{ ze7TC&e7Ty|e7T0!e7Tk$t%0N!KLcpR&p=x7a~-Yt8AL07uBR10gK5Rj5L)pwlvex< zqZL0l(2AcMX~oY?wBqMxTJduWt@yc>R{Y#XD}HXL6+d^-ik~~_QQ9w{6+d^=il2LE z#m~L8;^#hE@pC_|_<4X<{5(i2ejcI~KM&K2pDPMKQq+Ts*!iD4ymKjgl&{;VlntkM zEMN=GTqW?9wuOEKht{#5Ft`>gN zt{Q&Jt`dIRt{i^HUN8KvT`ByYT?)T%XWGuv?r!q4g==TrYJ|q04JzX#1`BU{J>B;(*^u&1mw{9OF{>~oz zpY!sO=JyYLz2^5UTJ!ryTJw80t@%BN*8HAJYktq8HNWT6n%@g(&F_V@=Jz66^LsI^ z`MreJ{9a0HelMdnzn9aR-z#X%@0GOX_bOWRdo`{3{S&SE{WGoky@uBOUQ26!|3Yhi zucI};f2B3Qf1@?Of2TFS|DZL$|D-j)|DrX&|E4v+|DiR%|D`>@YyKBQ<-a>9(VFj- zXwCQaXwCP^wB~yiTJyast@&P!)_ku{Yrd~f&(?kct@*wot@&Pq)_ku?YrbzpYrbzx zYrbznYrbzvYrbzrYrbzzf2k#k*8Ja+{z6L}t$eUG{kirFXyu1(>1kSGY2}OU=_%ST zpeJj;fS#oN0$TZGXL^G63uxt+UFdPzFQAog>d<49Qj2)1TeLT=gm_B(2g*ykR`loP zUge=w>bYHcXE%Ob<(X9KyB+rpyW6-=Xkg>MU=JJj0ejlY3wyav1?&9YZpS*mkF7kA z%Dy(%`TcCH^ZVOa=MS*4&L3!Foj=INI^WR7I)AW@b-s~}b^Z_=>wIGy>-?cM*7?J1 ztn-K4Sm%$hl?RS=oywo-K>G%6|0CSg{yp5x{!RM@eEqN5FQC`O`7PZ3OSq-IHhi?b zCfv&YIed)$Q@FLgI()3XD%{3i89vTl5pHWQ4W+ucI2B2Y~-8%HuBAdw(`wIu2Vt2 zx!CQ1LuD6wM2D?rL`DTdQk#C0D$T!1m<(nJ$dgYrNY2=%m+>U&6vyFUn zi;aA9tBrhfn~i*PyN!HvhmCx5r;U7bm%TzCMY-EXzPZOnzPZ;%zPZmvzPaC4zIniP zD#|wx(#SUtxgGiDVH^485gYmDQ5*SYxQ%@Cm_1t`MS0vtzInn%zIoC{zIn<5?aDXx*suKY z3a$L{DvkW{n%j{-Ubm4y-msBB-n5lJ-g2D^^2gh5NB(%nM*eu$M*euuM*eu;M*jG~ zM*jHFM*bLWBY%8kBY%8sBY%8iBY%8qBY%8mBY%vskw3=T$RFcu~^K9gg z`8M*$0(-VTin7o~{#axqe=N3BVSY<1JtahD> z^2blK^2g7#^2Zul`C~1u{P9cSmtE_AWgYuY-=O`Y-c0?Y;Pm~>|i7R>}VtZ z>|`VV>};>pM^S3oE0VHo7aRGfwvGH#$436CYa{>cYAgTLbDfIv&u%pGPkpx||Lkrf z|1_|XfA+AEfA+MIfA+Fx>!T=p+sHrr*vLQo+Q>iq*~mZp+sHo$*vLNz+Q>f#*~mW) zZRDSWZQQ>!vT^@%h^_pSN@H93CzV5O<)6b`r=t9GIIaAX$`Ni?{z>IXdy1a>dH&=q zYg+hs=Z8t@Z2jJV+b4#PvL}R_+vCIKe}B^RSj*z;m3x%Wj;58*TG6

  • zm~pK8s1 z<+EdHAyp4Qzf{lE3qK$lZl8tw^xBcFA*ky>i`K*tYa^favysovvysov zw~@~-u#wOD+sbDbx=uy;>>?WZ>|(bgpIu@jpIvGrpIv4npIvSvpIu?k)<;pUw2{xQ zvXRfOwvo@Sv60WNwUN(q8~JR2jeIuHRzADVbt=kdgJ|Tl>)nogHrQ4^8^ZII&xX>< zXTxaavm0pTvm0sUvzzEC=?ByA4Y*zTER|bq<+EFPzVg{^wDQ^QwDQ>@v*9-K*<&{H+2c0y*%LPM*^@T%*;6+1+0!=i*)uls*|RqC*>g7X*$5l? zY^1GxHp+D>$Y;;H9r^498~N--TlwrIzFzt4Wg7YH6}Kawy=o($y=Ehyy>26)yk#@e&>QIv5u^4WMB`D}uXd^XWWKAU7CpG~%r&!*VOXH#w5 zpG~uIe>UAlKKtB8KAT}HpMBvv73H&;wDQ@PwDQ?kH15y7c02CRzOkpIFRZs>BcFX| zE1!MuIu+%!A86d4&2qc)S^2&!osWN?o9%wxpUvUxmCxqV%4hRv?03y~yYkrr_A8$) zq?OMW(a2|u-Hv>=#6~__Y9pU5vyso1+sJ1tY~-_*HuBjj8~JRtjePc#jePdAjeNGo zMm}3>BcJ_ZBcH9akM|4tLZuw<+F`wjeNGLjeNG5jeNGbjeNF+JzF0|+0sTn+sZ~h+uBAx z+r~yd+tx-t+s;Nl+ulY#+rd^o+tGC@%4a*#%4a*%%4fA`<+D_Fal7(aDz$CpvpPIq z`K&Ije6}k+RnPzQ6g~gb%4hXy<+D_Fcf0agDh+Jqvpsme^4Xq+e{?+S{%kMykJaA` z6u!&-WAsr|AE{`5?aT8uzxJawzxJm!zYd@^zYe4|zYd}`zZ%k-UkB5gUyW$ZuS00f zug0|I*P-+(J^$01Ux(A0Uq{fIUq{lKUrlJuucox-S2J4k>nK|Dt2wRt)q>XiYDsH; z9ZhR~wW2k@j-fTbTGO-j_X4!$R~uUM>o{8Tt1Yehbv&*4bpoyVbt0|#brP-lbuz8_ z)sEKuI)&E!YENr^ol0wdb)YrBPNOxyI?|e7r_)pQ|1Y35zs{sJzdF&HUuV&pU!7^q zud`{*uP(Ia*EzK2S65o|YoEg1ih8>=*DC6KWECC6x|MhLd74K>$yK z2W`aBLpI{*VHbXU)J7Z)w-HB=*@~l79=DYrQ+dKxetgn(D$0*f(aMjhJneSn z$5fuNl^>tw`O1&a(aMjhjBxwZq~iPP`=v6<{rY~-^Y!|EFVOmasl4cReZTV0l^S*a z%m45ES7@F8YT=h$>*sro{ZsWPRE0;nR{l=q4O{s;mGaMt@p#Mq`uW~2USB>>-|rn7 z3u_KNYj}{od#K`hFi2e$zGX$3C>RUZhgK4*GtneB^$8zmJR8m)oc6PyT3q zzfWm>zt3oWzcIAF-`K*VU2A#@>*t;AIu-rAb7)+jxo+3bTORjxzQ%t(&&PaU z;C96KLR;g%i0AA5ET;8-me5m`qG`RKWwhSUavJfy!tIFfmA2l`DxRhM4Is_?(|%5b)cj#q?B_VREgds%orduh0` zy(C=4*1DQXRa@(7HP@+NU9IkRtgGwWSXVc&v94}tV_mIbV_mIj&(=p#HnOpgG1q)h%qSt6SPySGRJllJ3v?S1^69`1}3K{|l;oYoC8vcpLlD z@V53P;qB~;!`s^zg?F$o4DV?75AS4O5Z>87KU~W`FT9K0FI?O18?Iyb3D>oIhj+Dm zh3nZp!@Jo%!u4(aeL_+4-EIBdRB{75_CxbM?D)ILd{0~ZqeaR0vh{aU<*%EbBjW34 z`?z0Ue_z+B#MjUEb9;RKY=1kxes+MZuYX|0>+`)`Yd`j&!jk@|F^XqC_^XnQ~^Xpn# z^DCz{zXs5nUju2)uj^>duR*lt*Y&jK*I-)nYY46RbyDG>MLi_Vt%|y0-MsO?ypa#{ zdBaNC4Hf6-<v(pz9qV{@j~(lHcCQ`lcy^y1>v(p*9qV}ZfF0|2_Mjc>c=nJT>v;CC z9qV}Zh#l*A_Nc8qkjij7*758yTX`Ur$8F_-CtRnZJn$r~Jn$5)Jn%G)b@myzW1W50 zRvvhc=PM74pp^$k(#iv)Xyt+DY2|?zXsoj@x*hB6OSbaB%RFCs;1ybV;8j|A;58cS z?CWk<9(aTO$^)sq>2~FTRNk_&&c1Cc52W&rjdk{28|&dZ?y8y@3ivIAGGq%pS1GNU$pYi-?Z}2KeY1CzqIqu zrfKr2R{kl`%0HE8<)8Iv<)6y5@=q07`KKza{8Noq{;5tY|Ey0d|7<`j|7=Jr|J0zB ze`?aoKO51?KVysczj4vt9&cZV6@R~~INGG5KQDJHjyC1j!8*2?+p&&qZYz$q;Q5N9 zEosG3DqFc-aa5GN{C)NPlDBcczTdVLug}Zv`hMH->*@QYvc22&{mMVre~tS8ApF1c zccOLv&V_eyt)H(J`}OnfQh00Eit|)z+lup4>eyIk>)KdnceNGgsnoL-=c$zc`$+Nr zv-ZCNudVm9AJ5nO*`Lz8>+>$n6my*&#N5-o`e5-a~Euyob3?ML+N1G{*l3 zw`2T|wDt2g;raS`n-*^98uvNPY~`7wcs}lPn!7#jbMkT=$9&0Kxre4-Q{o zHw^c;4+>vs9~i#KJ|KLty?^);dq3?XxK3r?=)cVE`-CsI_YPlS?-jn%-ZOlay+`*9{M{>x8ejYljEhyM%|>wMyAg*DA-aUs0zx z`k()9fA>;0tT?ZHRyXY<&|Tx}-stvo!Z+Dn!Z+Jzhi|bvhi|pd3g2dT3g2#@8NS0l zBYdZQdiXB8WB6|SwD3K4hw#1jsp0$V_Tl^OQ}p)>u2X3j{SUhR6T^?# zCxjohj}H&G+lC*rj|)F;w+TOC9~*wsZXJHgJ|_IM-75TyeRTL)yJh$}yG3|}-CX+! zu2s^1&pX9YMQc{E|8&EDuggdI{9)ne?V;LVsQ5a0`E^4|*^9;3Eqm~P=Vvdu{rd3B z_Mq@9_I2S`?SbLf>;d7|?L7R3t>@8H-n6d?zhz$?e%rn({EmHP_+9&o@O$>8eEiHePgfRu=UqKK#{x=|$T?m)T)R8&Mggovcw zfh`tx$9vy%X8C;ozw2Gsb(s0AnSIXLGwXiV%&^fdvMj5ruiE;$woHFSE9I-#{MT8^ z*G7jo@D3xx8`>knmFz3SmFk7A-tn~Ot_xiKD?9NE?nPk6K-I)3OBS{hIh7`hj+1? 
zg?F`^hIg|M4exF@3GZPy4mYw74DV_0AKuH}H@vsKcX%Ir&+xwX9^w7$-NO6ZyMzz0 z8-@?G>xU1r>xCQJJA@Cmw+lD1w+SC&Zxue&-XeUMy;-=aT{nEVT|3;&t{FbUt{!e~ zR|~hWtAtzH8-QCWgy!u1sL!^e5`>+tbj{W5%lSHB3K=+)1{CwcXg@X22ND13@n*M>WK z_5JXvUVS&*$*XUNPxI=V;m%%tJ$$-XUk!Kh>dWD-UVSm#&8w@!XLxmGxVu-EhkJN+ zS-7WHmxgXh&xuTBbI;MED?!CoB~zR;^lB-WR^u ztM`O2@#1r=Z`Fg&Vzcl|P^1jk2FY;fy!ugNN-(-cyc#$8J3g^FCeh@2strz)0 zuW%GWNYK8N+o)3Zw-{?g?Xeyi^s(g@D_+~HiL0RGa(B^}(%z*hrHS@oDc0b;qhK=6`tVLmf?wBZ62QF)n?(zUTqql;?+aLQ@z?GJk6_( z!_&QbV0eaC_YcqX>b~JwUfnzVuvhmC&-Usb;W=L2Ej-t&yM!O{YQz6ZP$}P1KmRlT z<1znPFJI07nCCxt2tV%C?ZQuZb(`>$Ufn7@->X}MpYrNv;RRl;8-CiWwZqSNwPyHP zuT~G2d$n44p;xPf7kPD~@M5o43NP{M2I1$5)w1YPJt@ncr$?4yiqVD5`iPrtZ%e3w%UZHhA@hYwRiPvb| zPrOd|Ez91Z`;=jd*M{C@*<0M+vkaF6+R(i$dxzU~Kk+WD`-%5x-A}wv>we+`TK5xc zY28nJNb7#$BU<+pAJe*@_=MK|#HY0GCqARwm1UpPZOU+or46mhvUS|vvJ97G+R(f# z`-waQAt^0}ZXx&eIPwRf-2U_we-NTK5zG(!QTyKy^P+ zM(ciJ16ubJ8`8R;s6^|2qB5=fiH+zgW!c8GHmWO{zFAdTKV07ett{Jw)|=ChjHZK8 zgZ_iJJlgHIEUU%sIyrhv(R4EF&^q}#1kp5DHl;PVbjYGgpKXetKV({J;ZQ@g#HWjk`aij#&^G!?U*Xca#V!DuR$ z4QLft4cTZa#yiu?dCRQbmSx$l+`g2z*xIg1WOr`Yq@xH$E6=h<^fSDL*KP~4Y%gxt zq^XET(G}?Y!mbKva zTlh#v+i%LUBf0$sJ_6GA>$0phw_n3YM%q3)%i41LNIqiH_7Pcj6t`c=M^f5e%Ce)m z{c=9S()P=;>{xCe%12(>esPu^$L&M-h)mlDXW0qdK8TOhwEes+JBi!Rd#5Zr zgWEgu5v8`DoMk<@{X{;})b``EtQWU;;3H6NKPJo0RS>Ty1ZfW#@4Fp?u`4?MBVuhoFw4&8_Wk)tS=;x` zvJ1FH_C4biXW~;r7Ro6?HlNk1g%|`7JKVg&)c=db{&fUTRZb=q0Zx4)~eVu zD_!S(JA|*dj}6~o9}~XOK017p-9CJ?eN^}syIuHJyKVS3yG{6ZyLI>uyH)s3`^fNJ zcFXYHc8l;mcJr3C?)5Ugf821~=l<98ZTTPf+qhwQz(f6qROl*J5&TrH!`sUdSV*BHE zexqN}_k=w^wm)g-4+<*!=G#xi_NVOpK|@8~0()L;f7;F;R8;gmV?P?(pSAM`9Tk1$ z_FTPXFH_Tll123Fv?0qDyZ_ta(Q|K-e*BSOM;ScSd!ynn2!&3X$uA{#u@iMhkjpa|>Un6bMU$5AkWZCE3zp8GV=#8^% z9bH)wqcvW?qBUMq``Y~)uc>`wYrKBT{WV_K(;Bbe(HgJc(;BZo&>F8l(i*R+{p5a) z*VKNtHC}(={u-~p(i*S7(HgJ6(-^ORxF6&7Pg~>lFYd4L`Zta7`j7iDUjMZ*UNgo(ciH-4jh^_H?sF$f}d>%$? 
ze5Tga{TQEz+ZvzExWC3{UMJIwG#;CCyT)T`E!?m1m|9C)q)kMVe- zjq!Mrjq!N0jq!Mjjq%vg#&|r{#(3;xV?3T_V?1`YF&`|TjK^*^#^V__ z#$$I|>*aop$KKqo@pvY!@z{sfcuegq_iH@%Ew&%wrRwF`g}3lB z=CYEM`7c=d7u$2|erEvvnw}BRuj&~A{feFu&@by50sWGm5zsH{83DaU&j{$%dPYF6 z(lY{jrJfPcEA)(jUan^Z^b2}MKrhoX0{VG9BcPY+83FyAo)OSX^o)RBtY-xDBHc*R z3-yeEF4r>xTK79w(YoImLF<0!YFhU@BWd05jG}eFGn&@@&KO$vJJ-;<-?^67{mymt zV_9}R{ir?`K|hjZH_~(U*ppU0yqQ)#yoFXhyp>ixyp2{pyq#7(yn|LfypvWvyo**n zyqi`%yoXjjyq8uzypL8ryq{J*e1KLxe2^Ze$Ii6s;X}0Q;W%3La6GMgIDu9@oJgx4 zPNG#0C)28jQ)t!0skG|hG+Om=I<0y*gH}D9Nvj^tqE!zcrd1DT)2fGaXw}2HwCdp_ zbZy;I)2fe;(KU5TPODx%PFL40J+1orBwbC91ZdUMr|2qrq(G~_K22|=M-sH^?Xz?x zJ<_06e;3kPUqq`OFQ!$5`xRbNEbqz-^M8Mo^5<2mOGPU^=lxWdl9$@5OUcjMixRWa zGFx@2Xr&kI^2DsP+>Ylkr4@EOhbgVJ<2g)el^xGvN~`U74pUlV$8(s{i*`JRDZOOJ zbC}Z0c07kEy<*36n9{4Z>Qd24ui2_g$*Bms0z~ZjfbvdYM{%Jr|&N z(#LOU?Vs8|?$`eRa=Z3V_pX1tzn;E|mVaMu-(bVkqh&qqUbxK59b;Smef|B4R@vL@ zCFx^F#r_)?udn3g4zaz8y?wZ15R^=~^`^>2Gx z^=}7S^>0U7^{*bS`nMCU`d6P;{cAw0{xzgk|8}NT|8}8O|8}KS|8}EQ|8}QU|Ms9& z{~FP%e|yrZe|yoYe|yutR08OpDiL%Kl@PkSN(`;~cL1&WcOb3$cMz@m*O*rQJD67e zYeK939YU-A9ZIYI9Y(AEHKkSm4yRTBn$fC%N6@N&&1u!Y7PRVLOIr2sNLux;6|MT$ znpXX5L#zI^rB(mh(W-w((W-y#=>~c(K-brE0b2FvSh}8`3(%@p$I+@k$J44mC(x=t zC(^1vC()`uC)27wr_icD9ck5{Q)$(oPPFRJX|(E3XIk~=bXxUiv%+19<%(2$DwY-3 zWvWB@^;w!%Rj0b~`)Kk%u2m%)v2PDdfTc~ z$!FTCQ^|d7)v4sOY}KjczP9RA^4T`7)B4%CPU~;uI&FZB>$G!h)v2PD&b3vik_Xz4 z=y`#c>3Ge_*YiKlcmKnBUQn^UV#CbXeu4X^hX>nJ!x!3z&BqixlN)W+DV3)gs=S_3^Vpj8)AyUzWp3q>pC=T+zPhKl`5 zdB4tQYB#!H=kq3hKb+5--H-Emi;eSntBvz{n>}973%pDX=kpHt<9yy}<9yy_<9yz2 z<9yy@<9yz0<9yy{<9yz4<9t3~<9t47<9v>_aXufiaX!b{IG^KfoX-h1&gVoM=W~*+ z^O@RYyMdn1d6}Bd&s19HXBv(3Gu{0-KQnBcpP4q!&n$bqBJE{rI6t%9kMlFf#`&3R zI6n()oS&y{oS$cGoS$cH zoS$-A=VzgpsWs4)P4o~JhOK$HXr(-dI=|0X>|e_Jb$(M@ z=6;>u7b><_bmROkcR$YW3LEElr9EDs6Yw%MoZr>%$N61j?6BgmVNAHYC6B46kh73uCr46)Yf%YYMZQSR5V{fjnqJ3*`mZZ-O*qesGv+L>*dYRfI`n&)=SDzQ4HNX5s z&(5-+X^r<^=vn%_0Il)=8$Cmx7oe4gKj>+CE!8^`G=mQ=K{3zQz^Y! 
zEhp%&Xz1~JEuJc-%GWlu^0h6k zd~HW7U)$5l*ABGuwIi*3)uWZKooMB&KCOH;pp~zNwDPqxt$gi5D_`Fi<8RlZe-wtF zRx9pfhW*!B+Rgi2rczk3e<|N`sY)fk@6iAD&l)8E4zERwS7jojomHW*6tc^ zXLku7WuG2yZ+F)70xwfLEv@Lef%`j!kF`$?cd$E#kF!q+A8(%=KEXaIe4>3~_$2#; z@X7Y^;ZyA6!X51n;ZyBn!=3D7!l&6shdbNt!>8Lvg}d18bV$8St!-M-p?81l@EP`z z;qG>ea1Z;4a8LX2a4-9?aBusN@R|0(|GAW}Deq^wKQ-mOFRi?vO)KyHXyv^>t-KGQ zmG^UK<^5b*c^^nC@8{9V`}wr;K8RM{FQAq8!L;&zA+5X*p_TWGXyyH4T6w>OR^Erw z%KN3X@_reuybq(5_sePJ{R+Cb3Nfv`52uy)D{1BZD!RKqM?fp@SJTS-NLqOxMJw;4 zY2|$kt-N1DEAQ9R%KLS+@_s$7yx%}8?>ExQ`%Sd+elxAS-$EQDl^ZI$k>mTR- zdi@iHAN5joE43$W)veU#+p1fsJ!PwIrMAFU-Ae6g8`mk%*tkx4*2Z;8xsB_Tg*L8J z7TKy>i@i)ub!!QY>y+o*kL#4BHm*~iw{e}a%*J)f3wCdP6>Yh#x|P}r8`mi-ZCt0U zvb*c6Xsc~pr>wDYo${isx|P~XHm*}%wpF)Md&O4WO6^r!bt|>kY}KvQUbj`ZQhUQz z-Ae6E8`mjs*|<)5+s1XuJ2tLU-nDU^@}7y!^|T&H|w<2vPI z8`mkH*tkyl)K=a4%uBUYztqK6aetuW_IbsArFexP5EmxUIKw+`hAM+`hMQ+R@N7;56f)T`_wkDRqs>V z&{n-qt&**JpIT*G^**(Y>_d|DJi$isu41d+r&iT&oRpRFHI1K5+@G4pPjy=3rv|O@ zQd+cLb!m;CO=*pv&1j9E&1sFFEohCOEoqIPt!Ry(t!a&)ZD@_3 zZE204?P!gk?P-ml9q872jzG84a|BxBXD3?Yr#`Lm(|~TS=LodM&(5^Q&n~pa&#tt_ z&u+BF&+fFw&mOeKPa|66XHQz=XD?dgr&{5?i{;m8>{Ki(>e3r}j!?0^ly|?b=Lq~h zuf>b@bN{R1{q0x62iPx%542wjA7sB6Zfvg!A8fA53`qto7yjg z54V?vo7vBYkFb}9o7>NYTi8ofTD(kcu}TlUC~Z)Qa{t0`Yr8z$#(p;3)_x}3&VD+4 zl)WI_-hL{4v^_t3jQwQzSo?`^2mA5xarV6M@%Cfk6YNLBC)$sMPqOD~YVtC*IhwlY z*=d8OIQKssKGmM3OA78kQ*i*w@?J41I_T=yx_M~ukdt$hU zJt5rF9v|*yj|=yomMj>mO{FZEL6>H5Nby}U5J zuoy?k-;Lg`<8@Q9f4--V*UdEYcZ>Uxzguk`uiLo4j@Rupj@KRT$ML$;#__t#o~W;) z-EHG|-DBf;-D~4`-DhL`+;1a)57;_h4|_ zq1Zp)Q^#i_t>ZI^*72E4>-bEeb$q7MI6l+dKQVrv={Anf3>(L1rj6q>%hvIE*vr&# zd}g~}$0xNpwvNwS?yuwX2(9DuXyI{Qo}XT=sl*J-a6Car_p!ALDDWt>d?Z`|J2UM`L^~ zbwBd>yshK6jQi{O<@2mI+SBb{W}}alfJj{Jn09!YwZ7{ zjps=}*{8?$pKU!)8oNRCIX}O;KefyBT!7a3`<))D&lk`-pMTO9>v;gJ^ZPeFM4vC9 zb-w?l2kRHvDE%^8>in0{gY@|VTH|3u`aFHUfY$h^OrNW7PiwqvOb^iK3+Vp(d;zWT zQ;pX6*@V{ksZMMB)S&z5xd45po(s?#KecI%pE|V0PhDE$XH&Yno(s?#KbzAUKU>fm zKU>loKU>inKU>opKfe^`Yn!6~gyM7!+%R7$<$tP9rnas3Q(fGS-$!+Dds=mG2O9Iv zj_y}otH{6k(@)cL1NV;)H?TF%Q)_5zobSxnYn<;wYn<;&Yn<;!Yn-RHyZbS( z>|tx1H{$*p=X=r~=SADg{it($+ZyMo?PF`4r?#)Hah}?Kw#IpC``a4lsU2WzoTql6 zt#O{(LAJ(uYK?8w$%Ac;^Cn)VhB|qO`%xzkwfm$U^=Qu4I8UvqjXHU_t#O`OGaGgC z2pe^>x!qk~MQdTBPPVjBCy%sICtKO5ldWyk$u_pe`G%FWOikmbo%>VM_&JK!_-RjT z{2Wbd{2W7T{2WVb{B)o-evYFxevYR#eomk@eomw{eomq_eom$}eomn^emc?`Kc~_f zKb>fepVMfKpU$+#&*`+rPZwI_rz@@T(~a(6x0qcwio7VcjxPfBB_Vp+=no>1dtK*jb_-mP(R4!;lT z%(?E@ILZIsui|^;e}8x$_t*CG3lH>C<7g1KYaCrbqYe#rf7GE;{(I^7DOzcWx9j)0 zsN(gdyk9@>VtzmUywomnzkXh7Lv8)MOS!*(-erX^^b&P%n2ox3xsAGag^jvbvQhVj z+Zwm|anowv^!r}r{?zdMj&MJI->YpLpOJPPpKO$^-*>c^spU>8PphunKx@3DcBA{_{A4%TIzKmaf4$#ZXuaQCX}#auXuaRtX}#Y&Xq=xr z-H-Egm#z1EH}}{3y{GWCUh4eZ%k4Tp_Z7a^0%%?A76=_Nwsn_R8=wdqwyKdwF=d z{X%$!y)3-aem=a)UK(C)KNntOFVS1@GPT8ei}a$jL5ITq3&XG2<>6QDXTz`A&xBvM zpANraF9^SBKNWt_G94>>_@|E?MK2N+H>_B!OPU< z=s5yCJ8jVO1ouB2{?wkO=Lg(>raoUl&xrlMaR2o1I(u69OM7bgD|<@#YkP9|8+%gt zTYF-7y*(lPojpGMy*)1cgZ+^Hor0ICJ(yPX?-ksCzy6&9x8EDv^W&)Fm)fu1Psi^! 
z?yuwbJFVlF+8^%M@k{MbTgUG&?yuwbH;v=>kNa`_{8ofP*f@S0 z+BkldY#hJJHjdv$wvJzF8{0a5RlH11$1fj8$vPg@xLwC%6I#ckI<4bTgVyn=N#l6b zazBnoZ5zjv(MCWokMeThlro+Z3+q zrN+~?g@5t+yF0zG7)Qw8_TEp&YlmY0d`}&(9ckpRp8Ju%oopSi`rKc~s{xJU)zJNu z^PiIcv9pciwTq48wX2QewVRFOwY!buwTF%I)5u2t_Ox}p_VO|{v-)`Y~Rky zJJXIuokG6$^L8Dd{fqtcJ#~Bzpmlr>q;-4_qIG;4)06cP8XCu^iTiPU4zY234z+Q7 z4zqE5n%X)(hkKbCj!!f9>-eO0gstP#ocrtew4il-S{B~d%iHzb+2bxX%@?h>zvhe7 zTDu?hu8pmF*OvQhytbn?UXP-6KHAedA4k(VAIH$h_p$CrzB|}DAIEY3$$DNu>wKI* z>wKI@<9wXtew>e!ZJm!(xIe~gNB8S|oXYJwADw8OkJD)6yR-XsK2GO$osTYskM#0Z zeFTl$b^N>0n6J-pKk7wyTgSf#_t)|7N$dFcqILXx(>nfV(mMWqXw-|d+&@`gMeA$p z_@B-Fb^QC$I{y7>9sdEej{iBdj{mtd@-Wc-I{xQzyN>_)w2uEE8uj7=_v`o%=5`(b z3k!Gk@@Cyyar=$xE80cwzadGtr1tgUOYG~yL+xwBm)h5aFSEylhuNdUm)oPlSJ)%N zCHv~|aC=1fO8cttRrZzP5%%!#)pjX7(!L@*%D()6{+>nWYmEC-8>_cS>-=3yKcGWI z>wI2M-={-N>-^qG-=ohF&^q5Y(|75)0IlklNd}>cTr-rlz{^E{%ERJ@;c?dEdso@_~(cWvz{QnHIZPkU;zOq#pidM>(%Kta+Pfhv%mRA1P z)5`yMwDSKwt^EH$EB`;z%KuNa^8Yif{Qp8L|G(18|8KPN|2wVx|3NGNf6~hTU$pZ7 zH?92tLn~kZ(qna?s-%OZrSerqD_PjezADqo*G9DRwK1)HRiTxysUz z%#8I0-fw!ip*>Yo8^7=5*uIPVCu(Zs_VJoJX^q?6>9O(pJ=~9Zp^=SwVNYA*HnqKM z%nN(lm>2f3F)!?EYuu)`pRIAbzn7_L+#WzF)ti!V_s-tV_rDK z#=LN-jr)zmY>nI0n%Ww-hkKbC=7nbN$GmWajd`KDjd`JkJz8HyYiVoTrgo%_d7+h! zd7-t9d7+Jsd7-V1d7+(+dEqF#q_3j2w=pjqZEM{ApU*33{2c54)W+)b1+>P`akR$I z@wCRz3ADz~iL}PgNwmh#$+X7LDYV8BD8X^o#Aw8l?QTH~h|t?|>F*7!M-9J*7zAfYy6x;m-M`V*7zAnYy2Ej_`G6?A?{IBaldh7S$2NKc6~>+7G?Rue++W} z5#bB$!^4B^!@?KZhlGdN2Zt}R4+>vwA5fNE;$>=a|C0@Mf876Mm)ddvlU-)V{ZBT` zj{BeNay#ySvMcPk|H(>r-2Y_5?YRHRuC(L+C%ejy`=4xt9rr)k)pp$fWFzgk|H($# zasQKzw&VUM8)L`)Pj-#1>z`}AOzk0E64PVT29-$nV;;G|RvoyJ`>PJzM5_+mOsfvu zLaPqkN~;dsMyn2_cDws=e{qMcI&dfVR~@*ERvoyTRvoy9#yoPb`!SE)XJa0@-&P%X zfUn0q@}T=w2T~hrV;*_P#ym34#ym3K#ym2?9<8sUO|%iRNjBz@$u{PZDK_SjsW#@3 zX*TAO>Gp7a6>WxHO447s*_cOW*{TCYE9Fb&eYX2kYgv}fp_Tu+bn~+85nA=&QMy@K z_86`DFpq9pmOW0ZUOYh`T9!RYtA5O0f8M24f8L{2f8M86e?Fj9f7a5fKOfSnKOfPmKOfVoKcCR5KcCX7KcCU6KcCa8 zKVQ(QKkI1KpD$_EpRZ`upReiBS@sRB`tvQV`m>%^{rQeo{rR3&{rQ1b{rQnr{rQP5 z>2n0M>d!B<>d(W4e=U~7Qthc&R$PZ@TotYKoA(>5&lgneU&{M6uKp-qpIhT9wLjgj zah2L%w#L=p++XAB9~$>Z|GHn}D*ry|o9O$MRZXb)ejCvGejC#IewAo_zskv8VxHW{ zj(M__zfZltqLr$6d%V9=RXg5ashX|#w~3c&2GH+Qy~24X@BeDJU%yXkHEsPqwYa~2 zpW1~t_7eAhb!^=K<;PV&FSSj*UGIOh;`MpIe%|J^e%=w1a% z&24PlZ*FVjI$=9oIsHGM%hdSV!TqU?)pG<|$G;vO$3NT2{c-%W`nHaL1MaWm(U8{h z*qPSx*oD^d*p=4t*p0^V-`)K<{(IOu9*wxaj>n#~j>lfKj>q1GxAzj)P5aoYTl;c< zTsQ6K{M~Sf9U{kk35wQv}3%L4zgqXmm1p`{|DRgeoIa4c)z7X>^T3WL+v>K zrNitv|D~pOTql+ex8wbmn%R24MJpX)$NMccxAlHoc&U8t?Vk>)$ zzKYh`9-XBBPlAo>#I`oB6WiIiPCUxSbz*xP*NI2lxK2FAR-I1mSi6*@=LWXwbZW=h zs?$X)e$=c{!O@UQVHvmyWc?&#AP=PbXUA=QLX5r!%ebb2_c@ z(}mXf=}K$-bfYzX&Y(4Zy3-mzJ!p-ep0vhKFIwZLH?8q=Cav+)ht~Kxi`Mw*OOMg> z0$SszAFc7zpVs&pKx_P*Lu>q;OKbcLq&0rdqcwicr%QSsKx_P5Kx_OQUwCk_#E|zW zs+i~h^lIKp7ka-x3TgWg`*;0&go@Xf@-4spw>`Vq{lA7Uv405FSv z%>E&Kx&3|k3j4co$zC5GZhsrT(*7oVmHl;ig#A_cYWvIZNPAs)l>J3`wEcN_jQyEP zkC&-^s?tP%qS8fwtkOn*lwPP3=>8AGH`;4eI=TM`Dy{VUvHvaZe=mHi{ciX+`d)3Qw@-hbP)kh9}uigeTjNho{){!c*f)LUHP9!EB}wv z%KsCz^8X~Q{LiPA|EFl>e*vxhKTRwD&(O;Mv$XPGPAmTlY2|+rt^6;hmH#EQ^8XyI z{4b@~>GK7&^1qB${$HS#|K+suzk*i&SJKM=Dq8tpO)LLv=(YNH2ek7460Q8dOe_Dd z(8~X-wDSKNt^B`EEB|lM%Kw|R^8OaByuVE=@9)sc`@6LA{vKVf=LNL#{sFDLuceju z4{7E7BU*X?m{#6Dp_TVfY32PhT6zDRR^Gp$mG^bD^8O{QynjW{)f7Z4|KHHFHHFct z2kYrsngVInhwtebnnG#Siy!G}nu6)6nxg3`n!;(-lV9mc`n));`tmzHL6;D;>dl|@ zIF-c0e-+FB?{&$JWqEnhAEm#&-wxq_?Cs04e=A;JlJu9>|83V_@XOnV%j|8!8`xWi zH?+43SF*PZSGKnZZ)9&C-q_wOT*clrT-B}{u4dN>Z(`RDSGQ}0YuGi*vYK9|R--Jd zMOQD&YSWvPWp(Ik@%`$$ziN0>yGmKM8Ta3~EZdykDE8mN{guO8+Lgjv*&Bwpwl@fG 
zW0!@uwX^Vc_P_3{XxrQWBxxwve}{Lp{|eW$RfqCr>QXjq8&AZPlsN4zN|H4)iiLT$dc=e$}DW8r!Nn2XlYbnI`m9{naC_I+EIeh#g?pGzz618L>`JX(1_pH|)n z(aQS;w604A)5`yaw605r(5eR)(Yh|Vm{xtbgw}P*P+IlkQhJ*HI|a1r$1qygC708x zCs)wAE-BHfFT-hFmt0A!-dsiNx}<605ykT5VoLfy&#nI~s+1O2d%r)zBkkYAqwL?p zqix+Ur8dUa{ZeYz*t%az?OI#+OQ~IF>wYP<>uo$Qxxv=`QffEacwTaot^1|aZnp8f zxGs9k{kSfgXP2iJ>XO99bm}^ftd3wwJ$kW?4^7M|4JiTipPw&~t)B85^^nr~$t+kP-5AEgp zD%wXj^7OHdJbhv#PoLVz(`PpF^tr7(ec@$l%F{X;dHT}*$kSJLd3vErv5h=^VcJ)Uc6{nl|!L%SJwG+sH>98~Lbfuh3V~Hno>0 z=@EyGd~9wbA6wYS$Cftov6YQ{Y;7wa+jyCp^06(Ad~D}_{s|-=SB0){@gxY9|11B(%Yw{AFSsC?w=aY=ON{*F<-BIrFO9U zm9Hk;j(i>Be&p*=8~HlSM!uTb%GcpuriOeqbHDO+1h*?+&1vMTh5M1OmNxQrq>X&F zvXQUWHuBZRM!wqG$X7dig}#b*l#P6~w~?=-ZRG118~HlcM!q`O$k%bU@^!qIsVQG4 z(8$+`?nk~(vdhy8^?3pt`8vf$zB<~-*Qqx0)yYP_PP37(&NlLOx{Z8wv5~K?HuBZY zM!wFlk+1GH^3}t}{L|CM{L{-;zEbOLD_^OdX)9lSyi85`I*V4mQtRvf=}CG%U@Kq! zxWDq%zwkj`PKo^o*ptKgeU$QcE?=*_4WyN~^JwJleD@=7gKXsO0$X_-%-17t7rI}0 z8^Z0%+eI|;cCq`Bw@Yl~ZK#dBU1}q5m)XeMFdKQh+(zE6uvh4-XeAqY8*U?SSK7$i zRW|ZA!baY%wvo4yw(>U0%hZ&&(KPZl#{J0KHFkM=p*}ZYBX8H)$lLWc@^*ubyxnLc zZ#UV<+s!uec8iU?-D)Fmx7oyU$kM zQoG+)-cozOR^A@;GPUWt|EHC=hiK((9Id>Kr;EcYY758KM`Z0?Wz&T+r;JD1y) z-$!WV_fhvFzmM6-?>rm%ecVQVpRkeNCvD_+zK#4oWv|dz(H7Xq@6$H&`;3kJK5HYt zxW&_;e2*~;%?FH=)~m(a-XbM8lem)hm&h5FGp^1IANeqXSW-{m&)yTV3(SK7$$ zDjWG-Z6m*HY~=Sv8~J_7Mt)zmmETvqOilTHmBxJen)@+dzHTeOZ*YI*_f1;)eT!Cp z-=>w{cj)Q*ya28IruLrumEY9fx0T-yxWDqdmR5d0q?O-~3QzKKLi%pHRkX)z$X1I! z-#@1IskbAqpV`Xm=U%3U`SuIX3{?SAF;8*W!#zon7a_3lSr zzq66o?``Du2OD|)(MDc>vXR%HZRGVAdxgG=_N$G&{$?YuzuU;`A2#y(r;WV+Wh1YD z+sf-dUZ$qJ{!1gTS#|A%yq4MJ>4o|$Y8!do&_-S>*~n{U8+qNxMqW3zk=H6V@>n1kxTHQuoYuK1?YucD^YuU(aZ5w&5V=J$9y-ZDc-IP{dH=~u;&FLBGh5Cqu z`=^Juw2{}XY~*!oTY25a%hZ(DZE58-we8%myr#Cjt-S8Q{gu}p3xDEqFz$aoucUnK z#Ql}8`n2-ZfL6X5(#qG)wDPqJt$giDD_^_O%Gd6+^0f!8d^Mt1>i(Zzq5FSY`P!RS zzV@M&uYGCdYd>1~+Miaw4xp8<18L>!AX@oqOe{Ra;pXbUt+4?+3YNy%wJXdEMpXWN=#^<@Z*!VnGR~w(_>Sp8fTxZz$ zJXd!cpXcgf+>9`^|keRjSStzZPm#O`FhpKAvCT-E^yV)~u0t-haUF7*y;5IA8)mCcrgpiF z>yRsKT!)lwT!##|aUF7{tvZ?7RW`0eM%b#8S9_V7>f}fo*CC_azcB5n=L2?mc#Mtf zkZWvQhg@spI^;SV*CE&2xDL6&#&yVzHm*Z%vT+@9vyJPJTWnm1+-l=G)D^Jt-dgW<4jXce8 zKk_uwMxJKb$kW3%@-*8`frj*UD$Ya>tPHuAL4MxGYg$kSpQd0Jv4PtV!N(^4CG zdfrB!mf6VD3pVn!+(w>O*veCCD{bW|wNM6^`FPd+$j56o^6|Qje7s>RA8&e@8uIa$`;m{gZRFz}TlsjGuU9_aqmhsI-H&{H zU?U%EZRF!a8~OOiMm|2aSL&;1pV-L9r#ABOnT>pWZX+LG*vQ8^8~OOsMn1l>m5;Bz zOilUthDJWVbwBd4-Y!os)JMc@3kDqMh<7XTB_{By(ezlR0-)!XL zcN_Wm!$v;-w2_a$Y~PYe$MsxQ_bXr3xE=Z0#Qn%ubsPDrVJlxX`FiB5 zmiv*f+BWi4$5y`T^7YEsrZn=knfsBi&28js3mf^`(nh|vvRCP=Xj|JWlXR(XBVXIv z$k%o@^0mEU)`*^3{Mwz8bn8`P$hoPcPK-0UP<+)keN{ zvyrdeZRBeY8~JKvBVT*k$k$#r^0l{(eC=Z+U;Em~*M2tgwZDyg9bjYrInc)ZbCB(P z6|J$Ye5H1P{C2dF-&1Ym zx08+ho@OJzoo(dzbQ}5YVk5s@ZRNL{m#HbgXVA!RclRT|J?!%ILjAh|8~N>JBfq_E zsJ8}n_++mYAdw(@$Vm#JaCy~_Q{>j-XFUazK+*OBf=UPsx;>u6hf9mCfv zuh-DX>$UDjUazx}*XwQM^#&Vxz0qE!ucF;#Bd<5x$m=aO@_MU{yxwLbueaOC>m4@o zdZ&%N-eoJVcYB$d@_G-Ayx!}6*GFvR^-)`Seay?$l-GH*@|xP??pI#(c{O$7dFPYfuK9I7U$4AAMJulh zXj~sZ?SAF;8E#izpQV-8avFJE=zipNk&V1Aww2ci=Ya_28+Q{oiHuCzh zt-OBXWopXnr?m3=8Lhm2PAjjeec^uPHMMoN^7(7O+@N(S${CiC0>sRiteEmi%U%%7J z*B`X<^(U=-{Y5KZf78m>KeY1oFYSEQ)K_Y&R5EDgYXe&O+K^VhD$&YUWm@^#h*rKf zrj@TMwDMJzR=%py%GV~e@>QKyzG~3rdV93;Rf|@>YSYSB9a{OSODkWS(#qFnwDPq% zt$b}kD_>jE%GXx3^0hUsd~HK3U)$2k*LJk>wLPtT?LaGEJJPfC_m=d`EZd1zzUtG; zR|8u4YDghl2pjW8a~tzU z3mfxBOB?gXkv8UyRyO91*0#oV8!y#v*q~xLq+*fn>+_{{f0_Q!c}xGl<+k2m^)R)3 zKW#s%_&#~Rwzn_b&P$!=2mO6glaDF3AMGWsPmZ-?9Aq7AT%R0g$GFIjw^!?{XeZb) zPO=kiT%Vj|$GFK(wsC!OiXG!9>uBTppNTjMOX&bG$c>0YL$`q_oX zIP2GJ~IU-fekt@@eT1@2e< zOfCPp`gy5c=L->0Ayo=~rX>a{^2HdZom;YX=Tl)|FpZzbTwf|*>FY!{p-!N|1 
z?{|6O!CtDqr*?&{`kq?;d*XNu_x4%(?+bXDntt9@G>*pz_v3h6ZR_WaJC zrS{73^Y)7HGJARW1^b2Ya(h{Lh5dYZrM)z~%6=}q+Sa_9+8SH)>Wf~chI#cR_hVjt z*)C5z>Jr7qy!xt*dG$3L^Xlt1=G8ZB%&TwOm{;GjF|WRDV_tp7#=QEjjd}Gw8}sV> zw&v9jyi`lqZ-XkB{;c@#>Yno7pen8P{wIe&v`-3uWS+GY$U)t@%U)e{6zqZ?jzp>kfzqQ+h*W0bb-`TCg-`juc`GS|}zTgi% zU#M{2$^VYdPwxM%qT7G|Z2ubm#r`GytNnBMH~XjX@Ai-3KkOgEf7;)N|FXXe|81`i z|6_j}{@4B{oYm6$*WohztMCT)m*EZVb>T|(7vakG=X$>2WonNtq?O;DY2|kpTKV0TR(^M*mEYZI<#!KS`E5iizkAZk z?_RX>yEm=;?nD2se|JDDzx&b3@BXy%djPHc9!M*{2hqxJV_Nw=m{xw9(8}*2^m_ff z16uh#j8=Y|(#r4QwDQ}GR(_A5mEY#H^4o$|ep}MY?~%0f)rwZWTGPr`8(R5lODkXP z=(Q^8wDQ%SR=$p=m9JxHP9a}+ z=6~}>c9Q#hg-^D7gio>02zRu*hEKIm4|lRp3!i46s{c-*m#M`(o}KRgn8&j&cFf~h zS3BnMteYM4cy@*z^LW2oIJYKR@2T~hus}5Z0WooJeSJA2iBWTrut7*)$Bi;XD`bK)5V6P32wlU9+u~i4I z@iH~lfoo~Zv)8#F^X&Du>c9=$Uv=O{`hVwH<^5)E?^BlDLM#8b(!I;F+i2B;+v%QV z*&Vd%!<}^Zvg|He_2O>2TUmAwt@?2<-K8wMk5)aopYB|iJwU6zJVd$yu^=AUD`ZJMM{h36o{!FG-f2Po?KT~PdpJ}w}&vaV#X9lhMGm}>RnMJGq zJWQ+p%%)X;=FqA?b7|F|M`+cbM`_ic$7t1`d9>=!J;G_Csc46XX}EUo%ePOsN3GOhZvh*tesOsoDZp;dpLqg8*F(yBkt)2ctqXw{z= zXw{$PwCc|aTJ>iot@^WyR{dE`tNyH^*Xnrzt@`s4t@`sat@`r{t@`sSt@`sCt@`si zt@`r@t@?9Q;WvwA`&8?SW&Zzf(fdyAE$^rIo!Z;B-uH!7^PW=vC+59(y&d!3dp731 z_ifC3AJ~}p*4mi&KD06KePmp?^7G|-e>lDeHHC<8}r^5Hs-x`Hs-xA zZOnUL*_ijfwlVL0V{6=f>t$*hck5}~zkcU_9RKfa9seJ=zmETph2QZK^WIN3=Dqyq zVxIlQ+x7E)EnY9>Kkqjh=lgf}>*uBRhpnIYC->LS`>XKJUan1h>vIV<=Gpw`;{E;W z?f!YS<2C+yWi;O32JXlE+t6OC&m(Yu{k+PB|MB<7{qsid$Nh7DT=D*@c)NaHRenGH zylS+5-X^quUUgbOuLiB3SF`ZOUgA2UmaQD;<3_7_)A*^w*WH{<>~ zzMIoJzFW|GzgyCJzgy9IzgyF5^WQ4}V;lG5d~R#&{cgwo^?tXf^?rAt^?r9OT-!_4 zt<>t-s#~e;WUFq~FZNIW|CK>$hl*vzbH_`{Qg5k&_rEyY(7q_Vvppodi+y2uS9@@H zH~WI{?)ISY9`^a+M)rB(J?(+vz3g+td)w!P_pt|r_qF?n_p|$j_qWduA7J+lA86~} zp&#UBdhV!yhu*ltc_;rJ`oZqUze8_gH_H7G3UA!+oTTKVlsE5E&H<+nGj{GLfGzkO)s_bgiZ?Mo}aXVc1WKU(?iPb*VVN0HIi1oM$yXG zXj=IiLn~j`(8|}fwDNTwt$bZiD_=Lz%GZsw{=XnM(aP7&wDNTet$f`|D_`9T-&QO~ zrEFC!E9T8>|I2adcJDVv&k-v2FXg+9`fq!7r~9uC-(_DFzS|xizQ?{Ie6Kw$e4l-( zo+EgfTFm3w1MZJ`JbTcNc|03y$2^`rWXC+7jk9AO&&Jy^k7pC?n8&k;cFg11Bs=Eu zY_c8mcs9k3c|4nHs}7_#%~l;qZMv;GklGAebs)8ww(3A?vuxFY)E>4~2WESj8vc7S zbKI{wklI`u^Y|k+=J7{u%;S&Qn8)YYn8zQtF^@lCV;+Cf#ymdX#ytL%jd^^5jd}cO z8}s-xHs;3Zmh;AQ%M=UL_bRc^ngEPIVs z{$Hm@mt}9zst0eyY!W1*?Y9=$NO}tEc<{~Jy}a%UY31G ztG;|hUsjfVOsn2}LJuv=KBZNEKBHBCKBrZGzMxfq*3qgzU(%{SU(u>RU(>2T-_WW* z-_oi->uJ@W?`YMZ?`hSaA86H|A8FN}pJ>&epJ~;fUue~zUuo5!-)PmJ-)YsKKWNpT zKWWvUzi8E;ziHKd!{B>d(fs z>Q5C~^`|PW`csWo{n><8{i#l?{?wpVe`?aIKecGppW3wQPaRtIr!KAfvnj3mvl*@W zvpKE$vjwf|kS%G|pRH)spRH-tpKWN>pI3_Ww{6ki0jJNaHLK|l9k*~jk;aW)^SU1CtJrY|Nm*?{x@xJ;O&?<8`_vRceXKa?qXx!+||asxtoo7 zb9Wo_<{mcY%|j^YyVb-TX?D8uQj*p_iIzQsh7%CYHe-hDz$bt=Jlg& z%vR<7^$D4x#`!tZ{rY`V>tpNpJ&XJ6_w8G_tCy;KXBY17`li;P zVv+tVonJ+(E%h9sV!u+pp@p6!6tBzOJjTfY_a6~H$8Hur*FHQv&~6$&&ps@CzI|wT zkbOw_0=r3guzhg&Lc4Kzh<#A_BKyGb#r6T=OYHr_L+$8hfYk zwRSx{NAObZ6^|FSG>%U6-*2SWN6#0E{qn9e^?ZTu9pCpx_xB3lWcLi;Z1)J?Vs{VU zYM&9l&F&Vy-R>H`!|oEk(>^_Xm)$vhw|!dp9=lWcUi;MWeRjw2{q`yPJc5_0ogCXA zbpJ`=vG$4KhwKx=|?RMDGNJpZ}ozt@#!d;c-vIreCMPNCxal=9b&(&rS4 z@0)w%fBR>Ty8r6%WA=#fJo~Ef!b<`J;8= z68Zyc!X@QZ;ZpL-aB2C0a2a`dxU76%SpQzxvhWV_(r|hC?(mNCo#6`d?ctr|Tf-IQ zo5Pjl#bNzSvKw;QE_zILU7WA1=N5){mFI_dljntZm#+!$AK!syrpUuRJMSO`Z_mPcF)3)%BQaT%50==f;L>%45Q{|-@~Wr@YnF^I{Z02 zM2A0y&(Pu4@K7Co7apd=Z^FZM_*M8!9exo$ONXC@&(`55;d6BOQFw$7w}j8t;rrq9 zbog#~qz>N>pRdC=!=rTgdiVkzz8W5_!hMJ(G@i<`7XlaS@VW399X=DjM2Am> z$LjEj@TEF@EIdw!kAyGN;U-#u9#d^d2U#{=hwH)-Gjw=o_zE4~9-gVgTfM;N0_UH1a^8ZJ+`&vENe|i4@RQ_Lb`}qZW$p4Sp?)-ms z{)e#Li}aBHp>22mUp4F}&@{#QRd zyhMjX!}&{PNcbKdo)*quT7$y(>Tp0fe<_|4zF&tYh4Ytg|L_VO9v{wM>c@s3)Zx+L 
zl{!2!{E!Y053kZ;-|%W3_71PnVbAbd9d-||(_z=}dL4ESZ_r`K@J1cB4{y?8+wj9W zY#n|?hpobo>aa!lF&#D!Kd!^3;U{$1IQ*mz8-}0KVT16~I; z8-8AgwZkvyux5C(4y)%QIG(H)hcD@{YWQUxRtdkN!@a_<>Tr+nYdYL5{JIV+hu_fQ z&fzz8STX#T4l9J;)?xYZJ31^EepiQO!td#@RQP=zmI!}PILyUU%Cw6{Op{MMEUjJZ+`yD zoR3iX_1+(TetOPFuKarMFF!ve=ObEvz4wowpOEvBF2COU*UyjRx9=Cg*j$!#kIDJS zm|yRe^z);0K4RwAd!_yS$efR)`So5|KYvcnN7(#&ZwEg=Jm({Ce!aJ&pC6L*5jnr! z+sV%l%K1p0U+-1&^QYu|1kbPccJcH5b3U@?*L%DA`D1fF;^)_UyZiYgbG{_VulM%! z^L=x^gvhV=_V)8VbH3!rulM%x^IdbkM9HuB_Vx1}bH1d>ulM%z^KDuGyIbe78tzs( zUoz#_d$s(0^PDfS^6R}ie!g+emt^_%-u`~RLC%+O`Ssp`e!gDLmwfs4-a&r8ZqAp8 z`Ssqxe!gbTmz3mvE<41}SIhYlG{4?!*6?49X&ad}c`uXxXUvlTydx!b?GC5zO=hu5}{CtU=FX_4Sb6Gn-zpdC7Kz_Z~ z!O#C$Y|9|O-s|M&e=WAfkYDe0@$)|x+mgty_qzG{?}}|<lMac+hZ+d|5(_fGKh3_V0p6hqR9ZicFp+zeqSyBXSuxTsB8cB*?ru`RItdT*eg zUsG($EWh3x?B`b&+hWVF_fGfo%ZqKv<=1;>`1xhUw(#=nys-hu-KMme!X|TpPyH3i!;C8yTH%SF196_ zU+-P$=dUcbg_>XQUF_$l7u#~pulFwT^HYj#(dO5Cm-_h$Ea}`uS$3IwTz({o>tihI zem_5^*p_jAy*JU%k1n>woL}!v_VXi)ZAs_XdsF@VImNcH^Xt87etvkdE${q#Z-$>A zQf!Mnzuuea=LZ$rQqQmVX8HM3@*`RN?>{NauJ-f&i*4EG*L!pP{ISKh`19+%xqkl0 zVlN5g*L&Cc`M$+oLddW87Wny|#a?pAulE-D`L6kqHU9lOXW8|BzGMC+jX2*v%Wm}Z zZSx~>oNt|FH~INi#a=SWulH{8^UaIB#FAg{-R9>T7kf!2zuvpU&o?Oc5>9@-cbA{9 zSL`L9{CaPRpRZf&C8GR#?;by2v)D^Y^#3fo*Uwig_7YTny?4K#uTtzKtNeOzg`eM} z*h^gb_1=SizH+gb#PaLChx~lSVlScP*L$n|eEDK8x#ib;YyEtg{E{txf2{Y``}q=C zw!zJx+*0_{RSSzG%V&nak?AR^zLke1@$!Q_H2l4MMtG}C z&q?)ze0mc9)8x~_Kgol`Kg;x_RKLgrllalf1Nf2aG1aN*fM1QCr)Q=5Lq0i)U#(0J zOZAt0ViK2z+&}z}d_s7ee0=y{`M7X?|A?NKDo4I#$0V`6Bp)3vDIXOsCDRjAm6nf4 zV*5%yoLfeZsrnIn?mpaN?p|z*xqEO+y1Nmp?k?QI?oMpGxjST8C3ibw+ubJ1c5xrZ zww}9XmhI|3l(yh*mSwxUo3L%@Zj@zvx(`V|2-=UHKRCR%d{DTGd|-GVdH--#xlVXr zxmLKETqDc&(_^arXq)bR`6KRq*j9D#&HqXFp0s86?))pbcV*kwy-WHp=3iaUr|!ey z0rF09{y=%ha6Ng4@Ii9faDBOS_+YtYxPh!|etQ+=AISfPdOj6v_C{{{L1QHU|FPgh~P4`eY{iB7OHGfMt{iKzf*L)9i(_dP<*%N5vrr)%6^8;w-rvJ2e z(|{`cG#!{ilnY{?pY>|LNwY|8#fLe|os-KRw;_pI&bIPj5HF1{Z9PXz79O0(_9O!%hDg z>Zbn;bJKr@yXik?y6Hb>x#>S=yXimYxamJ5-1MJw-F>s{JU9Jlq`P;Po$sbUjdJ(Q zvJ2ewtI_W6S$3hD{&kVNYnEN?rk{;*vj)7xO@ABf?wDnly6Jc0-0j&TaMS;a+-=z- zaMKSbxLar0L^u6$lDk!wO?J~Sr?^{W*;F_E^Ky6dESu)0pH6o-&9WJ8`s)?$##uJg zO~1X;-7w2$x#_=Gxf`%Y;HDqXcGu6cId1y%HST&@HrGwRp65Ow%dU0PzvsK_X4wKa z{d}Rjc9t!2)8DUi*JO{tO~1dvU7bAwH~oLHyBd20ZsviT-Bq*f7B}<3t?nvWcAJ}d z;db|4S$2n;`Qc9Y9$9vmn|b1H_ikCX#LaxM)Ll8t?r}43EOYP79)X+r<34xAEW6*$ zJhI$fAYm(Q{X-OMX1-Q}|EAvg2ODtDPITkU3^S>rC1WozBcH|yLSuXi)= zY;ZH@98&Pc!sF#Urz*BCo3;Hp{pTkAxhumD%QM4|$XA3Pm1l$>lc$Ftm#2lFkkh{e zhfm5=7iV=DS$su%SoR>Y)f+N>G1Z$geKFNrGJP@C+cJGI)jKkMG1a>= zeKFO0GJP@C`!ap;13jjqFK%(u2S0Sv_dasd=RS7R*FJI6$3At_w?1>zr&4{c=jlsd z_<8zJsxS3CeJ9mdGJPi1*D`%2)i*MIB-OVveIwO(GJPV|_cFPVYO72Rr20YTzEAa| z%zd8fCz<;?)z32bajIWr?%Pzq%G{^@^q7kK?{_!%-yd%7zdzmFe}B2T|NeG!|NZ0U z{@do}{`=Rh`_KLzb^qnu+gN92&CUI{yPNxO4>$MU zo^I~Hz1)4XY;QOBV-5-IaX-H}`W*cW3qm z+@07LaCc;1z|H+$*WI4|05|vl0dD5f1Ks37JvVddL2mM)zMHx9U^jWuz|CBGh@1Rq z=w>c$NZ z$*Z>Razw71{A%xJF6rPV&pNtGu`l4}xU-vV>Eb5uy1I#*-wOHDt?>LyJf|wQUw;Vy z9}Cr8f388ehkS6jr(8eWOFk&vTdo)GBOe&6`@M-d&;nU?m!b9ZW!)M69g@?+&hKI?&gon#N zhtHIM3ZEta7(QG6A$*R!mHh%erlK#UI#rjg@UKyi}$yr5Y#Gmr`9OlgmYVOhpc- z8n5Tc-Bc4~ayHdOnOsdZNhU{AO_s^cR8wSfGSyW1kR+B2a)a!W`GD{&dH?WLa^3LNa-HyOxpsJtTq}HyTr)gZt`VLmS7*OKkE!-c2ZiVH zg2MUvdaM@b7s~sF7s*w_*U9^Yua~QYZ;W(E8O&h2i)|72i^38m2UdMLvH%PDmVRLwVT&3*0|{pYu&tlvCd7uSnuZb ziw$o2$3{1=Uu<&IPabyj`o$w|`pctkUcY$EO}}~E&FdFWxamJny0cvNl>6T-d)iGu zdd5vZde%)pdd^KhdfrVxdcjRU+U%wuz38SNz2v4Jz3iqRz2c@Hz3QePz2>GLz3!$T zz2T-Gz3DzR%ieO+kKT6EkKS?9kKT3DkKS|BkKT9Fk3MkIkG8n!M<2TBM<2QAM<2WC zN1wRqN1wXsN1wUrN1watM_;(xW!abRw!BZkO@I2@-I}+dy6IQnx?Ax+0XO~Ydv^;) 
z0yq8a2X}Ku3OD`jCwEgu5;y(s7k6Vu8aMs#H+MruA~*f;4>$erPdEMWFE{=0Z#Vt$ zA2=G8 zf92iuza8E5zY1>p-%f7&Uqv_luacYox3io6w~L$pSJ_Sf+tp3~+s#e?+ucq7+rv%& z+tW?|yR-27*sJjT>cTH$^Y&4_D9?7=q2s;v=h}v=$Zf*=$b2>+RaN=0@V;`Za5cGQ zct5#CxVn63xQ5(3TvKiqt|d1O*Or@v>&T76b>&9k{pE&y)0sH`NeyIMCmkZ&I;o*dUrNq{z2OHh=gH7)0S@y7-{C~tv{y*v_{~vRc|Bt)L z|0mq!|C4U=|0y^5|FoO@f5uJzKkFv{pL3J{&%4S07u@9kW;gl&qMQ6bui%#ok9QWb zzu26(5mSzasA&epRj-eofvd{JLBv{D!=D_)U4Q@LTep;kV^I!tcnthu@WV z3%@7t8h&4{9R5JwCA>x6neQObV=9}|KhpCyr++Nloc@VybNZ*U&FP=XHm83s+noM| zY;*dTGP#`UE16tQ^|eecr}{=Fms5Q!lgp{TlgZ^&-^=83s;x4)oazUeTu$|)OfIMT zNhX(5{VbEqseX~kzhrVb)!#C?oa!H$ zTu!x3CYMwFE0fFrRcVgMmh# zWO8{2J*Mi%eu29Wmz=v7`vvYET%zu7>=(GZuwUTr#3k?Uz%Ai!m;Ob#b@Y6j@UHS< z;oanx;oap!!+Xfh!h6b1!h6Y${&P|Oi2SLd=TnhC`?$%Us&4XUUpM(v%}xI7=O%xu zyUCv#Zt|z5oBXNeCVy(X$)7rI@~5tw{Mp}4{v6;Ye-3n$KlR+qr3bmmqxx><(u3XP zQv)}1=^<|Ns-c^?w2_@+n!1@wo4Lui=5FTFL*3+E3paCVOE>w~%1!*gl^y1U80 z9&YllrpD%Ee@1xy~`Fw$!yuZlZkdd(9iwlnz6|(>H7}a>u zBQ*p*f1p42UHB6D+wfTVoA9Ob*Wq#USK-U#FT+Lh7vb^p=iv$RXW@zRr{PKRC*jHR z$KfgRN8zdRhvCcRExaXJkEzIoRMYjm%|$b0av{|fvdu*^WpW|am9ou6vt)81)m1XN zkm_ogTu3!rwz+7IOfIClMz*Xo-amvld?dJYl;9De0yp>H zdN=pq1~>QLMmP80CifQh3*6j)kGQ%29(8m7J?7^Ad)&?a_k^4K?@2fJ-&1bxzo*^Y zf6utN|DJVo|2^mC{(Iid{r7^K`){+G`|m|J_uory?!TAa^rcta+>fui=}WJrbS8neAuibTMd2a6iZ{6Jg-?_Q}zjt&0 zZ*^B^zp&sR3Xk^|?)_r>Mg4Lq5waik=lX_!lKX^zmV1YPk$Z)Im3xMNlY4}Jm%E4m zkh_Kdl)HxilDmZemOF?4kvoOA$sNQ0${oV_@4IZzcNUi6`Bb)FR6@_&eo;x;_KQl% zwqI0QX1$fFjBNWwWo6qhDkrmkO0|RhZxZ_k^0x4f@;~7U^55Z|d54Bs=BhxMf=O- za;gJln~M&V`=qms6mqZdL2{39eYsosV7W`Uf!ry4h}z zO=X*dn#nc?HJ5D;I#jkfsD*5EP)oTk{}if~TqlW`xuZu0hGH+eh8P2OJOCU3{O$=gfaF|O&(8l_vWL(Zt{7uyC)wdc9Yjr-QD?Ift&oE=I+Wzncd|140mTf3hgG}XSzG` zQEE4NKg->o?;~)N|5v-o|JiQxe~z2{zs61e&vldk^W5bBwQll%zMK4C;I7O22;AiV zA~*Seotyl>-cA1B;3ofXbXRA;xZuTw$61B!-<-=I6y^W()a}smP5N`Egm0El4&Nf5 z6uwnHF?^fcKYY7x{2R|;`9Q=fAbMTWgx%`wK zQ`sE+w4S#)_!-&e;Adrds8?w#8Z^||Yza`ro{I+a!@H?{2!SBj82fruV9Q?lAfq%AFQT~DZ zeT$w?#r^)FoBRDEH~0I;ZtnL_+}!VwK1*KY3j zZ`|DP-@3WqzjJfHfA8jg-|A)#{K3ur|D&5Z@FzEU@Uxpa@E14v@T;3S@HaPk@w=Nj z@DDfn@u!XBdfwkP^Q*AeMU>!GkRoBfNxWBs_OJjFemde_PY*Yf2Zx)> z1H*^PF^3hkkYnyDYAMH@Rn$t3xvJXeN>5HjO zm+6bChRF2ARAP(rwnCdKoo$z$|t?&%_jqnxnYvGylE8#2Um%_8;v^eJQDtTs{zgnIVo-I!c&ylBwuaPH* z=gJeq^W^d2YvmE)`SRJ}1@f8Uh4QfQBKeH)b@J)q>*c}W8{~oE8)fFAREuTipj0=> z%sr`YmYH)>-6AvBq`FmRj!AW!%-oXdc9}V)P(}Gi`tO~3KGivVAAy^Ge7E~7zK_67 ze_rYy&i4_x>DSBLL-{@e_ZjR9xQFn41aA8Ia`$O`AAx%?-$&pc#P<=n>Gvz$1Nc4y zH~oK=n|0Z0H}k+6H|w&sZsvn^Zq{Y%-OLLc+^owsx|ts~xmlMz>}H;L#Lc?wQ8)9& zV{X=EkGq*So^Z1+d(zGP@szu7E_>R|Jo1dYcP@L@&3y8lyJs$Y-p#!7f}0_5v%6a^ zd(quBm%Zd>o_X2bIhVcS?v%@3b$85Vueq6bUU#?8WpB8df8KPr&1G-7nTOtXx6Wnn zxS5aMb+^i8@41h?s>uR@%+HS;oC{E-UM1{w(Ka{@lUM{8`@3{JEo> z`LlwX`Ew^X^Jhgj^JgV@S^kbgH}mH%ZsyO*?ozpIS9eMNjzc%|=k9Li&pq7CpL@EQ zKlgGof9~yO{;c9={@lmS{8`n_{JF22`Lmjv`Ex%v^JjH8^Jfh=^Jh&r^JgtL^Ji^0 z^Jg75^JiT*^XL9<=FbD%-?7AUGmqADf5Vc@&3szl{S`|%H}h%(_ZKYr+@JH+2ky^U zBD$Gp8@oSYN$F<3ZR-ArC8(Qux4C-@OIA1YZwvSPEOFh;!>!!!vLtphAGdbD%@W$p zyxi9PCQEKN^K*Om>nzdT%+np+ud<|fGhcUhzsweZn|Zsd`$e`4+|1wI-B+^3;AS50 z>Ar$32{-e3Z})VzFx<@RechL{<>8*n*1UTPBdVKu{z&&EMp`%X{n73TjKFT@{bSul zjLdH4|Kr`~Fk-t|5A=7R#YpaEeQ=U{I3v89^};Fcp^W_QGx&Ugdk9MeH|vQ(?$cOO zxLIGE<{rcn#LaqRhFnNhq+mg40p3Hd#0Q9$ysjpWzTlAUOC6jzU&A$ z>z8xg?8~0#W<4{~&A#mUZq_%W-0aI<;AXuu+ReV~9fkgTVd42zv|ZKbwBrAYTu611 z{v5fG>SCE(NHs<#7gAj!lMAWF%H%?-OJ#B))i{}4NOhS^E~F}w$%RzoWpW|a1esh& zHBlxPQcaS{g;bMeav{|enOvBv$5fknAA$RMmfr4Xc^`rMX_o5lCwU)%`*D``?nika zf%{>$0^A$f4{)#NeFW~cypO=Wn)ea7AL4xk?gx1vfqMn-BXHl(`v~0krXK|FC(!fv zgcrz5!VBfQ!i(fP!q>^Sg|C-y3Ev>!#QO;JnCiwXTkO7`KjL1*_Yt@k@PE>ME#F7r 
zp3A?2dk)`6;J!Nj7xS;K=PwK2DPJ1COTHw0w|sGUiF{#rseD2B9{K$6GWk5dk3f&9 z$mLY`>3MQF)%`NLoNBpDE~i={lgp_dkjdp#56a|ns+IE4BtB;#lgp`A$>egX)iSx9 zYK=@Tr&=qM%c<7M z{ygC(f1Y%cKTo;IpQqjA&ogfF=UF%T^PHRfdEQO_yx=B(HoM867u_!qS#I*^W%qML zo|}An)%^^S=_apUcRxksy2-CM-A@qNZu0DH_hUr9n|yoM{Rl0?P2Rol-o#d{oBZ42 z-oVzan>_r;y^gJ3H~IL9dktH|Zu0UoH%;(!H~IO6dnH@XZu0ai_XBKIyUEvY+{@Y8 zc9XZ?x$k2u+)e&&buVKp-Ax|<=w8ZJyqkRf*?l)#`EK(1SNEN~6yPSme|O)`O9^iB z{7?6-ycFRk-~V>s%u5+=@_w6pF)xL<$$$Sl8L}IA>kTYZEpJ0ZtgL>H0P#Y?cu(Nm-5{7uf5!(*%xrr&#Jgbu`l4Jzg2aQWM9Bd zzpLgxmwf>@{ja*4{#V0I|EuYy|J8ES|7yGGe|6l$*cWgQWnaKe|2x1<|2xo4|EuSw z{~hF}|J8TX{|-hDlvFK{p7^9Ak&e7?YaEuSxN&*k$4?m2wEz(hTp7OMCFL`RXw>&xAN1hn&E05>%1$s<%8J{n3U&`kT+?VkA0{6vy zzQBDUpD%D9x=kxgj_j!E2z&#@V{f^i3yk3^-1o`Z6f0@_IQk^KD89qto^|Dka z%frH_$h=;b>QwoR@Bo?D%Tf)LPY(~0dA%&vV0m!(G?~}SQk^aj3=fe9bogw!WcVDJ{o7O{^O_Ob1G+nlJ z(F~cskm?HA)XsW$LEg>L%6Z1=icHpfkWxW>IEm(6w4 zFXp*d<+5wt^pE-OmAPzzn|`v;{Xi~TP-xNqkD z0&e=E%4b?(``U%*Yj-Qd28_Y1h`znk1w@_qp~{rD006}(@-O@DsOJ)QRp zxarqVxG(4Z0&e>EQ|>9eU%*X2f5ttD_Y1fu@_qsL1l}*;rr*EdF5>+HZuk;AXyf*FBQ= z3%Hp#-glqN`vu(0A6wkaA0N7zKR$9Ze|+p_{`kbr{PC%q`QtM;^T+3I=8rGj%pYI6 znLoaAGk<*T9?bg%+{_=}x|u(|b2EQ@?`Hnk>aNK91>DRdKe{XMegQZ0$FW@f0`vu(0JKNkD?-y`u{wbfDACJsG zIXCl92{-dkNjLLPDL3;^X*csv88`D!SvT`fIXCmq4sPb3@^0pz9o@`772M1}JGq&E zD!Q3}D!G||c6KxW?BZtrsqAL{+11VbvzvQ3`vUG^>r{d?|!OhRVqnn?9CpSO;&hF{FkHF2( zzpI;{e>XQj|L$&n{yp6M{Cm3j`S)`3^Y88E=ikT8&%dv`h}TQp{QM7hvtM|Go8QNg zZuSe0a`XE++Rc99F>Zce$GX`sJkHJU?|3)+g(tZAefD>=UwERM-|tCo_6tvT^ZP!< z&3<9Kf=?|x-kE;)+aHS#RcU5pBTk_oNAz+XC6*9NM`;`HCSfeO?8^ge4FZY znRzzV5SjV)3_Ye|UK{FWJ{#s{9vkjv{yNjmymgkF`RZ&p^VB(R=BE*E=A~5U>Urj+ zROiXeOQ}Z6%uA`xmzkGRjgpy{Qe7Z3FQpnSGcTpOP-b3Ab&<@xlDcgR{ESY&F z)m5_X*IX?#ucVqS+kVX)nRzAEHL~s3%$1o}Qq7Z@SFY7#D(j2$^}O}P1+w+Ug|hX< zMY8q9>*VSDQ>g1@>x(zY))#M-tuHQ?tuNjrTVK3cw!V0aY<=-o+4|yb@_7C!)a`N+ zk)zy6MgH8W=Tos>zspS?-R)+*zQj#FEp@YAzsF5pEpxM8zt>HE-REY#e!rVMTkdAP zzQRquJ>X`&{-B$@Tj^%K{*aseTjgfGy4p=1u5q(oUF#+v*ST4*u6L7{8{Di{H@eBs zO>Wk!54*|JN8GGeA9a(jkGWZ|KJF%OpK!BYebP<-KIJBVpLUbK&$!9oXWiuQb8hnY zc{lm{f}8x^>?VI-bd$d?xyj#`-Q@2pZu0k4H~IUToBVy&CV$^_vtE76O&-7P zX1)53n|yxP&3g4cH+lWOoAv4kZt{DJoAv64Zu0yicPaJ@+~oTwZq}=xy2<;`+^knW zca#5LxXJ%7-Q@pQZu0+YH~IgKoBaRQP5yu9CjY;8lmA=Y?7zg)%tY6euAD~8?GX+4(}sB6s{^i7~WT25w0fRAKp*CH(Xs_7Oo-R6Rs&Q4cC&F zglo%phwI39h3m?;IN4vmBhDWn-yS|tzAao&zBPQ1d`q~#d~^6<`KE9Kc`=_m&||6_ z(?Q{R+^BHAp&oCD^Nr=}!%gJt!cFBx;b!u}aC3P<_)vL%xP^RexTQQV+)ADsK1{wQ z+*+O!ZX?eQx0SCBx0A2pmiV9hv4fsZ#r@dP&HdQP&HdQf&HdQL&HdQb&HdQT&HdQj z&HdQJ&HdQZ&HdQR&HdQh&HdQN&HdQd&HdQV&HZ?|oBQzy_XhS0+}xi>x!18@;O2fk z#=VC90yp>Xaqd;@7r42fPjIhfzrfA?eWLpT_6ywH? zf2^DP|57*i|2Q}I|7C9O|MmqJ6&?o`?)~~uKDj`)KDkh~KDkJ? 
zK6#yNee!zQ`s59=^~oFM#r!`#rsBRxm46=JTsVJ|9=T6a-6C_Jq`Fn+K1p?(%zcvT zcA5Jm)g3bTNvb<#?vqq^$=oNY?v}YvQZ13WPf{(FxldBvBXggmS|)R!q`Ft;K1p?- z%zbjx|J)DD^?WMshZSz_hX>r;4-dMzA6B}#A0BdZKdf?dKdg3hKdfk}W#)+auZtxtR^Tc7w$wm$K>Y<=Pj+4{tnvh|6tWa|@O%ho5p zk*!aBD_fuVPPRVry=;AAtIU1zgC0|DW*@=*Jo^alXW2(^Kg~XZ`$_f@+>f)5;C_^S z1oy-2Be*xFe-ZW*^!)npU-H`U-}374Kk`H2ZSsTRf8`b7tOCdPGq0Dn=lK1X(DSMI z{g-s}`!D6@_g~u0@4t+j-+x&*zyES>e*ZhT`Tdu7^ZVb?&F{a0o8SLVZhrq2-TeM5 zx%vI??B@5si<{qnW%mob@4(IVu$%ii-gn@BmiHaFpW%H6ZmyTT+)uGj;O6?N;(mhn z9k{uks=6QJeFtu?uWIf`_&kD}>#e$b6Q4(LbN$tHZ{YI?Zm!4L?sa?~!Oitq*S&`K z7r41z4{)#I^9XLP-+Jzqd>+Bg^<3Zm0G~&2bA2~(FK1uC&Gp{UeINU11ve@@9$&cj zr~S`;k#32``g4|xO=Qc(rn2Q?Gud*nxoo+3sBF2|LbhCNDO)bKk}VexlPwop%a)66 zWXr|2vgKks*>bVH%zcrngKW9fQMO#_BwH?ZmMxdM$d*f8Wy_^*vgJ~Dncq{Y9adZ8h;pX}q>gM_z=H~hv?k1PcbaOqPb(2fyxw(Evy2+*U-CWP3+~m>)Zm#dqZgS~DH`n_`ZgS~jH`o6dH`o6q zZm$2a?xpM_xViqvxw-x?b94O{xw-zwySe@+xVio(y1D)*xw-x)ySe_SxViqPy1D)@ zcQ0lit>9^e$Kwju{@!KMO~51fMXKrgbC%09WXt6%WXt85vgPuXvgPtD*>d?R*>d@6 znfoHuY}s;oj%>MnjcmC*SGHW9CtEIGD_btlmo1kU$jl?D7Rt;YsTRr18>z07nJ-dZ zFF&5d7J$tBkm^R6c_Gze*>dd=1*>d<6c`5%C>Q>ou_%_*c_;%TH_zu}}_)gh! z_%7LU_-@&9c!_K|yi~RvzDKqkUM5=(-zzUB7yjpZyI;?z;(A-|=6YM<=6ZX;&Gq)6 zo9k_*o9pc%H`m)LH`m*0H`m)5H`m)*H`m)bH`m*GH`m(+H`m)nH`m)HH}mnsZsy}h z++2^3x}Reo!OivgxSRR-2{+g4lWykYr`%k>PrILBAHmJ_{H&Y#_&GP%_w#P%;}_gq z@0;D5xaHhj|1Y_@{$F-;{lDT~$}R8a`hU&M_5Zq?>;DZm*Z-StuK%~(T>o#ox&GgA zbN#>T=K6on&GrAjo9q7r_hSBD(1N!V9uF;C`^Ekq3d`XS_2(>yKawqnKb9?rKankm zKb0+qKa(woKbI|szmP45zmzS9zmhG7zm_eBzmYA6zm+YAzmqM8zn3kCx5}2oKggED zKgyQFKgpKEKg(RVseX|yhkup1E-RHye}YGT-oNYlRQ$aEaP#y2)6LKOFE>B$zuo-2 z|8ev4-sa}#{jXa;@14?rIgk9jb8dd#CEWbHOS<`amvZy-F74*$UB=DNyR4fWD(5DL zc5pw>K7#u>_7U9tek!=hp`G0PzAC!Op-OIke>=O$ph>BMf=F{KH;LO za=cHtXkR(rCtOrb=6!R8D%ww;6|OE{8LlDE4A+#e2-lKlglo&w!*%3o;kxqW;r->Q z;REC;;REH#;d=6<@Ims#aD90~_+WW_xPe@h%MQ^aNBRHceR8Q9>Un#gawD1d$)##6 z+xwK8$h=Q3Ra4pCr`$~DeR8Rq%l1CyLuKA4m#T$4B-~OyJ={v>eR8P|lX;(9s@Af- zPq~fE`{Yu!m3g0BJ3Xf2eR8SV>v`T6m#Txz``}V_lzHDAfpX!*zRG4c=LW96;9A3=|)zE20dFG0_L7d}D$Hr!wSCVZm&b@(LttMJM4 zm*G?7FT$tFpN9v?pM?j?pN0p?pM(d?ABRtqKMJ2Ne;6JjZ~4zf`6KdgsGd*7`;>>d z$;07p-lu$~n|wUW&HI$kc9WOqxOt!Q2sin8uABEMpXVk|N4j~R^7(G^b(EX;DPQ0w zZ%4a%pYnxn^7kS)`FpXO{2k*ae=l*9zhm9x@1<_?cbuF2z06Ji7P-ma@ow^Wf}8xE z=q7(Bxyj$jZt{1EoBW;XCVww?lfTp412=iR&|QJ=HE@&9*SX8{y#{Xb z`UZD7zSqD_elK>H;d>3-B-Q@oxZu0+8H~IgVoBV&= zP5wXO-onUJ@RNnd`GxG?ynQq%$}{HT>?!@Zn2WQg<(P}JXXKcRvuEX)i?iqCn2WRL z<(P}J7i4mJvmR6N`H>ghd|u=wH=hrA+0ExcUUBpJk5}D%-s3elpYM3x&F4AZaFdIv z-qiCpm%b&Fi>cn0Z7zLBCKpq^E8AT9o=h&LdSAA=^aGh(OtnR}x%5MsTuk+mY;)9S@Q?Ct;h*GP!#~TF!@tP8gnyNH=KBuxn2NrT>UTX)A4v6wOzx-p zQzqwA{Uwv@ss5J9@l^lF+UP89HyrgV%c`4cE^3t-+ z%gf3(mzR@mF5f}6xxBn=bNP<4&E*wjo6C2SZ7#1Ulgk(Ce|IYG|DE-GDn389 zi<>;C?B?@hySmAT-Q0YBYP@~4%X{5i}`{!xc$!;4bbDV|ITNFu6!f=1$s<%eViYu=NE;~mluRb z$=8N2kmrU+%X7jP%2$Ukl4pf4mahztk!OZ4k*^4km1l%6m8XZt$dHSIG3K zR5NAzRH`dw`c$e}GJPu5RWf}l)zvb6D%EV6K9y>YOrJ`1jZB|PHCLujrJ5(xr&3)j z)2HU^G1Vb)eu18E5MC%B99|^X4__xA6uw@r7rsG0FnptYKzOmdfA}W3Zun-oPWTqN zcKBAgR`@o#X83lwM)(f7I^S!c$5fT#{9Sr}r|{kKj^QQp4&kM8+3-Db>F_eSWcXe= z7rsxnIr)Cs=H%tF&B-fdo0A`qZBBkrwmEsFY;*EMvdzh>WSf&$%Qh#kk!?<1E8CpB zPPRFDy=-&x2HEE1jWT_rP(}I2d-+}iJ)esFf5d$c-)rC|{~vQN;d>3-z)k)?O}}`<&HnY9Zu-YtZuYO=cGFMZakGE@uABbyo}2ya_ucfH58Uiu zZ*kA!dkx(5pO4%#`CbDz{pS<+48GUEP5=4KJ&k<Mn@y6HbZy3gi&5Z&~jpWSEjJ&11l&#&%b>SmtkMP zU7CFXcPaJ-+$Gr;aF<|Tz@1}Xz@6o?>Tdd94LAL-rknm(%T52Q?WX_Lant|my6J!W zyXk)ixaoffy6J!Q-1NVL-1NWtZu;NBZu(yXH~sGrH~nu@;eKdXcz(X!4mvzn=gGxX zjr8Zp#Z--DaxqmCnOscOR3;ZwHIvE3RLx~_G1Z|mxtOYjOfIHsDU*w-TFK;Ms>5V* zF;#1sTujwQCKpq+mB)wM$>d^tJ*FZTQ+3euMWCssk+GIVydn( 
zxtOY(OfIJCE|ZI?ddTErs-7~rn5vgdE~e@&lZ&bP$mC+GzVg6uKbc%ST#u>R#rY%j zeB1Dma+~l`a_jKX@?qg)&U_cd2%q-Aer1tHCQI+Qk^D~YpG6`$+1*JWO6Ij88SJQYN%{; z`7qh$^5L@0E!$jvp-e6nn@h=`i}ide@@I^j{JF$U{)}~#KbN}6pK)&T=Q20>Q{*Op#=FU% z32yRdqMQ7gn=V~|kGuuu6%yE-H*SN`_xo+}jo}2u+)=mD*cauL0+~m(fH~F*3P5xZxCV#GX zlRr1O$)6kDRmC42A=VWp* z`FWXKOnyNo7n3*3V!f(qLhu@Je48JR15PnZSKm5LY9{UA){D0nGyG75F!>K-$$>CHV$>eaV zk7aT=)h9AJoa$4V98UF_Ob(~|TqcK8eIb*>slJrS;Z$GARXu{ zPW7Ek4u7x5RQ=-oRz2T0{Da&l{G;4E{FB@({IlFM{EOTp{HxqO{F~e@{JY#W{D<5n z{HNSG{FmG*{I}dO{EyrryiIP;et{lSk;DJh&K!}usd6$oo2rCNuBIv}lcT9h$>e6L z(lR-js*Fr7rYbAj99T}aIdBKr=D_l@&4D}0HV0OaZ4TT?wmGn(Y;#~G+2+8VWt#(c zk!=pFEZZEot88=NZnDjRyUR8Q?jdjGpY2tYe<1Jn((|dvyS?4yT@^QZw~w2=tLi52 z_H~nY)!gLWes1!vx|_VK;U@2Dy2-m*Zt||So4l*z9?$y-+~nQ=I! z-_CCGw~Koq?;~)NzunyAZ+AEO+rv%%_H>iKz1-w)Z#Vhd$4&nBb(6pS+~n`!Zu0jC zH~D*{oBTb>P5vJ3CV!7{ci`_$bd$fwxyj$--4%1$32ySZzq>*%JJC%(pX4r|%T9Ka z*QdD4<+4-VX{Jx>m$ zDw4^eRO4lGDAfd+97;7&CWlf@lF6Y|lVx%!)fAZ=N;Op`hf-ZGlS8Sd$>dO~=`uN# zYKBY>rMf~Uhf>Xy$)PLtnCjFxKTFS_623}4IefKzQh2s}Vt9_+KYWdRLU^uxe0ZLG zT=-h~*zkP$nD7Gm=2@J7k*!@04v0yi2w@@NU`Wz$LQHflFnZ1MiV- z4qPVN9C)v6bKrfl&4KsJHU}=3Z4O)^+Z_0SY;)j)@>c%YUPbu_=Aei4d@AOkRc`WN zwVOF;jhlQ}>t+sG=O!=KyP1PFxXF)=Zswp(Zt~<|H*?S?xZt~}8H~I67oBVm!P5wORCV!rHlRq!G$)C+`^5;c2`SX&S{CU|;{=DKQ ze_nNyKd-sTpV!^w&l_&?=S?^H^Ol?ZdD~6?yyGT+-gT2d@43mJ_ub^r2X69bi<|uU z&`th)?_Zt~}IH*?SzZu00$H*?TeZu04CH*?T8Zu06|H*?T; zZu0AUH*?TdH+lAhn>px5H~IFHn>pxbH+lDqn>pxLH~IIQoBaFTP5%AiCjb6)lYf7? z$-lqd_YbrL_f|f4Rrv2KQ+R$; z;lFP4_SR68r}c%h`g7J7%E{Ijc95+vl$Wh9>?m7bs32Qk*h#j&P*JwNP)WAFu(ND^ zVHer@LS@NJc}}>AJUiS}zB=4Y zzKWNE^_c21UMhBvODA|qS&hmNTF7mnIu5!C@H@R)NyWA$+Lv9`JDIXT@CASLqmRpAV$SuNs zxwoftT#u*YlE|`#N6o zb1&j0LH9ylGITHCB}MmqUUGC_%Ui|V^N48oTq4|k4H562Lkn=vrbW1~riHk#qQ$r` zqXoIg(W2a!(!$(hX>smLXo2oAv`F{Gv{3g&d~c!qLOw#{9?eH=+!tioP3}>AM8|zT zAK`J2RE=Yh>^PuA3<`r<|9h(!}ti3yA>aCa<}9oQ0^9d zM9O_A-$&qX&WPe}#t7qX%827`!U*JU%!uS}#0ceX$cW`8{~vOb|Et{O|7th+zs61e zuXU6E>)ho3dN=vM!A<^ebd&#^+~ohmZu0*TH~IgloBV&wP5wXbCiI6C{6yjL{{Yd@ BL5ctX diff --git a/resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd b/resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd deleted file mode 100644 index a4e3e444d0d0d25ba351a6868a75f94e5c72dc1c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 97456 zcmZtP3Ak0``^NFLkA%!KnL`pXha`yxN#=Q~G>?QN$$BEDI_tgPXT59heJW*HwxxdUpkMoy=(niSe0SeCSnz?p z!}(st@Mb>0N4T85TX=JOmvDJ|r|=f`4q^YfvhBiK+1rFG*cHNC+gpaWvCD_IwabOK zvrEF;+gaFuC)vhAwxhSHHspT`xpwmJ>%u$Re}#9k*M@hs{|N79uL|#OuL$p9FAwi& z{~E4nFAMKw{}kTaUK-xV{yw~~y(GM!y||F=?`^8D^b4X?OEX?>>1%|_O$Sk_LM?)l((tgit|VN z*c;(v?AOA_+OLF%y1X9mAK|?ZcPbZNpdCt;1K^EyGvY&BIsQ&BE8%O~Os=tHVw0E5ps~ z%fr{&mxi0$7l&Ke7lvEf=Z9O_=Z0I`jl*s1M&Y)0!*DyhLAbqLFWkXCBizxh6YgZ! 
z4qs>23SV#640pC`gm17<4Bu#158q@T7w%#o6TaC#D%{ns7QV$kJlxH$8ot%867FtS z4&P=U6z*YH3g2$;A1<}`4c}q!9qwsY4EM742=}&k3-_^i3HP;k3g2n(5bkGh7w&Iw z6CPk!2oJQk4Bus!58rK<3lFkO!uQx&c(A$Azip~}z1dKNHbcC-E_|PN{|XQF?%MGE z-u)vy%)6_?4|sP)c(`|$hadFrui+8iT^4@GyFZ0TdUt8~Veft)9_8I7;YYl?I6T_B zUxy#{?w8>)-u*oMn0G%7kM-`y;m5uEVR)Q(-w!|G-FL&|y}Kwp!Mh8>6TLexJjuIr z!cTg4R`@CJ&InKT?zHgJ-klPD#=CEYpY`q=;pe>jTKIYIz7l@HyDx=b^lpA|)3N+i z{x8|&m%YjVqnn)ntMWhOCco-U{y(+J`F}e9OW5Ssy~+R5HaY*J=6}hX{H8bgH)WIa z|F!&^xXEvOGkVj$f1l#t@{7bfSvJ+bVq z)4qStzcoX`v%NbwoPR3^h39(ru5kXX9}u4J-G1TxQt2CB=-uAo{L<k;SQ^KSQWe(81#f8gD&;rvqX68^}$H-67;cvX#B)r(WSBJm#?v>#s-n~5hop&z{fA8Ik!#{ZU!the> zo*(|vyXS^~@^0hs&)#hmUgq6~;a|MlApEO$>xF;w?iu0b-mMe<-Mh8JE4*7PywbZh z!>hboBfQ$XCx-v)m6*e|q<*@L%4o7XI72hlkgBw`zF3cdLXqc(-!+ zAMYL%-ss&*;eWlme>k5C_YD^c+TS~zPn{LRoB8}6;e4vyExftU?-I_Z=AFV@`1}rK zrf|*waegbG-zJ<-{T0Gn`}~&Syj3V4-qz>Kh4a>;B)q-PtBRXV(0a#?JfQUsv@H30 z$Id*j^$xT+`Fh8$Jg@Z*v_$!O$L>6@^$xU9`Fh8mJg@Z*v|Ram$6h?I^$xUX`Fh7b zJg@Z*v~>D^EXnrc`Hi~&r#BR`1L$=Hwv73D$ALV*w!juMU+*}W=T{ZjlIH6jhw%LJ z0$bR8z2i`xUshnto3D2q#`8-HY?1Tzjw5(}Nr5eOzTR;p&ws7^fBMTpb~OEYfh~K! z-f=9?e_UXTpRacu&-3pWcuSD4cbvfUi*)}_FDPUu(enzt<;d4NPUiVp1>U0M>m8@? z{ImjZY4Y`sQ+fU^-T%{X6tdIk*9yF4%GWzi=lPckyv54bJL>ZMa|Pa#En>djaW>D7De#suU+*}F=SS)OpB`Dr&Z9>Zc*~lv zcU-{p!wS5`&DT3F;`t#3-V*2Q9hdO@paO59^YxC)cz!^Ex7_)9#}z!^x4>KUe7)l; zp6^-UEq%V;aShM+C~yfNU+-wj^W6$uGRW6EuI2eI1uik<>m4n4zH@;~68U;ZE1vII z;1Wi@-qD8V+ZMRwk*{~O>5>me2aSP8kEO5ywU+=h;=j#=?M3t|1 z+{W{D3YoseQ5x*G)3pj-0*k6y$nKzP6u4xTuXps~`RWBOvE}O>eR%$u0+;0S^^QAv zzFL7xc=>uqf1a;e;F4dy-Z7BpD;Ky#n6G!-&GVHCTvE(D+12Qsd$psO~_O9k}R7{FV3>3>96xCDX#x7v+P-(|GdnSeSWj{JkNhz zW{E#v?|6~t-!Jn>AivpsndcXkd4!O!cf88;^U6GO$k#hw=lNM>9#Q1$9dGjdG%ab+ zQ?l%B`mMYri2wgLbl=bOua$XZlCO76=lPe)JYvb$J7)6yb7daMzR< z$6TJDSmu#WzTPpP=f{mBd#{HVMoi~s+TS@s^!k0|rVDqrvT zfaiymdBl~kcYMV2L&`i7%hx+T;rT&%OBw(DyRz&vo*z)=kz2mr@deNKE%S&jU+?&e z=X+{Nk1ox!Z|EL*OC10G?pgLN&vz^H$S_~;_>SkhlzGIMuXp^w^PS5)lFZjTe&qR% zWgcPX>m5JyeA_aQJoELAUwFP{nMb7gddF`(->l3d)qK6nx%p=%*y<;WMUtZ>s zZNA>In&&Sr^N2TJ?^wh0=a+dToUeEM$@7iNJVMUbJO1YRhGiZ(=j$EodA^>OsA;Wt z{6p8tA8E(;w|18O%k#B#sw7T(y`wbDlq@%p>`Hy<-cW zua@7^#y_w1j;(mUYMDp=`Fh9JJYPA#WsZMd>mA$je5EokDbW0%W!v-ozWFVA{PTNf z*^WG4vCK;r^7W3Ld49M2mOuV^t#|Co^E;J!NkqQhu{+OimoF*AKd<$UJ$b%@9=Xt3 z@7RkjU*;tm`Fh7bJgHhwAHx3_Q zpB=7bHwqtUpA|mHZWun;J~Ld|ZV*1it{<*q*9#wN*9}*-&j=r8pB_Hkt`k1OJ}q3$ zu3cyQBfU-k|NF&X%27VPPxxqiukbPUp5bHd-NVP(yM~XqcMey#cMP9kZy!F^EGYtl&Ma!w+h#?@l5Shdy6<<+g8t+>NI=va2*@Z+)lSQi}PpL z>Pb`8wF}{TwtCe1`L_5~`9l4s{RTe1sCZnPGwpn#f77vs_JU3O{{1XFe^9XLSR;Gh zrhWf@ww*s{*mSJ1J!jLte?Q00A5?5QcCI~Z)4qQ{&(0roY&v$nJ!8|pf4{)aACzo5 zcA-6O)4qSd$etR$*q)MQmw21%nzW?^`o+M3f+U1eXE zq<^)2Y4{rZl5i7Sb6%>Z_C-m0ykK7#zSh1V+}zfjn5u<+UXmVP*ym_Ug4hhp5b2h?&03{uHioR&f&iHj#+l6x2ZJy^rN@Uvi|hedRY*? zRhA8;x6ra0y?K`1O>d@`9nld-rTHI0`9GNFwPtWHt@&UGy>}tIkJkJ!l-8QU{j}zb zVYD6yJV0yy7*1==;XzvS$p~5xBp#wQzl@}{Ch;(>`DPTY2H_D}^Ur8n^UtHS=ASXN z=AXxC%|Byl%|DORnt#U8ntz_4HUEsKHUCVYHUCVcHUCVaHUB(GYyNqP*8DS>*8KA{ zt@-B}TJz7dwC11ZXw5&*)0%%?pf&%zNNfIiiPrq{GOhXN6mKquTJz)gwC*W?pfz7ErFD<_Bdz)KCtCNMKhv5|m(jWh{e{;2`YWw_(%)#! 
zx6A3Hvg~(S^Y03}T9&P(H6O2{56`mIwC3kO=&D(^hSq$&madXzf6|)2|Dr2r+26G0 z^L4cDf!EWT-#5^ev_?Q{zTZgiuQdYN^S|cE|4R4F1zP<;39WnR&1m%pNmEdchD^tt^Q*NdONi!wEB^q z=xx-}(CSZip)05bqSdeLMsKN>iB|uz2VGt*7Oj4!B3(``8Lj=jX?<_^q1Er~OY0kb zPx1S|U-9^R#c%$Rn~s;}JV*avvG(`ZH4YzOpB=7bHwqtUpA|mHZWun;J~Ld|ZV*1i zt{<*q*9#wN*9}*-&j=r8pB_Hkt`k1OJ}q3$u3gBE^fndO1$>jE~{1#GSh*jyK|xh`OHUBKqLfX#IQo9hBL*9B~@3)oy2u(>W^ zb6vppxd zYhLK&Z7R(Rsq*9U^~Lkod8>IMRcBlCM5-HX%@e6^v^7tpy2;i&k*bTWc_P)#w&sac zU2V-1scx|~Po(N*Yo17TtF3t=Rd?IJigla!(F63bBM!6M@wGZrs$W_x(`yHKT=PLs zTJu3KTJu3~TJu34TJu3)TJyo3wC00;wC02UwB~~WwC00>wC01mXw3(A)0z(k(V7qL zp*0^2rZpejOKY8A2(9_yK3eMpLut(y_tRP@7)ERUc!1VA!Ejph$%C}k2}aPGUml{h zPB4+XT^Ve+pM6Cjwy>3pskXG0 zi>bD;R6E$pg;YD*%7s)r*~*1f zJKM^IRJ+*9g;cxR%7s+B*~*1fyW7fzRD0OUg;aam%7s)FZRJ9$y=>(|s=aOHLaKdi zVoLsNz5SG`_}pa7^)d{;yFU97`(? zj-!a_CU1X_7;BCR|)iB=xepp^$F)5?RIwDRB-T6s{5Rvw&6D-UYZ%7fGB zYqZotD-TYml?P|g%7ePJ@}M5AJg84A4;s+QgEML6K|@-3a2BmRXhbUy&Zd9<4k$pH?1RKr0U}q?HF3(aM91Y30EswDRCmT6u68tvtA#RvuhID-W)u zl?PYR%7d$E<-s+y@}LQ=JZMU5{5PXD{;#Dq{+rVp|1D^Z|CY4Ie=Az!zcsD#--g!s zZ%b?Zx1%-w+tb?bKx_PWq&2oX(Hj5P(Hi7O7kzzkJ0&0ZdbGbO7fN#;8Nt!nUso-B zgMCE!M*HyaP4;2oE_T)M&Gw<;u6C91E%qVdZg%DHt@gp;?)E|9+w23wJ?u*1+wB9w zrS|?>FYq=M)>C`>IM!2p*;r5QZDT#PkB#-zzBblV@3gU=+Rw&%YJVH+sRL}Rrw+8S zo_d#!_0+pM$GYsSns# zPaSS+qIxjjdS9hnPBp^EmCLCfvX#rJM%v2dR1e$AK6Ye3i4dkyH*^*RFjwDcd=x`B_Ms`Ub%KPApT@8c(jU$9RKzi6Kje#t&Q z{IY#)_!ax;@T>Nb;n(aV!mrzhh2O9b4Zmq05`N1*IQ+JKV0emsz<(~y_mqFrd_0x% zZ#u2~n?Wo8X41;PS+w$RHm&@dLo5I0(#pSiwDNC0t^8X+EB_YK%D+Xl^6wp5`S&iZ z{Ckg9{=H9Y{qO@?dH5l%^}~;7<>SY+)(=0Sm6xB=T0i`ZR(^g?YyI#GT6y{l zWuY}6>_XR6i$iOE*p05CmWbATu?KykS}0ocM@722S}t1i$=>vFYSC!TFZ&`5Jch@X7Y^;hOew;Zy8m!?o;V!l&9thiltM=`#qt zO@;e{IzEp3g41o>7o1_^zM!s+`+|Bl?hESMxG!j6Htq{9wsBu@iH-Y$OKr_d zm*rdUt8ia%xsT(%;0hb}1y|a*FSyFaeZkc>?hCH5abM8H#(hCk8}|jxY}^-IYvaD4 zxsCgR7B=n+TH3fTXl3KRptY@esg1X(G%vNKH4n9;HSe^iHP3XQHLrA}>uSA#*1T~Y zeVSfJKxY31M3wDRv6TKV@Z zt^9kAR{lLtEB{`gm47eN%DvaUQ@^>DszI;Bd zJYGPnFJDM2pBK^U%ip1u*YDEm%ip7w-|y4v%Riu%=O5DQ%Ri!(?;q3Z%Rix&_n*@0 z%Wo3;Vb5m-esWuk2sKU)#&V-`GEg7u!FDzqNl1 zFR_<~zq5Y`e{X*u{=xn(ywqM2{?Yz6{FA*{e6e z+(uvYyN$kRg^j*wrH#I5m5sh=wT-^$4;y{a8XJAlS{r@QpEmlUzij0~s=saYCF^YT zCF^bUB^zw?CI8syOE%i*Oa8Udm*mfAqc6z{<#m2P{VG<8y>F6QUVEQ#IeYK$=JsCU z@^;1W7WSUuE$uzRTiLsZE7-e*x3+f;Z)5Kg-qzkZyq&#Mczb)t@DBD4nm0<~Sf=r} zlaHs;_}iJ*_}hin_}i7%_}h)v_}iV<_}hcl_}i1#_^U`S)n6(7gZ|2CjlX?pjlX?q zjlcb9jlccr#ab_*HU28m8h;1U8h;1T8h;1V8h@2(jlV-^jlU|i#^0f|#$Q!h@plZZdFfbM?>`%g%+aHIous;f4 zX@3~L%KjjHwf%nh8vDI)6Z>7gzQEg5%7s+Td|bJZ>RMa5kgB<@Tu9ZzRxYG!X)6~} zwX&59sao60g;Z_qOOmv1U@I3=wX>BAsoLAhg;X7EN;Dwkm`C{ z{dlU*w)*c>H`wa8Q{8B*zfN_NT{lUeOJJ*iPIa@bemPZFTjMa*Ew;vCs&2N%VX9kg zjl)#kZH>cJx7iwpse0HNhpBG2H4ami+8T$c?yxluQ}whp4pa5AH4antwlxk@^|3V$ zQ}wkq4pZG}YaEuDhc({%`*C;!Y26=Bpml#dk=A&eMC<X+?-yy^AHPIvyuVD>)_MW0 z@&78V@&6jF@&7ul@&5*`@&6{R@&6XB@&7ih@jr#u_@7E^{7<7b{-@I#|1)Tf|CzML z|14VLe>Sc0->>L7#qB-Cv0vu7%{7IT2-#eJ-PPfF_Eq8e_LboU_7&lU_T}M4_GRIB z>`TM%+Lwgivo8+6Z(kJtz`ij2p?yL4Bm4aD$M$*QPwaE``T}oL;knIcK91)$pWAqD z^M#G)HecF!Zu6Cm=Qdy4cy9BJjpsItZ9KR6*2Z(2B{rVhd}rgi&G$B*+x%eTxy@1= z&uxCR@!aMo8_#Wiw(;C%nT_W*zu0(g^Q(>LHow_;ZnNCRbDQ67ty`|hx87GNms73u zapiKVRkm_D)oNS0oazr-xtwZ^tz1sE)>ba3`qNe}r~1oQE~on2RxYPnXDgRet+$oS zsW#ZkoWZRK*Re{JRRf3=17&<7Q4^g$&y`k>A1`ubI@ayI&)&298S!@xBeM@xCps@xC3c@xDE+@xBAC@xCLi@xBwS z@xC*y@xBYK@xCjq@xB|a@xD8)@xBMG=Ola58vhk(Jtx_VRvzq4>p96jwDMtJTF*)L zqm>u?(|S&F0ImF}MC&=pfwc1EAX?8!4yKham1#XEIfPc;RH2nOhtkTMsz#Cqm?&D(#o5oXywh(wDRT{T6uFUt-LvoR^A*>D{rdP%9|5t<;{t-^5!I3 zc~gT{-keM;Z)(!Yn^S1zO)Xk^b1JR8sZA?yPNS7Kb!g?y>9q3Z3|e_pmsZ}?qm?)H zY4w2(X!U_-(#oTTwEDoaXysEQT7BTzwDPJktv>J^y1pWiuBV8km1pPE>H{yJm2Vf) 
z>H{yLm3J4@wKc^ReMxcKBw7NeU}@eDz4PC}(o6ky3&WS$^TU_hbHi8Iv%^=~Gs9Qe z)5BNWQ^VKTox)A*4&kPDyKpnRP54^7Rk*p`BHY5hHr&!~8g6ArA6VMjj=rz7jU9bn zX+G|`*V|`>JKJZ5Z?Nl!Z?x-% zZ?aDhcd<_k-)x^6?rNVBzQsN{+|52Ie5-vzxVwFP_%{34a1ZJ3;okPa;Xd|(;lB0(;X7^3i~YP!eS+r2RQ-Ki^J1z2w&ulD18vQVsqV5h zFQ&TN*1VW%kga(!)jhW6#Z-fB&5Nn-wKXrM8e(f+Om&~Fc`?;cTk~S7`)$pOsfO8_ z7gIf8Zyz3RqYr=3-ZsvUu+fJ;9kp0R&R(z=2DOZYka=kW9PkKq^WAHpx%--TbYzYV`^e-nPi{wn;c{YCgS z`?K)t_9x*t?2p24+8>19vfm58ZNC$qVlNC&wdaSY*>l6w?b+cO_RR21dwO`5JvBVr zemgwJeltARemy+Tel>YGx1Y^zTyR%yO{N9zJUo@$ZacR(-H`wr*@dfx#(U++7h=jnY1 z^jy8~fS#lG9niB2*ApmksT53PP_Bdz=5e`)tm zTc(RoTiq8IX!TPiwC;;Hqt#!Pqjg`rIjw%HJgxiUE$DN!EjJd; zzdh($S{Igb|9cXx{VNCd>VHqC)&HJBtN*P_tN*P>tN*P}Z?B(mNN-oj z&ZM{1&p4#FDP(8STkB^W(iIBX+4NTW8He1OmFx}~SpUpJ>$X$gQMm>vdY~X!Ymq zX}wOX1Fe3&BmI+>Jm??w{sMZb-d{kgpYKfTby_#j>hEu)^*XJaX!ZMD=*9ZE1+@DA zuJqS>eJQQ`fo}AdS#~R}`-ATE=UH|et^0)@^r!my1+?xTO6iZY><(J@6Funwae#Jw>k9SQO|KWAb-y%IkVPOlfBb-y)^ zenzhspmqN>p4Phf1X}lF6KSoBPoi~y_9U%!@uz6ruT7@4F8(yF`?qIkt&2ZP>wfMz zTI=G^)4IQVf!4bCi?r_dUZS-w{zMdK>MXI-LT^FgQ*t#xKO|^Ahq?%^yx=1zM)^(9; zhOO%&)l6I0MXFi0u8Y~;rg~cMJD{J^(gQt7?>nF;XsLpJLhn1EAJ@_b{g~c&KtHOb z5c(0lbdG*lODFV0dg&egpq5(b2lUcC`hG3V(D&)3fAqb&F6evGUxZ#l=;L>XKeGph zKezjbzp(ENe`)s#e`WUye{J8Pr6_Mx-JWHO>D#n)Mc=Bo1kkr=sf)f@Zz-T}($W}x zgWi%rU!VTNT2k}zPT`;I4&k5ecHw1qoA57otMIROi|}vuwc+Jr4r8WNkq&5EjqBZ{hrZxW7(Hej2X^p=Pw8q~*w8q~? zTI26uTH}wO6_-uc%I#L^FGE|6#}fKUEd|pWpXKO@T1uuTXsMYVucc^O<9ADXoc=hp z#&ZRFtd_!Qjqh#fFrpDL{BA(E z)T38gdESt2u1B@B^1Tt=OpkVH<$Ytii5>+N^Y5JEas83-(&>#rZJzN9~`>u~_B zzGNV+>+>#JeaYSQ1g#g)-rr`t1r2i*7ZDuR$p=-t?PRzt-j=bTG#t9dbBQC zTG#(@T7Aibw8p~-T7Ahww8qCsT7AjGw8qOQT7AhQw8qb9T7Ai*w8qmIT7Ai5w8qz1 zT7Ajmw8q;wT7AhAw8r0fTJzEbTH|pdt$Aq@t?~IJt$FDwx{cNgXiWl7(;C0e(3+Q? zr8SR2%csG#m5MbQ|;13>)*(OdIpkEL*vd zYPPL$F~{3fm>1^yIOc_UwyyJ3^KHxv3vA2_3vJ8`i)_pb@7VX~SFzr;)i0-d&%QfJ z>jt*^3JEsM7L>I<9@o$sk85d-$3JO}$G>Qe$G>Tf$91&E<9b@-aRaUK_z$h|xRKU){FnB4 ztdRa^wbghm(3+=8XpPU!Xw6gQXpPs+Y0XpRX^r14Xw6ew(i+cO(VC|!&>G)c)0(HY zp*7yOr8Q4&M{E3VPivmqfmR;uNGlI^qLl|b)5?QgXyw7K^Z>0F(EYVuKr0XSpp^%E z(#nI1wDMptT6wTHtvuL=Rvzq2D-ZUgOSN7=Yy2NTYy4NDHU1BzHU1BxHU1B#HU2Bp z8vlpT8vj*jjsHVwjsL2&#{Xfo#{c28#{Utt#(yt#Oj-GF#&$)#bLvNvbPs zjgwSY+8QURuCg^wQeACpoTR$O);LMk#MU@D?SIBYGapZ-@o+7z@z9*scxXXuJhY@W z9$L{F53Om9hc>jvLt9$op&hO9(4N+K=s;^cbfh&NI?);r*U=ge*V7sgooS7S8)%J( z8)=P)n`n)PF7yC>OKFXVuC&I(Ewsi%H(KN2R$AksJFW3>8?EusgVuPsoz{3LrAswL zXkGt3XP3!s} zMCi6Q#yl~?#ys(mjd^0Ejd|i>8}q~{8}q~?Hs*=Zw#LPy-llq5>j?ByT1TKK zX&r%{pmhZL39Td0k82%)eoX5K^rKowpdU$p5UnTp_`~5z_Cw((?FYk8*$;##+xLf` zw(ko+W8bU){{MXcpY!ol`u;yp>-+x#t?&Pfw7&l@(fa{|2q^|C_YF|8LRy{=ZG@`#*)&_kSv_@BcJw1_$KdHAK(7HZm(G&Ih z0$SJ09D2N7UqI{nnMaS)>kDXIPYdX=dVK+{>uV7`Mz1fRb-leykJjr8XkCBr)1&nI z0$SJOhxACjzJS*C`7u30uP>l=y?#m$*IP?yUB92x!}L}ZTG#WJ^iaKZh1T``H9bV@ z0<^C8#q?lBLebwAx0e>L{W6~`h+JIauR|_=XCoKCw~>oK*vQ4DHgfSt8@c$Cja>ZM zMlLS1k&D0B$i-i6RZeF0QaOE>f+ukxQ#==(Z8>^(Z6r7(ZBy=qkrFM574h-{cEFt&wrL3`uD=tI**0ip_ZD?J8 z+tRxJwxf0ZZBOg^+kw{gw#riMT-uA)^|&{!T-t}$^|>#tT-uM;^}0W;TsnZ(^;?NnE*(hgdOnC&E*(tk`mRhX zmkyzIy;q@?ONY|B{;Sfu{tu&d{U1*2`agmmsC5Kd*Z+~UuK%NGUH?bZy8e%$b^RYp z>-s;A*7bipt?R!!t?T~;TG#)Hbg9k@4B8>t%E>Mv5AZL6P1)!0`5km?*;{X(j9ZRGHIHgfoU8##P|jU2wv9;jc% zy2wTjUu+|XFR_usm)gkT%WUNE=fZ6k-Tu}lBwbJdi?O?^C- zuD52iuD5GxU2n~4U2iRDU2iREU2m;uU2m;vU2kn@U2kn^U2pAZU2pAaU2h#|U2h#} zU2mOeU2oUX>W{Cd)gO1Jbv@odt3SSxo}_gIdZN}5XkD*2)9R1A(zzU?@#OcA3*E+A4u!^zl+xOe>bh`e-N$f{~o$jue&LFaB+J=@!Bu*xmFq%sqXdH zA%}<9$l?2J;juPy_;DLKJkHj2dBPUjrqbW{cpp!tzwZgO{=O&D z`um+kzXT7Tb9(fa$IOzZFaX+kzzS~>IztsHul*7xHzS~>JOJxP}!tsHuj*7xfzS~>JKt?%CyS~)b8*7tK7tsI(8 
z>-#%{Ru0Xi_5GejkJcqq^z7nxMf%=v+HU$>pSKI?Xz3h(-J9XL_Uqw!_N(Ff_RHY~ z_KV?#_VeLI_OtqVir(fkrG9@}KTmO!^Mlz_`gw}g-^dV+qQBK?GZo+ACYex4%z zn0}rj{iuGPBK?Ry*MNRlpKCxrq|Y^=AJpd>&=2Ty4e0yzxd!xo`dkD0UVW|seb0Y? zKiOg*zdQV`Jutk)?jQcnzBBy2-6#Bm-7CD*zC)jD;BBhg^|=Q0ZTegT`c{3e0ey=; z*MPoRpKCzhq|Y^=Z_wu&(AVp84d_ns=Ud_9`do!nEA0;9Rkl7?A=PTTUHA`MpR15+ zjol`^*4F1Lr25lt75>ZC=PIQ7+inqFXEzV8x33Lvu=TkLss6E>hBw+x^tlG!rqcVW z|Eq1Zr}t5(D%g78bgB|t?~_ionXUIlrz&UbebA{kxAnf~ROM~G&pFi=w%*sAYD-)1 zYfiP5t@kyjs$lDV&8fDw^}gm*+t_+vbE<7^y{|ddcDCNvoN9Yp?`uxAgRS>9r`plp zJiL=#F1)k7S$G#)?`uxAtF8Anr`pZV^tlH4ws`cNbXMyLKE5ctr@b&-(OwYV%bp+J z+nyKR$DSMB*PavJ&z>FL-<}mdz@8bdWX}j6XipCxWKRnpY)=hWwx|5((tJ<(U&Y5$ zy`|4Jpfw*F|COuJ~Yd~whszs02=NizO zziQLt^tlGK=CeBVSbeSmt@-T?dW=5TfYy9hj~=bhHJ~;BHK0f7a}8+ChYjhG`dkBA z^J61=gg)1R)_mER9m#a}8+CuNTro^tlGK=G%+u!TMYS zTJ!Iv^dSA*L|XIl<@8;J>O$Znw3U))G{E@U^+>NmR3*A=pxY4smnY4sns(CR&^rqE+^r6*%^rh8*+)1nd z=tt{wUHa4NM+VUPT$h2g`jfk8eXh&hwEC4nv_9A69$Nj&U|OH+axblZW(cj%b-9mL ze>0TU=epcatKS($>vLTmpw<5jr`7*FNUQ%DL973Hh*tkIl2-roFs=S)6s`W}5nBDv zXu7<9ZX&JzXAG_W=P_FS&sbXh&*QZEpK-MMpC@SbKjUfjKND#6KNIN%S{In z|9Og5|1+6Z|MN7h{^uE5{m-+s`k&`$^*_(k>VICK)&IOmtN(e4o}%X{MZa9!{=fId z`<0}QsPq+o-9F)0?Y+XU*?Wdxw|5V}VecA#)80A!mc3(1_O`d_zIgkRY|19*2eWNU zvZ?geCD}B3tCDOwy+uhjgWkL(n@Mk0lFgzECE0BH-$FKr{-==5rPmj-dGz0fY(D*G zAzMJNDP#-j)rD*ky;AD}^zYHPzw6__h2OJ(3BPau9R9%mG5n$ZL--^6yYR>Mw^|qQ zHq|#;7ofk=x&Zx!)&=O#v@Sq@qICiKBdrV2A81{GeoyNH^gHqATkPYD!r$5p!%OT1 z;qUDE;qUEv;UDa|;idMR@Q?QF@K5%v@Xz+l@G^Tw_!oP6_*Z*c_&0lMc)2}A>jK`U z(!7*vg^z0 zZOsd**4dgDQmwZ&FQnRFYhFn8kF9wj)ka(MLaKjl%?tn4w%XIYkg8y7UPx79YhFmT znXP#tRXJPpLaNPe%?qi@+nN`)$hXC#xG&z)$8lf0mA$a|b=p+07lgOAabLWRjr-zl zZQK`cXXCzjdmHz~JJ`4{-qFT=@lH1Gi+8qhU%ZPwO}~n@t35SIuQRZx{O8hqPx-%x zkEhzVB-@kLd{B|zyCmC-*8H$HU9lwFht_Dum-(B z>jJdq$C~sytqahaFKf|%X0h-jKx=+(L@(320Im7DG5wR)1!&FR=h92HEjJdq z_Y3JIS{I---(O5G*17<#`TtV-Ypn~=>IW{Tztp+_t^VLj`g5%d(CQbira#rX0ImL^ z3H`Cw1!(mX&FBxcESXx$gzPOE<@rFCC?2d#dlC$0P9UbOm~-n8zE`_Srl z`qH{DzLQq}(~nmF)1OxVGk{kAGmuvQa~G}t=Wbg4&mda;&povIpTV^HpL=QbKSOBs zKljnZPf9|K%{|uwm|2#me{~1oJ|9OyJsLwB;)&D$1tN$5EtN(eJR{t}KR{!$| zt^Q{;t^VgxTK&%$TK&&swECa1wECaNY4tzj=qYNMi+-ZGeLD3%o3@*t7vJ+=-&i`{ zUpFW`!M-~@(Y`A@$sQPf(jE|g%I+VYZ1)R4ZQmJw#_k(_*6tI2&h8z4-tHBC!R{G; z(Y_=6l3f~p*}g6OihXPNRr{9kYxd3I*X^6aZ`e14-?XnUWN&$!)&V;GcRqXD$2*3n z*d4-C?e^hmcDwL&yKQ)e-6lNKZXKRww+hd;TZZS@Ey8o{=HYqvwc+`8v+x4DX?UUC zMC$_Hrow&TJ3g-5P4%v=oK5wftz1p@zO5Wh^?|M2O!c9yoJ{qRtz1m?v8^0T^@*(< zO!cX)98C3@tsG4Cxvd;b^@Xh*O!cL$98C3}b0yxjgI{JZ^ec!m8@c%}Vec$NJ@c(wh0_z(NN@EZHw|6H2yDewOD@l?vYzi8#% z-?Z{>9j&}uPb=>>(8{}iXyx5TT6y;`?Y!GA{b96K-W6!&T?wtc+l*G;m7|q+o72j> z^7QRm7odCS=OEC^!>#D<`Z)-+@^Ndrn|=-gt-Rcp?y8@IKr27Dr@QFqAkfOw9qAkO za}a3d>&|p%{Tu{ZdAlopoqi4it^D1cR{rimD}VQ-mA@5fXj*;nF|_jiSXzDXakTROcv^jMbz1p<0w-SY6I_HW_K>|eu|+rNabu$P6ew0{m?W&aeu z+Ws+ojlDG7#Qq`N)c!u)%>FKXt-U1N-2OJ)!d@J1X@3=NWq%QFZGRSSV}BBEYkw4O zXMYfGZ@;JY0&i0v{LX*pvyMK#DBQ_j7{1P45We1?AMR|=3*TVR4c}SZJhP&D`!nfGd!`G_2AcO<>ec+)`Q=qm7j0XS`U7k zR-R6wwH`c`R=!T7wH`d3R^HB_wH`c^R{qYSmA|uTG3?*dx+ zyO37?E~1sc@6gKMcWLGCd$jWReOmeZ0j>P~kXHVFL@R$krqu_3LMx9yrPT+2Mk}8` zr_~34K`XDnq}2z1MJvC*rqu_3Lo3f0)9Qo2rIqhXX!XJ0(aQVpY4yQB(8~X%wDSK) zTKWGIt^EI)R{k%emH)rc%Ku+!<^ONA@_#w4{QsR+{;!~w|0`+b|0-JfznWJ5|3NGN z*U-xUwY2j8Pg?o^7p?sNn^yj>qm}>bY32V0TKWGEt^D6eEC2tco&VdXaMo7&U!ax$ zCA9K?Gg|pyj#mCZUi|lOUOfH`{`K13v>yD(fAeN(d4Ju*;VtZk!du!8hPSdG2v@N0 z4{vSX7v9FcH@vNVPk1|fP*F26``I1B``hiq2iWbxmF%|R z1MN29gY4GfgY8z~%67}}A$E&!6}x%(Q2W|&Rl8aEFuQ5^aJz}t3%pH*zPOr?qc1+v zMqhlCjlTG38-4LHHu~aYZS=**+31Upx0Q>js@uxNR43TV#Z)KS%EeSC*~-OKHEiW# zs*`QyVyc?9axv8@wsJ95EnB&m>Qq~~n5wp|TugPEtz1l1$5t+;I^9+-raHq`F4oPr 
z#iPHcv-&v+KE6C$-~KJ!!2UISru|E}p}j18mi=?Mk^NKnZ2QM>V|!`%9Q%jxx%T(r z^X%`!=i5ud7ues1FSHl`=hA#n`FF98ry8Z#5zxxROX-n%9RaO;yqq4P*AdXl%PZ;O zdL03+{JfeTrq>bB%F`zFP`!?TR=zf)hv;<#wDPt&Jy@?Jpq0NZY2|M#TKU_WR{pl3 zmA`FiTKU_ZR{q{bD}Q^?%HP{*pi{Jn!#{`RDmzrASXZ*N-p z+lN;E_NCPq-$^Tv`_bx)`_szj0krz!fwc1aE?Rx@-L&$15UsxW9$I-mm{wnWFRgqZ zLaQ&nk5=9frPUYTPb>e2(aQe^XyyNKTKWGVt^6NBEB_y&mH#7Y<^RL9@_!Vq{C|X2 z{*R`W|Buqj|1q@k|1nzmKbBVhKTiLy*AdXl|0ihW|9D#YKY>>MPo$OqlW67tleF^x zDO&kInO6QkO)LMOp_Tv7(#rqmXyyO&wDSK2da>4vi+-`V{eS!7fB%~|OJDNW{S$uK zULStN{yY4t{b%?!drkOtdv*8?du8}d`}go$_VV!C_HW@S_OIco_AlXS_OkGF`{(cs z`={_s`^WGsdue#K{X=+;{e5_@{atvTy(B!}{x-b8Uaa*3Z&P3V&VT2#MLxbL{Eoda z{I0zq{GL5O{JuRe{DD0;{GmN3{EjmDXLSOu~kE1XC#ztSf*hXLct&P5TiH*MaI~#rR_cr?CA8h4fs-?DaG1ZT@axv9U zwsJAm&$extQu#Te+C(H(R-wYPqdkO!d31TuimXRxYMmX)6~~t+JJi zsaD&{#Z-UT%EdMLws`dSbXMyIK90WlPaA#lUpD&Uzisrz>umJJ>uvPK8*KE&|Jdk@ zH`?fn|FzK0j<2;hW<;hBcPR^+tF+FIs#gGx&!@(UPnMHUw5Kc>2(CO@^%+`g(#qe%XyxzWwDR`|TKQXzR{kDID}Rro zmA^;R%HLyX6nY8l1A+7vB zi&p+OqLu$=)5`zGwDSKPTKRu2t^7ZaR{oz)EB`N`mH!vg%KwY#-?QvuTKRtot^B`~ zR{mc`EB`O2mH$`J%Ks~A<^NT*^8adD`F{_4(3-zm+1fBL_9bJO~<+-lR;m$vkF@_*;E*7j53HujU@w)UiOJ9}cdy*(k^ z!5$y(Xg?9|WRDA9XFndk-X0t7Y(Eyh!5$O7(S9_1lRY}z#l9zevpp!>)xJA?i+xwP zn>{dmt34px-R>X0&F&ZOVc!|P-R>JMwfltcuzQDl+P%WP?4IG?_8s9qcB$42yiL_9 z&fn?d9mDkF?RJKWw8= z_(l7#@Jsfe;g{{T;aBW6;aBZH!mrt@!>`+`!f)6s!*ALv{&Q)*r~G}}$5SbPr_jpZ zskHKU8m;`DPAh+B(8}MLwDNZrt^A!$D}U$E%HO%P@^>Ds{GCrLe;3fo--WdDcM+}p zeTP>5zDp~A-=mek@6*cP4`}7@hqUtdBU<_UF|GXlgjW83N-KXqqm{p()5_m3Xyxyh zwDR{WTKW4mt^EClR{k!gmA~K8%HJil^7lJh`TISs{QZGe{w}4JzdzE--=ApZ@6WXI zcNwkx{e@Qk{z@xu;EK`Z~aq?P|$(aQe{wDNyzTKT^Xt^D7XR{n2CEC08rmH#`?%Ksf{ z<^N8!@_%Pq`M(RT{NI&U{_jRB|97XA|9jBN|2=8te??mPzZb3i--%L4F4@5`t&Y^v*W=^c!sS={MTw z({Hj@>Q}M4*vjc*mF8RJZ&x2rrTo2xR{nOQmA|*r%HQs^^7l4c`P+k5{@zY2e@kiQ z?;W)Aw(;{Job}{tls)zxUC~-=Vbf_kLRWJB(KTK0qsfhtta62WjQ+2wM635UudNQp({b^eH{S2)>{aIRh{v547{drpX{sOH&{Y6@N{}Qb} z{bgGD{|c@Af0b7LzeX$nU#FG-Z_vvBH)-YnTeR~3ZCd$1g;xGgrIr8FXyyNOTKPYN zR{qbVmH)G7<^OD2`9FtN{?DbA|MO_&|9o2czkpW$FQk?Ki)iKlJGAouU0V779(6?&6fWAfR1@z5YFQ9MIdI5cd z)(hzCwO&AXia*~^KHf3>v)v)Q%x)k4#cmh=)ovU9&2AH3ZnqBqZnp}ruv>;#+AYGX z?B?Os_O;}KILcGK`$yNT8dyiIjvod3(mtA+ozj|i``4-c=m4-0RwtA_uv4-Id$ ztAzix4+&>G$(6$e``~bieNcEa`@nEHyHa>_`+#tHdw;DLc$*4+`IbJ8zI-bieR%~N zeficl`togT^yS;y=*zdW(U)&;E0mFSUr9RaO;I*1;j*AdXltIG6ny^erZepR7|>2(CO@~kR7RIekT zm2ZdBL-aZVT6tHE9<0|9(8|A~=pK3<0j)edhVHJ{5zxxV=Nf6t(m zzjbNlZ#`Q1Tc1|`HlUTiXVS{whP3kcEL!>7h*tidO)GyJ)5_m-Xyxy@wDR{nTKRiE zt^B=!R{mZ{D}OJdmA@C$%HK<9+>BOVel4whZceK&Z$T@sThi*wThYqz*0lQaHnj4*Ev>%19j$zCPpdEQ zKr8P%((21Q(aQhpXyyO)wDP|*t^B`%R{q~eEB|kzmH%C6<^RpJ^1mys{J(`({&%C5 z|F_c0|L(N%|2A6r--A~E?^pEg#qIz0rTOQW|Eu>MY&u_>&kX;>JaUJRuMhXM{|@)E z{|xuG*M$4ntHXWmm3rTSx2e9-dI9~F)(hw_v|d1eru72)6Rj7}A8EaS{y^&m^m|$_ zpx=o<-(VkK6u#G97#?CT2;XPV4-d8Hh3~iLhKJd6!VlQ9!^7=a;Ro%R;Su(X@I&_W z@JM@F_+fi$c$7Uw>jmDXQZA+%?c>VDRFB%q#Z+T#vpIbZL%^Pk0U?ZXhqI6P~P zkG+pfm(qMA)1@@u%5*8scQRc{^Sw-$()=LPr8GavbSceGGF?hDPNqv~#>;dm%>?!`D3(fKHf**reE{idwCy$o1QIj@8Nv}Zu+*!y^Hq|xar*z_YU4i;HH1e+-rCrftwz# zaIfNh1aA7c%Dsa35xD8)8uv2J1>E#=t$PXQ0&aS`&b^3p0XKbJ?_R*UfScZKaL?mh zz)gQQy6NvGH~ro0roUU<^mnV9{%&*A-|cSt`-hwU?r_uJoo@QO%T0fGyXo&9H~sz7 zO@H^g>F-}|`n%6ffA_oT?*TXcJ?N&thurk{u$%rKans+UZuR$k`};_Lk8{)C#vzMOjrr+nd z*-OuL)ARG(?4{?s>H7t4_R3>c){m}L@f^vV(3v^BMIOhfK?wl96yK-LO?#y|CyCdfX z?)ID)xZ84G;BL)%fxBh=`AX`3i*PBqdAPLPEL=uz8ZIk0373-_hs(>2!WHC(;Tz-z z;fiwoa3#53xUyU~e4|_^Tt%)OzDcgdd4aBJD#Yuz=zjU|t#Y~WZF1RgRk=*Knp`@3 zyId-Khg>pzr(7a@ms~ttUA{hiw_GfIk6bi-uUsU2pIkUxLoUR5fv#!jVwwkZpAM#Z zP^Non9+K%?nule&mZqjm$I?6^)2%d*%5*BtV=`SzQ%j~xX==-KDNP-jE~Tj})1@@^ 
- *        Return <0 when left<right, >0 when left>right, 0 when left=right
- *    }
- *
- * You get:
- *  - some_type *some_name_put(avl_node_t **root_node, some_type *data, int *inserted);
- *        Either insert a new key or retrieve the existing one; if inserted is non-NULL, it receives 0 or 1.
- *  - int some_name_insert(avl_node_t **root_node, some_type *data);
- *        Try to insert, return 1 if successful, 0 if the key already existed.
- *  - int some_name_delete(avl_node_t **root_node, some_type *data);
- *        Try to delete, return 1 if deleted, 0 if no match.
- * - some_type *some_name_search(avl_node_t *root_node, some_type *data); - * Retrieve existing data, returns NULL if unsuccesful - * - void some_name_free(avl_node_t **root_node); - * Free all memory used by the AVL tree - * - some_type *some_name_toarray(avl_node_t *root_node); - * Malloc an array and put the sorted data in it... - * - size_t avl_count(avl_node_t *root_node); - * Returns the number of items in the tree - * - * For example: - * struct my_struct { ... }; - * AVL(some_name, struct my_struct) - * { - * Compare struct my_struct *left with struct my_struct *right - * Return <0 when left0 when left>right, 0 when left=right - * } - * - * avl_node_t *the_root = NULL; - * struct mystuff; - * if (!some_name_search(the_root, &mystuff)) some_name_insert(&the_root, &mystuff); - * some_name_free(&the_root); - * - * For questions, feedback, etc: t.vandijk@utwente.nl - */ - -#include -#include - -#ifndef __AVL_H__ -#define __AVL_H__ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -typedef struct avl_node -{ - struct avl_node *left, *right; - unsigned int height; - char pad[8-sizeof(unsigned int)]; - char data[0]; -} avl_node_t; - -/* Retrieve the height of a tree */ -static inline int -avl_get_height(avl_node_t *node) -{ - return node == NULL ? 0 : node->height; -} - -/* Helper for rotations to update the heights of trees */ -static inline void -avl_update_height(avl_node_t *node) -{ - int h1 = avl_get_height(node->left); - int h2 = avl_get_height(node->right); - node->height = 1 + (h1 > h2 ? h1 : h2); -} - -/* Helper for avl_balance_tree */ -static inline int -avl_update_height_get_balance(avl_node_t *node) -{ - int h1 = avl_get_height(node->left); - int h2 = avl_get_height(node->right); - node->height = 1 + (h1 > h2 ? h1 : h2); - return h1 - h2; -} - -/* Helper for avl_check_consistent */ -static inline int -avl_verify_height(avl_node_t *node) -{ - int h1 = avl_get_height(node->left); - int h2 = avl_get_height(node->right); - int expected_height = 1 + (h1 > h2 ? 
h1 : h2); - return expected_height == avl_get_height(node); -} - -/* Optional consistency check */ -static inline int __attribute__((unused)) -avl_check_consistent(avl_node_t *root) -{ - if (root == NULL) return 1; - if (!avl_check_consistent(root->left)) return 0; - if (!avl_check_consistent(root->right)) return 0; - if (!avl_verify_height(root)) return 0; - return 1; -} - -/* Perform LL rotation, returns the new root */ -static avl_node_t* -avl_rotate_LL(avl_node_t *parent) -{ - avl_node_t *child = parent->left; - parent->left = child->right; - child->right = parent; - avl_update_height(parent); - avl_update_height(child); - return child; -} - -/* Perform RR rotation, returns the new root */ -static avl_node_t* -avl_rotate_RR(avl_node_t *parent) -{ - avl_node_t *child = parent->right; - parent->right = child->left; - child->left = parent; - avl_update_height(parent); - avl_update_height(child); - return child; -} - -/* Perform RL rotation, returns the new root */ -static avl_node_t* -avl_rotate_RL(avl_node_t *parent) -{ - avl_node_t *child = parent->right; - parent->right = avl_rotate_LL(child); - return avl_rotate_RR(parent); -} - -/* Perform LR rotation, returns the new root */ -static avl_node_t* -avl_rotate_LR(avl_node_t *parent) -{ - avl_node_t *child = parent->left; - parent->left = avl_rotate_RR(child); - return avl_rotate_LL(parent); -} - -/* Calculate balance factor */ -static inline int -avl_get_balance(avl_node_t *node) -{ - if (node == NULL) return 0; - return avl_get_height(node->left) - avl_get_height(node->right); -} - -/* Balance the tree */ -static void -avl_balance_tree(avl_node_t **node) -{ - int factor = avl_update_height_get_balance(*node); - - if (factor > 1) { - if (avl_get_balance((*node)->left) > 0) *node = avl_rotate_LL(*node); - else *node = avl_rotate_LR(*node); - } else if (factor < -1) { - if (avl_get_balance((*node)->right) < 0) *node = avl_rotate_RR(*node); - else *node = avl_rotate_RL(*node); - } -} - -/* Get number of items in the AVL */ -static size_t -avl_count(avl_node_t *node) -{ - if (node == NULL) return 0; - return 1 + avl_count(node->left) + avl_count(node->right); -} - -/* Structure for iterator */ -typedef struct avl_iter -{ - size_t height; - avl_node_t *nodes[0]; -} avl_iter_t; - -/** - * nodes[0] = root node - * nodes[1] = some node - * nodes[2] = some node - * nodes[3] = leaf node (height = 4) - * nodes[4] = NULL (max = height + 1) - */ - -/* Create a new iterator */ -static inline avl_iter_t* -avl_iter(avl_node_t *node) -{ - size_t max = node ? node->height+1 : 1; - avl_iter_t *result = (avl_iter_t*)malloc(sizeof(avl_iter_t) + sizeof(avl_node_t*) * max); - result->height = 0; - result->nodes[0] = node; - return result; -} - -/* Get the next node during iteration */ -static inline avl_node_t* -avl_iter_next(avl_iter_t *iter) -{ - /* when first node is NULL, we're done */ - if (iter->nodes[0] == NULL) return NULL; - - /* if the head is not NULL, first entry... 
*/ - while (iter->nodes[iter->height] != NULL) { - iter->nodes[iter->height+1] = iter->nodes[iter->height]->left; - iter->height++; - } - - /* head is now NULL, take parent as result */ - avl_node_t *result = iter->nodes[iter->height-1]; - - if (result->right != NULL) { - /* if we can go right, do that */ - iter->nodes[iter->height] = result->right; - } else { - /* cannot go right, backtrack */ - do { - iter->height--; - } while (iter->height > 0 && iter->nodes[iter->height] == iter->nodes[iter->height-1]->right); - iter->nodes[iter->height] = NULL; /* set head to NULL: second entry */ - } - - return result; -} - -#define AVL(NAME, TYPE) \ -static inline int \ -NAME##_AVL_compare(TYPE *left, TYPE *right); \ -static __attribute__((unused)) TYPE* \ -NAME##_put(avl_node_t **root, TYPE *data, int *inserted) \ -{ \ - if (inserted && *inserted) *inserted = 0; /* reset inserted once */ \ - TYPE *result; \ - avl_node_t *it = *root; \ - if (it == NULL) { \ - *root = it = (avl_node_t*)malloc(sizeof(struct avl_node)+sizeof(TYPE)); \ - it->left = it->right = NULL; \ - it->height = 1; \ - memcpy(it->data, data, sizeof(TYPE)); \ - result = (TYPE *)it->data; \ - if (inserted) *inserted = 1; \ - } else { \ - int cmp = NAME##_AVL_compare(data, (TYPE*)(it->data)); \ - if (cmp == 0) return (TYPE *)it->data; \ - if (cmp < 0) result = NAME##_put(&it->left, data, inserted); \ - else result = NAME##_put(&it->right, data, inserted); \ - avl_balance_tree(root); \ - } \ - return result; \ -} \ -static __attribute__((unused)) int \ -NAME##_insert(avl_node_t **root, TYPE *data) \ -{ \ - int inserted; \ - NAME##_put(root, data, &inserted); \ - return inserted; \ -} \ -static void \ -NAME##_exchange_and_balance(avl_node_t *target, avl_node_t **node) \ -{ \ - avl_node_t *it = *node; \ - if (it->left == 0) { /* leftmost node contains lowest value */ \ - memcpy(target->data, it->data, sizeof(TYPE)); \ - *node = it->right; \ - free(it); \ - } else { \ - NAME##_exchange_and_balance(target, &it->left); \ - } \ - avl_balance_tree(node); \ -} \ -static __attribute__((unused)) int \ -NAME##_delete(avl_node_t **node, TYPE *data) \ -{ \ - avl_node_t *it = *node; \ - if (it == NULL) return 0; \ - int cmp = NAME##_AVL_compare(data, (TYPE *)((*node)->data)), res; \ - if (cmp < 0) res = NAME##_delete(&it->left, data); \ - else if (cmp > 0) res = NAME##_delete(&it->right, data); \ - else { \ - int h_left = avl_get_height(it->left); \ - int h_right = avl_get_height(it->right); \ - if (h_left == 0) { \ - if (h_right == 0) { /* Leaf */ \ - *node = NULL; \ - free(it); \ - return 1; \ - } else { /* Only right child */ \ - *node = it->right; \ - free(it); \ - return 1; \ - } \ - } else if (h_right == 0) { /* Only left child */ \ - *node = it->left; \ - free(it); \ - return 1; \ - } else { /* Exchange with successor */ \ - NAME##_exchange_and_balance(it, &it->right); \ - res = 1; \ - } \ - } \ - if (res) avl_balance_tree(node); \ - return res; \ -} \ -static __attribute__((unused)) TYPE* \ -NAME##_search(avl_node_t *node, TYPE *data) \ -{ \ - while (node != NULL) { \ - int result = NAME##_AVL_compare((TYPE *)node->data, data); \ - if (result == 0) return (TYPE *)node->data; \ - if (result > 0) node = node->left; \ - else node = node->right; \ - } \ - return NULL; \ -} \ -static __attribute__((unused)) void \ -NAME##_free(avl_node_t **node) \ -{ \ - avl_node_t *it = *node; \ - if (it) { \ - NAME##_free(&it->left); \ - NAME##_free(&it->right); \ - free(it); \ - *node = NULL; \ - } \ -} \ -static void \ -NAME##_toarray_rec(avl_node_t *node, 
TYPE **ptr) \ -{ \ - if (node->left != NULL) NAME##_toarray_rec(node->left, ptr); \ - memcpy(*ptr, node->data, sizeof(TYPE)); \ - (*ptr)++; \ - if (node->right != NULL) NAME##_toarray_rec(node->right, ptr); \ -} \ -static __attribute__((unused)) TYPE* \ -NAME##_toarray(avl_node_t *node) \ -{ \ - size_t count = avl_count(node); \ - TYPE *arr = (TYPE *)malloc(sizeof(TYPE) * count); \ - TYPE *ptr = arr; \ - NAME##_toarray_rec(node, &ptr); \ - return arr; \ -} \ -static __attribute__((unused)) avl_iter_t* \ -NAME##_iter(avl_node_t *node) \ -{ \ - return avl_iter(node); \ -} \ -static __attribute__((unused)) TYPE* \ -NAME##_iter_next(avl_iter_t *iter) \ -{ \ - avl_node_t *result = avl_iter_next(iter); \ - if (result == NULL) return NULL; \ - return (TYPE*)(result->data); \ -} \ -static __attribute__((unused)) void \ -NAME##_iter_free(avl_iter_t *iter) \ -{ \ - free(iter); \ -} \ -static inline int \ -NAME##_AVL_compare(TYPE *left, TYPE *right) - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/lace.c b/resources/3rdparty/sylvan/src/lace.c deleted file mode 100644 index a49db4f8a..000000000 --- a/resources/3rdparty/sylvan/src/lace.c +++ /dev/null @@ -1,1040 +0,0 @@ -/* - * Copyright 2013-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include // for sched_getaffinity -#include // for fprintf -#include // for memalign, malloc -#include // for memset -#include // for mprotect -#include // for gettimeofday -#include -#include -#include - -#include - -#ifndef USE_HWLOC -#define USE_HWLOC 0 -#endif - -#if USE_HWLOC -#include -#endif - -// public Worker data -static Worker **workers; -static size_t default_stacksize = 0; // set by lace_init -static size_t default_dqsize = 100000; - -#if USE_HWLOC -static hwloc_topology_t topo; -static unsigned int n_nodes, n_cores, n_pus; -#endif - -static int verbosity = 0; - -static int n_workers = 0; -static int enabled_workers = 0; - -// private Worker data (just for stats at end ) -static WorkerP **workers_p; - -// set to 0 when quitting -static int lace_quits = 0; - -// for storing private Worker data -#ifdef __linux__ // use gcc thread-local storage (i.e. 
__thread variables) -static __thread WorkerP *current_worker; -#else -static pthread_key_t worker_key; -#endif -static pthread_attr_t worker_attr; - -static pthread_cond_t wait_until_done = PTHREAD_COND_INITIALIZER; -static pthread_mutex_t wait_until_done_mutex = PTHREAD_MUTEX_INITIALIZER; - -struct lace_worker_init -{ - void* stack; - size_t stacksize; -}; - -static struct lace_worker_init *workers_init; - -lace_newframe_t lace_newframe; - -WorkerP* -lace_get_worker() -{ -#ifdef __linux__ - return current_worker; -#else - return (WorkerP*)pthread_getspecific(worker_key); -#endif -} - -Task* -lace_get_head(WorkerP *self) -{ - Task *dq = self->dq; - if (dq[0].thief == 0) return dq; - if (dq[1].thief == 0) return dq+1; - if (dq[2].thief == 0) return dq+2; - - size_t low = 2; - size_t high = self->end - self->dq; - - for (;;) { - if (low*2 >= high) { - break; - } else if (dq[low*2].thief == 0) { - high=low*2; - break; - } else { - low*=2; - } - } - - while (low < high) { - size_t mid = low + (high-low)/2; - if (dq[mid].thief == 0) high = mid; - else low = mid + 1; - } - - return dq+low; -} - -size_t -lace_workers() -{ - return n_workers; -} - -size_t -lace_default_stacksize() -{ - return default_stacksize; -} - -#ifndef cas -#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) -#endif - -#if LACE_PIE_TIMES -static uint64_t count_at_start, count_at_end; -static long long unsigned us_elapsed_timer; - -static void -us_elapsed_start(void) -{ - struct timeval now; - gettimeofday(&now, NULL); - us_elapsed_timer = now.tv_sec * 1000000LL + now.tv_usec; -} - -static long long unsigned -us_elapsed(void) -{ - struct timeval now; - long long unsigned t; - - gettimeofday( &now, NULL ); - - t = now.tv_sec * 1000000LL + now.tv_usec; - - return t - us_elapsed_timer; -} -#endif - -#if USE_HWLOC -// Lock used only during parallel lace_init_worker... -static volatile int __attribute__((aligned(64))) lock = 0; -static inline void -lock_acquire() -{ - while (1) { - while (lock) {} - if (cas(&lock, 0, 1)) return; - } -} -static inline void -lock_release() -{ - lock=0; -} -#endif - -/* Barrier */ -#define BARRIER_MAX_THREADS 128 - -typedef union __attribute__((__packed__)) -{ - volatile size_t val; - char pad[LINE_SIZE]; -} asize_t; - -typedef struct { - volatile int __attribute__((aligned(LINE_SIZE))) count; - volatile int __attribute__((aligned(LINE_SIZE))) wait; - /* the following is needed only for destroy: */ - asize_t entered[BARRIER_MAX_THREADS]; -} barrier_t; - -barrier_t lace_bar; - -void -lace_barrier() -{ - int id = lace_get_worker()->worker; - - lace_bar.entered[id].val = 1; // signal entry - - int wait = lace_bar.wait; - if (enabled_workers == __sync_add_and_fetch(&lace_bar.count, 1)) { - lace_bar.count = 0; // reset counter - lace_bar.wait = 1 - wait; // flip wait - lace_bar.entered[id].val = 0; // signal exit - } else { - while (wait == lace_bar.wait) {} // wait - lace_bar.entered[id].val = 0; // signal exit - } -} - -static void -lace_barrier_init() -{ - assert(n_workers <= BARRIER_MAX_THREADS); - memset(&lace_bar, 0, sizeof(barrier_t)); -} - -static void -lace_barrier_destroy() -{ - // wait for all to exit - for (int i=0; icpuset, HWLOC_CPUBIND_THREAD); - - // Allocate memory on our node... 
- lock_acquire(); - wt = (Worker *)hwloc_alloc_membind(topo, sizeof(Worker), pu->cpuset, HWLOC_MEMBIND_BIND, 0); - w = (WorkerP *)hwloc_alloc_membind(topo, sizeof(WorkerP), pu->cpuset, HWLOC_MEMBIND_BIND, 0); - if (wt == NULL || w == NULL || (w->dq = (Task*)hwloc_alloc_membind(topo, dq_size * sizeof(Task), pu->cpuset, HWLOC_MEMBIND_BIND, 0)) == NULL) { - fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n"); - exit(1); - } - lock_release(); -#else - // Allocate memory... - if (posix_memalign((void**)&wt, LINE_SIZE, sizeof(Worker)) || - posix_memalign((void**)&w, LINE_SIZE, sizeof(WorkerP)) || - posix_memalign((void**)&w->dq, LINE_SIZE, dq_size * sizeof(Task))) { - fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n"); - exit(1); - } -#endif - - // Initialize public worker data - wt->dq = w->dq; - wt->ts.v = 0; - wt->allstolen = 0; - wt->movesplit = 0; - - // Initialize private worker data - w->_public = wt; - w->end = w->dq + dq_size; - w->split = w->dq; - w->allstolen = 0; - w->worker = worker; - w->enabled = 1; - if (workers_init[worker].stack != 0) { - w->stack_trigger = ((size_t)workers_init[worker].stack) + workers_init[worker].stacksize/20; - } else { - w->stack_trigger = 0; - } - -#if LACE_COUNT_EVENTS - // Reset counters - { int k; for (k=0; kctr[k] = 0; } -#endif - - // Set pointers -#ifdef __linux__ - current_worker = w; -#else - pthread_setspecific(worker_key, w); -#endif - workers[worker] = wt; - workers_p[worker] = w; - - // Synchronize with others - lace_barrier(); - -#if LACE_PIE_TIMES - w->time = gethrtime(); - w->level = 0; -#endif -} - -#if defined(__APPLE__) && !defined(pthread_barrier_t) - -typedef int pthread_barrierattr_t; -typedef struct -{ - pthread_mutex_t mutex; - pthread_cond_t cond; - int count; - int tripCount; -} pthread_barrier_t; - -static int -pthread_barrier_init(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count) -{ - if(count == 0) - { - errno = EINVAL; - return -1; - } - if(pthread_mutex_init(&barrier->mutex, 0) < 0) - { - return -1; - } - if(pthread_cond_init(&barrier->cond, 0) < 0) - { - pthread_mutex_destroy(&barrier->mutex); - return -1; - } - barrier->tripCount = count; - barrier->count = 0; - - return 0; - (void)attr; -} - -static int -pthread_barrier_destroy(pthread_barrier_t *barrier) -{ - pthread_cond_destroy(&barrier->cond); - pthread_mutex_destroy(&barrier->mutex); - return 0; -} - -static int -pthread_barrier_wait(pthread_barrier_t *barrier) -{ - pthread_mutex_lock(&barrier->mutex); - ++(barrier->count); - if(barrier->count >= barrier->tripCount) - { - barrier->count = 0; - pthread_cond_broadcast(&barrier->cond); - pthread_mutex_unlock(&barrier->mutex); - return 1; - } - else - { - pthread_cond_wait(&barrier->cond, &(barrier->mutex)); - pthread_mutex_unlock(&barrier->mutex); - return 0; - } -} - -#endif // defined(__APPLE__) && !defined(pthread_barrier_t) - -static pthread_barrier_t suspend_barrier; -static volatile int must_suspend = 0, suspended = 0; - -void -lace_suspend() -{ - if (suspended == 0) { - suspended = 1; - must_suspend = 1; - lace_barrier(); - must_suspend = 0; - } -} - -void -lace_resume() -{ - if (suspended == 1) { - suspended = 0; - pthread_barrier_wait(&suspend_barrier); - } -} - -/** - * With set_workers, all workers 0..(N-1) are enabled and N..max are disabled. - * You can never disable the current worker or reduce the number of workers below 1. 
- */ -void -lace_disable_worker(int worker) -{ - int self = lace_get_worker()->worker; - if (worker == self) return; - if (workers_p[worker]->enabled == 1) { - workers_p[worker]->enabled = 0; - enabled_workers--; - } -} - -void -lace_enable_worker(int worker) -{ - int self = lace_get_worker()->worker; - if (worker == self) return; - if (workers_p[worker]->enabled == 0) { - workers_p[worker]->enabled = 1; - enabled_workers++; - } -} - -void -lace_set_workers(int workercount) -{ - if (workercount < 1) workercount = 1; - if (workercount > n_workers) workercount = n_workers; - enabled_workers = workercount; - int self = lace_get_worker()->worker; - if (self >= workercount) workercount--; - for (int i=0; ienabled = (i < workercount || i == self) ? 1 : 0; - } -} - -int -lace_enabled_workers() -{ - return enabled_workers; -} - -static inline uint32_t -rng(uint32_t *seed, int max) -{ - uint32_t next = *seed; - - next *= 1103515245; - next += 12345; - - *seed = next; - - return next % max; -} - -VOID_TASK_IMPL_0(lace_steal_random) -{ - Worker *victim = workers[(__lace_worker->worker + 1 + rng(&__lace_worker->seed, n_workers-1)) % n_workers]; - - YIELD_NEWFRAME(); - - PR_COUNTSTEALS(__lace_worker, CTR_steal_tries); - Worker *res = lace_steal(__lace_worker, __lace_dq_head, victim); - if (res == LACE_STOLEN) { - PR_COUNTSTEALS(__lace_worker, CTR_steals); - } else if (res == LACE_BUSY) { - PR_COUNTSTEALS(__lace_worker, CTR_steal_busy); - } -} - -VOID_TASK_IMPL_1(lace_steal_random_loop, int*, quit) -{ - while(!(*(volatile int*)quit)) { - lace_steal_random(); - - if (must_suspend) { - lace_barrier(); - do { - pthread_barrier_wait(&suspend_barrier); - } while (__lace_worker->enabled == 0); - } - } -} - -static lace_startup_cb main_cb; - -static void* -lace_main_wrapper(void *arg) -{ - lace_init_worker(0, 0); - WorkerP *self = lace_get_worker(); - -#if LACE_PIE_TIMES - self->time = gethrtime(); -#endif - - lace_time_event(self, 1); - main_cb(self, self->dq, arg); - lace_exit(); - pthread_cond_broadcast(&wait_until_done); - - return NULL; -} - -VOID_TASK_IMPL_1(lace_steal_loop, int*, quit) -{ - // Determine who I am - const int worker_id = __lace_worker->worker; - - // Prepare self, victim - Worker ** const self = &workers[worker_id]; - Worker **victim = self; - -#if LACE_PIE_TIMES - __lace_worker->time = gethrtime(); -#endif - - uint32_t seed = worker_id; - unsigned int n = n_workers; - int i=0; - - while(*(volatile int*)quit == 0) { - // Select victim - if( i>0 ) { - i--; - victim++; - if (victim == self) victim++; - if (victim >= workers + n) victim = workers; - if (victim == self) victim++; - } else { - i = rng(&seed, 40); // compute random i 0..40 - victim = workers + (rng(&seed, n-1) + worker_id + 1) % n; - } - - PR_COUNTSTEALS(__lace_worker, CTR_steal_tries); - Worker *res = lace_steal(__lace_worker, __lace_dq_head, *victim); - if (res == LACE_STOLEN) { - PR_COUNTSTEALS(__lace_worker, CTR_steals); - } else if (res == LACE_BUSY) { - PR_COUNTSTEALS(__lace_worker, CTR_steal_busy); - } - - YIELD_NEWFRAME(); - - if (must_suspend) { - lace_barrier(); - do { - pthread_barrier_wait(&suspend_barrier); - } while (__lace_worker->enabled == 0); - } - } -} - -static void* -lace_default_worker(void* arg) -{ - lace_init_worker((size_t)arg, 0); - WorkerP *__lace_worker = lace_get_worker(); - Task *__lace_dq_head = __lace_worker->dq; - lace_steal_loop(&lace_quits); - lace_time_event(__lace_worker, 9); - lace_barrier(); - return NULL; -} - -pthread_t -lace_spawn_worker(int worker, size_t stacksize, void* 
(*fun)(void*), void* arg) -{ - // Determine stack size - if (stacksize == 0) stacksize = default_stacksize; - - size_t pagesize = sysconf(_SC_PAGESIZE); - stacksize = (stacksize + pagesize - 1) & ~(pagesize - 1); // ceil(stacksize, pagesize) - -#if USE_HWLOC - // Get our logical processor - hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, worker % n_pus); - - // Allocate memory for the program stack - lock_acquire(); - void *stack_location = hwloc_alloc_membind(topo, stacksize + pagesize, pu->cpuset, HWLOC_MEMBIND_BIND, 0); - lock_release(); - if (stack_location == 0) { - fprintf(stderr, "Lace error: Unable to allocate memory for the pthread stack!\n"); - exit(1); - } -#else - void *stack_location = mmap(NULL, stacksize + pagesize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, 0, 0); - if (stack_location == MAP_FAILED) { - fprintf(stderr, "Lace error: Cannot allocate program stack, errno=%d!\n", errno); - exit(1); - } -#endif - - if (0 != mprotect(stack_location, pagesize, PROT_NONE)) { - fprintf(stderr, "Lace error: Unable to protect the allocated program stack with a guard page!\n"); - exit(1); - } - stack_location = (uint8_t *)stack_location + pagesize; // skip protected page. - if (0 != pthread_attr_setstack(&worker_attr, stack_location, stacksize)) { - fprintf(stderr, "Lace error: Unable to set the pthread stack in Lace!\n"); - exit(1); - } - - workers_init[worker].stack = stack_location; - workers_init[worker].stacksize = stacksize; - - if (fun == 0) { - fun = lace_default_worker; - arg = (void*)(size_t)worker; - } - - pthread_t res; - pthread_create(&res, &worker_attr, fun, arg); - return res; -} - -static int -get_cpu_count() -{ -#if USE_HWLOC - int count = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU); -#elif defined(sched_getaffinity) - /* Best solution: find actual available cpus */ - cpu_set_t cs; - CPU_ZERO(&cs); - sched_getaffinity(0, sizeof(cs), &cs); - int count = CPU_COUNT(&cs); -#elif defined(_SC_NPROCESSORS_ONLN) - /* Fallback */ - int count = sysconf(_SC_NPROCESSORS_ONLN); -#else - /* Okay... */ - int count = 1; -#endif - return count < 1 ? 
1 : count; -} - -void -lace_set_verbosity(int level) -{ - verbosity = level; -} - -void -lace_init(int n, size_t dqsize) -{ -#if USE_HWLOC - hwloc_topology_init(&topo); - hwloc_topology_load(topo); - - n_nodes = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_NODE); - n_cores = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_CORE); - n_pus = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU); -#endif - - // Initialize globals - n_workers = n; - if (n_workers == 0) n_workers = get_cpu_count(); - enabled_workers = n_workers; - if (dqsize != 0) default_dqsize = dqsize; - lace_quits = 0; - - // Create barrier for all workers - lace_barrier_init(); - - // Create suspend barrier - pthread_barrier_init(&suspend_barrier, NULL, n_workers); - - // Allocate array with all workers - if (posix_memalign((void**)&workers, LINE_SIZE, n_workers*sizeof(Worker*)) != 0 || - posix_memalign((void**)&workers_p, LINE_SIZE, n_workers*sizeof(WorkerP*)) != 0) { - fprintf(stderr, "Lace error: unable to allocate memory!\n"); - exit(1); - } - - // Create pthread key -#ifndef __linux__ - pthread_key_create(&worker_key, NULL); -#endif - - // Prepare structures for thread creation - pthread_attr_init(&worker_attr); - - // Set contention scope to system (instead of process) - pthread_attr_setscope(&worker_attr, PTHREAD_SCOPE_SYSTEM); - - // Get default stack size - if (pthread_attr_getstacksize(&worker_attr, &default_stacksize) != 0) { - fprintf(stderr, "Lace warning: pthread_attr_getstacksize returned error!\n"); - default_stacksize = 1048576; // 1 megabyte default - } - - if (verbosity) { -#if USE_HWLOC - fprintf(stderr, "Initializing Lace, %u nodes, %u cores, %u logical processors, %d workers.\n", n_nodes, n_cores, n_pus, n_workers); -#else - fprintf(stderr, "Initializing Lace, %d workers.\n", n_workers); -#endif - } - - // Prepare lace_init structure - workers_init = (struct lace_worker_init*)calloc(1, sizeof(struct lace_worker_init) * n_workers); - - lace_newframe.t = NULL; - -#if LACE_PIE_TIMES - // Initialize counters for pie times - us_elapsed_start(); - count_at_start = gethrtime(); -#endif -} - -void -lace_startup(size_t stacksize, lace_startup_cb cb, void *arg) -{ - if (stacksize == 0) stacksize = default_stacksize; - - if (verbosity) { - if (cb != 0) { - fprintf(stderr, "Lace startup, creating %d worker threads with program stack %zu bytes.\n", n_workers, stacksize); - } else if (n_workers == 1) { - fprintf(stderr, "Lace startup, creating 0 worker threads.\n"); - } else { - fprintf(stderr, "Lace startup, creating %d worker threads with program stack %zu bytes.\n", n_workers-1, stacksize); - } - } - - /* Spawn workers */ - int i; - for (i=1; ictr[j] = 0; - } - } - -#if LACE_PIE_TIMES - for (i=0;itime = gethrtime(); - if (i != 0) workers_p[i]->level = 0; - } - - us_elapsed_start(); - count_at_start = gethrtime(); -#endif -#endif -} - -void -lace_count_report_file(FILE *file) -{ -#if LACE_COUNT_EVENTS - int i; - size_t j; - - for (j=0;jctr; - for (j=0;jctr[CTR_tasks]); - } - fprintf(file, "Tasks (sum): %zu\n", ctr_all[CTR_tasks]); - fprintf(file, "\n"); -#endif - -#if LACE_COUNT_STEALS - for (i=0;ictr[CTR_steals], workers_p[i]->ctr[CTR_steal_busy], - workers_p[i]->ctr[CTR_steal_tries], workers_p[i]->ctr[CTR_leaps], - workers_p[i]->ctr[CTR_leap_busy], workers_p[i]->ctr[CTR_leap_tries]); - } - fprintf(file, "Steals (sum): %zu good/%zu busy of %zu tries; leaps: %zu good/%zu busy of %zu tries\n", - ctr_all[CTR_steals], ctr_all[CTR_steal_busy], - ctr_all[CTR_steal_tries], ctr_all[CTR_leaps], - ctr_all[CTR_leap_busy], 
ctr_all[CTR_leap_tries]); - fprintf(file, "\n"); -#endif - -#if LACE_COUNT_STEALS && LACE_COUNT_TASKS - for (i=0;ictr[CTR_tasks]/(workers_p[i]->ctr[CTR_steals]+workers_p[i]->ctr[CTR_leaps])); - } - fprintf(file, "Tasks per steal (sum): %zu\n", ctr_all[CTR_tasks]/(ctr_all[CTR_steals]+ctr_all[CTR_leaps])); - fprintf(file, "\n"); -#endif - -#if LACE_COUNT_SPLITS - for (i=0;ictr[CTR_split_shrink], workers_p[i]->ctr[CTR_split_grow], workers_p[i]->ctr[CTR_split_req]); - } - fprintf(file, "Splits (sum): %zu shrinks, %zu grows, %zu outgoing requests\n", - ctr_all[CTR_split_shrink], ctr_all[CTR_split_grow], ctr_all[CTR_split_req]); - fprintf(file, "\n"); -#endif - -#if LACE_PIE_TIMES - count_at_end = gethrtime(); - - uint64_t count_per_ms = (count_at_end - count_at_start) / (us_elapsed() / 1000); - double dcpm = (double)count_per_ms; - - uint64_t sum_count; - sum_count = ctr_all[CTR_init] + ctr_all[CTR_wapp] + ctr_all[CTR_lapp] + ctr_all[CTR_wsteal] + ctr_all[CTR_lsteal] - + ctr_all[CTR_close] + ctr_all[CTR_wstealsucc] + ctr_all[CTR_lstealsucc] + ctr_all[CTR_wsignal] - + ctr_all[CTR_lsignal]; - - fprintf(file, "Measured clock (tick) frequency: %.2f GHz\n", count_per_ms / 1000000.0); - fprintf(file, "Aggregated time per pie slice, total time: %.2f CPU seconds\n\n", sum_count / (1000*dcpm)); - - for (i=0;ictr[CTR_init] / dcpm); - fprintf(file, "Steal work (%d): %10.2f ms\n", i, workers_p[i]->ctr[CTR_wapp] / dcpm); - fprintf(file, "Leap work (%d): %10.2f ms\n", i, workers_p[i]->ctr[CTR_lapp] / dcpm); - fprintf(file, "Steal overhead (%d): %10.2f ms\n", i, (workers_p[i]->ctr[CTR_wstealsucc]+workers_p[i]->ctr[CTR_wsignal]) / dcpm); - fprintf(file, "Leap overhead (%d): %10.2f ms\n", i, (workers_p[i]->ctr[CTR_lstealsucc]+workers_p[i]->ctr[CTR_lsignal]) / dcpm); - fprintf(file, "Steal search (%d): %10.2f ms\n", i, (workers_p[i]->ctr[CTR_wsteal]-workers_p[i]->ctr[CTR_wstealsucc]-workers_p[i]->ctr[CTR_wsignal]) / dcpm); - fprintf(file, "Leap search (%d): %10.2f ms\n", i, (workers_p[i]->ctr[CTR_lsteal]-workers_p[i]->ctr[CTR_lstealsucc]-workers_p[i]->ctr[CTR_lsignal]) / dcpm); - fprintf(file, "Exit time (%d): %10.2f ms\n", i, workers_p[i]->ctr[CTR_close] / dcpm); - fprintf(file, "\n"); - } - - fprintf(file, "Startup time (sum): %10.2f ms\n", ctr_all[CTR_init] / dcpm); - fprintf(file, "Steal work (sum): %10.2f ms\n", ctr_all[CTR_wapp] / dcpm); - fprintf(file, "Leap work (sum): %10.2f ms\n", ctr_all[CTR_lapp] / dcpm); - fprintf(file, "Steal overhead (sum): %10.2f ms\n", (ctr_all[CTR_wstealsucc]+ctr_all[CTR_wsignal]) / dcpm); - fprintf(file, "Leap overhead (sum): %10.2f ms\n", (ctr_all[CTR_lstealsucc]+ctr_all[CTR_lsignal]) / dcpm); - fprintf(file, "Steal search (sum): %10.2f ms\n", (ctr_all[CTR_wsteal]-ctr_all[CTR_wstealsucc]-ctr_all[CTR_wsignal]) / dcpm); - fprintf(file, "Leap search (sum): %10.2f ms\n", (ctr_all[CTR_lsteal]-ctr_all[CTR_lstealsucc]-ctr_all[CTR_lsignal]) / dcpm); - fprintf(file, "Exit time (sum): %10.2f ms\n", ctr_all[CTR_close] / dcpm); - fprintf(file, "\n" ); -#endif -#endif - return; - (void)file; -} - -void lace_exit() -{ - lace_time_event(lace_get_worker(), 2); - - // first suspend all other threads - lace_suspend(); - - // now enable all threads and tell them to quit - lace_set_workers(n_workers); - lace_quits = 1; - - // now resume all threads and wait until they all pass the barrier - lace_resume(); - lace_barrier(); - - // finally, destroy the barriers - lace_barrier_destroy(); - pthread_barrier_destroy(&suspend_barrier); - -#if LACE_COUNT_EVENTS - lace_count_report_file(stderr); -#endif 
-} - -void -lace_exec_in_new_frame(WorkerP *__lace_worker, Task *__lace_dq_head, Task *root) -{ - TailSplit old; - uint8_t old_as; - - // save old tail, split, allstolen and initiate new frame - { - Worker *wt = __lace_worker->_public; - - old_as = wt->allstolen; - wt->allstolen = 1; - old.ts.split = wt->ts.ts.split; - wt->ts.ts.split = 0; - mfence(); - old.ts.tail = wt->ts.ts.tail; - - TailSplit ts_new; - ts_new.ts.tail = __lace_dq_head - __lace_worker->dq; - ts_new.ts.split = __lace_dq_head - __lace_worker->dq; - wt->ts.v = ts_new.v; - - __lace_worker->split = __lace_dq_head; - __lace_worker->allstolen = 1; - } - - // wait until all workers are ready - lace_barrier(); - - // execute task - root->f(__lace_worker, __lace_dq_head, root); - compiler_barrier(); - - // wait until all workers are back (else they may steal from previous frame) - lace_barrier(); - - // restore tail, split, allstolen - { - Worker *wt = __lace_worker->_public; - wt->allstolen = old_as; - wt->ts.v = old.v; - __lace_worker->split = __lace_worker->dq + old.ts.split; - __lace_worker->allstolen = old_as; - } -} - -VOID_TASK_IMPL_2(lace_steal_loop_root, Task*, t, int*, done) -{ - t->f(__lace_worker, __lace_dq_head, t); - *done = 1; -} - -VOID_TASK_2(lace_together_helper, Task*, t, volatile int*, finished) -{ - t->f(__lace_worker, __lace_dq_head, t); - - for (;;) { - int f = *finished; - if (cas(finished, f, f-1)) break; - } - - while (*finished != 0) STEAL_RANDOM(); -} - -static void -lace_sync_and_exec(WorkerP *__lace_worker, Task *__lace_dq_head, Task *root) -{ - // wait until other workers have made a local copy - lace_barrier(); - - // one worker sets t to 0 again - if (LACE_WORKER_ID == 0) lace_newframe.t = 0; - // else while (*(volatile Task**)&lace_newframe.t != 0) {} - - // the above line is commented out since lace_exec_in_new_frame includes - // a lace_barrier before the task is executed - - lace_exec_in_new_frame(__lace_worker, __lace_dq_head, root); -} - -void -lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head) -{ - // make a local copy of the task - Task _t; - memcpy(&_t, lace_newframe.t, sizeof(Task)); - - // wait until all workers have made a local copy - lace_barrier(); - - // one worker sets t to 0 again - if (LACE_WORKER_ID == 0) lace_newframe.t = 0; - // else while (*(volatile Task**)&lace_newframe.t != 0) {} - - // the above line is commented out since lace_exec_in_new_frame includes - // a lace_barrier before the task is executed - - lace_exec_in_new_frame(__lace_worker, __lace_dq_head, &_t); -} - -void -lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t) -{ - /* synchronization integer */ - int done = n_workers; - - /* wrap task in lace_together_helper */ - Task _t2; - TD_lace_together_helper *t2 = (TD_lace_together_helper *)&_t2; - t2->f = lace_together_helper_WRAP; - t2->thief = THIEF_TASK; - t2->d.args.arg_1 = t; - t2->d.args.arg_2 = &done; - - while (!cas(&lace_newframe.t, 0, &_t2)) lace_yield(__lace_worker, __lace_dq_head); - lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2); -} - -void -lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t) -{ - /* synchronization integer */ - int done = 0; - - /* wrap task in lace_steal_loop_root */ - Task _t2; - TD_lace_steal_loop_root *t2 = (TD_lace_steal_loop_root *)&_t2; - t2->f = lace_steal_loop_root_WRAP; - t2->thief = THIEF_TASK; - t2->d.args.arg_1 = t; - t2->d.args.arg_2 = &done; - - /* and create the lace_steal_loop task for other workers */ - Task _s; - TD_lace_steal_loop *s = (TD_lace_steal_loop 
*)&_s; - s->f = &lace_steal_loop_WRAP; - s->thief = THIEF_TASK; - s->d.args.arg_1 = &done; - - compiler_barrier(); - - while (!cas(&lace_newframe.t, 0, &_s)) lace_yield(__lace_worker, __lace_dq_head); - lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2); -} diff --git a/resources/3rdparty/sylvan/src/lace.h b/resources/3rdparty/sylvan/src/lace.h deleted file mode 100644 index a4f343ca8..000000000 --- a/resources/3rdparty/sylvan/src/lace.h +++ /dev/null @@ -1,2741 +0,0 @@ -/* - * Copyright 2013-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the License); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include -#include /* for pthread_t */ - -#ifndef __LACE_H__ -#define __LACE_H__ - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/* Some flags */ - -#ifndef LACE_DEBUG_PROGRAMSTACK /* Write to stderr when 95% program stack reached */ -#define LACE_DEBUG_PROGRAMSTACK 0 -#endif - -#ifndef LACE_LEAP_RANDOM /* Use random leaping when leapfrogging fails */ -#define LACE_LEAP_RANDOM 1 -#endif - -#ifndef LACE_PIE_TIMES /* Record time spent stealing and leapfrogging */ -#define LACE_PIE_TIMES 0 -#endif - -#ifndef LACE_COUNT_TASKS /* Count number of tasks executed */ -#define LACE_COUNT_TASKS 0 -#endif - -#ifndef LACE_COUNT_STEALS /* Count number of steals performed */ -#define LACE_COUNT_STEALS 0 -#endif - -#ifndef LACE_COUNT_SPLITS /* Count number of times the split point is moved */ -#define LACE_COUNT_SPLITS 0 -#endif - -#ifndef LACE_COUNT_EVENTS -#define LACE_COUNT_EVENTS (LACE_PIE_TIMES || LACE_COUNT_TASKS || LACE_COUNT_STEALS || LACE_COUNT_SPLITS) -#endif - -/* Typical cacheline size of system architectures */ -#ifndef LINE_SIZE -#define LINE_SIZE 64 -#endif - -/* The size of a pointer, 8 bytes on a 64-bit architecture */ -#define P_SZ (sizeof(void *)) - -#define PAD(x,b) ( ( (b) - ((x)%(b)) ) & ((b)-1) ) /* b must be power of 2 */ -#define ROUND(x,b) ( (x) + PAD( (x), (b) ) ) - -/* The size is in bytes. Note that this is without the extra overhead from Lace. - The value must be greater than or equal to the maximum size of your tasks. - The task size is the maximum of the size of the result or of the sum of the parameter sizes. 
*/ -#ifndef LACE_TASKSIZE -#define LACE_TASKSIZE (6)*P_SZ -#endif - -/* Some fences */ -#ifndef compiler_barrier -#define compiler_barrier() { asm volatile("" ::: "memory"); } -#endif - -#ifndef mfence -#define mfence() { asm volatile("mfence" ::: "memory"); } -#endif - -/* Compiler specific branch prediction optimization */ -#ifndef likely -#define likely(x) __builtin_expect((x),1) -#endif - -#ifndef unlikely -#define unlikely(x) __builtin_expect((x),0) -#endif - -#if LACE_PIE_TIMES -/* High resolution timer */ -static inline uint64_t gethrtime() -{ - uint32_t hi, lo; - asm volatile ("rdtsc" : "=a"(lo), "=d"(hi) :: "memory"); - return (uint64_t)hi<<32 | lo; -} -#endif - -#if LACE_COUNT_EVENTS -void lace_count_reset(); -void lace_count_report_file(FILE *file); -#endif - -#if LACE_COUNT_TASKS -#define PR_COUNTTASK(s) PR_INC(s,CTR_tasks) -#else -#define PR_COUNTTASK(s) /* Empty */ -#endif - -#if LACE_COUNT_STEALS -#define PR_COUNTSTEALS(s,i) PR_INC(s,i) -#else -#define PR_COUNTSTEALS(s,i) /* Empty */ -#endif - -#if LACE_COUNT_SPLITS -#define PR_COUNTSPLITS(s,i) PR_INC(s,i) -#else -#define PR_COUNTSPLITS(s,i) /* Empty */ -#endif - -#if LACE_COUNT_EVENTS -#define PR_ADD(s,i,k) ( ((s)->ctr[i])+=k ) -#else -#define PR_ADD(s,i,k) /* Empty */ -#endif -#define PR_INC(s,i) PR_ADD(s,i,1) - -typedef enum { -#ifdef LACE_COUNT_TASKS - CTR_tasks, /* Number of tasks spawned */ -#endif -#ifdef LACE_COUNT_STEALS - CTR_steal_tries, /* Number of steal attempts */ - CTR_leap_tries, /* Number of leap attempts */ - CTR_steals, /* Number of succesful steals */ - CTR_leaps, /* Number of succesful leaps */ - CTR_steal_busy, /* Number of steal busies */ - CTR_leap_busy, /* Number of leap busies */ -#endif -#ifdef LACE_COUNT_SPLITS - CTR_split_grow, /* Number of split right */ - CTR_split_shrink,/* Number of split left */ - CTR_split_req, /* Number of split requests */ -#endif - CTR_fast_sync, /* Number of fast syncs */ - CTR_slow_sync, /* Number of slow syncs */ -#ifdef LACE_PIE_TIMES - CTR_init, /* Timer for initialization */ - CTR_close, /* Timer for shutdown */ - CTR_wapp, /* Timer for application code (steal) */ - CTR_lapp, /* Timer for application code (leap) */ - CTR_wsteal, /* Timer for steal code (steal) */ - CTR_lsteal, /* Timer for steal code (leap) */ - CTR_wstealsucc, /* Timer for succesful steal code (steal) */ - CTR_lstealsucc, /* Timer for succesful steal code (leap) */ - CTR_wsignal, /* Timer for signal after work (steal) */ - CTR_lsignal, /* Timer for signal after work (leap) */ -#endif - CTR_MAX -} CTR_index; - -struct _WorkerP; -struct _Worker; -struct _Task; - -#define THIEF_EMPTY ((struct _Worker*)0x0) -#define THIEF_TASK ((struct _Worker*)0x1) -#define THIEF_COMPLETED ((struct _Worker*)0x2) - -#define TASK_COMMON_FIELDS(type) \ - void (*f)(struct _WorkerP *, struct _Task *, struct type *); \ - struct _Worker * volatile thief; - -struct __lace_common_fields_only { TASK_COMMON_FIELDS(_Task) }; -#define LACE_COMMON_FIELD_SIZE sizeof(struct __lace_common_fields_only) - -typedef struct _Task { - TASK_COMMON_FIELDS(_Task) - char p1[PAD(LACE_COMMON_FIELD_SIZE, P_SZ)]; - char d[LACE_TASKSIZE]; - char p2[PAD(ROUND(LACE_COMMON_FIELD_SIZE, P_SZ) + LACE_TASKSIZE, LINE_SIZE)]; -} Task; - -typedef union __attribute__((packed)) { - struct { - uint32_t tail; - uint32_t split; - } ts; - uint64_t v; -} TailSplit; - -typedef struct _Worker { - Task *dq; - TailSplit ts; - uint8_t allstolen; - - char pad1[PAD(P_SZ+sizeof(TailSplit)+1, LINE_SIZE)]; - - uint8_t movesplit; -} Worker; - -typedef struct _WorkerP { - 
Task *dq; // same as dq - Task *split; // same as dq+ts.ts.split - Task *end; // dq+dq_size - Worker *_public; - size_t stack_trigger; // for stack overflow detection - int16_t worker; // what is my worker id? - uint8_t allstolen; // my allstolen - volatile int enabled; // if this worker is enabled - -#if LACE_COUNT_EVENTS - uint64_t ctr[CTR_MAX]; // counters - volatile uint64_t time; - volatile int level; -#endif - - uint32_t seed; // my random seed (for lace_steal_random) -} WorkerP; - -#define LACE_TYPEDEF_CB(t, f, ...) typedef t (*f)(WorkerP *, Task *, ##__VA_ARGS__); -LACE_TYPEDEF_CB(void, lace_startup_cb, void*); - -/** - * Set verbosity level (0 = no startup messages, 1 = startup messages) - * Default level: 0 - */ -void lace_set_verbosity(int level); - -/** - * Initialize master structures for Lace with workers - * and default deque size of . - * Does not create new threads. - * Tries to detect number of cpus, if n_workers equals 0. - */ -void lace_init(int n_workers, size_t dqsize); - -/** - * After lace_init, start all worker threads. - * If cb,arg are set, suspend this thread, call cb(arg) in a new thread - * and exit Lace upon return - * Otherwise, the current thread is initialized as a Lace thread. - */ -void lace_startup(size_t stacksize, lace_startup_cb, void* arg); - -/** - * Initialize current thread as worker and allocate a deque with size . - * Use this when manually creating worker threads. - */ -void lace_init_worker(int idx, size_t dqsize); - -/** - * Manually spawn worker with (optional) program stack size . - * If fun,arg are set, overrides default startup method. - * Typically: for workers 1...(n_workers-1): lace_spawn_worker(i, stack_size, 0, 0); - */ -pthread_t lace_spawn_worker(int idx, size_t stacksize, void *(*fun)(void*), void* arg); - -/** - * Steal a random task. - */ -#define lace_steal_random() CALL(lace_steal_random) -void lace_steal_random_CALL(WorkerP*, Task*); - -/** - * Steal random tasks until parameter *quit is set - * Note: task declarations at end; quit is of type int* - */ -#define lace_steal_random_loop(quit) CALL(lace_steal_random_loop, quit) -#define lace_steal_loop(quit) CALL(lace_steal_loop, quit) - -/** - * Barrier (all workers must enter it before progressing) - */ -void lace_barrier(); - -/** - * Suspend and resume all other workers. - * May only be used when all other workers are idle. - */ -void lace_suspend(); -void lace_resume(); - -/** - * When all tasks are suspended, workers can be temporarily disabled. - * With set_workers, all workers 0..(N-1) are enabled and N..max are disabled. - * You can never disable the current worker or reduce the number of workers below 1. - * You cannot add workers. - */ -void lace_disable_worker(int worker); -void lace_enable_worker(int worker); -void lace_set_workers(int workercount); -int lace_enabled_workers(); - -/** - * Retrieve number of Lace workers - */ -size_t lace_workers(); - -/** - * Retrieve default program stack size - */ -size_t lace_default_stacksize(); - -/** - * Retrieve current worker. - */ -WorkerP *lace_get_worker(); - -/** - * Retrieve the current head of the deque - */ -Task *lace_get_head(WorkerP *); - -/** - * Exit Lace. Automatically called when started with cb,arg. - */ -void lace_exit(); - -#define LACE_STOLEN ((Worker*)0) -#define LACE_BUSY ((Worker*)1) -#define LACE_NOWORK ((Worker*)2) - -#define TASK(f) ( f##_CALL ) -#define WRAP(f, ...) 
( f((WorkerP *)__lace_worker, (Task *)__lace_dq_head, ##__VA_ARGS__) ) -#define SYNC(f) ( __lace_dq_head--, WRAP(f##_SYNC) ) -#define DROP() ( __lace_dq_head--, WRAP(lace_drop) ) -#define SPAWN(f, ...) ( WRAP(f##_SPAWN, ##__VA_ARGS__), __lace_dq_head++ ) -#define CALL(f, ...) ( WRAP(f##_CALL, ##__VA_ARGS__) ) -#define TOGETHER(f, ...) ( WRAP(f##_TOGETHER, ##__VA_ARGS__) ) -#define NEWFRAME(f, ...) ( WRAP(f##_NEWFRAME, ##__VA_ARGS__) ) -#define STEAL_RANDOM() ( CALL(lace_steal_random) ) -#define LACE_WORKER_ID ( __lace_worker->worker ) - -/* Use LACE_ME to initialize Lace variables, in case you want to call multiple Lace tasks */ -#define LACE_ME WorkerP * __attribute__((unused)) __lace_worker = lace_get_worker(); Task * __attribute__((unused)) __lace_dq_head = lace_get_head(__lace_worker); - -#define TASK_IS_STOLEN(t) ((size_t)t->thief > 1) -#define TASK_IS_COMPLETED(t) ((size_t)t->thief == 2) -#define TASK_RESULT(t) (&t->d[0]) - -#if LACE_DEBUG_PROGRAMSTACK -static inline void CHECKSTACK(WorkerP *w) -{ - if (w->stack_trigger != 0) { - register size_t rsp; - asm volatile("movq %%rsp, %0" : "+r"(rsp) : : "cc"); - if (rsp < w->stack_trigger) { - fputs("Warning: program stack 95% used!\n", stderr); - w->stack_trigger = 0; - } - } -} -#else -#define CHECKSTACK(w) {} -#endif - -typedef struct -{ - Task *t; - uint8_t all; - char pad[64-sizeof(Task *)-sizeof(uint8_t)]; -} lace_newframe_t; - -extern lace_newframe_t lace_newframe; - -/** - * Internal function to start participating on a task in a new frame - * Usually, is set to NULL and the task is copied from lace_newframe.t - * It is possible to override the start task by setting . - */ -void lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task); -void lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task); - -void lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head); -#define YIELD_NEWFRAME() { if (unlikely((*(volatile Task**)&lace_newframe.t) != NULL)) lace_yield(__lace_worker, __lace_dq_head); } - -#if LACE_PIE_TIMES -static void lace_time_event( WorkerP *w, int event ) -{ - uint64_t now = gethrtime(), - prev = w->time; - - switch( event ) { - - // Enter application code - case 1 : - if( w->level /* level */ == 0 ) { - PR_ADD( w, CTR_init, now - prev ); - w->level = 1; - } else if( w->level /* level */ == 1 ) { - PR_ADD( w, CTR_wsteal, now - prev ); - PR_ADD( w, CTR_wstealsucc, now - prev ); - } else { - PR_ADD( w, CTR_lsteal, now - prev ); - PR_ADD( w, CTR_lstealsucc, now - prev ); - } - break; - - // Exit application code - case 2 : - if( w->level /* level */ == 1 ) { - PR_ADD( w, CTR_wapp, now - prev ); - } else { - PR_ADD( w, CTR_lapp, now - prev ); - } - break; - - // Enter sync on stolen - case 3 : - if( w->level /* level */ == 1 ) { - PR_ADD( w, CTR_wapp, now - prev ); - } else { - PR_ADD( w, CTR_lapp, now - prev ); - } - w->level++; - break; - - // Exit sync on stolen - case 4 : - if( w->level /* level */ == 1 ) { - fprintf( stderr, "This should not happen, level = %d\n", w->level ); - } else { - PR_ADD( w, CTR_lsteal, now - prev ); - } - w->level--; - break; - - // Return from failed steal - case 7 : - if( w->level /* level */ == 0 ) { - PR_ADD( w, CTR_init, now - prev ); - } else if( w->level /* level */ == 1 ) { - PR_ADD( w, CTR_wsteal, now - prev ); - } else { - PR_ADD( w, CTR_lsteal, now - prev ); - } - break; - - // Signalling time - case 8 : - if( w->level /* level */ == 1 ) { - PR_ADD( w, CTR_wsignal, now - prev ); - PR_ADD( w, CTR_wsteal, now - prev ); - } else { - PR_ADD( w, 
CTR_lsignal, now - prev ); - PR_ADD( w, CTR_lsteal, now - prev ); - } - break; - - // Done - case 9 : - if( w->level /* level */ == 0 ) { - PR_ADD( w, CTR_init, now - prev ); - } else { - PR_ADD( w, CTR_close, now - prev ); - } - break; - - default: return; - } - - w->time = now; -} -#else -#define lace_time_event( w, e ) /* Empty */ -#endif - -static Worker* __attribute__((noinline)) -lace_steal(WorkerP *self, Task *__dq_head, Worker *victim) -{ - if (!victim->allstolen) { - /* Must be a volatile. In GCC 4.8, if it is not declared volatile, the - compiler will optimize extra memory accesses to victim->ts instead - of comparing the local values ts.ts.tail and ts.ts.split, causing - thieves to steal non existent tasks! */ - register TailSplit ts; - ts.v = *(volatile uint64_t *)&victim->ts.v; - if (ts.ts.tail < ts.ts.split) { - register TailSplit ts_new; - ts_new.v = ts.v; - ts_new.ts.tail++; - if (__sync_bool_compare_and_swap(&victim->ts.v, ts.v, ts_new.v)) { - // Stolen - Task *t = &victim->dq[ts.ts.tail]; - t->thief = self->_public; - lace_time_event(self, 1); - t->f(self, __dq_head, t); - lace_time_event(self, 2); - t->thief = THIEF_COMPLETED; - lace_time_event(self, 8); - return LACE_STOLEN; - } - - lace_time_event(self, 7); - return LACE_BUSY; - } - - if (victim->movesplit == 0) { - victim->movesplit = 1; - PR_COUNTSPLITS(self, CTR_split_req); - } - } - - lace_time_event(self, 7); - return LACE_NOWORK; -} - -static int -lace_shrink_shared(WorkerP *w) -{ - Worker *wt = w->_public; - TailSplit ts; - ts.v = wt->ts.v; /* Force in 1 memory read */ - uint32_t tail = ts.ts.tail; - uint32_t split = ts.ts.split; - - if (tail != split) { - uint32_t newsplit = (tail + split)/2; - wt->ts.ts.split = newsplit; - mfence(); - tail = *(volatile uint32_t *)&(wt->ts.ts.tail); - if (tail != split) { - if (unlikely(tail > newsplit)) { - newsplit = (tail + split) / 2; - wt->ts.ts.split = newsplit; - } - w->split = w->dq + newsplit; - PR_COUNTSPLITS(w, CTR_split_shrink); - return 0; - } - } - - wt->allstolen = 1; - w->allstolen = 1; - return 1; -} - -static inline void -lace_leapfrog(WorkerP *__lace_worker, Task *__lace_dq_head) -{ - lace_time_event(__lace_worker, 3); - Task *t = __lace_dq_head; - Worker *thief = t->thief; - if (thief != THIEF_COMPLETED) { - while ((size_t)thief <= 1) thief = t->thief; - - /* PRE-LEAP: increase head again */ - __lace_dq_head += 1; - - /* Now leapfrog */ - int attempts = 32; - while (thief != THIEF_COMPLETED) { - PR_COUNTSTEALS(__lace_worker, CTR_leap_tries); - Worker *res = lace_steal(__lace_worker, __lace_dq_head, thief); - if (res == LACE_NOWORK) { - YIELD_NEWFRAME(); - if ((LACE_LEAP_RANDOM) && (--attempts == 0)) { lace_steal_random(); attempts = 32; } - } else if (res == LACE_STOLEN) { - PR_COUNTSTEALS(__lace_worker, CTR_leaps); - } else if (res == LACE_BUSY) { - PR_COUNTSTEALS(__lace_worker, CTR_leap_busy); - } - compiler_barrier(); - thief = t->thief; - } - - /* POST-LEAP: really pop the finished task */ - /* no need to decrease __lace_dq_head, since it is a local variable */ - compiler_barrier(); - if (__lace_worker->allstolen == 0) { - /* Assume: tail = split = head (pre-pop) */ - /* Now we do a real pop ergo either decrease tail,split,head or declare allstolen */ - Worker *wt = __lace_worker->_public; - wt->allstolen = 1; - __lace_worker->allstolen = 1; - } - } - - compiler_barrier(); - t->thief = THIEF_EMPTY; - lace_time_event(__lace_worker, 4); -} - -static __attribute__((noinline)) -void lace_drop_slow(WorkerP *w, Task *__dq_head) -{ - if ((w->allstolen) || 
(w->split > __dq_head && lace_shrink_shared(w))) lace_leapfrog(w, __dq_head); -} - -static inline __attribute__((unused)) -void lace_drop(WorkerP *w, Task *__dq_head) -{ - if (likely(0 == w->_public->movesplit)) { - if (likely(w->split <= __dq_head)) { - return; - } - } - lace_drop_slow(w, __dq_head); -} - - - -// Task macros for tasks of arity 0 - -#define TASK_DECL_0(RTYPE, NAME) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { RTYPE res; } d; \ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -RTYPE NAME##_CALL(WorkerP *, Task * ); \ -static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ -static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head ) \ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head ) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - \ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ((TD_##NAME *)t)->d.res; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head ) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - \ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ((TD_##NAME *)t)->d.res; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head ); \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = 
THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head ); \ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define TASK_IMPL_0(RTYPE, NAME) \ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - t->d.res = NAME##_CALL(w, __dq_head ); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head ); \ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head ) \ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head ); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) )\ - -#define TASK_0(RTYPE, NAME) TASK_DECL_0(RTYPE, NAME) TASK_IMPL_0(RTYPE, NAME) - -#define VOID_TASK_DECL_0(NAME) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - \ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -void NAME##_CALL(WorkerP *, Task * ); \ -static inline void NAME##_SYNC(WorkerP *, Task *); \ -static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head ) \ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head ) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - \ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head ) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - \ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ 
- compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head ); \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head ); \ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define VOID_TASK_IMPL_0(NAME) \ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - NAME##_CALL(w, __dq_head ); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head ); \ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -void NAME##_CALL(WorkerP *w, Task *__dq_head ) \ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head ); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) )\ - -#define VOID_TASK_0(NAME) VOID_TASK_DECL_0(NAME) VOID_TASK_IMPL_0(NAME) - - -// Task macros for tasks of arity 1 - -#define TASK_DECL_1(RTYPE, NAME, ATYPE_1) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; } args; RTYPE res; } d; \ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1); \ -static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ -static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; \ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ((TD_##NAME *)t)->d.res; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - 
t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; \ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ((TD_##NAME *)t)->d.res; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define TASK_IMPL_1(RTYPE, NAME, ATYPE_1, ARG_1) \ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1); \ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1)\ - -#define TASK_1(RTYPE, NAME, ATYPE_1, ARG_1) TASK_DECL_1(RTYPE, NAME, ATYPE_1) TASK_IMPL_1(RTYPE, NAME, ATYPE_1, ARG_1) - -#define VOID_TASK_DECL_1(NAME, ATYPE_1) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; } args; } d; \ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1); \ -static inline void NAME##_SYNC(WorkerP *, Task *); \ -static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; \ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; \ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define VOID_TASK_IMPL_1(NAME, ATYPE_1, ARG_1) \ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1); \ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -void NAME##_CALL(WorkerP *w, Task *__dq_head , 
ATYPE_1 arg_1) \ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1)\ - -#define VOID_TASK_1(NAME, ATYPE_1, ARG_1) VOID_TASK_DECL_1(NAME, ATYPE_1) VOID_TASK_IMPL_1(NAME, ATYPE_1, ARG_1) - - -// Task macros for tasks of arity 2 - -#define TASK_DECL_2(RTYPE, NAME, ATYPE_1, ATYPE_2) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; } args; RTYPE res; } d; \ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2); \ -static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ -static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ((TD_##NAME *)t)->d.res; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ((TD_##NAME *)t)->d.res; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME 
*)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define TASK_IMPL_2(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) \ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2); \ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2)\ - -#define TASK_2(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) TASK_DECL_2(RTYPE, NAME, ATYPE_1, ATYPE_2) TASK_IMPL_2(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) - -#define VOID_TASK_DECL_2(NAME, ATYPE_1, ATYPE_2) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; } args; } d; \ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2); \ -static inline void NAME##_SYNC(WorkerP *, Task *); \ -static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define VOID_TASK_IMPL_2(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) \ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void 
NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2); \ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2)\ - -#define VOID_TASK_2(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) VOID_TASK_DECL_2(NAME, ATYPE_1, ATYPE_2) VOID_TASK_IMPL_2(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) - - -// Task macros for tasks of arity 3 - -#define TASK_DECL_3(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; } args; RTYPE res; } d;\ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3); \ -static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ -static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ((TD_##NAME *)t)->d.res; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - 
lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ((TD_##NAME *)t)->d.res; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define TASK_IMPL_3(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) \ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3);\ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3)\ - -#define TASK_3(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) TASK_DECL_3(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3) TASK_IMPL_3(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) - -#define VOID_TASK_DECL_3(NAME, ATYPE_1, ATYPE_2, ATYPE_3) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; } args; } d; \ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3); \ -static inline void NAME##_SYNC(WorkerP *, Task *); \ -static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define VOID_TASK_IMPL_3(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) \ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME 
*t __attribute__((unused))) \ -{ \ - NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3);\ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3)\ - -#define VOID_TASK_3(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) VOID_TASK_DECL_3(NAME, ATYPE_1, ATYPE_2, ATYPE_3) VOID_TASK_IMPL_3(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) - - -// Task macros for tasks of arity 4 - -#define TASK_DECL_4(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; } args; RTYPE res; } d;\ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4);\ -static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ -static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ((TD_##NAME *)t)->d.res; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ 
- t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ((TD_##NAME *)t)->d.res; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define TASK_IMPL_4(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4)\ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4);\ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4)\ - -#define TASK_4(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4) TASK_DECL_4(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4) TASK_IMPL_4(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4) - -#define VOID_TASK_DECL_4(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; } args; } d;\ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4);\ -static inline void NAME##_SYNC(WorkerP *, Task *); \ -static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, 
__dq_head); \ -} \ - \ - \ - -#define VOID_TASK_IMPL_4(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4)\ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4);\ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4)\ - -#define VOID_TASK_4(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4) VOID_TASK_DECL_4(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4) VOID_TASK_IMPL_4(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4) - - -// Task macros for tasks of arity 5 - -#define TASK_DECL_5(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; ATYPE_5 arg_5; } args; RTYPE res; } d;\ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5);\ -static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ -static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ((TD_##NAME *)t)->d.res; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ((TD_##NAME *)t)->d.res; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = 
(TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define TASK_IMPL_5(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5)\ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5);\ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4, arg_5); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4, ATYPE_5 ARG_5)\ - -#define TASK_5(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5) TASK_DECL_5(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5) TASK_IMPL_5(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5) - -#define VOID_TASK_DECL_5(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; ATYPE_5 arg_5; } args; } d;\ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5);\ -static inline void NAME##_SYNC(WorkerP *, Task *); \ -static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - 
return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define VOID_TASK_IMPL_5(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5)\ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5);\ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4, arg_5); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4, ATYPE_5 ARG_5)\ - -#define VOID_TASK_5(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5) VOID_TASK_DECL_5(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5) VOID_TASK_IMPL_5(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5) - - -// Task macros for tasks of arity 6 - -#define TASK_DECL_6(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6)\ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; ATYPE_5 arg_5; ATYPE_6 arg_6; } args; RTYPE res; } d;\ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6);\ -static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ -static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ((TD_##NAME *)t)->d.res; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ((TD_##NAME *)t)->d.res; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ -} \ - \ -static inline __attribute__((unused)) \ -RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* 
Commented out because we assume contract */ \ - \ - if (likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define TASK_IMPL_6(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6)\ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6);\ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4, arg_5, arg_6); \ -} \ - \ -static inline __attribute__((always_inline)) \ -RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4, ATYPE_5 ARG_5, ATYPE_6 ARG_6)\ - -#define TASK_6(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) TASK_DECL_6(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6) TASK_IMPL_6(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) - -#define VOID_TASK_DECL_6(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6) \ - \ -typedef struct _TD_##NAME { \ - TASK_COMMON_FIELDS(_TD_##NAME) \ - union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; ATYPE_5 arg_5; ATYPE_6 arg_6; } args; } d;\ -} TD_##NAME; \ - \ -/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ -typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ - \ -void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ -void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6);\ -static inline void NAME##_SYNC(WorkerP *, Task *); \ -static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ - \ -static inline __attribute__((unused)) \ -void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ -{ \ - PR_COUNTTASK(w); \ - \ - TD_##NAME *t; \ - TailSplit ts; \ - uint32_t head, split, newsplit; \ - \ - /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ - \ - t = (TD_##NAME *)__dq_head; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (unlikely(w->allstolen)) { \ - if (wt->movesplit) wt->movesplit = 0; \ - head = __dq_head - w->dq; \ - ts = (TailSplit){{head,head+1}}; \ - wt->ts.v = ts.v; \ - compiler_barrier(); \ - wt->allstolen = 0; \ - w->split = __dq_head+1; \ - w->allstolen = 0; \ - } else if (unlikely(wt->movesplit)) { \ - head = __dq_head - w->dq; \ - split = w->split - w->dq; \ - newsplit = (split + head + 2)/2; \ - wt->ts.ts.split = newsplit; \ - w->split = w->dq + newsplit; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ - \ - lace_do_newframe(w, __dq_head, &_t); \ - return ; \ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ -{ \ - Task _t; \ - TD_##NAME *t = (TD_##NAME *)&_t; \ - t->f = &NAME##_WRAP; \ - t->thief = THIEF_TASK; \ - t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ - \ - lace_do_together(w, __dq_head, &_t); \ -} \ - \ -static __attribute__((noinline)) \ -void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ -{ \ - TD_##NAME *t; \ - \ - if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ - lace_leapfrog(w, __dq_head); \ - t = (TD_##NAME *)__dq_head; \ - return ; \ - } \ - \ - compiler_barrier(); \ - \ - Worker *wt = w->_public; \ - if (wt->movesplit) { \ - Task *t = w->split; \ - size_t diff = __dq_head - t; \ - diff = (diff + 1) / 2; \ - w->split = t + diff; \ - wt->ts.ts.split += diff; \ - compiler_barrier(); \ - wt->movesplit = 0; \ - PR_COUNTSPLITS(w, CTR_split_grow); \ - } \ - \ - compiler_barrier(); \ - \ - t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ -} \ - \ -static inline __attribute__((unused)) \ -void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ -{ \ - /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ - \ - if 
(likely(0 == w->_public->movesplit)) { \ - if (likely(w->split <= __dq_head)) { \ - TD_##NAME *t = (TD_##NAME *)__dq_head; \ - t->thief = THIEF_EMPTY; \ - return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ - } \ - } \ - \ - return NAME##_SYNC_SLOW(w, __dq_head); \ -} \ - \ - \ - -#define VOID_TASK_IMPL_6(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6)\ -void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ -{ \ - NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6);\ - \ -/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ -void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ -{ \ - CHECKSTACK(w); \ - return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4, arg_5, arg_6); \ -} \ - \ -static inline __attribute__((always_inline)) \ -void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4, ATYPE_5 ARG_5, ATYPE_6 ARG_6)\ - -#define VOID_TASK_6(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) VOID_TASK_DECL_6(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6) VOID_TASK_IMPL_6(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) - - -VOID_TASK_DECL_0(lace_steal_random); -VOID_TASK_DECL_1(lace_steal_random_loop, int*); -VOID_TASK_DECL_1(lace_steal_loop, int*); -VOID_TASK_DECL_2(lace_steal_loop_root, Task *, int*); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/llmsset.c b/resources/3rdparty/sylvan/src/llmsset.c deleted file mode 100644 index 3e1e54b6a..000000000 --- a/resources/3rdparty/sylvan/src/llmsset.c +++ /dev/null @@ -1,563 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
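/*
 * A minimal usage sketch for the task macros defined above, assuming the analogous
 * arity-1 variant (TASK_1) and the SPAWN/SYNC/CALL wrapper macros used elsewhere in
 * this tree (e.g. in llmsset.c below); the task name "pfib" is purely illustrative.
 */
TASK_1(int, pfib, int, n)
{
    if (n < 2) return n;
    SPAWN(pfib, n - 1);         /* push a stealable task descriptor onto this worker's deque */
    int a = CALL(pfib, n - 2);  /* execute the other half directly on this worker            */
    int b = SYNC(pfib);         /* fast path pops the descriptor back; otherwise leapfrog    */
    return a + b;
}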
- */ - -#include - -#include <stdint.h> // for uint64_t etc -#include <stdio.h> // for printf -#include <stdlib.h> -#include <string.h> // memset -#include <sys/mman.h> // for mmap - -#include -#include -#include - -#ifndef USE_HWLOC -#define USE_HWLOC 0 -#endif - -#if USE_HWLOC -#include <hwloc.h> - -static hwloc_topology_t topo; -#endif - -#ifndef MAP_ANONYMOUS -#define MAP_ANONYMOUS MAP_ANON -#endif - -#ifndef cas -#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) -#endif - -DECLARE_THREAD_LOCAL(my_region, uint64_t); - -VOID_TASK_0(llmsset_reset_region) -{ - LOCALIZE_THREAD_LOCAL(my_region, uint64_t); - my_region = (uint64_t)-1; // no region - SET_THREAD_LOCAL(my_region, my_region); -} - -static uint64_t -claim_data_bucket(const llmsset_t dbs) -{ - LOCALIZE_THREAD_LOCAL(my_region, uint64_t); - - for (;;) { - if (my_region != (uint64_t)-1) { - // find empty bucket in region - uint64_t *ptr = dbs->bitmap2 + (my_region*8); - int i=0; - for (;i<8;) { - uint64_t v = *ptr; - if (v != 0xffffffffffffffffLL) { - int j = __builtin_clzll(~v); - *ptr |= (0x8000000000000000LL>>j); - return (8 * my_region + i) * 64 + j; - } - i++; - ptr++; - } - } else { - // special case on startup or after garbage collection - my_region += (lace_get_worker()->worker*(dbs->table_size/(64*8)))/lace_workers(); - } - uint64_t count = dbs->table_size/(64*8); - for (;;) { - // check if table maybe full - if (count-- == 0) return (uint64_t)-1; - - my_region += 1; - if (my_region >= (dbs->table_size/(64*8))) my_region = 0; - - // try to claim it - uint64_t *ptr = dbs->bitmap1 + (my_region/64); - uint64_t mask = 0x8000000000000000LL >> (my_region&63); - uint64_t v; -restart: - v = *ptr; - if (v & mask) continue; // taken - if (cas(ptr, v, v|mask)) break; - else goto restart; - } - SET_THREAD_LOCAL(my_region, my_region); - } -} - -static void -release_data_bucket(const llmsset_t dbs, uint64_t index) -{ - uint64_t *ptr = dbs->bitmap2 + (index/64); - uint64_t mask = 0x8000000000000000LL >> (index&63); - *ptr &= ~mask; -} - -static void -set_custom_bucket(const llmsset_t dbs, uint64_t index, int on) -{ - uint64_t *ptr = dbs->bitmapc + (index/64); - uint64_t mask = 0x8000000000000000LL >> (index&63); - if (on) *ptr |= mask; - else *ptr &= ~mask; -} - -static int -get_custom_bucket(const llmsset_t dbs, uint64_t index) -{ - uint64_t *ptr = dbs->bitmapc + (index/64); - uint64_t mask = 0x8000000000000000LL >> (index&63); - return (*ptr & mask) ? 1 : 0; -} - -#ifndef rotl64 -static inline uint64_t -rotl64(uint64_t x, int8_t r) -{ - return ((x<<r) | (x>>(64-r))); -} -#endif - -uint64_t -llmsset_hash(const uint64_t a, const uint64_t b, const uint64_t seed) -{ - const uint64_t prime = 1099511628211; - - uint64_t hash = seed; - hash = hash ^ a; - hash = rotl64(hash, 47); - hash = hash * prime; - hash = hash ^ b; - hash = rotl64(hash, 31); - hash = hash * prime; - - return hash ^ (hash >> 32); -} - -/* - * CL_MASK and CL_MASK_R are for the probe sequence calculation. - * With 64 bytes per cacheline, there are 8 64-bit values per cacheline. 
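/*
 * A standalone sketch of the probe-sequence step used by llmsset_lookup2 below,
 * assuming LINE_SIZE is 64: the index cycles through the 8 buckets of one cacheline
 * before a new hash selects the next cacheline.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint64_t cl_mask   = ~(uint64_t)((64 / 8) - 1);  /* clears the low 3 bits */
    const uint64_t cl_mask_r = (64 / 8) - 1;               /* keeps the low 3 bits  */
    uint64_t idx = 21;                                     /* an arbitrary bucket   */
    for (int i = 0; i < 8; i++) {
        printf("%llu ", (unsigned long long)idx);          /* 21 22 23 16 17 18 19 20 */
        idx = (idx & cl_mask) | ((idx + 1) & cl_mask_r);
    }
    printf("\n");
    return 0;
}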
- */ -// The LINE_SIZE is defined in lace.h -static const uint64_t CL_MASK = ~(((LINE_SIZE) / 8) - 1); -static const uint64_t CL_MASK_R = ((LINE_SIZE) / 8) - 1; - -/* 40 bits for the index, 24 bits for the hash */ -#define MASK_INDEX ((uint64_t)0x000000ffffffffff) -#define MASK_HASH ((uint64_t)0xffffff0000000000) - -static inline uint64_t -llmsset_lookup2(const llmsset_t dbs, uint64_t a, uint64_t b, int* created, const int custom) -{ - uint64_t hash_rehash = 14695981039346656037LLU; - if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash); - else hash_rehash = llmsset_hash(a, b, hash_rehash); - - const uint64_t hash = hash_rehash & MASK_HASH; - uint64_t idx, last, cidx = 0; - int i=0; - -#if LLMSSET_MASK - last = idx = hash_rehash & dbs->mask; -#else - last = idx = hash_rehash % dbs->table_size; -#endif - - for (;;) { - volatile uint64_t *bucket = dbs->table + idx; - uint64_t v = *bucket; - - if (v == 0) { - if (cidx == 0) { - cidx = claim_data_bucket(dbs); - if (cidx == (uint64_t)-1) return 0; // failed to claim a data bucket - if (custom) dbs->create_cb(&a, &b); - uint64_t *d_ptr = ((uint64_t*)dbs->data) + 2*cidx; - d_ptr[0] = a; - d_ptr[1] = b; - } - if (cas(bucket, 0, hash | cidx)) { - if (custom) set_custom_bucket(dbs, cidx, custom); - *created = 1; - return cidx; - } else { - v = *bucket; - } - } - - if (hash == (v & MASK_HASH)) { - uint64_t d_idx = v & MASK_INDEX; - uint64_t *d_ptr = ((uint64_t*)dbs->data) + 2*d_idx; - if (custom) { - if (dbs->equals_cb(a, b, d_ptr[0], d_ptr[1])) { - if (cidx != 0) { - dbs->destroy_cb(a, b); - release_data_bucket(dbs, cidx); - } - *created = 0; - return d_idx; - } - } else { - if (d_ptr[0] == a && d_ptr[1] == b) { - if (cidx != 0) release_data_bucket(dbs, cidx); - *created = 0; - return d_idx; - } - } - } - - sylvan_stats_count(LLMSSET_LOOKUP); - - // find next idx on probe sequence - idx = (idx & CL_MASK) | ((idx+1) & CL_MASK_R); - if (idx == last) { - if (++i == dbs->threshold) return 0; // failed to find empty spot in probe sequence - - // go to next cache line in probe sequence - if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash); - else hash_rehash = llmsset_hash(a, b, hash_rehash); - -#if LLMSSET_MASK - last = idx = hash_rehash & dbs->mask; -#else - last = idx = hash_rehash % dbs->table_size; -#endif - } - } -} - -uint64_t -llmsset_lookup(const llmsset_t dbs, const uint64_t a, const uint64_t b, int* created) -{ - return llmsset_lookup2(dbs, a, b, created, 0); -} - -uint64_t -llmsset_lookupc(const llmsset_t dbs, const uint64_t a, const uint64_t b, int* created) -{ - return llmsset_lookup2(dbs, a, b, created, 1); -} - -static inline int -llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx) -{ - const uint64_t * const d_ptr = ((uint64_t*)dbs->data) + 2*d_idx; - const uint64_t a = d_ptr[0]; - const uint64_t b = d_ptr[1]; - - uint64_t hash_rehash = 14695981039346656037LLU; - const int custom = get_custom_bucket(dbs, d_idx) ? 
1 : 0; - if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash); - else hash_rehash = llmsset_hash(a, b, hash_rehash); - const uint64_t new_v = (hash_rehash & MASK_HASH) | d_idx; - int i=0; - - uint64_t idx, last; -#if LLMSSET_MASK - last = idx = hash_rehash & dbs->mask; -#else - last = idx = hash_rehash % dbs->table_size; -#endif - - for (;;) { - volatile uint64_t *bucket = &dbs->table[idx]; - if (*bucket == 0 && cas(bucket, 0, new_v)) return 1; - - // find next idx on probe sequence - idx = (idx & CL_MASK) | ((idx+1) & CL_MASK_R); - if (idx == last) { - if (++i == dbs->threshold) return 0; // failed to find empty spot in probe sequence - - // go to next cache line in probe sequence - if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash); - else hash_rehash = llmsset_hash(a, b, hash_rehash); - -#if LLMSSET_MASK - last = idx = hash_rehash & dbs->mask; -#else - last = idx = hash_rehash % dbs->table_size; -#endif - } - } -} - -llmsset_t -llmsset_create(size_t initial_size, size_t max_size) -{ -#if USE_HWLOC - hwloc_topology_init(&topo); - hwloc_topology_load(topo); -#endif - - llmsset_t dbs = NULL; - if (posix_memalign((void**)&dbs, LINE_SIZE, sizeof(struct llmsset)) != 0) { - fprintf(stderr, "llmsset_create: Unable to allocate memory!\n"); - exit(1); - } - -#if LLMSSET_MASK - /* Check if initial_size and max_size are powers of 2 */ - if (__builtin_popcountll(initial_size) != 1) { - fprintf(stderr, "llmsset_create: initial_size is not a power of 2!\n"); - exit(1); - } - - if (__builtin_popcountll(max_size) != 1) { - fprintf(stderr, "llmsset_create: max_size is not a power of 2!\n"); - exit(1); - } -#endif - - if (initial_size > max_size) { - fprintf(stderr, "llmsset_create: initial_size > max_size!\n"); - exit(1); - } - - // minimum size is now 512 buckets (region size, but of course, n_workers * 512 is suggested as minimum) - - if (initial_size < 512) { - fprintf(stderr, "llmsset_create: initial_size too small!\n"); - exit(1); - } - - dbs->max_size = max_size; - llmsset_set_size(dbs, initial_size); - - /* This implementation of "resizable hash table" allocates the max_size table in virtual memory, - but only uses the "actual size" part in real memory */ - - dbs->table = (uint64_t*)mmap(0, dbs->max_size * 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); - dbs->data = (uint8_t*)mmap(0, dbs->max_size * 16, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); - - /* Also allocate bitmaps. Each region is 64*8 = 512 buckets. - Overhead of bitmap1: 1 bit per 4096 bucket. - Overhead of bitmap2: 1 bit per bucket. - Overhead of bitmapc: 1 bit per bucket. 
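/*
 * Worked example of the allocation sizes used below, assuming max_size = 1 << 26
 * buckets (the value itself is only an illustration):
 *
 *   table   : max_size * 8       = 512 MiB  (one 64-bit hash word per bucket)
 *   data    : max_size * 16      =   1 GiB  (two 64-bit data words per bucket)
 *   bitmap1 : max_size / (512*8) =  16 KiB  (1 bit per 512-bucket region)
 *   bitmap2 : max_size / 8       =   8 MiB  (1 bit per bucket)
 *   bitmapc : max_size / 8       =   8 MiB  (1 bit per bucket)
 *
 * Because the mappings are demand-paged, only the part of the table that is actually
 * used (table_size) ends up consuming physical memory.
 */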
*/ - - dbs->bitmap1 = (uint64_t*)mmap(0, dbs->max_size / (512*8), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); - dbs->bitmap2 = (uint64_t*)mmap(0, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); - dbs->bitmapc = (uint64_t*)mmap(0, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); - - if (dbs->table == (uint64_t*)-1 || dbs->data == (uint8_t*)-1 || dbs->bitmap1 == (uint64_t*)-1 || dbs->bitmap2 == (uint64_t*)-1 || dbs->bitmapc == (uint64_t*)-1) { - fprintf(stderr, "llmsset_create: Unable to allocate memory!\n"); - exit(1); - } - -#if defined(madvise) && defined(MADV_RANDOM) - madvise(dbs->table, dbs->max_size * 8, MADV_RANDOM); -#endif - -#if USE_HWLOC - hwloc_set_area_membind(topo, dbs->table, dbs->max_size * 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0); - hwloc_set_area_membind(topo, dbs->data, dbs->max_size * 16, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0); - hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0); - hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0); - hwloc_set_area_membind(topo, dbs->bitmapc, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0); -#endif - - // forbid first two positions (index 0 and 1) - dbs->bitmap2[0] = 0xc000000000000000LL; - - dbs->hash_cb = NULL; - dbs->equals_cb = NULL; - dbs->create_cb = NULL; - dbs->destroy_cb = NULL; - - // yes, ugly. for now, we use a global thread-local value. - // that is a problem with multiple tables. - // so, for now, do NOT use multiple tables!! - - LACE_ME; - INIT_THREAD_LOCAL(my_region); - TOGETHER(llmsset_reset_region); - - return dbs; -} - -void -llmsset_free(llmsset_t dbs) -{ - munmap(dbs->table, dbs->max_size * 8); - munmap(dbs->data, dbs->max_size * 16); - munmap(dbs->bitmap1, dbs->max_size / (512*8)); - munmap(dbs->bitmap2, dbs->max_size / 8); - munmap(dbs->bitmapc, dbs->max_size / 8); - free(dbs); -} - -VOID_TASK_IMPL_1(llmsset_clear, llmsset_t, dbs) -{ - // just reallocate... - if (mmap(dbs->table, dbs->max_size * 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) { -#if defined(madvise) && defined(MADV_RANDOM) - madvise(dbs->table, sizeof(uint64_t[dbs->max_size]), MADV_RANDOM); -#endif -#if USE_HWLOC - hwloc_set_area_membind(topo, dbs->table, sizeof(uint64_t[dbs->max_size]), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0); -#endif - } else { - // reallocate failed... 
expensive fallback - memset(dbs->table, 0, dbs->max_size * 8); - } - - if (mmap(dbs->bitmap1, dbs->max_size / (512*8), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) { -#if USE_HWLOC - hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0); -#endif - } else { - memset(dbs->bitmap1, 0, dbs->max_size / (512*8)); - } - - if (mmap(dbs->bitmap2, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) { -#if USE_HWLOC - hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0); -#endif - } else { - memset(dbs->bitmap2, 0, dbs->max_size / 8); - } - - // forbid first two positions (index 0 and 1) - dbs->bitmap2[0] = 0xc000000000000000LL; - - TOGETHER(llmsset_reset_region); -} - -int -llmsset_is_marked(const llmsset_t dbs, uint64_t index) -{ - volatile uint64_t *ptr = dbs->bitmap2 + (index/64); - uint64_t mask = 0x8000000000000000LL >> (index&63); - return (*ptr & mask) ? 1 : 0; -} - -int -llmsset_mark(const llmsset_t dbs, uint64_t index) -{ - volatile uint64_t *ptr = dbs->bitmap2 + (index/64); - uint64_t mask = 0x8000000000000000LL >> (index&63); - for (;;) { - uint64_t v = *ptr; - if (v & mask) return 0; - if (cas(ptr, v, v|mask)) return 1; - } -} - -VOID_TASK_3(llmsset_rehash_par, llmsset_t, dbs, size_t, first, size_t, count) -{ - if (count > 512) { - size_t split = count/2; - SPAWN(llmsset_rehash_par, dbs, first, split); - CALL(llmsset_rehash_par, dbs, first + split, count - split); - SYNC(llmsset_rehash_par); - } else { - uint64_t *ptr = dbs->bitmap2 + (first / 64); - uint64_t mask = 0x8000000000000000LL >> (first & 63); - for (size_t k=0; k<count; k++) { - if (*ptr & mask) llmsset_rehash_bucket(dbs, first+k); - mask >>= 1; - if (mask == 0) { - ptr++; - mask = 0x8000000000000000LL; - } - } - } -} - -VOID_TASK_IMPL_1(llmsset_rehash, llmsset_t, dbs) -{ - CALL(llmsset_rehash_par, dbs, 0, dbs->table_size); -} - -TASK_3(size_t, llmsset_count_marked_par, llmsset_t, dbs, size_t, first, size_t, count) -{ - if (count > 512) { - size_t split = count/2; - SPAWN(llmsset_count_marked_par, dbs, first, split); - size_t right = CALL(llmsset_count_marked_par, dbs, first + split, count - split); - size_t left = SYNC(llmsset_count_marked_par); - return left + right; - } else { - size_t result = 0; - uint64_t *ptr = dbs->bitmap2 + (first / 64); - if (count == 512) { - result += __builtin_popcountll(ptr[0]); - result += __builtin_popcountll(ptr[1]); - result += __builtin_popcountll(ptr[2]); - result += __builtin_popcountll(ptr[3]); - result += __builtin_popcountll(ptr[4]); - result += __builtin_popcountll(ptr[5]); - result += __builtin_popcountll(ptr[6]); - result += __builtin_popcountll(ptr[7]); - } else { - uint64_t mask = 0x8000000000000000LL >> (first & 63); - for (size_t k=0; k<count; k++) { - if (*ptr & mask) result += 1; - mask >>= 1; - if (mask == 0) { - ptr++; - mask = 0x8000000000000000LL; - } - } - } - return result; - } -} - -TASK_IMPL_1(size_t, llmsset_count_marked, llmsset_t, dbs) -{ - return CALL(llmsset_count_marked_par, dbs, 0, dbs->table_size); -} - -VOID_TASK_3(llmsset_destroy_par, llmsset_t, dbs, size_t, first, size_t, count) -{ - if (count > 1024) { - size_t split = count/2; - SPAWN(llmsset_destroy_par, dbs, first, split); - CALL(llmsset_destroy_par, dbs, first + split, count - split); - SYNC(llmsset_destroy_par); - } else { - for (size_t k=first; k<first+count; k++) { - volatile uint64_t *ptr2 = dbs->bitmap2 + (k/64); - volatile uint64_t *ptrc = dbs->bitmapc + (k/64); - uint64_t mask = 0x8000000000000000LL >> (k&63); - - // if not 
marked but is custom - if ((*ptr2 & mask) == 0 && (*ptrc & mask)) { - uint64_t *d_ptr = ((uint64_t*)dbs->data) + 2*k; - dbs->destroy_cb(d_ptr[0], d_ptr[1]); - *ptrc &= ~mask; - } - } - } -} - -VOID_TASK_IMPL_1(llmsset_destroy_unmarked, llmsset_t, dbs) -{ - if (dbs->destroy_cb == NULL) return; // no custom function - CALL(llmsset_destroy_par, dbs, 0, dbs->table_size); -} - -/** - * Set custom functions - */ -void llmsset_set_custom(const llmsset_t dbs, llmsset_hash_cb hash_cb, llmsset_equals_cb equals_cb, llmsset_create_cb create_cb, llmsset_destroy_cb destroy_cb) -{ - dbs->hash_cb = hash_cb; - dbs->equals_cb = equals_cb; - dbs->create_cb = create_cb; - dbs->destroy_cb = destroy_cb; -} diff --git a/resources/3rdparty/sylvan/src/llmsset.h b/resources/3rdparty/sylvan/src/llmsset.h deleted file mode 100644 index 34feefaf0..000000000 --- a/resources/3rdparty/sylvan/src/llmsset.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - * Copyright 2011-2014 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "sylvan_config.h" -#include -#include - -#include "lace.h" - -#include - -#ifndef LLMSSET_H -#define LLMSSET_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#ifndef LLMSSET_MASK -#define LLMSSET_MASK 0 // set to 1 to use bit mask instead of modulo -#endif - -/** - * Lockless hash table (set) to store 16-byte keys. - * Each unique key is associated with a 42-bit number. - * - * The set has support for stop-the-world garbage collection. - * Methods llmsset_clear, llmsset_mark and llmsset_rehash implement garbage collection. - * During their execution, llmsset_lookup is not allowed. - */ - -/** - * hash(a, b, seed) - * equals(lhs_a, lhs_b, rhs_a, rhs_b) - * create(a, b) -- with a,b pointers, allows changing pointers on create of node, - * but must keep hash/equals same! - * destroy(a, b) - */ -typedef uint64_t (*llmsset_hash_cb)(uint64_t, uint64_t, uint64_t); -typedef int (*llmsset_equals_cb)(uint64_t, uint64_t, uint64_t, uint64_t); -typedef void (*llmsset_create_cb)(uint64_t *, uint64_t *); -typedef void (*llmsset_destroy_cb)(uint64_t, uint64_t); - -typedef struct llmsset -{ - uint64_t *table; // table with hashes - uint8_t *data; // table with values - uint64_t *bitmap1; // ownership bitmap (per 512 buckets) - uint64_t *bitmap2; // bitmap for "contains data" - uint64_t *bitmapc; // bitmap for "use custom functions" - size_t max_size; // maximum size of the hash table (for resizing) - size_t table_size; // size of the hash table (number of slots) --> power of 2! -#if LLMSSET_MASK - size_t mask; // size-1 -#endif - size_t f_size; - llmsset_hash_cb hash_cb; // custom hash function - llmsset_equals_cb equals_cb; // custom equals function - llmsset_create_cb create_cb; // custom create function - llmsset_destroy_cb destroy_cb; // custom destroy function - int16_t threshold; // number of iterations for insertion until returning error -} *llmsset_t; - -/** - * Retrieve a pointer to the data associated with the 42-bit value. 
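/*
 * A minimal sketch of the custom-callback hooks declared above; the callback bodies
 * are placeholders, and registration happens through llmsset_set_custom (declared
 * further down). The intended use is nodes whose payload lives outside the 16-byte
 * bucket, where create/destroy manage that external payload.
 */
static uint64_t my_hash(uint64_t a, uint64_t b, uint64_t seed)
{
    return llmsset_hash(a, b, seed);      /* reuse the default mixer                        */
}
static int my_equals(uint64_t la, uint64_t lb, uint64_t ra, uint64_t rb)
{
    return la == ra && lb == rb;          /* compare the stored 16-byte key                 */
}
static void my_create(uint64_t *a, uint64_t *b)
{
    (void)a; (void)b;                     /* e.g. copy an external object, keep its pointer in *a */
}
static void my_destroy(uint64_t a, uint64_t b)
{
    (void)a; (void)b;                     /* e.g. release that external object again        */
}
/* llmsset_set_custom(dbs, my_hash, my_equals, my_create, my_destroy); */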
- */ - -static inline void* -llmsset_index_to_ptr(const llmsset_t dbs, size_t index) -{ - return dbs->data + index * 16; -} - -/** - * Create the set. - * This will allocate a set of buckets in virtual memory. - * The actual space used is buckets. - */ -llmsset_t llmsset_create(size_t initial_size, size_t max_size); - -/** - * Free the set. - */ -void llmsset_free(llmsset_t dbs); - -/** - * Retrieve the maximum size of the set. - */ -static inline size_t -llmsset_get_max_size(const llmsset_t dbs) -{ - return dbs->max_size; -} - -/** - * Retrieve the current size of the lockless MS set. - */ -static inline size_t -llmsset_get_size(const llmsset_t dbs) -{ - return dbs->table_size; -} - -/** - * Set the table size of the set. - * Typically called during garbage collection, after clear and before rehash. - * Returns 0 if dbs->table_size > dbs->max_size! - */ -static inline void -llmsset_set_size(llmsset_t dbs, size_t size) -{ - /* check bounds (don't be rediculous) */ - if (size > 128 && size <= dbs->max_size) { - dbs->table_size = size; -#if LLMSSET_MASK - /* Warning: if size is not a power of two, you will get interesting behavior */ - dbs->mask = dbs->table_size - 1; -#endif - dbs->threshold = (64 - __builtin_clzll(dbs->table_size)) + 4; // doubling table_size increases threshold by 1 - } -} - -/** - * Core function: find existing data or add new. - * Returns the unique 42-bit value associated with the data, or 0 when table is full. - * Also, this value will never equal 0 or 1. - * Note: garbage collection during lookup strictly forbidden - */ -uint64_t llmsset_lookup(const llmsset_t dbs, const uint64_t a, const uint64_t b, int *created); - -/** - * Same as lookup, but use the custom functions - */ -uint64_t llmsset_lookupc(const llmsset_t dbs, const uint64_t a, const uint64_t b, int *created); - -/** - * To perform garbage collection, the user is responsible that no lookups are performed during the process. - * - * 1) call llmsset_clear - * 2) call llmsset_mark for every bucket to rehash - * 3) call llmsset_rehash - */ -VOID_TASK_DECL_1(llmsset_clear, llmsset_t); -#define llmsset_clear(dbs) CALL(llmsset_clear, dbs) - -/** - * Check if a certain data bucket is marked (in use). - */ -int llmsset_is_marked(const llmsset_t dbs, uint64_t index); - -/** - * During garbage collection, buckets are marked (for rehashing) with this function. - * Returns 0 if the node was already marked, or non-zero if it was not marked. - * May also return non-zero if multiple workers marked at the same time. - */ -int llmsset_mark(const llmsset_t dbs, uint64_t index); - -/** - * Rehash all marked buckets. - */ -VOID_TASK_DECL_1(llmsset_rehash, llmsset_t); -#define llmsset_rehash(dbs) CALL(llmsset_rehash, dbs) - -/** - * Retrieve number of marked buckets. - */ -TASK_DECL_1(size_t, llmsset_count_marked, llmsset_t); -#define llmsset_count_marked(dbs) CALL(llmsset_count_marked, dbs) - -/** - * During garbage collection, this method calls the destroy callback - * for all 'custom' data that is not kept. 
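/*
 * A minimal sketch of the stop-the-world collection cycle described above, written as
 * a Lace task so the CALL-based wrappers can be used; keep_node() and node_count are
 * placeholders for whatever root tracking the caller has, and no lookups may run while
 * this executes.
 */
VOID_TASK_1(gc_example, llmsset_t, dbs)
{
    llmsset_clear(dbs);                          /* 1) wipe the hash array and bitmaps     */
    for (uint64_t i = 2; i < node_count; i++) {  /* buckets 0 and 1 are never handed out   */
        if (keep_node(i)) llmsset_mark(dbs, i);  /* 2) mark every bucket that must survive */
    }
    llmsset_rehash(dbs);                         /* 3) reinsert all marked buckets         */
}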
- */ -VOID_TASK_DECL_1(llmsset_destroy_unmarked, llmsset_t); -#define llmsset_destroy_unmarked(dbs) CALL(llmsset_destroy_unmarked, dbs) - -/** - * Set custom functions - */ -void llmsset_set_custom(const llmsset_t dbs, llmsset_hash_cb hash_cb, llmsset_equals_cb equals_cb, llmsset_create_cb create_cb, llmsset_destroy_cb destroy_cb); - -/** - * Default hashing function - */ -uint64_t llmsset_hash(const uint64_t a, const uint64_t b, const uint64_t seed); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - - -#endif diff --git a/resources/3rdparty/sylvan/src/refs.c b/resources/3rdparty/sylvan/src/refs.c deleted file mode 100644 index e13e6d5e6..000000000 --- a/resources/3rdparty/sylvan/src/refs.c +++ /dev/null @@ -1,595 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include <assert.h> // for assert -#include <stdio.h> // for fprintf -#include <stdint.h> // for uint32_t etc -#include <stdlib.h> // for exit -#include <sys/mman.h> // for mmap - -#include - -#ifndef compiler_barrier -#define compiler_barrier() { asm volatile("" ::: "memory"); } -#endif - -#ifndef cas -#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) -#endif - -/** - * Implementation of external references - * Based on a hash table for 40-bit non-null values, linear probing - * Use tombstones for deleting, higher bits for reference count - */ -static const uint64_t refs_ts = 0x7fffffffffffffff; // tombstone - -/* FNV-1a 64-bit hash */ -static inline uint64_t -fnv_hash(uint64_t a) -{ - const uint64_t prime = 1099511628211; - uint64_t hash = 14695981039346656037LLU; - hash = (hash ^ a) * prime; - hash = (hash ^ ((a << 25) | (a >> 39))) * prime; - return hash ^ (hash >> 32); -} - -// Count number of unique entries (not number of references) -size_t -refs_count(refs_table_t *tbl) -{ - size_t count = 0; - uint64_t *bucket = tbl->refs_table; - uint64_t * const end = bucket + tbl->refs_size; - while (bucket != end) { - if (*bucket != 0 && *bucket != refs_ts) count++; - bucket++; - } - return count; -} - -static inline void -refs_rehash(refs_table_t *tbl, uint64_t v) -{ - if (v == 0) return; // do not rehash empty value - if (v == refs_ts) return; // do not rehash tombstone - - volatile uint64_t *bucket = tbl->refs_table + (fnv_hash(v & 0x000000ffffffffff) % tbl->refs_size); - uint64_t * const end = tbl->refs_table + tbl->refs_size; - - int i = 128; // try 128 times linear probing - while (i--) { - if (*bucket == 0) { if (cas(bucket, 0, v)) return; } - if (++bucket == end) bucket = tbl->refs_table; - } - - // assert(0); // impossible! 
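/*
 * A standalone sketch of the bucket encoding used by this table: the low 40 bits hold
 * the referenced value, the bits above hold the reference count (saturating at
 * 0x7fffff), and 0x7fffffffffffffff marks a deleted slot (tombstone). The example
 * value is arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t value  = 0x0000001234567890ULL;                /* a 40-bit payload              */
    uint64_t bucket = value | (3ULL << 40);                  /* same value with refcount 3    */
    printf("value = %llx, count = %llu\n",
           (unsigned long long)(bucket & 0x000000ffffffffffULL),
           (unsigned long long)(bucket >> 40));               /* value = 1234567890, count = 3 */
    return 0;
}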
-} - -/** - * Called internally to assist resize operations - * Returns 1 for retry, 0 for done - */ -static int -refs_resize_help(refs_table_t *tbl) -{ - if (0 == (tbl->refs_control & 0xf0000000)) return 0; // no resize in progress (anymore) - if (tbl->refs_control & 0x80000000) return 1; // still waiting for preparation - - if (tbl->refs_resize_part >= tbl->refs_resize_size / 128) return 1; // all parts claimed - size_t part = __sync_fetch_and_add(&tbl->refs_resize_part, 1); - if (part >= tbl->refs_resize_size/128) return 1; // all parts claimed - - // rehash all - int i; - volatile uint64_t *bucket = tbl->refs_resize_table + part * 128; - for (i=0; i<128; i++) refs_rehash(tbl, *bucket++); - - __sync_fetch_and_add(&tbl->refs_resize_done, 1); - return 1; -} - -static void -refs_resize(refs_table_t *tbl) -{ - while (1) { - uint32_t v = tbl->refs_control; - if (v & 0xf0000000) { - // someone else started resize - // just rehash blocks until done - while (refs_resize_help(tbl)) continue; - return; - } - if (cas(&tbl->refs_control, v, 0x80000000 | v)) { - // wait until all users gone - while (tbl->refs_control != 0x80000000) continue; - break; - } - } - - tbl->refs_resize_table = tbl->refs_table; - tbl->refs_resize_size = tbl->refs_size; - tbl->refs_resize_part = 0; - tbl->refs_resize_done = 0; - - // calculate new size - size_t new_size = tbl->refs_size; - size_t count = refs_count(tbl); - if (count*4 > tbl->refs_size) new_size *= 2; - - // allocate new table - uint64_t *new_table = (uint64_t*)mmap(0, new_size * sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0); - if (new_table == (uint64_t*)-1) { - fprintf(stderr, "refs: Unable to allocate memory!\n"); - exit(1); - } - - // set new data and go - tbl->refs_table = new_table; - tbl->refs_size = new_size; - compiler_barrier(); - tbl->refs_control = 0x40000000; - - // until all parts are done, rehash blocks - while (tbl->refs_resize_done != tbl->refs_resize_size/128) refs_resize_help(tbl); - - // done! 
- compiler_barrier(); - tbl->refs_control = 0; - - // unmap old table - munmap(tbl->refs_resize_table, tbl->refs_resize_size * sizeof(uint64_t)); -} - -/* Enter refs_modify */ -static inline void -refs_enter(refs_table_t *tbl) -{ - for (;;) { - uint32_t v = tbl->refs_control; - if (v & 0xf0000000) { - while (refs_resize_help(tbl)) continue; - } else { - if (cas(&tbl->refs_control, v, v+1)) return; - } - } -} - -/* Leave refs_modify */ -static inline void -refs_leave(refs_table_t *tbl) -{ - for (;;) { - uint32_t v = tbl->refs_control; - if (cas(&tbl->refs_control, v, v-1)) return; - } -} - -static inline int -refs_modify(refs_table_t *tbl, const uint64_t a, const int dir) -{ - volatile uint64_t *bucket; - volatile uint64_t *ts_bucket; - uint64_t v, new_v; - int res, i; - - refs_enter(tbl); - -ref_retry: - bucket = tbl->refs_table + (fnv_hash(a) & (tbl->refs_size - 1)); - ts_bucket = NULL; // tombstone - i = 128; // try 128 times linear probing - - while (i--) { -ref_restart: - v = *bucket; - if (v == refs_ts) { - if (ts_bucket == NULL) ts_bucket = bucket; - } else if (v == 0) { - // not found - res = 0; - if (dir < 0) goto ref_exit; - if (ts_bucket != NULL) { - bucket = ts_bucket; - ts_bucket = NULL; - v = refs_ts; - } - new_v = a | (1ULL << 40); - goto ref_mod; - } else if ((v & 0x000000ffffffffff) == a) { - // found - res = 1; - uint64_t count = v >> 40; - if (count == 0x7fffff) goto ref_exit; - count += dir; - if (count == 0) new_v = refs_ts; - else new_v = a | (count << 40); - goto ref_mod; - } - - if (++bucket == tbl->refs_table + tbl->refs_size) bucket = tbl->refs_table; - } - - // not found after linear probing - if (dir < 0) { - res = 0; - goto ref_exit; - } else if (ts_bucket != NULL) { - bucket = ts_bucket; - ts_bucket = NULL; - v = refs_ts; - new_v = a | (1ULL << 40); - if (!cas(bucket, v, new_v)) goto ref_retry; - res = 1; - goto ref_exit; - } else { - // hash table full - refs_leave(tbl); - refs_resize(tbl); - return refs_modify(tbl, a, dir); - } - -ref_mod: - if (!cas(bucket, v, new_v)) goto ref_restart; - -ref_exit: - refs_leave(tbl); - return res; -} - -void -refs_up(refs_table_t *tbl, uint64_t a) -{ - refs_modify(tbl, a, 1); -} - -void -refs_down(refs_table_t *tbl, uint64_t a) -{ -#ifdef NDEBUG - refs_modify(tbl, a, -1); -#else - int res = refs_modify(tbl, a, -1); - assert(res != 0); -#endif -} - -uint64_t* -refs_iter(refs_table_t *tbl, size_t first, size_t end) -{ - // assert(first < tbl->refs_size); - // assert(end <= tbl->refs_size); - - uint64_t *bucket = tbl->refs_table + first; - while (bucket != tbl->refs_table + end) { - if (*bucket != 0 && *bucket != refs_ts) return bucket; - bucket++; - } - return NULL; -} - -uint64_t -refs_next(refs_table_t *tbl, uint64_t **_bucket, size_t end) -{ - uint64_t *bucket = *_bucket; - // assert(bucket != NULL); - // assert(end <= tbl->refs_size); - uint64_t result = *bucket & 0x000000ffffffffff; - bucket++; - while (bucket != tbl->refs_table + end) { - if (*bucket != 0 && *bucket != refs_ts) { - *_bucket = bucket; - return result; - } - bucket++; - } - *_bucket = NULL; - return result; -} - -void -refs_create(refs_table_t *tbl, size_t _refs_size) -{ - if (__builtin_popcountll(_refs_size) != 1) { - fprintf(stderr, "refs: Table size must be a power of 2!\n"); - exit(1); - } - - tbl->refs_size = _refs_size; - tbl->refs_table = (uint64_t*)mmap(0, tbl->refs_size * sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0); - if (tbl->refs_table == (uint64_t*)-1) { - fprintf(stderr, "refs: Unable to allocate memory!\n"); - 
exit(1); - } -} - -void -refs_free(refs_table_t *tbl) -{ - munmap(tbl->refs_table, tbl->refs_size * sizeof(uint64_t)); -} - -/** - * Simple implementation of a 64-bit resizable hash-table - * No idea if this is scalable... :( but it seems thread-safe - */ - -// Count number of unique entries (not number of references) -size_t -protect_count(refs_table_t *tbl) -{ - size_t count = 0; - uint64_t *bucket = tbl->refs_table; - uint64_t * const end = bucket + tbl->refs_size; - while (bucket != end) { - if (*bucket != 0 && *bucket != refs_ts) count++; - bucket++; - } - return count; -} - -static inline void -protect_rehash(refs_table_t *tbl, uint64_t v) -{ - if (v == 0) return; // do not rehash empty value - if (v == refs_ts) return; // do not rehash tombstone - - volatile uint64_t *bucket = tbl->refs_table + (fnv_hash(v) % tbl->refs_size); - uint64_t * const end = tbl->refs_table + tbl->refs_size; - - int i = 128; // try 128 times linear probing - while (i--) { - if (*bucket == 0 && cas(bucket, 0, v)) return; - if (++bucket == end) bucket = tbl->refs_table; - } - - assert(0); // whoops! -} - -/** - * Called internally to assist resize operations - * Returns 1 for retry, 0 for done - */ -static int -protect_resize_help(refs_table_t *tbl) -{ - if (0 == (tbl->refs_control & 0xf0000000)) return 0; // no resize in progress (anymore) - if (tbl->refs_control & 0x80000000) return 1; // still waiting for preparation - if (tbl->refs_resize_part >= tbl->refs_resize_size / 128) return 1; // all parts claimed - size_t part = __sync_fetch_and_add(&tbl->refs_resize_part, 1); - if (part >= tbl->refs_resize_size/128) return 1; // all parts claimed - - // rehash all - int i; - volatile uint64_t *bucket = tbl->refs_resize_table + part * 128; - for (i=0; i<128; i++) protect_rehash(tbl, *bucket++); - - __sync_fetch_and_add(&tbl->refs_resize_done, 1); - return 1; -} - -static void -protect_resize(refs_table_t *tbl) -{ - while (1) { - uint32_t v = tbl->refs_control; - if (v & 0xf0000000) { - // someone else started resize - // just rehash blocks until done - while (protect_resize_help(tbl)) continue; - return; - } - if (cas(&tbl->refs_control, v, 0x80000000 | v)) { - // wait until all users gone - while (tbl->refs_control != 0x80000000) continue; - break; - } - } - - tbl->refs_resize_table = tbl->refs_table; - tbl->refs_resize_size = tbl->refs_size; - tbl->refs_resize_part = 0; - tbl->refs_resize_done = 0; - - // calculate new size - size_t new_size = tbl->refs_size; - size_t count = refs_count(tbl); - if (count*4 > tbl->refs_size) new_size *= 2; - - // allocate new table - uint64_t *new_table = (uint64_t*)mmap(0, new_size * sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0); - if (new_table == (uint64_t*)-1) { - fprintf(stderr, "refs: Unable to allocate memory!\n"); - exit(1); - } - - // set new data and go - tbl->refs_table = new_table; - tbl->refs_size = new_size; - compiler_barrier(); - tbl->refs_control = 0x40000000; - - // until all parts are done, rehash blocks - while (tbl->refs_resize_done < tbl->refs_resize_size/128) protect_resize_help(tbl); - - // done! 
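/*
 * A minimal usage sketch for the protect_* variant: unlike refs_* above (40-bit values
 * plus a reference count), it stores plain 64-bit keys, typically addresses, and a key
 * is simply present or absent. The variable names are illustrative only.
 */
void example_protect(refs_table_t *tbl)
{
    uint64_t my_node = 42;                            /* stand-in for a live node handle      */
    protect_up(tbl, (uint64_t)(uintptr_t)&my_node);   /* register the address as a root       */
    /* ... a collector can walk the table with protect_iter/protect_next here ... */
    protect_down(tbl, (uint64_t)(uintptr_t)&my_node); /* deregister it again                  */
}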
- compiler_barrier(); - tbl->refs_control = 0; - - // unmap old table - munmap(tbl->refs_resize_table, tbl->refs_resize_size * sizeof(uint64_t)); -} - -static inline void -protect_enter(refs_table_t *tbl) -{ - for (;;) { - uint32_t v = tbl->refs_control; - if (v & 0xf0000000) { - while (protect_resize_help(tbl)) continue; - } else { - if (cas(&tbl->refs_control, v, v+1)) return; - } - } -} - -static inline void -protect_leave(refs_table_t *tbl) -{ - for (;;) { - uint32_t v = tbl->refs_control; - if (cas(&tbl->refs_control, v, v-1)) return; - } -} - -void -protect_up(refs_table_t *tbl, uint64_t a) -{ - volatile uint64_t *bucket; - volatile uint64_t *ts_bucket; - uint64_t v; - int i; - - protect_enter(tbl); - -ref_retry: - bucket = tbl->refs_table + (fnv_hash(a) & (tbl->refs_size - 1)); - ts_bucket = NULL; // tombstone - i = 128; // try 128 times linear probing - - while (i--) { -ref_restart: - v = *bucket; - if (v == refs_ts) { - if (ts_bucket == NULL) ts_bucket = bucket; - } else if (v == 0) { - // go go go - if (ts_bucket != NULL) { - if (cas(ts_bucket, refs_ts, a)) { - protect_leave(tbl); - return; - } else { - goto ref_retry; - } - } else { - if (cas(bucket, 0, a)) { - protect_leave(tbl); - return; - } else { - goto ref_restart; - } - } - } - - if (++bucket == tbl->refs_table + tbl->refs_size) bucket = tbl->refs_table; - } - - // not found after linear probing - if (ts_bucket != NULL) { - if (cas(ts_bucket, refs_ts, a)) { - protect_leave(tbl); - return; - } else { - goto ref_retry; - } - } else { - // hash table full - protect_leave(tbl); - protect_resize(tbl); - protect_enter(tbl); - goto ref_retry; - } -} - -void -protect_down(refs_table_t *tbl, uint64_t a) -{ - volatile uint64_t *bucket; - protect_enter(tbl); - - bucket = tbl->refs_table + (fnv_hash(a) & (tbl->refs_size - 1)); - int i = 128; // try 128 times linear probing - - while (i--) { - if (*bucket == a) { - *bucket = refs_ts; - protect_leave(tbl); - return; - } - if (++bucket == tbl->refs_table + tbl->refs_size) bucket = tbl->refs_table; - } - - // not found after linear probing - assert(0); -} - -uint64_t* -protect_iter(refs_table_t *tbl, size_t first, size_t end) -{ - // assert(first < tbl->refs_size); - // assert(end <= tbl->refs_size); - - uint64_t *bucket = tbl->refs_table + first; - while (bucket != tbl->refs_table + end) { - if (*bucket != 0 && *bucket != refs_ts) return bucket; - bucket++; - } - return NULL; -} - -uint64_t -protect_next(refs_table_t *tbl, uint64_t **_bucket, size_t end) -{ - uint64_t *bucket = *_bucket; - // assert(bucket != NULL); - // assert(end <= tbl->refs_size); - uint64_t result = *bucket; - bucket++; - while (bucket != tbl->refs_table + end) { - if (*bucket != 0 && *bucket != refs_ts) { - *_bucket = bucket; - return result; - } - bucket++; - } - *_bucket = NULL; - return result; -} - -void -protect_create(refs_table_t *tbl, size_t _refs_size) -{ - if (__builtin_popcountll(_refs_size) != 1) { - fprintf(stderr, "refs: Table size must be a power of 2!\n"); - exit(1); - } - - tbl->refs_size = _refs_size; - tbl->refs_table = (uint64_t*)mmap(0, tbl->refs_size * sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0); - if (tbl->refs_table == (uint64_t*)-1) { - fprintf(stderr, "refs: Unable to allocate memory!\n"); - exit(1); - } -} - -void -protect_free(refs_table_t *tbl) -{ - munmap(tbl->refs_table, tbl->refs_size * sizeof(uint64_t)); -} diff --git a/resources/3rdparty/sylvan/src/refs.h b/resources/3rdparty/sylvan/src/refs.h deleted file mode 100644 index 928948c90..000000000 --- 
a/resources/3rdparty/sylvan/src/refs.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include // for uint32_t etc - -#ifndef REFS_INLINE_H -#define REFS_INLINE_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/** - * Implementation of external references - * Based on a hash table for 40-bit non-null values, linear probing - * Use tombstones for deleting, higher bits for reference count - */ -typedef struct -{ - uint64_t *refs_table; // table itself - size_t refs_size; // number of buckets - - /* helpers during resize operation */ - volatile uint32_t refs_control; // control field - uint64_t *refs_resize_table; // previous table - size_t refs_resize_size; // size of previous table - size_t refs_resize_part; // which part is next - size_t refs_resize_done; // how many parts are done -} refs_table_t; - -// Count number of unique entries (not number of references) -size_t refs_count(refs_table_t *tbl); - -// Increase or decrease reference to 40-bit value a -// Will fail (assertion) if more down than up are called for a -void refs_up(refs_table_t *tbl, uint64_t a); -void refs_down(refs_table_t *tbl, uint64_t a); - -// Return a bucket or NULL to start iterating -uint64_t *refs_iter(refs_table_t *tbl, size_t first, size_t end); - -// Continue iterating, set bucket to next bucket or NULL -uint64_t refs_next(refs_table_t *tbl, uint64_t **bucket, size_t end); - -// User must supply a pointer, refs_create and refs_free handle initialization/destruction -void refs_create(refs_table_t *tbl, size_t _refs_size); -void refs_free(refs_table_t *tbl); - -// The same, but now for 64-bit values ("protect pointers") -size_t protect_count(refs_table_t *tbl); -void protect_up(refs_table_t *tbl, uint64_t a); -void protect_down(refs_table_t *tbl, uint64_t a); -uint64_t *protect_iter(refs_table_t *tbl, size_t first, size_t end); -uint64_t protect_next(refs_table_t *tbl, uint64_t **bucket, size_t end); -void protect_create(refs_table_t *tbl, size_t _refs_size); -void protect_free(refs_table_t *tbl); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - - -#endif diff --git a/resources/3rdparty/sylvan/src/sha2.c b/resources/3rdparty/sylvan/src/sha2.c deleted file mode 100644 index db17dbb2d..000000000 --- a/resources/3rdparty/sylvan/src/sha2.c +++ /dev/null @@ -1,1067 +0,0 @@ -/* - * FILE: sha2.c - * AUTHOR: Aaron D. Gifford - http://www.aarongifford.com/ - * - * Copyright (c) 2000-2001, Aaron D. Gifford - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the copyright holder nor the names of contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - */ - -#include <string.h> /* memcpy()/memset() or bcopy()/bzero() */ -#include <assert.h> /* assert() */ -#include "sha2.h" - -/* - * ASSERT NOTE: - * Some sanity checking code is included using assert(). On my FreeBSD - * system, this additional code can be removed by compiling with NDEBUG - * defined. Check your own system's manpage on assert() to see how to - * compile WITHOUT the sanity checking code on your system. - * - * UNROLLED TRANSFORM LOOP NOTE: - * You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform - * loop version for the hash transform rounds (defined using macros - * later in this file). Either define on the command line, for example: - * - * cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c - * - * or define below: - * - * #define SHA2_UNROLL_TRANSFORM - * - */ - - -/*** SHA-256/384/512 Machine Architecture Definitions *****************/ -/* - * BYTE_ORDER NOTE: - * - * Please make sure that your system defines BYTE_ORDER. If your - * architecture is little-endian, make sure it also defines - * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are - * equivalent. - * - * If your system does not define the above, then you can do so by - * hand like this: - * - * #define LITTLE_ENDIAN 1234 - * #define BIG_ENDIAN 4321 - * - * And for little-endian machines, add: - * - * #define BYTE_ORDER LITTLE_ENDIAN - * - * Or for big-endian machines: - * - * #define BYTE_ORDER BIG_ENDIAN - * - * The FreeBSD machine this was written on defines BYTE_ORDER - * appropriately by including <sys/types.h> (which in turn includes - * <machine/endian.h> where the appropriate definitions are actually - * made). - */ -#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN) -#error Define BYTE_ORDER to be equal to either LITTLE_ENDIAN or BIG_ENDIAN -#endif - -/* - * Define the following sha2_* types to types of the correct length on - * the native architecture. Most BSD systems and Linux define u_intXX_t - * types. Machines with very recent ANSI C headers, can use the - * uintXX_t definitions from inttypes.h by defining SHA2_USE_INTTYPES_H - * during compile or in the sha.h header file. - * - * Machines that support neither u_intXX_t nor inttypes.h's uintXX_t - * will need to define these three typedefs below (and the appropriate - * ones in sha.h too) by hand according to their system architecture. 
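/*
 * A minimal configuration sketch for the notes above, assuming a little-endian target;
 * the same macros can instead be supplied as -D flags at compile time, as the comments
 * suggest.
 */
#define SHA2_USE_INTTYPES_H            /* take sha2_byte/sha2_word32/sha2_word64 from inttypes.h */
#ifndef BYTE_ORDER
#define LITTLE_ENDIAN 1234
#define BIG_ENDIAN    4321
#define BYTE_ORDER    LITTLE_ENDIAN    /* only when the system headers do not already define it  */
#endif
#include "sha2.h"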
- * - * Thank you, Jun-ichiro itojun Hagino, for suggesting using u_intXX_t - * types and pointing out recent ANSI C support for uintXX_t in inttypes.h. - */ -#ifdef SHA2_USE_INTTYPES_H - -typedef uint8_t sha2_byte; /* Exactly 1 byte */ -typedef uint32_t sha2_word32; /* Exactly 4 bytes */ -typedef uint64_t sha2_word64; /* Exactly 8 bytes */ - -#else /* SHA2_USE_INTTYPES_H */ - -typedef u_int8_t sha2_byte; /* Exactly 1 byte */ -typedef u_int32_t sha2_word32; /* Exactly 4 bytes */ -typedef u_int64_t sha2_word64; /* Exactly 8 bytes */ - -#endif /* SHA2_USE_INTTYPES_H */ - - -/*** SHA-256/384/512 Various Length Definitions ***********************/ -/* NOTE: Most of these are in sha2.h */ -#define SHA256_SHORT_BLOCK_LENGTH (SHA256_BLOCK_LENGTH - 8) -#define SHA384_SHORT_BLOCK_LENGTH (SHA384_BLOCK_LENGTH - 16) -#define SHA512_SHORT_BLOCK_LENGTH (SHA512_BLOCK_LENGTH - 16) - - -/*** ENDIAN REVERSAL MACROS *******************************************/ -#if BYTE_ORDER == LITTLE_ENDIAN -#define REVERSE32(w,x) { \ - sha2_word32 tmp = (w); \ - tmp = (tmp >> 16) | (tmp << 16); \ - (x) = ((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8); \ -} -#define REVERSE64(w,x) { \ - sha2_word64 tmp = (w); \ - tmp = (tmp >> 32) | (tmp << 32); \ - tmp = ((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \ - ((tmp & 0x00ff00ff00ff00ffULL) << 8); \ - (x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \ - ((tmp & 0x0000ffff0000ffffULL) << 16); \ -} -#endif /* BYTE_ORDER == LITTLE_ENDIAN */ - -/* - * Macro for incrementally adding the unsigned 64-bit integer n to the - * unsigned 128-bit integer (represented using a two-element array of - * 64-bit words): - */ -#define ADDINC128(w,n) { \ - (w)[0] += (sha2_word64)(n); \ - if ((w)[0] < (n)) { \ - (w)[1]++; \ - } \ -} - -/* - * Macros for copying blocks of memory and for zeroing out ranges - * of memory. Using these macros makes it easy to switch from - * using memset()/memcpy() and using bzero()/bcopy(). - * - * Please define either SHA2_USE_MEMSET_MEMCPY or define - * SHA2_USE_BZERO_BCOPY depending on which function set you - * choose to use: - */ -#if !defined(SHA2_USE_MEMSET_MEMCPY) && !defined(SHA2_USE_BZERO_BCOPY) -/* Default to memset()/memcpy() if no option is specified */ -#define SHA2_USE_MEMSET_MEMCPY 1 -#endif -#if defined(SHA2_USE_MEMSET_MEMCPY) && defined(SHA2_USE_BZERO_BCOPY) -/* Abort with an error if BOTH options are defined */ -#error Define either SHA2_USE_MEMSET_MEMCPY or SHA2_USE_BZERO_BCOPY, not both! -#endif - -#ifdef SHA2_USE_MEMSET_MEMCPY -#define MEMSET_BZERO(p,l) memset((p), 0, (l)) -#define MEMCPY_BCOPY(d,s,l) memcpy((d), (s), (l)) -#endif -#ifdef SHA2_USE_BZERO_BCOPY -#define MEMSET_BZERO(p,l) bzero((p), (l)) -#define MEMCPY_BCOPY(d,s,l) bcopy((s), (d), (l)) -#endif - - -/*** THE SIX LOGICAL FUNCTIONS ****************************************/ -/* - * Bit shifting and rotation (used by the six SHA-XYZ logical functions: - * - * NOTE: The naming of R and S appears backwards here (R is a SHIFT and - * S is a ROTATION) because the SHA-256/384/512 description document - * (see http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf) uses this - * same "backwards" definition. 
- */ -/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */ -#define R(b,x) ((x) >> (b)) -/* 32-bit Rotate-right (used in SHA-256): */ -#define S32(b,x) (((x) >> (b)) | ((x) << (32 - (b)))) -/* 64-bit Rotate-right (used in SHA-384 and SHA-512): */ -#define S64(b,x) (((x) >> (b)) | ((x) << (64 - (b)))) - -/* Two of six logical functions used in SHA-256, SHA-384, and SHA-512: */ -#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z))) -#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) - -/* Four of six logical functions used in SHA-256: */ -#define Sigma0_256(x) (S32(2, (x)) ^ S32(13, (x)) ^ S32(22, (x))) -#define Sigma1_256(x) (S32(6, (x)) ^ S32(11, (x)) ^ S32(25, (x))) -#define sigma0_256(x) (S32(7, (x)) ^ S32(18, (x)) ^ R(3 , (x))) -#define sigma1_256(x) (S32(17, (x)) ^ S32(19, (x)) ^ R(10, (x))) - -/* Four of six logical functions used in SHA-384 and SHA-512: */ -#define Sigma0_512(x) (S64(28, (x)) ^ S64(34, (x)) ^ S64(39, (x))) -#define Sigma1_512(x) (S64(14, (x)) ^ S64(18, (x)) ^ S64(41, (x))) -#define sigma0_512(x) (S64( 1, (x)) ^ S64( 8, (x)) ^ R( 7, (x))) -#define sigma1_512(x) (S64(19, (x)) ^ S64(61, (x)) ^ R( 6, (x))) - -/*** INTERNAL FUNCTION PROTOTYPES *************************************/ -/* NOTE: These should not be accessed directly from outside this - * library -- they are intended for private internal visibility/use - * only. - */ -void SHA512_Last(SHA512_CTX*); -void SHA256_Transform(SHA256_CTX*, const sha2_word32*); -void SHA512_Transform(SHA512_CTX*, const sha2_word64*); - - -/*** SHA-XYZ INITIAL HASH VALUES AND CONSTANTS ************************/ -/* Hash constant words K for SHA-256: */ -static const sha2_word32 K256[64] = { - 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, - 0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, - 0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL, - 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL, - 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL, - 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, - 0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, - 0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL, - 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL, - 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL, - 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, - 0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, - 0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL, - 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL, - 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL, - 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL -}; - -/* Initial hash value H for SHA-256: */ -static const sha2_word32 sha256_initial_hash_value[8] = { - 0x6a09e667UL, - 0xbb67ae85UL, - 0x3c6ef372UL, - 0xa54ff53aUL, - 0x510e527fUL, - 0x9b05688cUL, - 0x1f83d9abUL, - 0x5be0cd19UL -}; - -/* Hash constant words K for SHA-384 and SHA-512: */ -static const sha2_word64 K512[80] = { - 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, - 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, - 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, - 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, - 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, - 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, - 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, - 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL, - 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, - 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, - 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, - 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, - 0x983e5152ee66dfabULL, 
0xa831c66d2db43210ULL, - 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, - 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, - 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, - 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, - 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, - 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, - 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, - 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, - 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, - 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, - 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, - 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, - 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, - 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, - 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, - 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, - 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, - 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, - 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, - 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, - 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, - 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, - 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, - 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, - 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL, - 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, - 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL -}; - -/* Initial hash value H for SHA-384 */ -static const sha2_word64 sha384_initial_hash_value[8] = { - 0xcbbb9d5dc1059ed8ULL, - 0x629a292a367cd507ULL, - 0x9159015a3070dd17ULL, - 0x152fecd8f70e5939ULL, - 0x67332667ffc00b31ULL, - 0x8eb44a8768581511ULL, - 0xdb0c2e0d64f98fa7ULL, - 0x47b5481dbefa4fa4ULL -}; - -/* Initial hash value H for SHA-512 */ -static const sha2_word64 sha512_initial_hash_value[8] = { - 0x6a09e667f3bcc908ULL, - 0xbb67ae8584caa73bULL, - 0x3c6ef372fe94f82bULL, - 0xa54ff53a5f1d36f1ULL, - 0x510e527fade682d1ULL, - 0x9b05688c2b3e6c1fULL, - 0x1f83d9abfb41bd6bULL, - 0x5be0cd19137e2179ULL -}; - -/* - * Constant used by SHA256/384/512_End() functions for converting the - * digest to a readable hexadecimal character string: - */ -static const char *sha2_hex_digits = "0123456789abcdef"; - - -/*** SHA-256: *********************************************************/ -void SHA256_Init(SHA256_CTX* context) { - if (context == (SHA256_CTX*)0) { - return; - } - MEMCPY_BCOPY(context->state, sha256_initial_hash_value, SHA256_DIGEST_LENGTH); - MEMSET_BZERO(context->buffer, SHA256_BLOCK_LENGTH); - context->bitcount = 0; -} - -#ifdef SHA2_UNROLL_TRANSFORM - -/* Unrolled SHA-256 round macros: */ - -#if BYTE_ORDER == LITTLE_ENDIAN - -#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \ - REVERSE32(*data++, W256[j]); \ - T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \ - K256[j] + W256[j]; \ - (d) += T1; \ - (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \ - j++ - - -#else /* BYTE_ORDER == LITTLE_ENDIAN */ - -#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \ - T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \ - K256[j] + (W256[j] = *data++); \ - (d) += T1; \ - (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \ - j++ - -#endif /* BYTE_ORDER == LITTLE_ENDIAN */ - -#define ROUND256(a,b,c,d,e,f,g,h) \ - s0 = W256[(j+1)&0x0f]; \ - s0 = sigma0_256(s0); \ - s1 = W256[(j+14)&0x0f]; \ - s1 = sigma1_256(s1); \ - T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \ - (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \ - (d) += T1; \ - (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \ - j++ - -void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) { - sha2_word32 a, b, c, d, e, f, g, h, s0, s1; - sha2_word32 T1, *W256; 
- int j; - - W256 = (sha2_word32*)context->buffer; - - /* Initialize registers with the prev. intermediate value */ - a = context->state[0]; - b = context->state[1]; - c = context->state[2]; - d = context->state[3]; - e = context->state[4]; - f = context->state[5]; - g = context->state[6]; - h = context->state[7]; - - j = 0; - do { - /* Rounds 0 to 15 (unrolled): */ - ROUND256_0_TO_15(a,b,c,d,e,f,g,h); - ROUND256_0_TO_15(h,a,b,c,d,e,f,g); - ROUND256_0_TO_15(g,h,a,b,c,d,e,f); - ROUND256_0_TO_15(f,g,h,a,b,c,d,e); - ROUND256_0_TO_15(e,f,g,h,a,b,c,d); - ROUND256_0_TO_15(d,e,f,g,h,a,b,c); - ROUND256_0_TO_15(c,d,e,f,g,h,a,b); - ROUND256_0_TO_15(b,c,d,e,f,g,h,a); - } while (j < 16); - - /* Now for the remaining rounds to 64: */ - do { - ROUND256(a,b,c,d,e,f,g,h); - ROUND256(h,a,b,c,d,e,f,g); - ROUND256(g,h,a,b,c,d,e,f); - ROUND256(f,g,h,a,b,c,d,e); - ROUND256(e,f,g,h,a,b,c,d); - ROUND256(d,e,f,g,h,a,b,c); - ROUND256(c,d,e,f,g,h,a,b); - ROUND256(b,c,d,e,f,g,h,a); - } while (j < 64); - - /* Compute the current intermediate hash value */ - context->state[0] += a; - context->state[1] += b; - context->state[2] += c; - context->state[3] += d; - context->state[4] += e; - context->state[5] += f; - context->state[6] += g; - context->state[7] += h; - - /* Clean up */ - a = b = c = d = e = f = g = h = T1 = 0; -} - -#else /* SHA2_UNROLL_TRANSFORM */ - -void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) { - sha2_word32 a, b, c, d, e, f, g, h, s0, s1; - sha2_word32 T1, T2, *W256; - int j; - - W256 = (sha2_word32*)context->buffer; - - /* Initialize registers with the prev. intermediate value */ - a = context->state[0]; - b = context->state[1]; - c = context->state[2]; - d = context->state[3]; - e = context->state[4]; - f = context->state[5]; - g = context->state[6]; - h = context->state[7]; - - j = 0; - do { -#if BYTE_ORDER == LITTLE_ENDIAN - /* Copy data while converting to host byte order */ - REVERSE32(*data++,W256[j]); - /* Apply the SHA-256 compression function to update a..h */ - T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j]; -#else /* BYTE_ORDER == LITTLE_ENDIAN */ - /* Apply the SHA-256 compression function to update a..h with copy */ - T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++); -#endif /* BYTE_ORDER == LITTLE_ENDIAN */ - T2 = Sigma0_256(a) + Maj(a, b, c); - h = g; - g = f; - f = e; - e = d + T1; - d = c; - c = b; - b = a; - a = T1 + T2; - - j++; - } while (j < 16); - - do { - /* Part of the message block expansion: */ - s0 = W256[(j+1)&0x0f]; - s0 = sigma0_256(s0); - s1 = W256[(j+14)&0x0f]; - s1 = sigma1_256(s1); - - /* Apply the SHA-256 compression function to update a..h */ - T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + - (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); - T2 = Sigma0_256(a) + Maj(a, b, c); - h = g; - g = f; - f = e; - e = d + T1; - d = c; - c = b; - b = a; - a = T1 + T2; - - j++; - } while (j < 64); - - /* Compute the current intermediate hash value */ - context->state[0] += a; - context->state[1] += b; - context->state[2] += c; - context->state[3] += d; - context->state[4] += e; - context->state[5] += f; - context->state[6] += g; - context->state[7] += h; - - /* Clean up */ - a = b = c = d = e = f = g = h = T1 = T2 = 0; -} - -#endif /* SHA2_UNROLL_TRANSFORM */ - -void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) { - unsigned int freespace, usedspace; - - if (len == 0) { - /* Calling with no data is valid - we do nothing */ - return; - } - - /* Sanity check: */ - assert(context != (SHA256_CTX*)0 && data 
!= (sha2_byte*)0); - - usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH; - if (usedspace > 0) { - /* Calculate how much free space is available in the buffer */ - freespace = SHA256_BLOCK_LENGTH - usedspace; - - if (len >= freespace) { - /* Fill the buffer completely and process it */ - MEMCPY_BCOPY(&context->buffer[usedspace], data, freespace); - context->bitcount += freespace << 3; - len -= freespace; - data += freespace; - SHA256_Transform(context, (sha2_word32*)context->buffer); - } else { - /* The buffer is not yet full */ - MEMCPY_BCOPY(&context->buffer[usedspace], data, len); - context->bitcount += len << 3; - /* Clean up: */ - usedspace = freespace = 0; - return; - } - } - while (len >= SHA256_BLOCK_LENGTH) { - /* Process as many complete blocks as we can */ - SHA256_Transform(context, (sha2_word32*)data); - context->bitcount += SHA256_BLOCK_LENGTH << 3; - len -= SHA256_BLOCK_LENGTH; - data += SHA256_BLOCK_LENGTH; - } - if (len > 0) { - /* There's left-overs, so save 'em */ - MEMCPY_BCOPY(context->buffer, data, len); - context->bitcount += len << 3; - } - /* Clean up: */ - usedspace = freespace = 0; -} - -void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) { - sha2_word32 *d = (sha2_word32*)digest; - unsigned int usedspace; - - /* Sanity check: */ - assert(context != (SHA256_CTX*)0); - - /* If no digest buffer is passed, we don't bother doing this: */ - if (digest != (sha2_byte*)0) { - usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH; -#if BYTE_ORDER == LITTLE_ENDIAN - /* Convert FROM host byte order */ - REVERSE64(context->bitcount,context->bitcount); -#endif - if (usedspace > 0) { - /* Begin padding with a 1 bit: */ - context->buffer[usedspace++] = 0x80; - - if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) { - /* Set-up for the last transform: */ - MEMSET_BZERO(&context->buffer[usedspace], SHA256_SHORT_BLOCK_LENGTH - usedspace); - } else { - if (usedspace < SHA256_BLOCK_LENGTH) { - MEMSET_BZERO(&context->buffer[usedspace], SHA256_BLOCK_LENGTH - usedspace); - } - /* Do second-to-last transform: */ - SHA256_Transform(context, (sha2_word32*)context->buffer); - - /* And set-up for the last transform: */ - MEMSET_BZERO(context->buffer, SHA256_SHORT_BLOCK_LENGTH); - } - } else { - /* Set-up for the last transform: */ - MEMSET_BZERO(context->buffer, SHA256_SHORT_BLOCK_LENGTH); - - /* Begin padding with a 1 bit: */ - *context->buffer = 0x80; - } - /* Set the bit count: */ - sha2_word64* ptr = (sha2_word64*)(&context->buffer[SHA256_SHORT_BLOCK_LENGTH]); - *ptr = context->bitcount; - - /* Final transform: */ - SHA256_Transform(context, (sha2_word32*)context->buffer); - -#if BYTE_ORDER == LITTLE_ENDIAN - { - /* Convert TO host byte order */ - int j; - for (j = 0; j < 8; j++) { - REVERSE32(context->state[j],context->state[j]); - *d++ = context->state[j]; - } - } -#else - MEMCPY_BCOPY(d, context->state, SHA256_DIGEST_LENGTH); -#endif - } - - /* Clean up state data: */ - MEMSET_BZERO(context, sizeof(SHA256_CTX)); - usedspace = 0; -} - -char *SHA256_End(SHA256_CTX* context, char buffer[]) { - sha2_byte digest[SHA256_DIGEST_LENGTH], *d = digest; - int i; - - /* Sanity check: */ - assert(context != (SHA256_CTX*)0); - - if (buffer != (char*)0) { - SHA256_Final(digest, context); - - for (i = 0; i < SHA256_DIGEST_LENGTH; i++) { - *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4]; - *buffer++ = sha2_hex_digits[*d & 0x0f]; - d++; - } - *buffer = (char)0; - } else { - MEMSET_BZERO(context, sizeof(SHA256_CTX)); - } - MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH); - return buffer; -} - 
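For reference, the SHA-256 entry points above (SHA256_Init, SHA256_Update, SHA256_End) can be exercised against a published test vector. The following is a hypothetical, standalone sketch, not part of the deleted sources; it assumes the file is compiled together with sha2.c with SHA2_USE_INTTYPES_H defined, so the prototypes take uint8_t pointers.

/* Hypothetical self-test, not part of the deleted sources.
 * Assumed build: cc -DSHA2_USE_INTTYPES_H sha2.c selftest.c */
#include <stdio.h>
#include <string.h>
#include "sha2.h"

int main(void)
{
    char hex[SHA256_DIGEST_STRING_LENGTH];

    /* One-shot helper: Init + Update + End over the 3-byte message "abc". */
    SHA256_Data((const uint8_t *)"abc", 3, hex);

    /* Known SHA-256 digest of "abc" (FIPS 180-2 test vector). */
    const char *expected =
        "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad";

    printf("%s\n", strcmp(hex, expected) == 0 ? "ok" : "MISMATCH");
    return 0;
}

SHA256_End writes lowercase hexadecimal (see sha2_hex_digits above), so a case-sensitive comparison is sufficient.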
-char* SHA256_Data(const sha2_byte* data, size_t len, char digest[SHA256_DIGEST_STRING_LENGTH]) { - SHA256_CTX context; - - SHA256_Init(&context); - SHA256_Update(&context, data, len); - return SHA256_End(&context, digest); -} - - -/*** SHA-512: *********************************************************/ -void SHA512_Init(SHA512_CTX* context) { - if (context == (SHA512_CTX*)0) { - return; - } - MEMCPY_BCOPY(context->state, sha512_initial_hash_value, SHA512_DIGEST_LENGTH); - MEMSET_BZERO(context->buffer, SHA512_BLOCK_LENGTH); - context->bitcount[0] = context->bitcount[1] = 0; -} - -#ifdef SHA2_UNROLL_TRANSFORM - -/* Unrolled SHA-512 round macros: */ -#if BYTE_ORDER == LITTLE_ENDIAN - -#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \ - REVERSE64(*data++, W512[j]); \ - T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \ - K512[j] + W512[j]; \ - (d) += T1, \ - (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \ - j++ - - -#else /* BYTE_ORDER == LITTLE_ENDIAN */ - -#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \ - T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \ - K512[j] + (W512[j] = *data++); \ - (d) += T1; \ - (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \ - j++ - -#endif /* BYTE_ORDER == LITTLE_ENDIAN */ - -#define ROUND512(a,b,c,d,e,f,g,h) \ - s0 = W512[(j+1)&0x0f]; \ - s0 = sigma0_512(s0); \ - s1 = W512[(j+14)&0x0f]; \ - s1 = sigma1_512(s1); \ - T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \ - (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \ - (d) += T1; \ - (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \ - j++ - -void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) { - sha2_word64 a, b, c, d, e, f, g, h, s0, s1; - sha2_word64 T1, *W512 = (sha2_word64*)context->buffer; - int j; - - /* Initialize registers with the prev. intermediate value */ - a = context->state[0]; - b = context->state[1]; - c = context->state[2]; - d = context->state[3]; - e = context->state[4]; - f = context->state[5]; - g = context->state[6]; - h = context->state[7]; - - j = 0; - do { - ROUND512_0_TO_15(a,b,c,d,e,f,g,h); - ROUND512_0_TO_15(h,a,b,c,d,e,f,g); - ROUND512_0_TO_15(g,h,a,b,c,d,e,f); - ROUND512_0_TO_15(f,g,h,a,b,c,d,e); - ROUND512_0_TO_15(e,f,g,h,a,b,c,d); - ROUND512_0_TO_15(d,e,f,g,h,a,b,c); - ROUND512_0_TO_15(c,d,e,f,g,h,a,b); - ROUND512_0_TO_15(b,c,d,e,f,g,h,a); - } while (j < 16); - - /* Now for the remaining rounds up to 79: */ - do { - ROUND512(a,b,c,d,e,f,g,h); - ROUND512(h,a,b,c,d,e,f,g); - ROUND512(g,h,a,b,c,d,e,f); - ROUND512(f,g,h,a,b,c,d,e); - ROUND512(e,f,g,h,a,b,c,d); - ROUND512(d,e,f,g,h,a,b,c); - ROUND512(c,d,e,f,g,h,a,b); - ROUND512(b,c,d,e,f,g,h,a); - } while (j < 80); - - /* Compute the current intermediate hash value */ - context->state[0] += a; - context->state[1] += b; - context->state[2] += c; - context->state[3] += d; - context->state[4] += e; - context->state[5] += f; - context->state[6] += g; - context->state[7] += h; - - /* Clean up */ - a = b = c = d = e = f = g = h = T1 = 0; -} - -#else /* SHA2_UNROLL_TRANSFORM */ - -void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) { - sha2_word64 a, b, c, d, e, f, g, h, s0, s1; - sha2_word64 T1, T2, *W512 = (sha2_word64*)context->buffer; - int j; - - /* Initialize registers with the prev. 
intermediate value */ - a = context->state[0]; - b = context->state[1]; - c = context->state[2]; - d = context->state[3]; - e = context->state[4]; - f = context->state[5]; - g = context->state[6]; - h = context->state[7]; - - j = 0; - do { -#if BYTE_ORDER == LITTLE_ENDIAN - /* Convert TO host byte order */ - REVERSE64(*data++, W512[j]); - /* Apply the SHA-512 compression function to update a..h */ - T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j]; -#else /* BYTE_ORDER == LITTLE_ENDIAN */ - /* Apply the SHA-512 compression function to update a..h with copy */ - T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++); -#endif /* BYTE_ORDER == LITTLE_ENDIAN */ - T2 = Sigma0_512(a) + Maj(a, b, c); - h = g; - g = f; - f = e; - e = d + T1; - d = c; - c = b; - b = a; - a = T1 + T2; - - j++; - } while (j < 16); - - do { - /* Part of the message block expansion: */ - s0 = W512[(j+1)&0x0f]; - s0 = sigma0_512(s0); - s1 = W512[(j+14)&0x0f]; - s1 = sigma1_512(s1); - - /* Apply the SHA-512 compression function to update a..h */ - T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + - (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); - T2 = Sigma0_512(a) + Maj(a, b, c); - h = g; - g = f; - f = e; - e = d + T1; - d = c; - c = b; - b = a; - a = T1 + T2; - - j++; - } while (j < 80); - - /* Compute the current intermediate hash value */ - context->state[0] += a; - context->state[1] += b; - context->state[2] += c; - context->state[3] += d; - context->state[4] += e; - context->state[5] += f; - context->state[6] += g; - context->state[7] += h; - - /* Clean up */ - a = b = c = d = e = f = g = h = T1 = T2 = 0; -} - -#endif /* SHA2_UNROLL_TRANSFORM */ - -void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) { - unsigned int freespace, usedspace; - - if (len == 0) { - /* Calling with no data is valid - we do nothing */ - return; - } - - /* Sanity check: */ - assert(context != (SHA512_CTX*)0 && data != (sha2_byte*)0); - - usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH; - if (usedspace > 0) { - /* Calculate how much free space is available in the buffer */ - freespace = SHA512_BLOCK_LENGTH - usedspace; - - if (len >= freespace) { - /* Fill the buffer completely and process it */ - MEMCPY_BCOPY(&context->buffer[usedspace], data, freespace); - ADDINC128(context->bitcount, freespace << 3); - len -= freespace; - data += freespace; - SHA512_Transform(context, (sha2_word64*)context->buffer); - } else { - /* The buffer is not yet full */ - MEMCPY_BCOPY(&context->buffer[usedspace], data, len); - ADDINC128(context->bitcount, len << 3); - /* Clean up: */ - usedspace = freespace = 0; - return; - } - } - while (len >= SHA512_BLOCK_LENGTH) { - /* Process as many complete blocks as we can */ - SHA512_Transform(context, (sha2_word64*)data); - ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3); - len -= SHA512_BLOCK_LENGTH; - data += SHA512_BLOCK_LENGTH; - } - if (len > 0) { - /* There's left-overs, so save 'em */ - MEMCPY_BCOPY(context->buffer, data, len); - ADDINC128(context->bitcount, len << 3); - } - /* Clean up: */ - usedspace = freespace = 0; -} - -void SHA512_Last(SHA512_CTX* context) { - unsigned int usedspace; - - usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH; -#if BYTE_ORDER == LITTLE_ENDIAN - /* Convert FROM host byte order */ - REVERSE64(context->bitcount[0],context->bitcount[0]); - REVERSE64(context->bitcount[1],context->bitcount[1]); -#endif - if (usedspace > 0) { - /* Begin padding with a 1 bit: */ - context->buffer[usedspace++] = 0x80; - - if 
(usedspace <= SHA512_SHORT_BLOCK_LENGTH) { - /* Set-up for the last transform: */ - MEMSET_BZERO(&context->buffer[usedspace], SHA512_SHORT_BLOCK_LENGTH - usedspace); - } else { - if (usedspace < SHA512_BLOCK_LENGTH) { - MEMSET_BZERO(&context->buffer[usedspace], SHA512_BLOCK_LENGTH - usedspace); - } - /* Do second-to-last transform: */ - SHA512_Transform(context, (sha2_word64*)context->buffer); - - /* And set-up for the last transform: */ - MEMSET_BZERO(context->buffer, SHA512_BLOCK_LENGTH - 2); - } - } else { - /* Prepare for final transform: */ - MEMSET_BZERO(context->buffer, SHA512_SHORT_BLOCK_LENGTH); - - /* Begin padding with a 1 bit: */ - *context->buffer = 0x80; - } - /* Store the length of input data (in bits): */ - sha2_word64 *ptr = (sha2_word64*)(&context->buffer[SHA512_SHORT_BLOCK_LENGTH]); - *ptr = context->bitcount[1]; - ptr = (sha2_word64*)(&context->buffer[SHA512_SHORT_BLOCK_LENGTH+8]); - *ptr = context->bitcount[0]; - - /* Final transform: */ - SHA512_Transform(context, (sha2_word64*)context->buffer); -} - -void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) { - sha2_word64 *d = (sha2_word64*)digest; - - /* Sanity check: */ - assert(context != (SHA512_CTX*)0); - - /* If no digest buffer is passed, we don't bother doing this: */ - if (digest != (sha2_byte*)0) { - SHA512_Last(context); - - /* Save the hash data for output: */ -#if BYTE_ORDER == LITTLE_ENDIAN - { - /* Convert TO host byte order */ - int j; - for (j = 0; j < 8; j++) { - REVERSE64(context->state[j],context->state[j]); - *d++ = context->state[j]; - } - } -#else - MEMCPY_BCOPY(d, context->state, SHA512_DIGEST_LENGTH); -#endif - } - - /* Zero out state data */ - MEMSET_BZERO(context, sizeof(SHA512_CTX)); -} - -char *SHA512_End(SHA512_CTX* context, char buffer[]) { - sha2_byte digest[SHA512_DIGEST_LENGTH], *d = digest; - int i; - - /* Sanity check: */ - assert(context != (SHA512_CTX*)0); - - if (buffer != (char*)0) { - SHA512_Final(digest, context); - - for (i = 0; i < SHA512_DIGEST_LENGTH; i++) { - *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4]; - *buffer++ = sha2_hex_digits[*d & 0x0f]; - d++; - } - *buffer = (char)0; - } else { - MEMSET_BZERO(context, sizeof(SHA512_CTX)); - } - MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH); - return buffer; -} - -char* SHA512_Data(const sha2_byte* data, size_t len, char digest[SHA512_DIGEST_STRING_LENGTH]) { - SHA512_CTX context; - - SHA512_Init(&context); - SHA512_Update(&context, data, len); - return SHA512_End(&context, digest); -} - - -/*** SHA-384: *********************************************************/ -void SHA384_Init(SHA384_CTX* context) { - if (context == (SHA384_CTX*)0) { - return; - } - MEMCPY_BCOPY(context->state, sha384_initial_hash_value, SHA512_DIGEST_LENGTH); - MEMSET_BZERO(context->buffer, SHA384_BLOCK_LENGTH); - context->bitcount[0] = context->bitcount[1] = 0; -} - -void SHA384_Update(SHA384_CTX* context, const sha2_byte* data, size_t len) { - SHA512_Update((SHA512_CTX*)context, data, len); -} - -void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) { - sha2_word64 *d = (sha2_word64*)digest; - - /* Sanity check: */ - assert(context != (SHA384_CTX*)0); - - /* If no digest buffer is passed, we don't bother doing this: */ - if (digest != (sha2_byte*)0) { - SHA512_Last((SHA512_CTX*)context); - - /* Save the hash data for output: */ -#if BYTE_ORDER == LITTLE_ENDIAN - { - /* Convert TO host byte order */ - int j; - for (j = 0; j < 6; j++) { - REVERSE64(context->state[j],context->state[j]); - *d++ = context->state[j]; - } - } -#else - MEMCPY_BCOPY(d, 
context->state, SHA384_DIGEST_LENGTH); -#endif - } - - /* Zero out state data */ - MEMSET_BZERO(context, sizeof(SHA384_CTX)); -} - -char *SHA384_End(SHA384_CTX* context, char buffer[]) { - sha2_byte digest[SHA384_DIGEST_LENGTH], *d = digest; - int i; - - /* Sanity check: */ - assert(context != (SHA384_CTX*)0); - - if (buffer != (char*)0) { - SHA384_Final(digest, context); - - for (i = 0; i < SHA384_DIGEST_LENGTH; i++) { - *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4]; - *buffer++ = sha2_hex_digits[*d & 0x0f]; - d++; - } - *buffer = (char)0; - } else { - MEMSET_BZERO(context, sizeof(SHA384_CTX)); - } - MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH); - return buffer; -} - -char* SHA384_Data(const sha2_byte* data, size_t len, char digest[SHA384_DIGEST_STRING_LENGTH]) { - SHA384_CTX context; - - SHA384_Init(&context); - SHA384_Update(&context, data, len); - return SHA384_End(&context, digest); -} - diff --git a/resources/3rdparty/sylvan/src/sha2.h b/resources/3rdparty/sylvan/src/sha2.h deleted file mode 100644 index bf759ad45..000000000 --- a/resources/3rdparty/sylvan/src/sha2.h +++ /dev/null @@ -1,197 +0,0 @@ -/* - * FILE: sha2.h - * AUTHOR: Aaron D. Gifford - http://www.aarongifford.com/ - * - * Copyright (c) 2000-2001, Aaron D. Gifford - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the copyright holder nor the names of contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $Id: sha2.h,v 1.1 2001/11/08 00:02:01 adg Exp adg $ - */ - -#ifndef __SHA2_H__ -#define __SHA2_H__ - -#ifdef __cplusplus -extern "C" { -#endif - - -/* - * Import u_intXX_t size_t type definitions from system headers. You - * may need to change this, or define these things yourself in this - * file. 
- */
-#include <sys/types.h>
-
-#ifdef SHA2_USE_INTTYPES_H
-
-#include <inttypes.h>
-
-#endif /* SHA2_USE_INTTYPES_H */
-
-
-/*** SHA-256/384/512 Various Length Definitions ***********************/
-#define SHA256_BLOCK_LENGTH 64
-#define SHA256_DIGEST_LENGTH 32
-#define SHA256_DIGEST_STRING_LENGTH (SHA256_DIGEST_LENGTH * 2 + 1)
-#define SHA384_BLOCK_LENGTH 128
-#define SHA384_DIGEST_LENGTH 48
-#define SHA384_DIGEST_STRING_LENGTH (SHA384_DIGEST_LENGTH * 2 + 1)
-#define SHA512_BLOCK_LENGTH 128
-#define SHA512_DIGEST_LENGTH 64
-#define SHA512_DIGEST_STRING_LENGTH (SHA512_DIGEST_LENGTH * 2 + 1)
-
-
-/*** SHA-256/384/512 Context Structures *******************************/
-/* NOTE: If your architecture does not define either u_intXX_t types or
- * uintXX_t (from inttypes.h), you may need to define things by hand
- * for your system:
- */
-#if 0
-typedef unsigned char u_int8_t; /* 1-byte (8-bits) */
-typedef unsigned int u_int32_t; /* 4-bytes (32-bits) */
-typedef unsigned long long u_int64_t; /* 8-bytes (64-bits) */
-#endif
-/*
- * Most BSD systems already define u_intXX_t types, as does Linux.
- * Some systems, however, like Compaq's Tru64 Unix instead can use
- * uintXX_t types defined by very recent ANSI C standards and included
- * in the file:
- *
- * #include <inttypes.h>
- *
- * If you choose to use <inttypes.h> then please define:
- *
- * #define SHA2_USE_INTTYPES_H
- *
- * Or on the command line during compile:
- *
- * cc -DSHA2_USE_INTTYPES_H ...
- */
-#ifdef SHA2_USE_INTTYPES_H
-
-typedef struct _SHA256_CTX {
- uint32_t state[8];
- uint64_t bitcount;
- uint8_t buffer[SHA256_BLOCK_LENGTH];
-} SHA256_CTX;
-typedef struct _SHA512_CTX {
- uint64_t state[8];
- uint64_t bitcount[2];
- uint8_t buffer[SHA512_BLOCK_LENGTH];
-} SHA512_CTX;
-
-#else /* SHA2_USE_INTTYPES_H */
-
-typedef struct _SHA256_CTX {
- u_int32_t state[8];
- u_int64_t bitcount;
- u_int8_t buffer[SHA256_BLOCK_LENGTH];
-} SHA256_CTX;
-typedef struct _SHA512_CTX {
- u_int64_t state[8];
- u_int64_t bitcount[2];
- u_int8_t buffer[SHA512_BLOCK_LENGTH];
-} SHA512_CTX;
-
-#endif /* SHA2_USE_INTTYPES_H */
-
-typedef SHA512_CTX SHA384_CTX;
-
-
-/*** SHA-256/384/512 Function Prototypes ******************************/
-#ifndef NOPROTO
-#ifdef SHA2_USE_INTTYPES_H
-
-void SHA256_Init(SHA256_CTX *);
-void SHA256_Update(SHA256_CTX*, const uint8_t*, size_t);
-void SHA256_Final(uint8_t[SHA256_DIGEST_LENGTH], SHA256_CTX*);
-char* SHA256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]);
-char* SHA256_Data(const uint8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]);
-
-void SHA384_Init(SHA384_CTX*);
-void SHA384_Update(SHA384_CTX*, const uint8_t*, size_t);
-void SHA384_Final(uint8_t[SHA384_DIGEST_LENGTH], SHA384_CTX*);
-char* SHA384_End(SHA384_CTX*, char[SHA384_DIGEST_STRING_LENGTH]);
-char* SHA384_Data(const uint8_t*, size_t, char[SHA384_DIGEST_STRING_LENGTH]);
-
-void SHA512_Init(SHA512_CTX*);
-void SHA512_Update(SHA512_CTX*, const uint8_t*, size_t);
-void SHA512_Final(uint8_t[SHA512_DIGEST_LENGTH], SHA512_CTX*);
-char* SHA512_End(SHA512_CTX*, char[SHA512_DIGEST_STRING_LENGTH]);
-char* SHA512_Data(const uint8_t*, size_t, char[SHA512_DIGEST_STRING_LENGTH]);
-
-#else /* SHA2_USE_INTTYPES_H */
-
-void SHA256_Init(SHA256_CTX *);
-void SHA256_Update(SHA256_CTX*, const u_int8_t*, size_t);
-void SHA256_Final(u_int8_t[SHA256_DIGEST_LENGTH], SHA256_CTX*);
-char* SHA256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]);
-char* SHA256_Data(const u_int8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]);
-
-void SHA384_Init(SHA384_CTX*);
-void SHA384_Update(SHA384_CTX*, const u_int8_t*, size_t);
-void
SHA384_Final(u_int8_t[SHA384_DIGEST_LENGTH], SHA384_CTX*); -char* SHA384_End(SHA384_CTX*, char[SHA384_DIGEST_STRING_LENGTH]); -char* SHA384_Data(const u_int8_t*, size_t, char[SHA384_DIGEST_STRING_LENGTH]); - -void SHA512_Init(SHA512_CTX*); -void SHA512_Update(SHA512_CTX*, const u_int8_t*, size_t); -void SHA512_Final(u_int8_t[SHA512_DIGEST_LENGTH], SHA512_CTX*); -char* SHA512_End(SHA512_CTX*, char[SHA512_DIGEST_STRING_LENGTH]); -char* SHA512_Data(const u_int8_t*, size_t, char[SHA512_DIGEST_STRING_LENGTH]); - -#endif /* SHA2_USE_INTTYPES_H */ - -#else /* NOPROTO */ - -void SHA256_Init(); -void SHA256_Update(); -void SHA256_Final(); -char* SHA256_End(); -char* SHA256_Data(); - -void SHA384_Init(); -void SHA384_Update(); -void SHA384_Final(); -char* SHA384_End(); -char* SHA384_Data(); - -void SHA512_Init(); -void SHA512_Update(); -void SHA512_Final(); -char* SHA512_End(); -char* SHA512_Data(); - -#endif /* NOPROTO */ - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif /* __SHA2_H__ */ - diff --git a/resources/3rdparty/sylvan/src/stats.c b/resources/3rdparty/sylvan/src/stats.c deleted file mode 100644 index 0f5394dbd..000000000 --- a/resources/3rdparty/sylvan/src/stats.c +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright 2011-2014 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
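The statistics module that follows keeps one counter block per worker thread: a __thread variable where ELF thread-local storage is available, and a pthread key otherwise. As background for that fallback path, here is a hypothetical, self-contained sketch of the pthread_key_create / pthread_setspecific / pthread_getspecific pattern; it is not part of the deleted sources and only illustrates the mechanism.

/* Hypothetical illustration of per-thread storage via pthread keys;
 * not part of the deleted sources. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t counter_key;

static void *worker(void *arg)
{
    /* Each thread installs its own counter and later retrieves it
     * through the same key. */
    long *counter = calloc(1, sizeof(long));
    pthread_setspecific(counter_key, counter);

    for (int i = 0; i < 1000; i++) {
        (*(long *)pthread_getspecific(counter_key))++;
    }
    printf("thread %ld counted %ld\n", (long)(intptr_t)arg,
           *(long *)pthread_getspecific(counter_key));
    free(counter);
    return NULL;
}

int main(void)
{
    pthread_t t[2];
    pthread_key_create(&counter_key, NULL);
    for (long i = 0; i < 2; i++)
        pthread_create(&t[i], NULL, worker, (void *)(intptr_t)i);
    for (long i = 0; i < 2; i++)
        pthread_join(t[i], NULL);
    pthread_key_delete(&counter_key);
    return 0;
}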
- */ - -#include // memset -#include -#include -#include -#include // for nodes table - -#ifdef __ELF__ -__thread sylvan_stats_t sylvan_stats; -#else -pthread_key_t sylvan_stats_key; -#endif - -VOID_TASK_0(sylvan_stats_reset_perthread) -{ -#ifdef __ELF__ - for (int i=0; icounters[i] = 0; - } - for (int i=0; itimers[i] = 0; - } -#endif -} - -VOID_TASK_IMPL_0(sylvan_stats_init) -{ -#ifndef __ELF__ - pthread_key_create(&sylvan_stats_key, NULL); -#endif - TOGETHER(sylvan_stats_reset_perthread); -} - -/** - * Reset all counters (for statistics) - */ -VOID_TASK_IMPL_0(sylvan_stats_reset) -{ - TOGETHER(sylvan_stats_reset_perthread); -} - -#define BLACK "\33[22;30m" -#define GRAY "\33[01;30m" -#define RED "\33[22;31m" -#define LRED "\33[01;31m" -#define GREEN "\33[22;32m" -#define LGREEN "\33[01;32m" -#define BLUE "\33[22;34m" -#define LBLUE "\33[01;34m" -#define BROWN "\33[22;33m" -#define YELLOW "\33[01;33m" -#define CYAN "\33[22;36m" -#define LCYAN "\33[22;36m" -#define MAGENTA "\33[22;35m" -#define LMAGENTA "\33[01;35m" -#define NC "\33[0m" -#define BOLD "\33[1m" -#define ULINE "\33[4m" //underline -#define BLINK "\33[5m" -#define INVERT "\33[7m" - -VOID_TASK_1(sylvan_stats_sum, sylvan_stats_t*, target) -{ -#ifdef __ELF__ - for (int i=0; icounters[i], sylvan_stats.counters[i]); - } - for (int i=0; itimers[i], sylvan_stats.timers[i]); - } -#else - sylvan_stats_t *sylvan_stats = pthread_getspecific(sylvan_stats_key); - if (sylvan_stats != NULL) { - for (int i=0; icounters[i], sylvan_stats->counters[i]); - } - for (int i=0; itimers[i], sylvan_stats->timers[i]); - } - } -#endif -} - -void -sylvan_stats_report(FILE *target, int color) -{ -#if !SYLVAN_STATS - (void)target; - (void)color; - return; -#else - (void)color; - - sylvan_stats_t totals; - memset(&totals, 0, sizeof(sylvan_stats_t)); - - LACE_ME; - TOGETHER(sylvan_stats_sum, &totals); - - // fix timers for MACH -#ifdef __MACH__ - mach_timebase_info_data_t timebase; - mach_timebase_info(&timebase); - uint64_t c = timebase.numer/timebase.denom; - for (int i=0;i -#define getabstime() mach_absolute_time() -#else -#include -static uint64_t -getabstime() -{ - struct timespec ts; - clock_gettime(CLOCK_MONOTONIC, &ts); - uint64_t t = ts.tv_sec; - t *= 1000000000UL; - t += ts.tv_nsec; - return t; -} -#endif - -#ifdef __ELF__ -extern __thread sylvan_stats_t sylvan_stats; -#else -#include -extern pthread_key_t sylvan_stats_key; -#endif - -static inline void -sylvan_stats_count(size_t counter) -{ -#ifdef __ELF__ - sylvan_stats.counters[counter]++; -#else - sylvan_stats_t *sylvan_stats = (sylvan_stats_t*)pthread_getspecific(sylvan_stats_key); - sylvan_stats->counters[counter]++; -#endif -} - -static inline void -sylvan_stats_add(size_t counter, size_t amount) -{ -#ifdef __ELF__ - sylvan_stats.counters[counter]+=amount; -#else - sylvan_stats_t *sylvan_stats = (sylvan_stats_t*)pthread_getspecific(sylvan_stats_key); - sylvan_stats->counters[counter]+=amount; -#endif -} - -static inline void -sylvan_timer_start(size_t timer) -{ - uint64_t t = getabstime(); - -#ifdef __ELF__ - sylvan_stats.timers_startstop[timer] = t; -#else - sylvan_stats_t *sylvan_stats = (sylvan_stats_t*)pthread_getspecific(sylvan_stats_key); - sylvan_stats->timers_startstop[timer] = t; -#endif -} - -static inline void -sylvan_timer_stop(size_t timer) -{ - uint64_t t = getabstime(); - -#ifdef __ELF__ - sylvan_stats.timers[timer] += (t - sylvan_stats.timers_startstop[timer]); -#else - sylvan_stats_t *sylvan_stats = (sylvan_stats_t*)pthread_getspecific(sylvan_stats_key); - 
sylvan_stats->timers[timer] += (t - sylvan_stats->timers_startstop[timer]); -#endif -} - -#else - -static inline void -sylvan_stats_count(size_t counter) -{ - (void)counter; -} - -static inline void -sylvan_stats_add(size_t counter, size_t amount) -{ - (void)counter; - (void)amount; -} - -static inline void -sylvan_timer_start(size_t timer) -{ - (void)timer; -} - -static inline void -sylvan_timer_stop(size_t timer) -{ - (void)timer; -} - -#endif - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan.h b/resources/3rdparty/sylvan/src/sylvan.h deleted file mode 100644 index 3b490993a..000000000 --- a/resources/3rdparty/sylvan/src/sylvan.h +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Sylvan: parallel BDD/ListDD package. - * - * This is a multi-core implementation of BDDs with complement edges. - * - * This package requires parallel the work-stealing framework Lace. - * Lace must be initialized before initializing Sylvan - * - * This package uses explicit referencing. - * Use sylvan_ref and sylvan_deref to manage external references. - * - * Garbage collection requires all workers to cooperate. Garbage collection is either initiated - * by the user (calling sylvan_gc) or when the nodes table is full. All Sylvan operations - * check whether they need to cooperate on garbage collection. Garbage collection cannot occur - * otherwise. This means that it is perfectly fine to do this: - * BDD a = sylvan_ref(sylvan_and(b, c)); - * since it is not possible that garbage collection occurs between the two calls. - * - * To temporarily disable garbage collection, use sylvan_gc_disable() and sylvan_gc_enable(). - */ - -#include "sylvan_config.h" - -#include -#include // for FILE -#include -#include "lace.h" // for definitions - -#include "sylvan_cache.h" -#include "llmsset.h" -#include "stats.h" - -#ifndef SYLVAN_H -#define SYLVAN_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#ifndef SYLVAN_SIZE_FIBONACCI -#define SYLVAN_SIZE_FIBONACCI 0 -#endif - -// For now, only support 64-bit systems -typedef char __sylvan_check_size_t_is_8_bytes[(sizeof(uint64_t) == sizeof(size_t))?1:-1]; - -/** - * Initialize the Sylvan parallel decision diagrams package. - * - * After initialization, call sylvan_init_bdd and/or sylvan_init_ldd if you want to use - * the BDD and/or LDD functionality. - * - * BDDs and LDDs share a common node table and operations cache. - * - * The node table is resizable. - * The table is resized automatically when >50% of the table is filled during garbage collection. - * This behavior can be customized by overriding the gc hook. - * - * Memory usage: - * Every node requires 24 bytes memory. (16 bytes data + 8 bytes overhead) - * Every operation cache entry requires 36 bytes memory. 
(32 bytes data + 4 bytes overhead) - * - * Reasonable defaults: datasize of 1L<<26 (2048 MB), cachesize of 1L<<25 (1152 MB) - */ -void sylvan_init_package(size_t initial_tablesize, size_t max_tablesize, size_t initial_cachesize, size_t max_cachesize); - -/** - * Frees all Sylvan data (also calls the quit() functions of BDD/MDD parts) - */ -void sylvan_quit(); - -/** - * Return number of occupied buckets in nodes table and total number of buckets. - */ -VOID_TASK_DECL_2(sylvan_table_usage, size_t*, size_t*); -#define sylvan_table_usage(filled, total) (CALL(sylvan_table_usage, filled, total)) - -/** - * Perform garbage collection. - * - * Garbage collection is performed in a new Lace frame, interrupting all ongoing work - * until garbage collection is completed. - * - * Garbage collection procedure: - * 1) The operation cache is cleared and the hash table is reset. - * 2) All live nodes are marked (to be rehashed). This is done by the "mark" callbacks. - * 3) The "hook" callback is called. - * By default, this doubles the hash table size when it is >50% full. - * 4) All live nodes are rehashed into the hash table. - * - * The behavior of garbage collection can be customized by adding "mark" callbacks and - * replacing the "hook" callback. - */ -VOID_TASK_DECL_0(sylvan_gc); -#define sylvan_gc() (CALL(sylvan_gc)) - -/** - * Enable or disable garbage collection. - * - * This affects both automatic and manual garbage collection, i.e., - * calling sylvan_gc() while garbage collection is disabled does not have any effect. - */ -void sylvan_gc_enable(); -void sylvan_gc_disable(); - -/** - * Add a "mark" callback to the list of callbacks. - * - * These are called during garbage collection to recursively mark nodes. - * - * Default "mark" functions that mark external references (via sylvan_ref) and internal - * references (inside operations) are added by sylvan_init_bdd/sylvan_init_bdd. - * - * Functions are called in order. - * level 10: marking functions of Sylvan (external/internal references) - * level 20: call the hook function (for resizing) - * level 30: rehashing - */ -LACE_TYPEDEF_CB(void, gc_mark_cb); -void sylvan_gc_add_mark(int order, gc_mark_cb callback); - -/** - * Set "hook" callback. There can be only one. - * - * The hook is called after the "mark" phase and before the "rehash" phase. - * This allows users to perform certain actions, such as resizing the nodes table - * and the operation cache. Also, dynamic resizing could be performed then. - */ -LACE_TYPEDEF_CB(void, gc_hook_cb); -void sylvan_gc_set_hook(gc_hook_cb new_hook); - -/** - * One of the hooks for resizing behavior. - * Default if SYLVAN_AGGRESSIVE_RESIZE is set. - * Always double size on gc() until maximum reached. - */ -VOID_TASK_DECL_0(sylvan_gc_aggressive_resize); - -/** - * One of the hooks for resizing behavior. - * Default if SYLVAN_AGGRESSIVE_RESIZE is not set. - * Double size on gc() whenever >50% is used. - */ -VOID_TASK_DECL_0(sylvan_gc_default_hook); - -/** - * Set "notify on dead" callback for the nodes table. 
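For context, the documented start-up order (initialize Lace first, then the node table and cache via sylvan_init_package, then the BDD module) looks roughly as follows in a standalone program. This is a hedged sketch, not part of the deleted sources: the Lace calls (lace_init, lace_startup, lace_exit) and the sylvan_ithvar / sylvan_and / sylvan_nodecount wrappers are assumed to behave as in the Sylvan examples of this era, and the table and cache sizes are arbitrary.

/* Hypothetical usage sketch, not part of the deleted sources.
 * Assumes the Lace and sylvan_bdd.h APIs shipped with this Sylvan version. */
#include <stdio.h>
#include <sylvan.h>

int main(void)
{
    lace_init(4, 0);              /* 4 workers, default deque size */
    lace_startup(0, NULL, NULL);  /* default stack size; control returns here */
    LACE_ME;                      /* obtain a Lace worker context in this thread */

    /* nodes table and cache: start at 2^20 entries, grow up to 2^24 */
    sylvan_init_package(1LL<<20, 1LL<<24, 1LL<<20, 1LL<<24);
    sylvan_init_bdd(1);           /* granularity 1: consult the cache at every level */

    BDD a = sylvan_ithvar(0);
    BDD b = sylvan_ithvar(1);
    BDD f = sylvan_ref(sylvan_and(a, b));   /* safe: no GC between the two calls */
    printf("nodes in f: %zu\n", (size_t)sylvan_nodecount(f));

    sylvan_deref(f);
    sylvan_quit();
    lace_exit();
    return 0;
}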
- * See also documentation in llmsset.h - */ -#define sylvan_set_ondead(cb, ctx) llmsset_set_ondead(nodes, cb, ctx) - -/** - * Global variables (number of workers, nodes table) - */ - -extern llmsset_t nodes; - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#include "sylvan_bdd.h" -#include "sylvan_ldd.h" -#include "sylvan_mtbdd.h" - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_bdd.c b/resources/3rdparty/sylvan/src/sylvan_bdd.c deleted file mode 100644 index 37b154534..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_bdd.c +++ /dev/null @@ -1,2820 +0,0 @@ -/* - * Copyright 2011-2014 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/** - * Complement handling macros - */ -#define BDD_HASMARK(s) (s&sylvan_complement?1:0) -#define BDD_TOGGLEMARK(s) (s^sylvan_complement) -#define BDD_STRIPMARK(s) (s&~sylvan_complement) -#define BDD_TRANSFERMARK(from, to) (to ^ (from & sylvan_complement)) -// Equal under mark -#define BDD_EQUALM(a, b) ((((a)^(b))&(~sylvan_complement))==0) - -/** - * BDD node structure - */ -typedef struct __attribute__((packed)) bddnode { - uint64_t a, b; -} * bddnode_t; // 16 bytes - -#define GETNODE(bdd) ((bddnode_t)llmsset_index_to_ptr(nodes, bdd&0x000000ffffffffff)) - -static inline int __attribute__((unused)) -bddnode_getcomp(bddnode_t n) -{ - return n->a & 0x8000000000000000 ? 1 : 0; -} - -static inline uint64_t -bddnode_getlow(bddnode_t n) -{ - return n->b & 0x000000ffffffffff; // 40 bits -} - -static inline uint64_t -bddnode_gethigh(bddnode_t n) -{ - return n->a & 0x800000ffffffffff; // 40 bits plus high bit of first -} - -static inline uint32_t -bddnode_getvariable(bddnode_t n) -{ - return (uint32_t)(n->b >> 40); -} - -static inline int -bddnode_getmark(bddnode_t n) -{ - return n->a & 0x2000000000000000 ? 1 : 0; -} - -static inline void -bddnode_setmark(bddnode_t n, int mark) -{ - if (mark) n->a |= 0x2000000000000000; - else n->a &= 0xdfffffffffffffff; -} - -static inline void -bddnode_makenode(bddnode_t n, uint32_t var, uint64_t low, uint64_t high) -{ - n->a = high; - n->b = ((uint64_t)var)<<40 | low; -} - -/** - * Implementation of garbage collection. 
- */ - -/* Recursively mark BDD nodes as 'in use' */ -VOID_TASK_IMPL_1(sylvan_gc_mark_rec, BDD, bdd) -{ - if (bdd == sylvan_false || bdd == sylvan_true) return; - - if (llmsset_mark(nodes, bdd&0x000000ffffffffff)) { - bddnode_t n = GETNODE(bdd); - SPAWN(sylvan_gc_mark_rec, bddnode_getlow(n)); - CALL(sylvan_gc_mark_rec, bddnode_gethigh(n)); - SYNC(sylvan_gc_mark_rec); - } -} - -/** - * External references - */ - -refs_table_t bdd_refs; -refs_table_t bdd_protected; -static int bdd_protected_created = 0; - -BDD -sylvan_ref(BDD a) -{ - if (a == sylvan_false || a == sylvan_true) return a; - refs_up(&bdd_refs, BDD_STRIPMARK(a)); - return a; -} - -void -sylvan_deref(BDD a) -{ - if (a == sylvan_false || a == sylvan_true) return; - refs_down(&bdd_refs, BDD_STRIPMARK(a)); -} - -void -sylvan_protect(BDD *a) -{ - if (!bdd_protected_created) { - // In C++, sometimes sylvan_protect is called before Sylvan is initialized. Just create a table. - protect_create(&bdd_protected, 4096); - bdd_protected_created = 1; - } - protect_up(&bdd_protected, (size_t)a); -} - -void -sylvan_unprotect(BDD *a) -{ - protect_down(&bdd_protected, (size_t)a); -} - -size_t -sylvan_count_refs() -{ - return refs_count(&bdd_refs); -} - -size_t -sylvan_count_protected() -{ - return protect_count(&bdd_protected); -} - -/* Called during garbage collection */ -VOID_TASK_0(sylvan_gc_mark_external_refs) -{ - // iterate through refs hash table, mark all found - size_t count=0; - uint64_t *it = refs_iter(&bdd_refs, 0, bdd_refs.refs_size); - while (it != NULL) { - BDD to_mark = refs_next(&bdd_refs, &it, bdd_refs.refs_size); - SPAWN(sylvan_gc_mark_rec, to_mark); - count++; - } - while (count--) { - SYNC(sylvan_gc_mark_rec); - } -} - -VOID_TASK_0(sylvan_gc_mark_protected) -{ - // iterate through refs hash table, mark all found - size_t count=0; - uint64_t *it = protect_iter(&bdd_protected, 0, bdd_protected.refs_size); - while (it != NULL) { - BDD *to_mark = (BDD*)protect_next(&bdd_protected, &it, bdd_protected.refs_size); - SPAWN(sylvan_gc_mark_rec, *to_mark); - count++; - } - while (count--) { - SYNC(sylvan_gc_mark_rec); - } -} - -/* Infrastructure for internal markings */ -DECLARE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); - -VOID_TASK_0(bdd_refs_mark_task) -{ - LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); - size_t i, j=0; - for (i=0; ir_count; i++) { - if (j >= 40) { - while (j--) SYNC(sylvan_gc_mark_rec); - j=0; - } - SPAWN(sylvan_gc_mark_rec, bdd_refs_key->results[i]); - j++; - } - for (i=0; is_count; i++) { - Task *t = bdd_refs_key->spawns[i]; - if (!TASK_IS_STOLEN(t)) break; - if (TASK_IS_COMPLETED(t)) { - if (j >= 40) { - while (j--) SYNC(sylvan_gc_mark_rec); - j=0; - } - SPAWN(sylvan_gc_mark_rec, *(BDD*)TASK_RESULT(t)); - j++; - } - } - while (j--) SYNC(sylvan_gc_mark_rec); -} - -VOID_TASK_0(bdd_refs_mark) -{ - TOGETHER(bdd_refs_mark_task); -} - -VOID_TASK_0(bdd_refs_init_task) -{ - bdd_refs_internal_t s = (bdd_refs_internal_t)malloc(sizeof(struct bdd_refs_internal)); - s->r_size = 128; - s->r_count = 0; - s->s_size = 128; - s->s_count = 0; - s->results = (BDD*)malloc(sizeof(BDD) * 128); - s->spawns = (Task**)malloc(sizeof(Task*) * 128); - SET_THREAD_LOCAL(bdd_refs_key, s); -} - -VOID_TASK_0(bdd_refs_init) -{ - INIT_THREAD_LOCAL(bdd_refs_key); - TOGETHER(bdd_refs_init_task); - sylvan_gc_add_mark(10, TASK(bdd_refs_mark)); -} - -/** - * Initialize and quit functions - */ - -static int granularity = 1; // default - -static void -sylvan_quit_bdd() -{ - refs_free(&bdd_refs); - if (bdd_protected_created) { - 
protect_free(&bdd_protected); - bdd_protected_created = 0; - } -} - -void -sylvan_init_bdd(int _granularity) -{ - sylvan_register_quit(sylvan_quit_bdd); - sylvan_gc_add_mark(10, TASK(sylvan_gc_mark_external_refs)); - sylvan_gc_add_mark(10, TASK(sylvan_gc_mark_protected)); - - granularity = _granularity; - - // Sanity check - if (sizeof(struct bddnode) != 16) { - fprintf(stderr, "Invalid size of bdd nodes: %ld\n", sizeof(struct bddnode)); - exit(1); - } - - refs_create(&bdd_refs, 1024); - if (!bdd_protected_created) { - protect_create(&bdd_protected, 4096); - bdd_protected_created = 1; - } - - LACE_ME; - CALL(bdd_refs_init); -} - -/** - * Core BDD operations - */ - -BDD -sylvan_makenode(BDDVAR level, BDD low, BDD high) -{ - if (low == high) return low; - - // Normalization to keep canonicity - // low will have no mark - - struct bddnode n; - int mark; - - if (BDD_HASMARK(low)) { - mark = 1; - low = BDD_TOGGLEMARK(low); - high = BDD_TOGGLEMARK(high); - } else { - mark = 0; - } - - bddnode_makenode(&n, level, low, high); - - BDD result; - int created; - uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - LACE_ME; - - bdd_refs_push(low); - bdd_refs_push(high); - sylvan_gc(); - bdd_refs_pop(2); - - index = llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - fprintf(stderr, "BDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); - exit(1); - } - } - - if (created) sylvan_stats_count(BDD_NODES_CREATED); - else sylvan_stats_count(BDD_NODES_REUSED); - - result = index; - return mark ? result | sylvan_complement : result; -} - -BDD -sylvan_ithvar(BDDVAR level) -{ - return sylvan_makenode(level, sylvan_false, sylvan_true); -} - -BDDVAR -sylvan_var(BDD bdd) -{ - return bddnode_getvariable(GETNODE(bdd)); -} - -static BDD -node_low(BDD bdd, bddnode_t node) -{ - return BDD_TRANSFERMARK(bdd, bddnode_getlow(node)); -} - -static BDD -node_high(BDD bdd, bddnode_t node) -{ - return BDD_TRANSFERMARK(bdd, bddnode_gethigh(node)); -} - -BDD -sylvan_low(BDD bdd) -{ - if (sylvan_isconst(bdd)) return bdd; - return node_low(bdd, GETNODE(bdd)); -} - -BDD -sylvan_high(BDD bdd) -{ - if (sylvan_isconst(bdd)) return bdd; - return node_high(bdd, GETNODE(bdd)); -} - -/** - * Implementation of unary, binary and if-then-else operators. - */ -TASK_IMPL_3(BDD, sylvan_and, BDD, a, BDD, b, BDDVAR, prev_level) -{ - /* Terminal cases */ - if (a == sylvan_true) return b; - if (b == sylvan_true) return a; - if (a == sylvan_false) return sylvan_false; - if (b == sylvan_false) return sylvan_false; - if (a == b) return a; - if (a == BDD_TOGGLEMARK(b)) return sylvan_false; - - sylvan_gc_test(); - - sylvan_stats_count(BDD_AND); - - /* Improve for caching */ - if (BDD_STRIPMARK(a) > BDD_STRIPMARK(b)) { - BDD t = b; - b = a; - a = t; - } - - bddnode_t na = GETNODE(a); - bddnode_t nb = GETNODE(b); - - BDDVAR va = bddnode_getvariable(na); - BDDVAR vb = bddnode_getvariable(nb); - BDDVAR level = va < vb ? va : vb; - - int cachenow = granularity < 2 || prev_level == 0 ? 
1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_AND, a, b, sylvan_false, &result)) { - sylvan_stats_count(BDD_AND_CACHED); - return result; - } - } - - // Get cofactors - BDD aLow = a, aHigh = a; - BDD bLow = b, bHigh = b; - if (level == va) { - aLow = node_low(a, na); - aHigh = node_high(a, na); - } - if (level == vb) { - bLow = node_low(b, nb); - bHigh = node_high(b, nb); - } - - // Recursive computation - BDD low=sylvan_invalid, high=sylvan_invalid, result; - - int n=0; - - if (aHigh == sylvan_true) { - high = bHigh; - } else if (aHigh == sylvan_false || bHigh == sylvan_false) { - high = sylvan_false; - } else if (bHigh == sylvan_true) { - high = aHigh; - } else { - bdd_refs_spawn(SPAWN(sylvan_and, aHigh, bHigh, level)); - n=1; - } - - if (aLow == sylvan_true) { - low = bLow; - } else if (aLow == sylvan_false || bLow == sylvan_false) { - low = sylvan_false; - } else if (bLow == sylvan_true) { - low = aLow; - } else { - low = CALL(sylvan_and, aLow, bLow, level); - } - - if (n) { - bdd_refs_push(low); - high = bdd_refs_sync(SYNC(sylvan_and)); - bdd_refs_pop(1); - } - - result = sylvan_makenode(level, low, high); - - if (cachenow) { - if (cache_put3(CACHE_BDD_AND, a, b, sylvan_false, result)) sylvan_stats_count(BDD_AND_CACHEDPUT); - } - - return result; -} - -TASK_IMPL_3(BDD, sylvan_xor, BDD, a, BDD, b, BDDVAR, prev_level) -{ - /* Terminal cases */ - if (a == sylvan_false) return b; - if (b == sylvan_false) return a; - if (a == sylvan_true) return sylvan_not(b); - if (b == sylvan_true) return sylvan_not(a); - if (a == b) return sylvan_false; - if (a == sylvan_not(b)) return sylvan_true; - - sylvan_gc_test(); - - sylvan_stats_count(BDD_XOR); - - /* Improve for caching */ - if (BDD_STRIPMARK(a) > BDD_STRIPMARK(b)) { - BDD t = b; - b = a; - a = t; - } - - // XOR(~A,B) => XOR(A,~B) - if (BDD_HASMARK(a)) { - a = BDD_STRIPMARK(a); - b = sylvan_not(b); - } - - bddnode_t na = GETNODE(a); - bddnode_t nb = GETNODE(b); - - BDDVAR va = bddnode_getvariable(na); - BDDVAR vb = bddnode_getvariable(nb); - BDDVAR level = va < vb ? va : vb; - - int cachenow = granularity < 2 || prev_level == 0 ? 
1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_XOR, a, b, sylvan_false, &result)) { - sylvan_stats_count(BDD_XOR_CACHED); - return result; - } - } - - // Get cofactors - BDD aLow = a, aHigh = a; - BDD bLow = b, bHigh = b; - if (level == va) { - aLow = node_low(a, na); - aHigh = node_high(a, na); - } - if (level == vb) { - bLow = node_low(b, nb); - bHigh = node_high(b, nb); - } - - // Recursive computation - BDD low, high, result; - - bdd_refs_spawn(SPAWN(sylvan_xor, aHigh, bHigh, level)); - low = CALL(sylvan_xor, aLow, bLow, level); - bdd_refs_push(low); - high = bdd_refs_sync(SYNC(sylvan_xor)); - bdd_refs_pop(1); - - result = sylvan_makenode(level, low, high); - - if (cachenow) { - if (cache_put3(CACHE_BDD_XOR, a, b, sylvan_false, result)) sylvan_stats_count(BDD_XOR_CACHEDPUT); - } - - return result; -} - - -TASK_IMPL_4(BDD, sylvan_ite, BDD, a, BDD, b, BDD, c, BDDVAR, prev_level) -{ - /* Terminal cases */ - if (a == sylvan_true) return b; - if (a == sylvan_false) return c; - if (a == b) b = sylvan_true; - if (a == sylvan_not(b)) b = sylvan_false; - if (a == c) c = sylvan_false; - if (a == sylvan_not(c)) c = sylvan_true; - if (b == c) return b; - if (b == sylvan_true && c == sylvan_false) return a; - if (b == sylvan_false && c == sylvan_true) return sylvan_not(a); - - /* Cases that reduce to AND and XOR */ - - // ITE(A,B,0) => AND(A,B) - if (c == sylvan_false) return CALL(sylvan_and, a, b, prev_level); - - // ITE(A,1,C) => ~AND(~A,~C) - if (b == sylvan_true) return sylvan_not(CALL(sylvan_and, sylvan_not(a), sylvan_not(c), prev_level)); - - // ITE(A,0,C) => AND(~A,C) - if (b == sylvan_false) return CALL(sylvan_and, sylvan_not(a), c, prev_level); - - // ITE(A,B,1) => ~AND(A,~B) - if (c == sylvan_true) return sylvan_not(CALL(sylvan_and, a, sylvan_not(b), prev_level)); - - // ITE(A,B,~B) => XOR(A,~B) - if (b == sylvan_not(c)) return CALL(sylvan_xor, a, c, 0); - - /* At this point, there are no more terminals */ - - /* Canonical for optimal cache use */ - - // ITE(~A,B,C) => ITE(A,C,B) - if (BDD_HASMARK(a)) { - a = BDD_STRIPMARK(a); - BDD t = c; - c = b; - b = t; - } - - // ITE(A,~B,C) => ~ITE(A,B,~C) - int mark = 0; - if (BDD_HASMARK(b)) { - b = sylvan_not(b); - c = sylvan_not(c); - mark = 1; - } - - bddnode_t na = GETNODE(a); - bddnode_t nb = GETNODE(b); - bddnode_t nc = GETNODE(c); - - BDDVAR va = bddnode_getvariable(na); - BDDVAR vb = bddnode_getvariable(nb); - BDDVAR vc = bddnode_getvariable(nc); - - // Get lowest level - BDDVAR level = vb < vc ? vb : vc; - - // Fast case - if (va < level && node_low(a, na) == sylvan_false && node_high(a, na) == sylvan_true) { - BDD result = sylvan_makenode(va, c, b); - return mark ? sylvan_not(result) : result; - } - - if (va < level) level = va; - - sylvan_gc_test(); - - sylvan_stats_count(BDD_ITE); - - int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_ITE, a, b, c, &result)) { - sylvan_stats_count(BDD_ITE_CACHED); - return mark ? 
sylvan_not(result) : result; - } - } - - // Get cofactors - BDD aLow = a, aHigh = a; - BDD bLow = b, bHigh = b; - BDD cLow = c, cHigh = c; - if (level == va) { - aLow = node_low(a, na); - aHigh = node_high(a, na); - } - if (level == vb) { - bLow = node_low(b, nb); - bHigh = node_high(b, nb); - } - if (level == vc) { - cLow = node_low(c, nc); - cHigh = node_high(c, nc); - } - - // Recursive computation - BDD low=sylvan_invalid, high=sylvan_invalid, result; - - int n=0; - - if (aHigh == sylvan_true) { - high = bHigh; - } else if (aHigh == sylvan_false) { - high = cHigh; - } else { - bdd_refs_spawn(SPAWN(sylvan_ite, aHigh, bHigh, cHigh, level)); - n=1; - } - - if (aLow == sylvan_true) { - low = bLow; - } else if (aLow == sylvan_false) { - low = cLow; - } else { - low = CALL(sylvan_ite, aLow, bLow, cLow, level); - } - - if (n) { - bdd_refs_push(low); - high = bdd_refs_sync(SYNC(sylvan_ite)); - bdd_refs_pop(1); - } - - result = sylvan_makenode(level, low, high); - - if (cachenow) { - if (cache_put3(CACHE_BDD_ITE, a, b, c, result)) sylvan_stats_count(BDD_ITE_CACHEDPUT); - } - - return mark ? sylvan_not(result) : result; -} - -/** - * Calculate constrain a @ c - */ -TASK_IMPL_3(BDD, sylvan_constrain, BDD, a, BDD, b, BDDVAR, prev_level) -{ - /* Trivial cases */ - if (b == sylvan_true) return a; - if (b == sylvan_false) return sylvan_false; - if (sylvan_isconst(a)) return a; - if (a == b) return sylvan_true; - if (a == sylvan_not(b)) return sylvan_false; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - /* Count operation */ - sylvan_stats_count(BDD_CONSTRAIN); - - // a != constant and b != constant - bddnode_t na = GETNODE(a); - bddnode_t nb = GETNODE(b); - - BDDVAR va = bddnode_getvariable(na); - BDDVAR vb = bddnode_getvariable(nb); - BDDVAR level = va < vb ? va : vb; - - // CONSULT CACHE - - int cachenow = granularity < 2 || prev_level == 0 ? 
1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_CONSTRAIN, a, b, 0, &result)) { - sylvan_stats_count(BDD_CONSTRAIN_CACHED); - return result; - } - } - - // DETERMINE TOP BDDVAR AND COFACTORS - - BDD aLow, aHigh, bLow, bHigh; - - if (level == va) { - aLow = node_low(a, na); - aHigh = node_high(a, na); - } else { - aLow = aHigh = a; - } - - if (level == vb) { - bLow = node_low(b, nb); - bHigh = node_high(b, nb); - } else { - bLow = bHigh = b; - } - - BDD result; - - BDD low=sylvan_invalid, high=sylvan_invalid; - if (bLow == sylvan_false) return CALL(sylvan_constrain, aHigh, bHigh, level); - if (bLow == sylvan_true) { - if (bHigh == sylvan_false) return aLow; - if (bHigh == sylvan_true) { - result = sylvan_makenode(level, aLow, bHigh); - } else { - high = CALL(sylvan_constrain, aHigh, bHigh, level); - result = sylvan_makenode(level, aLow, high); - } - } else { - if (bHigh == sylvan_false) return CALL(sylvan_constrain, aLow, bLow, level); - if (bHigh == sylvan_true) { - low = CALL(sylvan_constrain, aLow, bLow, level); - result = sylvan_makenode(level, low, bHigh); - } else { - bdd_refs_spawn(SPAWN(sylvan_constrain, aLow, bLow, level)); - high = CALL(sylvan_constrain, aHigh, bHigh, level); - bdd_refs_push(high); - low = bdd_refs_sync(SYNC(sylvan_constrain)); - bdd_refs_pop(1); - result = sylvan_makenode(level, low, high); - } - } - - if (cachenow) { - if (cache_put3(CACHE_BDD_CONSTRAIN, a, b, 0, result)) sylvan_stats_count(BDD_CONSTRAIN_CACHEDPUT); - } - - return result; -} - -/** - * Calculate restrict a @ b - */ -TASK_IMPL_3(BDD, sylvan_restrict, BDD, a, BDD, b, BDDVAR, prev_level) -{ - /* Trivial cases */ - if (b == sylvan_true) return a; - if (b == sylvan_false) return sylvan_false; - if (sylvan_isconst(a)) return a; - if (a == b) return sylvan_true; - if (a == sylvan_not(b)) return sylvan_false; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - /* Count operation */ - sylvan_stats_count(BDD_RESTRICT); - - // a != constant and b != constant - bddnode_t na = GETNODE(a); - bddnode_t nb = GETNODE(b); - - BDDVAR va = bddnode_getvariable(na); - BDDVAR vb = bddnode_getvariable(nb); - BDDVAR level = va < vb ? va : vb; - - /* Consult cache */ - int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_RESTRICT, a, b, 0, &result)) { - sylvan_stats_count(BDD_RESTRICT_CACHED); - return result; - } - } - - BDD result; - - if (vb < va) { - BDD c = CALL(sylvan_ite, node_low(b,nb), sylvan_true, node_high(b,nb), 0); - bdd_refs_push(c); - result = CALL(sylvan_restrict, a, c, level); - bdd_refs_pop(1); - } else { - BDD aLow=node_low(a,na),aHigh=node_high(a,na),bLow=b,bHigh=b; - if (va == vb) { - bLow = node_low(b,nb); - bHigh = node_high(b,nb); - } - if (bLow == sylvan_false) { - result = CALL(sylvan_restrict, aHigh, bHigh, level); - } else if (bHigh == sylvan_false) { - result = CALL(sylvan_restrict, aLow, bLow, level); - } else { - bdd_refs_spawn(SPAWN(sylvan_restrict, aLow, bLow, level)); - BDD high = CALL(sylvan_restrict, aHigh, bHigh, level); - bdd_refs_push(high); - BDD low = bdd_refs_sync(SYNC(sylvan_restrict)); - bdd_refs_pop(1); - result = sylvan_makenode(level, low, high); - } - } - - if (cachenow) { - if (cache_put3(CACHE_BDD_RESTRICT, a, b, 0, result)) sylvan_stats_count(BDD_RESTRICT_CACHEDPUT); - } - - return result; -} - -/** - * Calculates \exists variables . 
a - */ -TASK_IMPL_3(BDD, sylvan_exists, BDD, a, BDD, variables, BDDVAR, prev_level) -{ - /* Terminal cases */ - if (a == sylvan_true) return sylvan_true; - if (a == sylvan_false) return sylvan_false; - if (sylvan_set_isempty(variables)) return a; - - // a != constant - bddnode_t na = GETNODE(a); - BDDVAR level = bddnode_getvariable(na); - - bddnode_t nv = GETNODE(variables); - BDDVAR vv = bddnode_getvariable(nv); - while (vv < level) { - variables = node_high(variables, nv); - if (sylvan_set_isempty(variables)) return a; - nv = GETNODE(variables); - vv = bddnode_getvariable(nv); - } - - sylvan_gc_test(); - - sylvan_stats_count(BDD_EXISTS); - - int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_EXISTS, a, variables, 0, &result)) { - sylvan_stats_count(BDD_EXISTS_CACHED); - return result; - } - } - - // Get cofactors - BDD aLow = node_low(a, na); - BDD aHigh = node_high(a, na); - - BDD result; - - if (vv == level) { - // level is in variable set, perform abstraction - if (aLow == sylvan_true || aHigh == sylvan_true || aLow == sylvan_not(aHigh)) { - result = sylvan_true; - } else { - BDD _v = sylvan_set_next(variables); - BDD low = CALL(sylvan_exists, aLow, _v, level); - if (low == sylvan_true) { - result = sylvan_true; - } else { - bdd_refs_push(low); - BDD high = CALL(sylvan_exists, aHigh, _v, level); - if (high == sylvan_true) { - result = sylvan_true; - bdd_refs_pop(1); - } else if (low == sylvan_false && high == sylvan_false) { - result = sylvan_false; - bdd_refs_pop(1); - } else { - bdd_refs_push(high); - result = sylvan_or(low, high); - bdd_refs_pop(2); - } - } - } - } else { - // level is not in variable set - BDD low, high; - bdd_refs_spawn(SPAWN(sylvan_exists, aHigh, variables, level)); - low = CALL(sylvan_exists, aLow, variables, level); - bdd_refs_push(low); - high = bdd_refs_sync(SYNC(sylvan_exists)); - bdd_refs_pop(1); - result = sylvan_makenode(level, low, high); - } - - if (cachenow) { - if (cache_put3(CACHE_BDD_EXISTS, a, variables, 0, result)) sylvan_stats_count(BDD_EXISTS_CACHEDPUT); - } - - return result; -} - -/** - * Calculate exists(a AND b, v) - */ -TASK_IMPL_4(BDD, sylvan_and_exists, BDD, a, BDD, b, BDDSET, v, BDDVAR, prev_level) -{ - /* Terminal cases */ - if (a == sylvan_false) return sylvan_false; - if (b == sylvan_false) return sylvan_false; - if (a == sylvan_not(b)) return sylvan_false; - if (a == sylvan_true && b == sylvan_true) return sylvan_true; - - /* Cases that reduce to "exists" and "and" */ - if (a == sylvan_true) return CALL(sylvan_exists, b, v, 0); - if (b == sylvan_true) return CALL(sylvan_exists, a, v, 0); - if (a == b) return CALL(sylvan_exists, a, v, 0); - if (sylvan_set_isempty(v)) return sylvan_and(a, b); - - /* At this point, a and b are proper nodes, and v is non-empty */ - - /* Improve for caching */ - if (BDD_STRIPMARK(a) > BDD_STRIPMARK(b)) { - BDD t = b; - b = a; - a = t; - } - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - sylvan_stats_count(BDD_AND_EXISTS); - - // a != constant - bddnode_t na = GETNODE(a); - bddnode_t nb = GETNODE(b); - bddnode_t nv = GETNODE(v); - - BDDVAR va = bddnode_getvariable(na); - BDDVAR vb = bddnode_getvariable(nb); - BDDVAR vv = bddnode_getvariable(nv); - BDDVAR level = va < vb ? 
va : vb; - - /* Skip levels in v that are not in a and b */ - while (vv < level) { - v = node_high(v, nv); // get next variable in conjunction - if (sylvan_set_isempty(v)) return sylvan_and(a, b); - nv = GETNODE(v); - vv = bddnode_getvariable(nv); - } - - BDD result; - - int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; - if (cachenow) { - if (cache_get3(CACHE_BDD_AND_EXISTS, a, b, v, &result)) { - sylvan_stats_count(BDD_AND_EXISTS_CACHED); - return result; - } - } - - // Get cofactors - BDD aLow, aHigh, bLow, bHigh; - if (level == va) { - aLow = node_low(a, na); - aHigh = node_high(a, na); - } else { - aLow = a; - aHigh = a; - } - if (level == vb) { - bLow = node_low(b, nb); - bHigh = node_high(b, nb); - } else { - bLow = b; - bHigh = b; - } - - if (level == vv) { - // level is in variable set, perform abstraction - BDD _v = node_high(v, nv); - BDD low = CALL(sylvan_and_exists, aLow, bLow, _v, level); - if (low == sylvan_true || low == aHigh || low == bHigh) { - result = low; - } else { - bdd_refs_push(low); - BDD high; - if (low == sylvan_not(aHigh)) { - high = CALL(sylvan_exists, bHigh, _v, 0); - } else if (low == sylvan_not(bHigh)) { - high = CALL(sylvan_exists, aHigh, _v, 0); - } else { - high = CALL(sylvan_and_exists, aHigh, bHigh, _v, level); - } - if (high == sylvan_true) { - result = sylvan_true; - bdd_refs_pop(1); - } else if (high == sylvan_false) { - result = low; - bdd_refs_pop(1); - } else if (low == sylvan_false) { - result = high; - bdd_refs_pop(1); - } else { - bdd_refs_push(high); - result = sylvan_or(low, high); - bdd_refs_pop(2); - } - } - } else { - // level is not in variable set - bdd_refs_spawn(SPAWN(sylvan_and_exists, aHigh, bHigh, v, level)); - BDD low = CALL(sylvan_and_exists, aLow, bLow, v, level); - bdd_refs_push(low); - BDD high = bdd_refs_sync(SYNC(sylvan_and_exists)); - bdd_refs_pop(1); - result = sylvan_makenode(level, low, high); - } - - if (cachenow) { - if (cache_put3(CACHE_BDD_AND_EXISTS, a, b, v, result)) sylvan_stats_count(BDD_AND_EXISTS_CACHEDPUT); - } - - return result; -} - - -TASK_IMPL_4(BDD, sylvan_relnext, BDD, a, BDD, b, BDDSET, vars, BDDVAR, prev_level) -{ - /* Compute R(s) = \exists x: A(x) \and B(x,s) with support(result) = s, support(A) = s, support(B) = s+t - * if vars == sylvan_false, then every level is in s or t - * any other levels (outside s,t) in B are ignored / existentially quantified - */ - - /* Terminals */ - if (a == sylvan_true && b == sylvan_true) return sylvan_true; - if (a == sylvan_false) return sylvan_false; - if (b == sylvan_false) return sylvan_false; - if (sylvan_set_isempty(vars)) return a; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - /* Count operation */ - sylvan_stats_count(BDD_RELNEXT); - - /* Determine top level */ - bddnode_t na = sylvan_isconst(a) ? 0 : GETNODE(a); - bddnode_t nb = sylvan_isconst(b) ? 0 : GETNODE(b); - - BDDVAR va = na ? bddnode_getvariable(na) : 0xffffffff; - BDDVAR vb = nb ? bddnode_getvariable(nb) : 0xffffffff; - BDDVAR level = va < vb ? 
va : vb; - - /* Skip vars */ - int is_s_or_t = 0; - bddnode_t nv = 0; - if (vars == sylvan_false) { - is_s_or_t = 1; - } else { - nv = GETNODE(vars); - for (;;) { - /* check if level is s/t */ - BDDVAR vv = bddnode_getvariable(nv); - if (level == vv || (level^1) == vv) { - is_s_or_t = 1; - break; - } - /* check if level < s/t */ - if (level < vv) break; - vars = node_high(vars, nv); // get next in vars - if (sylvan_set_isempty(vars)) return a; - nv = GETNODE(vars); - } - } - - /* Consult cache */ - int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_RELNEXT, a, b, vars, &result)) { - sylvan_stats_count(BDD_RELNEXT_CACHED); - return result; - } - } - - BDD result; - - if (is_s_or_t) { - /* Get s and t */ - BDDVAR s = level & (~1); - BDDVAR t = s+1; - - BDD a0, a1, b0, b1; - if (na && va == s) { - a0 = node_low(a, na); - a1 = node_high(a, na); - } else { - a0 = a1 = a; - } - if (nb && vb == s) { - b0 = node_low(b, nb); - b1 = node_high(b, nb); - } else { - b0 = b1 = b; - } - - BDD b00, b01, b10, b11; - if (!sylvan_isconst(b0)) { - bddnode_t nb0 = GETNODE(b0); - if (bddnode_getvariable(nb0) == t) { - b00 = node_low(b0, nb0); - b01 = node_high(b0, nb0); - } else { - b00 = b01 = b0; - } - } else { - b00 = b01 = b0; - } - if (!sylvan_isconst(b1)) { - bddnode_t nb1 = GETNODE(b1); - if (bddnode_getvariable(nb1) == t) { - b10 = node_low(b1, nb1); - b11 = node_high(b1, nb1); - } else { - b10 = b11 = b1; - } - } else { - b10 = b11 = b1; - } - - BDD _vars = vars == sylvan_false ? sylvan_false : node_high(vars, nv); - - bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b00, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b10, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b01, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b11, _vars, level)); - - BDD f = bdd_refs_sync(SYNC(sylvan_relnext)); bdd_refs_push(f); - BDD e = bdd_refs_sync(SYNC(sylvan_relnext)); bdd_refs_push(e); - BDD d = bdd_refs_sync(SYNC(sylvan_relnext)); bdd_refs_push(d); - BDD c = bdd_refs_sync(SYNC(sylvan_relnext)); bdd_refs_push(c); - - bdd_refs_spawn(SPAWN(sylvan_ite, c, sylvan_true, d, 0)); /* a0 b00 \or a1 b01 */ - bdd_refs_spawn(SPAWN(sylvan_ite, e, sylvan_true, f, 0)); /* a0 b01 \or a1 b11 */ - - /* R1 */ d = bdd_refs_sync(SYNC(sylvan_ite)); bdd_refs_push(d); - /* R0 */ c = bdd_refs_sync(SYNC(sylvan_ite)); // not necessary: bdd_refs_push(c); - - bdd_refs_pop(5); - result = sylvan_makenode(s, c, d); - } else { - /* Variable not in vars! 
Take a, quantify b */ - BDD a0, a1, b0, b1; - if (na && va == level) { - a0 = node_low(a, na); - a1 = node_high(a, na); - } else { - a0 = a1 = a; - } - if (nb && vb == level) { - b0 = node_low(b, nb); - b1 = node_high(b, nb); - } else { - b0 = b1 = b; - } - - if (b0 != b1) { - if (a0 == a1) { - /* Quantify "b" variables */ - bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b0, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b1, vars, level)); - - BDD r1 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r1); - BDD r0 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r0); - result = sylvan_or(r0, r1); - bdd_refs_pop(2); - } else { - /* Quantify "b" variables, but keep "a" variables */ - bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b0, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b1, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b0, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b1, vars, level)); - - BDD r11 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r11); - BDD r10 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r10); - BDD r01 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r01); - BDD r00 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r00); - - bdd_refs_spawn(SPAWN(sylvan_ite, r00, sylvan_true, r01, 0)); - bdd_refs_spawn(SPAWN(sylvan_ite, r10, sylvan_true, r11, 0)); - - BDD r1 = bdd_refs_sync(SYNC(sylvan_ite)); - bdd_refs_push(r1); - BDD r0 = bdd_refs_sync(SYNC(sylvan_ite)); - bdd_refs_pop(5); - - result = sylvan_makenode(level, r0, r1); - } - } else { - /* Keep "a" variables */ - bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b0, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b1, vars, level)); - - BDD r1 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r1); - BDD r0 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_pop(1); - result = sylvan_makenode(level, r0, r1); - } - } - - if (cachenow) { - if (cache_put3(CACHE_BDD_RELNEXT, a, b, vars, result)) sylvan_stats_count(BDD_RELNEXT_CACHEDPUT); - } - - return result; -} - -TASK_IMPL_4(BDD, sylvan_relprev, BDD, a, BDD, b, BDDSET, vars, BDDVAR, prev_level) -{ - /* Compute \exists x: A(s,x) \and B(x,t) - * if vars == sylvan_false, then every level is in s or t - * any other levels (outside s,t) in A are ignored / existentially quantified - */ - - /* Terminals */ - if (a == sylvan_true && b == sylvan_true) return sylvan_true; - if (a == sylvan_false) return sylvan_false; - if (b == sylvan_false) return sylvan_false; - if (sylvan_set_isempty(vars)) return b; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - /* Count operation */ - sylvan_stats_count(BDD_RELPREV); - - /* Determine top level */ - bddnode_t na = sylvan_isconst(a) ? 0 : GETNODE(a); - bddnode_t nb = sylvan_isconst(b) ? 0 : GETNODE(b); - - BDDVAR va = na ? bddnode_getvariable(na) : 0xffffffff; - BDDVAR vb = nb ? bddnode_getvariable(nb) : 0xffffffff; - BDDVAR level = va < vb ? va : vb; - - /* Skip vars */ - int is_s_or_t = 0; - bddnode_t nv = 0; - if (vars == sylvan_false) { - is_s_or_t = 1; - } else { - nv = GETNODE(vars); - for (;;) { - /* check if level is s/t */ - BDDVAR vv = bddnode_getvariable(nv); - if (level == vv || (level^1) == vv) { - is_s_or_t = 1; - break; - } - /* check if level < s/t */ - if (level < vv) break; - vars = node_high(vars, nv); // get next in vars - if (sylvan_set_isempty(vars)) return b; - nv = GETNODE(vars); - } - } - - /* Consult cache */ - int cachenow = granularity < 2 || prev_level == 0 ? 
1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_RELPREV, a, b, vars, &result)) { - sylvan_stats_count(BDD_RELPREV_CACHED); - return result; - } - } - - BDD result; - - if (is_s_or_t) { - /* Get s and t */ - BDDVAR s = level & (~1); - BDDVAR t = s+1; - - BDD a0, a1, b0, b1; - if (na && va == s) { - a0 = node_low(a, na); - a1 = node_high(a, na); - } else { - a0 = a1 = a; - } - if (nb && vb == s) { - b0 = node_low(b, nb); - b1 = node_high(b, nb); - } else { - b0 = b1 = b; - } - - BDD a00, a01, a10, a11; - if (!sylvan_isconst(a0)) { - bddnode_t na0 = GETNODE(a0); - if (bddnode_getvariable(na0) == t) { - a00 = node_low(a0, na0); - a01 = node_high(a0, na0); - } else { - a00 = a01 = a0; - } - } else { - a00 = a01 = a0; - } - if (!sylvan_isconst(a1)) { - bddnode_t na1 = GETNODE(a1); - if (bddnode_getvariable(na1) == t) { - a10 = node_low(a1, na1); - a11 = node_high(a1, na1); - } else { - a10 = a11 = a1; - } - } else { - a10 = a11 = a1; - } - - BDD b00, b01, b10, b11; - if (!sylvan_isconst(b0)) { - bddnode_t nb0 = GETNODE(b0); - if (bddnode_getvariable(nb0) == t) { - b00 = node_low(b0, nb0); - b01 = node_high(b0, nb0); - } else { - b00 = b01 = b0; - } - } else { - b00 = b01 = b0; - } - if (!sylvan_isconst(b1)) { - bddnode_t nb1 = GETNODE(b1); - if (bddnode_getvariable(nb1) == t) { - b10 = node_low(b1, nb1); - b11 = node_high(b1, nb1); - } else { - b10 = b11 = b1; - } - } else { - b10 = b11 = b1; - } - - BDD _vars; - if (vars != sylvan_false) { - _vars = node_high(vars, nv); - if (sylvan_set_var(_vars) == t) _vars = sylvan_set_next(_vars); - } else { - _vars = sylvan_false; - } - - if (b00 == b01) { - bdd_refs_spawn(SPAWN(sylvan_relprev, a00, b0, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a10, b0, _vars, level)); - } else { - bdd_refs_spawn(SPAWN(sylvan_relprev, a00, b00, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a00, b01, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a10, b00, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a10, b01, _vars, level)); - } - - if (b10 == b11) { - bdd_refs_spawn(SPAWN(sylvan_relprev, a01, b1, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a11, b1, _vars, level)); - } else { - bdd_refs_spawn(SPAWN(sylvan_relprev, a01, b10, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a01, b11, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a11, b10, _vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a11, b11, _vars, level)); - } - - BDD r00, r01, r10, r11; - - if (b10 == b11) { - r11 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - r01 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - } else { - BDD r111 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - BDD r110 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - r11 = sylvan_makenode(t, r110, r111); - bdd_refs_pop(2); - bdd_refs_push(r11); - BDD r011 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - BDD r010 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - r01 = sylvan_makenode(t, r010, r011); - bdd_refs_pop(2); - bdd_refs_push(r01); - } - - if (b00 == b01) { - r10 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - r00 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - } else { - BDD r101 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - BDD r100 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - r10 = sylvan_makenode(t, r100, r101); - bdd_refs_pop(2); - bdd_refs_push(r10); - BDD r001 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - BDD 
r000 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); - r00 = sylvan_makenode(t, r000, r001); - bdd_refs_pop(2); - bdd_refs_push(r00); - } - - bdd_refs_spawn(SPAWN(sylvan_and, sylvan_not(r00), sylvan_not(r01), 0)); - bdd_refs_spawn(SPAWN(sylvan_and, sylvan_not(r10), sylvan_not(r11), 0)); - - BDD r1 = sylvan_not(bdd_refs_push(bdd_refs_sync(SYNC(sylvan_and)))); - BDD r0 = sylvan_not(bdd_refs_sync(SYNC(sylvan_and))); - bdd_refs_pop(5); - result = sylvan_makenode(s, r0, r1); - } else { - BDD a0, a1, b0, b1; - if (na && va == level) { - a0 = node_low(a, na); - a1 = node_high(a, na); - } else { - a0 = a1 = a; - } - if (nb && vb == level) { - b0 = node_low(b, nb); - b1 = node_high(b, nb); - } else { - b0 = b1 = b; - } - - if (a0 != a1) { - if (b0 == b1) { - /* Quantify "a" variables */ - bdd_refs_spawn(SPAWN(sylvan_relprev, a0, b0, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a1, b1, vars, level)); - - BDD r1 = bdd_refs_sync(SYNC(sylvan_relprev)); - bdd_refs_push(r1); - BDD r0 = bdd_refs_sync(SYNC(sylvan_relprev)); - bdd_refs_push(r0); - result = CALL(sylvan_ite, r0, sylvan_true, r1, 0); - bdd_refs_pop(2); - - } else { - /* Quantify "a" variables, but keep "b" variables */ - bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b0, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b0, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b1, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b1, vars, level)); - - BDD r11 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r11); - BDD r01 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r01); - BDD r10 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r10); - BDD r00 = bdd_refs_sync(SYNC(sylvan_relnext)); - bdd_refs_push(r00); - - bdd_refs_spawn(SPAWN(sylvan_ite, r00, sylvan_true, r10, 0)); - bdd_refs_spawn(SPAWN(sylvan_ite, r01, sylvan_true, r11, 0)); - - BDD r1 = bdd_refs_sync(SYNC(sylvan_ite)); - bdd_refs_push(r1); - BDD r0 = bdd_refs_sync(SYNC(sylvan_ite)); - bdd_refs_pop(5); - - result = sylvan_makenode(level, r0, r1); - } - } else { - bdd_refs_spawn(SPAWN(sylvan_relprev, a0, b0, vars, level)); - bdd_refs_spawn(SPAWN(sylvan_relprev, a1, b1, vars, level)); - - BDD r1 = bdd_refs_sync(SYNC(sylvan_relprev)); - bdd_refs_push(r1); - BDD r0 = bdd_refs_sync(SYNC(sylvan_relprev)); - bdd_refs_pop(1); - result = sylvan_makenode(level, r0, r1); - } - } - - if (cachenow) { - if (cache_put3(CACHE_BDD_RELPREV, a, b, vars, result)) sylvan_stats_count(BDD_RELPREV_CACHEDPUT); - } - - return result; -} - -/** - * Computes the transitive closure by traversing the BDD recursively. - * See Y. Matsunaga, P. C. McGeer, R. K. Brayton - * On Computing the Transitive Closre of a State Transition Relation - * 30th ACM Design Automation Conference, 1993. - */ -TASK_IMPL_2(BDD, sylvan_closure, BDD, a, BDDVAR, prev_level) -{ - /* Terminals */ - if (a == sylvan_true) return a; - if (a == sylvan_false) return a; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - /* Count operation */ - sylvan_stats_count(BDD_CLOSURE); - - /* Determine top level */ - bddnode_t n = GETNODE(a); - BDDVAR level = bddnode_getvariable(n); - - /* Consult cache */ - int cachenow = granularity < 2 || prev_level == 0 ? 
1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_CLOSURE, a, 0, 0, &result)) { - sylvan_stats_count(BDD_CLOSURE_CACHED); - return result; - } - } - - BDDVAR s = level & (~1); - BDDVAR t = s+1; - - BDD a0, a1; - if (level == s) { - a0 = node_low(a, n); - a1 = node_high(a, n); - } else { - a0 = a1 = a; - } - - BDD a00, a01, a10, a11; - if (!sylvan_isconst(a0)) { - bddnode_t na0 = GETNODE(a0); - if (bddnode_getvariable(na0) == t) { - a00 = node_low(a0, na0); - a01 = node_high(a0, na0); - } else { - a00 = a01 = a0; - } - } else { - a00 = a01 = a0; - } - if (!sylvan_isconst(a1)) { - bddnode_t na1 = GETNODE(a1); - if (bddnode_getvariable(na1) == t) { - a10 = node_low(a1, na1); - a11 = node_high(a1, na1); - } else { - a10 = a11 = a1; - } - } else { - a10 = a11 = a1; - } - - BDD u1 = CALL(sylvan_closure, a11, level); - bdd_refs_push(u1); - /* u3 = */ bdd_refs_spawn(SPAWN(sylvan_relprev, a01, u1, sylvan_false, level)); - BDD u2 = CALL(sylvan_relprev, u1, a10, sylvan_false, level); - bdd_refs_push(u2); - BDD e = CALL(sylvan_relprev, a01, u2, sylvan_false, level); - bdd_refs_push(e); - e = CALL(sylvan_ite, a00, sylvan_true, e, level); - bdd_refs_pop(1); - bdd_refs_push(e); - e = CALL(sylvan_closure, e, level); - bdd_refs_pop(1); - bdd_refs_push(e); - BDD g = CALL(sylvan_relprev, u2, e, sylvan_false, level); - bdd_refs_push(g); - BDD u3 = bdd_refs_sync(SYNC(sylvan_relprev)); - bdd_refs_push(u3); - BDD f = CALL(sylvan_relprev, e, u3, sylvan_false, level); - bdd_refs_push(f); - BDD h = CALL(sylvan_relprev, u2, f, sylvan_false, level); - bdd_refs_push(h); - h = CALL(sylvan_ite, u1, sylvan_true, h, level); - bdd_refs_pop(1); - bdd_refs_push(h); - - BDD r0, r1; - /* R0 */ r0 = sylvan_makenode(t, e, f); - bdd_refs_pop(7); - bdd_refs_push(r0); - /* R1 */ r1 = sylvan_makenode(t, g, h); - bdd_refs_pop(1); - BDD result = sylvan_makenode(s, r0, r1); - - if (cachenow) { - if (cache_put3(CACHE_BDD_CLOSURE, a, 0, 0, result)) sylvan_stats_count(BDD_CLOSURE_CACHEDPUT); - } - - return result; -} - - -/** - * Function composition - */ -TASK_IMPL_3(BDD, sylvan_compose, BDD, a, BDDMAP, map, BDDVAR, prev_level) -{ - /* Trivial cases */ - if (a == sylvan_false || a == sylvan_true) return a; - if (sylvan_map_isempty(map)) return a; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - /* Count operation */ - sylvan_stats_count(BDD_COMPOSE); - - /* Determine top level */ - bddnode_t n = GETNODE(a); - BDDVAR level = bddnode_getvariable(n); - - /* Skip map */ - bddnode_t map_node = GETNODE(map); - BDDVAR map_var = bddnode_getvariable(map_node); - while (map_var < level) { - map = node_low(map, map_node); - if (sylvan_map_isempty(map)) return a; - map_node = GETNODE(map); - map_var = bddnode_getvariable(map_node); - } - - /* Consult cache */ - int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; - if (cachenow) { - BDD result; - if (cache_get3(CACHE_BDD_COMPOSE, a, map, 0, &result)) { - sylvan_stats_count(BDD_COMPOSE_CACHED); - return result; - } - } - - /* Recursively calculate low and high */ - bdd_refs_spawn(SPAWN(sylvan_compose, node_low(a, n), map, level)); - BDD high = CALL(sylvan_compose, node_high(a, n), map, level); - bdd_refs_push(high); - BDD low = bdd_refs_sync(SYNC(sylvan_compose)); - bdd_refs_push(low); - - /* Calculate result */ - BDD root = map_var == level ? 
node_high(map, map_node) : sylvan_ithvar(level); - bdd_refs_push(root); - BDD result = CALL(sylvan_ite, root, high, low, 0); - bdd_refs_pop(3); - - if (cachenow) { - if (cache_put3(CACHE_BDD_COMPOSE, a, map, 0, result)) sylvan_stats_count(BDD_COMPOSE_CACHEDPUT); - } - - return result; -} - -/** - * Count number of nodes in BDD - */ -uint64_t sylvan_nodecount_do_1(BDD a) -{ - if (sylvan_isconst(a)) return 0; - bddnode_t na = GETNODE(a); - if (bddnode_getmark(na)) return 0; - bddnode_setmark(na, 1); - uint64_t result = 1; - result += sylvan_nodecount_do_1(bddnode_getlow(na)); - result += sylvan_nodecount_do_1(bddnode_gethigh(na)); - return result; -} - -void sylvan_nodecount_do_2(BDD a) -{ - if (sylvan_isconst(a)) return; - bddnode_t na = GETNODE(a); - if (!bddnode_getmark(na)) return; - bddnode_setmark(na, 0); - sylvan_nodecount_do_2(bddnode_getlow(na)); - sylvan_nodecount_do_2(bddnode_gethigh(na)); -} - -size_t sylvan_nodecount(BDD a) -{ - uint32_t result = sylvan_nodecount_do_1(a); - sylvan_nodecount_do_2(a); - return result; -} - -/** - * Calculate the number of distinct paths to True. - */ -TASK_IMPL_2(double, sylvan_pathcount, BDD, bdd, BDDVAR, prev_level) -{ - /* Trivial cases */ - if (bdd == sylvan_false) return 0.0; - if (bdd == sylvan_true) return 1.0; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - sylvan_stats_count(BDD_PATHCOUNT); - - BDD level = sylvan_var(bdd); - - /* Consult cache */ - int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; - if (cachenow) { - double result; - if (cache_get3(CACHE_BDD_PATHCOUNT, bdd, 0, 0, (uint64_t*)&result)) { - sylvan_stats_count(BDD_PATHCOUNT_CACHED); - return result; - } - } - - SPAWN(sylvan_pathcount, sylvan_low(bdd), level); - SPAWN(sylvan_pathcount, sylvan_high(bdd), level); - double res1 = SYNC(sylvan_pathcount); - res1 += SYNC(sylvan_pathcount); - - if (cachenow) { - if (cache_put3(CACHE_BDD_PATHCOUNT, bdd, 0, 0, *(uint64_t*)&res1)) sylvan_stats_count(BDD_PATHCOUNT_CACHEDPUT); - } - - return res1; -} - -/** - * Calculate the number of satisfying variable assignments according to . - */ -TASK_IMPL_3(double, sylvan_satcount, BDD, bdd, BDDSET, variables, BDDVAR, prev_level) -{ - /* Trivial cases */ - if (bdd == sylvan_false) return 0.0; - if (bdd == sylvan_true) return powl(2.0L, sylvan_set_count(variables)); - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - sylvan_stats_count(BDD_SATCOUNT); - - /* Count variables before var(bdd) */ - size_t skipped = 0; - BDDVAR var = sylvan_var(bdd); - bddnode_t set_node = GETNODE(variables); - BDDVAR set_var = bddnode_getvariable(set_node); - while (var != set_var) { - skipped++; - variables = node_high(variables, set_node); - // if this assertion fails, then variables is not the support of - assert(!sylvan_set_isempty(variables)); - set_node = GETNODE(variables); - set_var = bddnode_getvariable(set_node); - } - - union { - double d; - uint64_t s; - } hack; - - /* Consult cache */ - int cachenow = granularity < 2 || prev_level == 0 ? 
1 : prev_level / granularity != var / granularity; - if (cachenow) { - if (cache_get3(CACHE_BDD_SATCOUNT, bdd, variables, 0, &hack.s)) { - sylvan_stats_count(BDD_SATCOUNT_CACHED); - return hack.d * powl(2.0L, skipped); - } - } - - SPAWN(sylvan_satcount, sylvan_high(bdd), node_high(variables, set_node), var); - double low = CALL(sylvan_satcount, sylvan_low(bdd), node_high(variables, set_node), var); - double result = low + SYNC(sylvan_satcount); - - if (cachenow) { - hack.d = result; - if (cache_put3(CACHE_BDD_SATCOUNT, bdd, variables, 0, hack.s)) sylvan_stats_count(BDD_SATCOUNT_CACHEDPUT); - } - - return result * powl(2.0L, skipped); -} - -int -sylvan_sat_one(BDD bdd, BDDSET vars, uint8_t *str) -{ - if (bdd == sylvan_false) return 0; - if (str == NULL) return 0; - if (sylvan_set_isempty(vars)) return 1; - - for (;;) { - bddnode_t n_vars = GETNODE(vars); - if (bdd == sylvan_true) { - *str = 0; - } else { - bddnode_t n_bdd = GETNODE(bdd); - if (bddnode_getvariable(n_bdd) != bddnode_getvariable(n_vars)) { - *str = 0; - } else { - if (node_low(bdd, n_bdd) == sylvan_false) { - // take high edge - *str = 1; - bdd = node_high(bdd, n_bdd); - } else { - // take low edge - *str = 0; - bdd = node_low(bdd, n_bdd); - } - } - } - vars = node_high(vars, n_vars); - if (sylvan_set_isempty(vars)) break; - str++; - } - - return 1; -} - -BDD -sylvan_sat_one_bdd(BDD bdd) -{ - if (bdd == sylvan_false) return sylvan_false; - if (bdd == sylvan_true) return sylvan_true; - - bddnode_t node = GETNODE(bdd); - BDD low = node_low(bdd, node); - BDD high = node_high(bdd, node); - - BDD m; - - BDD result; - if (low == sylvan_false) { - m = sylvan_sat_one_bdd(high); - result = sylvan_makenode(bddnode_getvariable(node), sylvan_false, m); - } else if (high == sylvan_false) { - m = sylvan_sat_one_bdd(low); - result = sylvan_makenode(bddnode_getvariable(node), m, sylvan_false); - } else { - if (rand() & 0x2000) { - m = sylvan_sat_one_bdd(low); - result = sylvan_makenode(bddnode_getvariable(node), m, sylvan_false); - } else { - m = sylvan_sat_one_bdd(high); - result = sylvan_makenode(bddnode_getvariable(node), sylvan_false, m); - } - } - - return result; -} - -BDD -sylvan_cube(BDDSET vars, uint8_t *cube) -{ - if (sylvan_set_isempty(vars)) return sylvan_true; - - bddnode_t n = GETNODE(vars); - BDDVAR v = bddnode_getvariable(n); - vars = node_high(vars, n); - - BDD result = sylvan_cube(vars, cube+1); - if (*cube == 0) { - result = sylvan_makenode(v, result, sylvan_false); - } else if (*cube == 1) { - result = sylvan_makenode(v, sylvan_false, result); - } - - return result; -} - -TASK_IMPL_3(BDD, sylvan_union_cube, BDD, bdd, BDDSET, vars, uint8_t *, cube) -{ - /* Terminal cases */ - if (bdd == sylvan_true) return sylvan_true; - if (bdd == sylvan_false) return sylvan_cube(vars, cube); - if (sylvan_set_isempty(vars)) return sylvan_true; - - bddnode_t nv = GETNODE(vars); - - for (;;) { - if (*cube == 0 || *cube == 1) break; - // *cube should be 2 - cube++; - vars = node_high(vars, nv); - if (sylvan_set_isempty(vars)) return sylvan_true; - nv = GETNODE(vars); - } - - sylvan_gc_test(); - - // missing: SV_CNT_OP - - bddnode_t n = GETNODE(bdd); - BDD result = bdd; - BDDVAR v = bddnode_getvariable(nv); - BDDVAR n_level = bddnode_getvariable(n); - - if (v < n_level) { - vars = node_high(vars, nv); - if (*cube == 0) { - result = sylvan_union_cube(bdd, vars, cube+1); - result = sylvan_makenode(v, result, bdd); - } else /* *cube == 1 */ { - result = sylvan_union_cube(bdd, vars, cube+1); - result = sylvan_makenode(v, bdd, result); - } - } else 
if (v > n_level) { - BDD high = node_high(bdd, n); - BDD low = node_low(bdd, n); - SPAWN(sylvan_union_cube, high, vars, cube); - BDD new_low = sylvan_union_cube(low, vars, cube); - bdd_refs_push(new_low); - BDD new_high = SYNC(sylvan_union_cube); - bdd_refs_pop(1); - if (new_low != low || new_high != high) { - result = sylvan_makenode(n_level, new_low, new_high); - } - } else /* v == n_level */ { - vars = node_high(vars, nv); - BDD high = node_high(bdd, n); - BDD low = node_low(bdd, n); - if (*cube == 0) { - BDD new_low = sylvan_union_cube(low, vars, cube+1); - if (new_low != low) { - result = sylvan_makenode(n_level, new_low, high); - } - } else /* *cube == 1 */ { - BDD new_high = sylvan_union_cube(high, vars, cube+1); - if (new_high != high) { - result = sylvan_makenode(n_level, low, new_high); - } - } - } - - return result; -} - -struct bdd_path -{ - struct bdd_path *prev; - BDDVAR var; - int8_t val; // 0=false, 1=true, 2=both -}; - -VOID_TASK_5(sylvan_enum_do, BDD, bdd, BDDSET, vars, enum_cb, cb, void*, context, struct bdd_path*, path) -{ - if (bdd == sylvan_false) return; - - if (sylvan_set_isempty(vars)) { - /* bdd should now be true */ - assert(bdd == sylvan_true); - /* compute length of path */ - int i=0; - struct bdd_path *pp; - for (pp = path; pp != NULL; pp = pp->prev) i++; - /* if length is 0 (enum called with empty vars??), return */ - if (i == 0) return; - /* fill cube and vars with trace */ - uint8_t cube[i]; - BDDVAR vars[i]; - int j=0; - for (pp = path; pp != NULL; pp = pp->prev) { - cube[i-j-1] = pp->val; - vars[i-j-1] = pp->var; - j++; - } - /* call callback */ - WRAP(cb, context, vars, cube, i); - return; - } - - BDDVAR var = sylvan_var(vars); - vars = sylvan_set_next(vars); - BDDVAR bdd_var = sylvan_var(bdd); - - /* assert var <= bdd_var */ - if (bdd == sylvan_true || var < bdd_var) { - struct bdd_path pp0 = (struct bdd_path){path, var, 0}; - CALL(sylvan_enum_do, bdd, vars, cb, context, &pp0); - struct bdd_path pp1 = (struct bdd_path){path, var, 1}; - CALL(sylvan_enum_do, bdd, vars, cb, context, &pp1); - } else if (var == bdd_var) { - struct bdd_path pp0 = (struct bdd_path){path, var, 0}; - CALL(sylvan_enum_do, sylvan_low(bdd), vars, cb, context, &pp0); - struct bdd_path pp1 = (struct bdd_path){path, var, 1}; - CALL(sylvan_enum_do, sylvan_high(bdd), vars, cb, context, &pp1); - } else { - printf("var %u not expected (expecting %u)!\n", bdd_var, var); - assert(var <= bdd_var); - } -} - -VOID_TASK_5(sylvan_enum_par_do, BDD, bdd, BDDSET, vars, enum_cb, cb, void*, context, struct bdd_path*, path) -{ - if (bdd == sylvan_false) return; - - if (sylvan_set_isempty(vars)) { - /* bdd should now be true */ - assert(bdd == sylvan_true); - /* compute length of path */ - int i=0; - struct bdd_path *pp; - for (pp = path; pp != NULL; pp = pp->prev) i++; - /* if length is 0 (enum called with empty vars??), return */ - if (i == 0) return; - /* fill cube and vars with trace */ - uint8_t cube[i]; - BDDVAR vars[i]; - int j=0; - for (pp = path; pp != NULL; pp = pp->prev) { - cube[i-j-1] = pp->val; - vars[i-j-1] = pp->var; - j++; - } - /* call callback */ - WRAP(cb, context, vars, cube, i); - return; - } - - BDD var = sylvan_var(vars); - vars = sylvan_set_next(vars); - BDD bdd_var = sylvan_var(bdd); - - /* assert var <= bdd_var */ - if (var < bdd_var) { - struct bdd_path pp1 = (struct bdd_path){path, var, 1}; - SPAWN(sylvan_enum_par_do, bdd, vars, cb, context, &pp1); - struct bdd_path pp0 = (struct bdd_path){path, var, 0}; - CALL(sylvan_enum_par_do, bdd, vars, cb, context, &pp0); - 
SYNC(sylvan_enum_par_do); - } else if (var == bdd_var) { - struct bdd_path pp1 = (struct bdd_path){path, var, 1}; - SPAWN(sylvan_enum_par_do, sylvan_high(bdd), vars, cb, context, &pp1); - struct bdd_path pp0 = (struct bdd_path){path, var, 0}; - CALL(sylvan_enum_par_do, sylvan_low(bdd), vars, cb, context, &pp0); - SYNC(sylvan_enum_par_do); - } else { - assert(var <= bdd_var); - } -} - -VOID_TASK_IMPL_4(sylvan_enum, BDD, bdd, BDDSET, vars, enum_cb, cb, void*, context) -{ - CALL(sylvan_enum_do, bdd, vars, cb, context, 0); -} - -VOID_TASK_IMPL_4(sylvan_enum_par, BDD, bdd, BDDSET, vars, enum_cb, cb, void*, context) -{ - CALL(sylvan_enum_par_do, bdd, vars, cb, context, 0); -} - -TASK_5(BDD, sylvan_collect_do, BDD, bdd, BDDSET, vars, sylvan_collect_cb, cb, void*, context, struct bdd_path*, path) -{ - if (bdd == sylvan_false) return sylvan_false; - - if (sylvan_set_isempty(vars)) { - /* compute length of path */ - int i=0; - struct bdd_path *pp; - for (pp = path; pp != NULL; pp = pp->prev) i++; - /* if length is 0 (enum called with empty vars??), return */ - if (i == 0) return WRAP(cb, context, NULL); - /* fill cube and vars with trace */ - uint8_t cube[i]; - int j=0; - for (pp = path; pp != NULL; pp = pp->prev) { - cube[i-j-1] = pp->val; - j++; - } - /* call callback */ - return WRAP(cb, context, cube); - } else { - BDD var = sylvan_var(vars); - vars = sylvan_set_next(vars); - BDD bdd_var = sylvan_var(bdd); - - /* if fails, then has variables not in */ - assert(var <= bdd_var); - - struct bdd_path pp1 = (struct bdd_path){path, var, 1}; - struct bdd_path pp0 = (struct bdd_path){path, var, 0}; - if (var < bdd_var) { - bdd_refs_spawn(SPAWN(sylvan_collect_do, bdd, vars, cb, context, &pp1)); - BDD low = bdd_refs_push(CALL(sylvan_collect_do, bdd, vars, cb, context, &pp0)); - BDD high = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_collect_do))); - BDD res = sylvan_or(low, high); - bdd_refs_pop(2); - return res; - } else if (var == bdd_var) { - bdd_refs_spawn(SPAWN(sylvan_collect_do, sylvan_high(bdd), vars, cb, context, &pp1)); - BDD low = bdd_refs_push(CALL(sylvan_collect_do, sylvan_low(bdd), vars, cb, context, &pp0)); - BDD high = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_collect_do))); - BDD res = sylvan_or(low, high); - bdd_refs_pop(2); - return res; - } else { - return sylvan_invalid; // unreachable - } - } -} - -TASK_IMPL_4(BDD, sylvan_collect, BDD, bdd, BDDSET, vars, sylvan_collect_cb, cb, void*, context) -{ - return CALL(sylvan_collect_do, bdd, vars, cb, context, 0); -} - -/** - * IMPLEMENTATION OF BDDSET - */ - -int -sylvan_set_in(BDDSET set, BDDVAR level) -{ - while (!sylvan_set_isempty(set)) { - bddnode_t n = GETNODE(set); - BDDVAR n_level = bddnode_getvariable(n); - if (n_level == level) return 1; - if (n_level > level) return 0; // BDDs are ordered - set = node_high(set, n); - } - - return 0; -} - -size_t -sylvan_set_count(BDDSET set) -{ - size_t result = 0; - for (;!sylvan_set_isempty(set);set = sylvan_set_next(set)) result++; - return result; -} - -void -sylvan_set_toarray(BDDSET set, BDDVAR *arr) -{ - size_t i = 0; - while (!sylvan_set_isempty(set)) { - bddnode_t n = GETNODE(set); - arr[i++] = bddnode_getvariable(n); - set = node_high(set, n); - } -} - -TASK_IMPL_2(BDDSET, sylvan_set_fromarray, BDDVAR*, arr, size_t, length) -{ - if (length == 0) return sylvan_set_empty(); - BDDSET sub = sylvan_set_fromarray(arr+1, length-1); - bdd_refs_push(sub); - BDDSET result = sylvan_set_add(sub, *arr); - bdd_refs_pop(1); - return result; -} - -void -sylvan_test_isset(BDDSET set) -{ - while (set != 
sylvan_false) { - assert(set != sylvan_true); - assert(llmsset_is_marked(nodes, set)); - bddnode_t n = GETNODE(set); - assert(node_low(set, n) == sylvan_true); - set = node_high(set, n); - } -} - -/** - * IMPLEMENTATION OF BDDMAP - */ - -BDDMAP -sylvan_map_add(BDDMAP map, BDDVAR key, BDD value) -{ - if (sylvan_map_isempty(map)) return sylvan_makenode(key, sylvan_map_empty(), value); - - bddnode_t n = GETNODE(map); - BDDVAR key_m = bddnode_getvariable(n); - - if (key_m < key) { - // add recursively and rebuild tree - BDDMAP low = sylvan_map_add(node_low(map, n), key, value); - BDDMAP result = sylvan_makenode(key_m, low, node_high(map, n)); - return result; - } else if (key_m > key) { - return sylvan_makenode(key, map, value); - } else { - // replace old - return sylvan_makenode(key, node_low(map, n), value); - } -} - -BDDMAP -sylvan_map_addall(BDDMAP map_1, BDDMAP map_2) -{ - // one of the maps is empty - if (sylvan_map_isempty(map_1)) return map_2; - if (sylvan_map_isempty(map_2)) return map_1; - - bddnode_t n_1 = GETNODE(map_1); - BDDVAR key_1 = bddnode_getvariable(n_1); - - bddnode_t n_2 = GETNODE(map_2); - BDDVAR key_2 = bddnode_getvariable(n_2); - - BDDMAP result; - if (key_1 < key_2) { - // key_1, recurse on n_1->low, map_2 - BDDMAP low = sylvan_map_addall(node_low(map_1, n_1), map_2); - result = sylvan_makenode(key_1, low, node_high(map_1, n_1)); - } else if (key_1 > key_2) { - // key_2, recurse on map_1, n_2->low - BDDMAP low = sylvan_map_addall(map_1, node_low(map_2, n_2)); - result = sylvan_makenode(key_2, low, node_high(map_2, n_2)); - } else { - // equal: key_2, recurse on n_1->low, n_2->low - BDDMAP low = sylvan_map_addall(node_low(map_1, n_1), node_low(map_2, n_2)); - result = sylvan_makenode(key_2, low, node_high(map_2, n_2)); - } - return result; -} - -BDDMAP -sylvan_map_remove(BDDMAP map, BDDVAR key) -{ - if (sylvan_map_isempty(map)) return map; - - bddnode_t n = GETNODE(map); - BDDVAR key_m = bddnode_getvariable(n); - - if (key_m < key) { - BDDMAP low = sylvan_map_remove(node_low(map, n), key); - BDDMAP result = sylvan_makenode(key_m, low, node_high(map, n)); - return result; - } else if (key_m > key) { - return map; - } else { - return node_low(map, n); - } -} - -BDDMAP -sylvan_map_removeall(BDDMAP map, BDDSET toremove) -{ - if (sylvan_map_isempty(map)) return map; - if (sylvan_set_isempty(toremove)) return map; - - bddnode_t n_1 = GETNODE(map); - BDDVAR key_1 = bddnode_getvariable(n_1); - - bddnode_t n_2 = GETNODE(toremove); - BDDVAR key_2 = bddnode_getvariable(n_2); - - if (key_1 < key_2) { - BDDMAP low = sylvan_map_removeall(node_low(map, n_1), toremove); - BDDMAP result = sylvan_makenode(key_1, low, node_high(map, n_1)); - return result; - } else if (key_1 > key_2) { - return sylvan_map_removeall(map, node_high(toremove, n_2)); - } else { - return sylvan_map_removeall(node_low(map, n_1), node_high(toremove, n_2)); - } -} - -int -sylvan_map_in(BDDMAP map, BDDVAR key) -{ - while (!sylvan_map_isempty(map)) { - bddnode_t n = GETNODE(map); - BDDVAR n_level = bddnode_getvariable(n); - if (n_level == key) return 1; - if (n_level > key) return 0; // BDDs are ordered - map = node_low(map, n); - } - - return 0; -} - -size_t -sylvan_map_count(BDDMAP map) -{ - size_t r=0; - while (!sylvan_map_isempty(map)) { r++; map=sylvan_map_next(map); } - return r; -} - -BDDMAP -sylvan_set_to_map(BDDSET set, BDD value) -{ - if (sylvan_set_isempty(set)) return sylvan_map_empty(); - bddnode_t set_n = GETNODE(set); - BDD sub = sylvan_set_to_map(node_high(set, set_n), value); - BDD result = 
sylvan_makenode(sub, bddnode_getvariable(set_n), value); - return result; -} - -/** - * Determine the support of a BDD (all variables used in the BDD) - */ -TASK_IMPL_1(BDD, sylvan_support, BDD, bdd) -{ - if (bdd == sylvan_true || bdd == sylvan_false) return sylvan_set_empty(); // return empty set - - sylvan_gc_test(); - - sylvan_stats_count(BDD_SUPPORT); - - BDD result; - if (cache_get3(CACHE_BDD_SUPPORT, bdd, 0, 0, &result)) { - sylvan_stats_count(BDD_SUPPORT_CACHED); - return result; - } - - bddnode_t n = GETNODE(bdd); - BDD high, low, set; - - /* compute recursively */ - bdd_refs_spawn(SPAWN(sylvan_support, bddnode_getlow(n))); - high = bdd_refs_push(CALL(sylvan_support, bddnode_gethigh(n))); - low = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_support))); - - /* take intersection of support of low and support of high */ - set = sylvan_and(low, high); - bdd_refs_pop(2); - - /* add current level to set */ - result = sylvan_makenode(bddnode_getvariable(n), sylvan_false, set); - - if (cache_put3(CACHE_BDD_SUPPORT, bdd, 0, 0, result)) sylvan_stats_count(BDD_SUPPORT_CACHEDPUT); - return result; -} - -static void -sylvan_unmark_rec(bddnode_t node) -{ - if (bddnode_getmark(node)) { - bddnode_setmark(node, 0); - if (!sylvan_isconst(bddnode_getlow(node))) sylvan_unmark_rec(GETNODE(bddnode_getlow(node))); - if (!sylvan_isconst(bddnode_gethigh(node))) sylvan_unmark_rec(GETNODE(bddnode_gethigh(node))); - } -} - -/** - * fprint, print - */ -void -sylvan_fprint(FILE *f, BDD bdd) -{ - sylvan_serialize_reset(); - size_t v = sylvan_serialize_add(bdd); - fprintf(f, "%s%zu,", bdd&sylvan_complement?"!":"", v); - sylvan_serialize_totext(f); -} - -void -sylvan_print(BDD bdd) -{ - sylvan_fprint(stdout, bdd); -} - -/** - * Output to .DOT files - */ - -/*** - * We keep a set [level -> [node]] using AVLset - */ -struct level_to_nodeset { - BDDVAR level; - avl_node_t *set; -}; - -AVL(level_to_nodeset, struct level_to_nodeset) -{ - if (left->level > right->level) return 1; - if (right->level > left->level) return -1; - return 0; -} - -AVL(nodeset, BDD) -{ - if (*left > *right) return 1; - if (*right > *left) return -1; - return 0; -} - -/* returns 1 if inserted, 0 if already existed */ -static int __attribute__((noinline)) -sylvan_dothelper_register(avl_node_t **set, BDD bdd) -{ - struct level_to_nodeset s, *ss; - s.level = sylvan_var(bdd); - ss = level_to_nodeset_search(*set, &s); - if (ss == NULL) { - s.set = NULL; - ss = level_to_nodeset_put(set, &s, NULL); - } - assert(ss != NULL); - return nodeset_insert(&ss->set, &bdd); -} - -static void -sylvan_fprintdot_rec(FILE *out, BDD bdd, avl_node_t **levels) -{ - bdd = BDD_STRIPMARK(bdd); - if (bdd == sylvan_false) return; - if (!sylvan_dothelper_register(levels, bdd)) return; - - BDD low = sylvan_low(bdd); - BDD high = sylvan_high(bdd); - fprintf(out, "\"%" PRIx64 "\" [label=\"%d\"];\n", bdd, sylvan_var(bdd)); - fprintf(out, "\"%" PRIx64 "\" -> \"%" PRIx64 "\" [style=dashed];\n", bdd, low); - fprintf(out, "\"%" PRIx64 "\" -> \"%" PRIx64 "\" [style=solid dir=both arrowtail=%s];\n", bdd, BDD_STRIPMARK(high), BDD_HASMARK(high) ? 
"dot" : "none"); - sylvan_fprintdot_rec(out, low, levels); - sylvan_fprintdot_rec(out, high, levels); -} - -void -sylvan_fprintdot(FILE *out, BDD bdd) -{ - fprintf(out, "digraph \"DD\" {\n"); - fprintf(out, "graph [dpi = 300];\n"); - fprintf(out, "center = true;\n"); - fprintf(out, "edge [dir = forward];\n"); - fprintf(out, "0 [shape=box, label=\"0\", style=filled, shape=box, height=0.3, width=0.3];\n"); - fprintf(out, "root [style=invis];\n"); - fprintf(out, "root -> \"%" PRIx64 "\" [style=solid dir=both arrowtail=%s];\n", BDD_STRIPMARK(bdd), BDD_HASMARK(bdd) ? "dot" : "none"); - - avl_node_t *levels = NULL; - sylvan_fprintdot_rec(out, bdd, &levels); - - if (levels != NULL) { - size_t levels_count = avl_count(levels); - struct level_to_nodeset *arr = level_to_nodeset_toarray(levels); - size_t i; - for (i=0;i \"%" PRIx64 "\" [style=dashed];\n", bdd, low); - fprintf(out, "\"%" PRIx64 "\" -> \"%" PRIx64 "\" [style=solid];\n", bdd, high); - sylvan_fprintdot_nc_rec(out, low, levels); - sylvan_fprintdot_nc_rec(out, high, levels); -} - -void -sylvan_fprintdot_nc(FILE *out, BDD bdd) -{ - fprintf(out, "digraph \"DD\" {\n"); - fprintf(out, "graph [dpi = 300];\n"); - fprintf(out, "center = true;\n"); - fprintf(out, "edge [dir = forward];\n"); - fprintf(out, "\"%" PRIx64 "\" [shape=box, label=\"F\", style=filled, shape=box, height=0.3, width=0.3];\n", sylvan_false); - fprintf(out, "\"%" PRIx64 "\" [shape=box, label=\"T\", style=filled, shape=box, height=0.3, width=0.3];\n", sylvan_true); - fprintf(out, "root [style=invis];\n"); - fprintf(out, "root -> \"%" PRIx64 "\" [style=solid];\n", bdd); - - avl_node_t *levels = NULL; - sylvan_fprintdot_nc_rec(out, bdd, &levels); - - if (levels != NULL) { - size_t levels_count = avl_count(levels); - struct level_to_nodeset *arr = level_to_nodeset_toarray(levels); - size_t i; - for (i=0;ibdd > right->bdd) return 1; - if (left->bdd < right->bdd) return -1; - return 0; -} - -// Define a AVL tree type with prefix 'sylvan_ser_reversed' holding -// nodes of struct sylvan_ser with the following compare() function... 
-AVL(sylvan_ser_reversed, struct sylvan_ser) -{ - if (left->assigned > right->assigned) return 1; - if (left->assigned < right->assigned) return -1; - return 0; -} - -// Initially, both sets are empty -static avl_node_t *sylvan_ser_set = NULL; -static avl_node_t *sylvan_ser_reversed_set = NULL; - -// Start counting (assigning numbers to BDDs) at 1 -static size_t sylvan_ser_counter = 1; -static size_t sylvan_ser_done = 0; - -// Given a BDD, assign unique numbers to all nodes -static size_t -sylvan_serialize_assign_rec(BDD bdd) -{ - if (sylvan_isnode(bdd)) { - bddnode_t n = GETNODE(bdd); - - struct sylvan_ser s, *ss; - s.bdd = BDD_STRIPMARK(bdd); - ss = sylvan_ser_search(sylvan_ser_set, &s); - if (ss == NULL) { - // assign dummy value - s.assigned = 0; - ss = sylvan_ser_put(&sylvan_ser_set, &s, NULL); - - // first assign recursively - sylvan_serialize_assign_rec(bddnode_getlow(n)); - sylvan_serialize_assign_rec(bddnode_gethigh(n)); - - // assign real value - ss->assigned = sylvan_ser_counter++; - - // put a copy in the reversed table - sylvan_ser_reversed_insert(&sylvan_ser_reversed_set, ss); - } - - return ss->assigned; - } - - return BDD_STRIPMARK(bdd); -} - -size_t -sylvan_serialize_add(BDD bdd) -{ - return BDD_TRANSFERMARK(bdd, sylvan_serialize_assign_rec(bdd)); -} - -void -sylvan_serialize_reset() -{ - sylvan_ser_free(&sylvan_ser_set); - sylvan_ser_free(&sylvan_ser_reversed_set); - sylvan_ser_counter = 1; - sylvan_ser_done = 0; -} - -size_t -sylvan_serialize_get(BDD bdd) -{ - if (!sylvan_isnode(bdd)) return bdd; - struct sylvan_ser s, *ss; - s.bdd = BDD_STRIPMARK(bdd); - ss = sylvan_ser_search(sylvan_ser_set, &s); - assert(ss != NULL); - return BDD_TRANSFERMARK(bdd, ss->assigned); -} - -BDD -sylvan_serialize_get_reversed(size_t value) -{ - if (!sylvan_isnode(value)) return value; - struct sylvan_ser s, *ss; - s.assigned = BDD_STRIPMARK(value); - ss = sylvan_ser_reversed_search(sylvan_ser_reversed_set, &s); - assert(ss != NULL); - return BDD_TRANSFERMARK(value, ss->bdd); -} - -void -sylvan_serialize_totext(FILE *out) -{ - fprintf(out, "["); - avl_iter_t *it = sylvan_ser_reversed_iter(sylvan_ser_reversed_set); - struct sylvan_ser *s; - - while ((s=sylvan_ser_reversed_iter_next(it))) { - BDD bdd = s->bdd; - bddnode_t n = GETNODE(bdd); - fprintf(out, "(%zu,%u,%zu,%zu,%u),", s->assigned, - bddnode_getvariable(n), - (size_t)bddnode_getlow(n), - (size_t)BDD_STRIPMARK(bddnode_gethigh(n)), - BDD_HASMARK(bddnode_gethigh(n)) ? 
1 : 0); - } - - sylvan_ser_reversed_iter_free(it); - fprintf(out, "]"); -} - -void -sylvan_serialize_tofile(FILE *out) -{ - size_t count = avl_count(sylvan_ser_reversed_set); - assert(count >= sylvan_ser_done); - assert(count == sylvan_ser_counter-1); - count -= sylvan_ser_done; - fwrite(&count, sizeof(size_t), 1, out); - - struct sylvan_ser *s; - avl_iter_t *it = sylvan_ser_reversed_iter(sylvan_ser_reversed_set); - - /* Skip already written entries */ - size_t index = 0; - while (index < sylvan_ser_done && (s=sylvan_ser_reversed_iter_next(it))) { - index++; - assert(s->assigned == index); - } - - while ((s=sylvan_ser_reversed_iter_next(it))) { - index++; - assert(s->assigned == index); - - bddnode_t n = GETNODE(s->bdd); - - struct bddnode node; - bddnode_makenode(&node, bddnode_getvariable(n), sylvan_serialize_get(bddnode_getlow(n)), sylvan_serialize_get(bddnode_gethigh(n))); - - fwrite(&node, sizeof(struct bddnode), 1, out); - } - - sylvan_ser_done = sylvan_ser_counter-1; - sylvan_ser_reversed_iter_free(it); -} - -void -sylvan_serialize_fromfile(FILE *in) -{ - size_t count, i; - if (fread(&count, sizeof(size_t), 1, in) != 1) { - // TODO FIXME return error - printf("sylvan_serialize_fromfile: file format error, giving up\n"); - exit(-1); - } - - for (i=1; i<=count; i++) { - struct bddnode node; - if (fread(&node, sizeof(struct bddnode), 1, in) != 1) { - // TODO FIXME return error - printf("sylvan_serialize_fromfile: file format error, giving up\n"); - exit(-1); - } - - BDD low = sylvan_serialize_get_reversed(bddnode_getlow(&node)); - BDD high = sylvan_serialize_get_reversed(bddnode_gethigh(&node)); - - struct sylvan_ser s; - s.bdd = sylvan_makenode(bddnode_getvariable(&node), low, high); - s.assigned = ++sylvan_ser_done; // starts at 0 but we want 1-based... - - sylvan_ser_insert(&sylvan_ser_set, &s); - sylvan_ser_reversed_insert(&sylvan_ser_reversed_set, &s); - } -} - -/** - * Generate SHA2 structural hashes. - * Hashes are independent of location. - * Mainly useful for debugging purposes. - */ -static void -sylvan_sha2_rec(BDD bdd, SHA256_CTX *ctx) -{ - if (bdd == sylvan_true || bdd == sylvan_false) { - SHA256_Update(ctx, (void*)&bdd, sizeof(BDD)); - return; - } - - bddnode_t node = GETNODE(bdd); - if (bddnode_getmark(node) == 0) { - bddnode_setmark(node, 1); - uint32_t level = bddnode_getvariable(node); - if (BDD_STRIPMARK(bddnode_gethigh(node))) level |= 0x80000000; - SHA256_Update(ctx, (void*)&level, sizeof(uint32_t)); - sylvan_sha2_rec(bddnode_gethigh(node), ctx); - sylvan_sha2_rec(bddnode_getlow(node), ctx); - } -} - -void -sylvan_printsha(BDD bdd) -{ - sylvan_fprintsha(stdout, bdd); -} - -void -sylvan_fprintsha(FILE *f, BDD bdd) -{ - char buf[80]; - sylvan_getsha(bdd, buf); - fprintf(f, "%s", buf); -} - -void -sylvan_getsha(BDD bdd, char *target) -{ - SHA256_CTX ctx; - SHA256_Init(&ctx); - sylvan_sha2_rec(bdd, &ctx); - if (bdd != sylvan_true && bdd != sylvan_false) sylvan_unmark_rec(GETNODE(bdd)); - SHA256_End(&ctx, target); -} - -/** - * Debug tool to check that a BDD is properly ordered. - * Also that every BDD node is marked 'in-use' in the hash table. 
- */ -TASK_2(int, sylvan_test_isbdd_rec, BDD, bdd, BDDVAR, parent_var) -{ - if (bdd == sylvan_true || bdd == sylvan_false) return 1; - assert(llmsset_is_marked(nodes, BDD_STRIPMARK(bdd))); - - sylvan_stats_count(BDD_ISBDD); - - uint64_t result; - if (cache_get3(CACHE_BDD_ISBDD, bdd, 0, 0, &result)) { - sylvan_stats_count(BDD_ISBDD_CACHED); - return result; - } - - bddnode_t n = GETNODE(bdd); - BDDVAR var = bddnode_getvariable(n); - if (var <= parent_var) { - result = 0; - } else { - SPAWN(sylvan_test_isbdd_rec, node_low(bdd, n), var); - result = (uint64_t)CALL(sylvan_test_isbdd_rec, node_high(bdd, n), var); - if (!SYNC(sylvan_test_isbdd_rec)) result = 0; - } - - if (cache_put3(CACHE_BDD_ISBDD, bdd, 0, 0, result)) sylvan_stats_count(BDD_ISBDD_CACHEDPUT); - return result; -} - -TASK_IMPL_1(int, sylvan_test_isbdd, BDD, bdd) -{ - if (bdd == sylvan_true) return 1; - if (bdd == sylvan_false) return 1; - - assert(llmsset_is_marked(nodes, BDD_STRIPMARK(bdd))); - - bddnode_t n = GETNODE(bdd); - BDDVAR var = bddnode_getvariable(n); - SPAWN(sylvan_test_isbdd_rec, node_low(bdd, n), var); - int result = CALL(sylvan_test_isbdd_rec, node_high(bdd, n), var); - if (!SYNC(sylvan_test_isbdd_rec)) result = 0; - return result; -} diff --git a/resources/3rdparty/sylvan/src/sylvan_bdd.h b/resources/3rdparty/sylvan/src/sylvan_bdd.h deleted file mode 100644 index 96566e63e..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_bdd.h +++ /dev/null @@ -1,423 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* Do not include this file directly. Instead, include sylvan.h */ - -#include - -#ifndef SYLVAN_BDD_H -#define SYLVAN_BDD_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -typedef uint64_t BDD; // low 40 bits used for index, highest bit for complement, rest 0 -// BDDSET uses the BDD node hash table. A BDDSET is an ordered BDD. -typedef uint64_t BDDSET; // encodes a set of variables (e.g. for exists etc.) -// BDDMAP also uses the BDD node hash table. A BDDMAP is *not* an ordered BDD. -typedef uint64_t BDDMAP; // encodes a function of variable->BDD (e.g. for substitute) -typedef uint32_t BDDVAR; // low 24 bits only - -#define sylvan_complement ((uint64_t)0x8000000000000000) -#define sylvan_false ((BDD)0x0000000000000000) -#define sylvan_true (sylvan_false|sylvan_complement) -#define sylvan_invalid ((BDD)0x7fffffffffffffff) - -#define sylvan_isconst(bdd) (bdd == sylvan_true || bdd == sylvan_false) -#define sylvan_isnode(bdd) (bdd != sylvan_true && bdd != sylvan_false) - -/** - * Initialize BDD functionality. - * - * Granularity (BDD only) determines usage of operation cache. Smallest value is 1: use the operation cache always. - * Higher values mean that the cache is used less often. Variables are grouped such that - * the cache is used when going to the next group, i.e., with granularity=3, variables [0,1,2] are in the - * first group, [3,4,5] in the next, etc. 
Then no caching occur between 0->1, 1->2, 0->2. Caching occurs - * on 0->3, 1->4, 2->3, etc. - * - * A reasonable default is a granularity of 4-16, strongly depending on the structure of the BDDs. - */ -void sylvan_init_bdd(int granularity); - -/* Create a BDD representing just or the negation of */ -BDD sylvan_ithvar(BDDVAR var); -static inline BDD sylvan_nithvar(BDD var) { return sylvan_ithvar(var) ^ sylvan_complement; } - -/* Retrieve the of the BDD node */ -BDDVAR sylvan_var(BDD bdd); - -/* Follow and edges */ -BDD sylvan_low(BDD bdd); -BDD sylvan_high(BDD bdd); - -/* Add or remove external reference to BDD */ -BDD sylvan_ref(BDD a); -void sylvan_deref(BDD a); - -/* For use in custom mark functions */ -VOID_TASK_DECL_1(sylvan_gc_mark_rec, BDD); -#define sylvan_gc_mark_rec(mdd) CALL(sylvan_gc_mark_rec, mdd) - -/* Return the number of external references */ -size_t sylvan_count_refs(); - -/* Add or remove BDD pointers to protect (indirect external references) */ -void sylvan_protect(BDD* ptr); -void sylvan_unprotect(BDD* ptr); - -/* Return the number of protected BDD pointers */ -size_t sylvan_count_protected(); - -/* Mark BDD for "notify on dead" */ -#define sylvan_notify_ondead(bdd) llmsset_notify_ondead(nodes, bdd&~sylvan_complement) - -/* Unary, binary and if-then-else operations */ -#define sylvan_not(a) (((BDD)a)^sylvan_complement) -TASK_DECL_4(BDD, sylvan_ite, BDD, BDD, BDD, BDDVAR); -#define sylvan_ite(a,b,c) (CALL(sylvan_ite,a,b,c,0)) -TASK_DECL_3(BDD, sylvan_and, BDD, BDD, BDDVAR); -#define sylvan_and(a,b) (CALL(sylvan_and,a,b,0)) -TASK_DECL_3(BDD, sylvan_xor, BDD, BDD, BDDVAR); -#define sylvan_xor(a,b) (CALL(sylvan_xor,a,b,0)) -/* Do not use nested calls for xor/equiv parameter b! */ -#define sylvan_equiv(a,b) sylvan_not(sylvan_xor(a,b)) -#define sylvan_or(a,b) sylvan_not(sylvan_and(sylvan_not(a),sylvan_not(b))) -#define sylvan_nand(a,b) sylvan_not(sylvan_and(a,b)) -#define sylvan_nor(a,b) sylvan_not(sylvan_or(a,b)) -#define sylvan_imp(a,b) sylvan_not(sylvan_and(a,sylvan_not(b))) -#define sylvan_invimp(a,b) sylvan_not(sylvan_and(sylvan_not(a),b)) -#define sylvan_biimp sylvan_equiv -#define sylvan_diff(a,b) sylvan_and(a,sylvan_not(b)) -#define sylvan_less(a,b) sylvan_and(sylvan_not(a),b) - -/* Existential and Universal quantifiers */ -TASK_DECL_3(BDD, sylvan_exists, BDD, BDD, BDDVAR); -#define sylvan_exists(a, vars) (CALL(sylvan_exists, a, vars, 0)) -#define sylvan_forall(a, vars) (sylvan_not(CALL(sylvan_exists, sylvan_not(a), vars, 0))) - -/** - * Compute \exists v: A(...) \and B(...) - * Parameter vars is the cube (conjunction) of all v variables. - */ -TASK_DECL_4(BDD, sylvan_and_exists, BDD, BDD, BDDSET, BDDVAR); -#define sylvan_and_exists(a,b,vars) CALL(sylvan_and_exists,a,b,vars,0) - -/** - * Compute R(s,t) = \exists x: A(s,x) \and B(x,t) - * or R(s) = \exists x: A(s,x) \and B(x) - * Assumes s,t are interleaved with s even and t odd (s+1). - * Parameter vars is the cube of all s and/or t variables. - * Other variables in A are "ignored" (existential quantification) - * Other variables in B are kept - * Alternatively, vars=false means all variables are in vars - * - * Use this function to concatenate two relations --> --> - * or to take the 'previous' of a set --> S - */ -TASK_DECL_4(BDD, sylvan_relprev, BDD, BDD, BDDSET, BDDVAR); -#define sylvan_relprev(a,b,vars) CALL(sylvan_relprev,a,b,vars,0) - -/** - * Compute R(s) = \exists x: A(x) \and B(x,s) - * with support(result) = s, support(A) = s, support(B) = s+t - * Assumes s,t are interleaved with s even and t odd (s+1). 
- * Parameter vars is the cube of all s and/or t variables. - * Other variables in A are kept - * Other variables in B are "ignored" (existential quantification) - * Alternatively, vars=false means all variables are in vars - * - * Use this function to take the 'next' of a set S --> - */ -TASK_DECL_4(BDD, sylvan_relnext, BDD, BDD, BDDSET, BDDVAR); -#define sylvan_relnext(a,b,vars) CALL(sylvan_relnext,a,b,vars,0) - -/** - * Computes the transitive closure by traversing the BDD recursively. - * See Y. Matsunaga, P. C. McGeer, R. K. Brayton - * On Computing the Transitive Closure of a State Transition Relation - * 30th ACM Design Automation Conference, 1993. - * - * The input BDD must be a transition relation that only has levels of s,t - * with s,t interleaved with s even and t odd, i.e. - * s level 0,2,4 matches with t level 1,3,5 and so forth. - */ -TASK_DECL_2(BDD, sylvan_closure, BDD, BDDVAR); -#define sylvan_closure(a) CALL(sylvan_closure,a,0); - -/** - * Calculate a@b (a constrain b), such that (b -> a@b) = (b -> a) - * Special cases: - * - a@0 = 0 - * - a@1 = f - * - 0@b = 0 - * - 1@b = 1 - * - a@a = 1 - * - a@not(a) = 0 - */ -TASK_DECL_3(BDD, sylvan_constrain, BDD, BDD, BDDVAR); -#define sylvan_constrain(f,c) (CALL(sylvan_constrain, (f), (c), 0)) - -TASK_DECL_3(BDD, sylvan_restrict, BDD, BDD, BDDVAR); -#define sylvan_restrict(f,c) (CALL(sylvan_restrict, (f), (c), 0)) - -TASK_DECL_3(BDD, sylvan_compose, BDD, BDDMAP, BDDVAR); -#define sylvan_compose(f,m) (CALL(sylvan_compose, (f), (m), 0)) - -/** - * Calculate the support of a BDD. - * A variable v is in the support of a Boolean function f iff f[v<-0] != f[v<-1] - * It is also the set of all variables in the BDD nodes of the BDD. - */ -TASK_DECL_1(BDD, sylvan_support, BDD); -#define sylvan_support(bdd) (CALL(sylvan_support, bdd)) - -/** - * A set of BDD variables is a cube (conjunction) of variables in their positive form. - * Note 2015-06-10: This used to be a union (disjunction) of variables in their positive form. - */ -// empty bddset -#define sylvan_set_empty() sylvan_true -#define sylvan_set_isempty(set) (set == sylvan_true) -// add variables to the bddset -#define sylvan_set_add(set, var) sylvan_and(set, sylvan_ithvar(var)) -#define sylvan_set_addall(set, set_to_add) sylvan_and(set, set_to_add) -// remove variables from the bddset -#define sylvan_set_remove(set, var) sylvan_exists(set, var) -#define sylvan_set_removeall(set, set_to_remove) sylvan_exists(set, set_to_remove) -// iterate through all variables -#define sylvan_set_var(set) (sylvan_var(set)) -#define sylvan_set_next(set) (sylvan_high(set)) -int sylvan_set_in(BDDSET set, BDDVAR var); -size_t sylvan_set_count(BDDSET set); -void sylvan_set_toarray(BDDSET set, BDDVAR *arr); -// variables in arr should be ordered -TASK_DECL_2(BDDSET, sylvan_set_fromarray, BDDVAR*, size_t); -#define sylvan_set_fromarray(arr, length) ( CALL(sylvan_set_fromarray, arr, length) ) -void sylvan_test_isset(BDDSET set); - -/** - * BDDMAP maps BDDVAR-->BDD, implemented using BDD nodes. - * Based on disjunction of variables, but with high edges to BDDs instead of True terminals. - */ -// empty bddmap -static inline BDDMAP sylvan_map_empty() { return sylvan_false; } -static inline int sylvan_map_isempty(BDDMAP map) { return map == sylvan_false ? 
1 : 0; } -// add key-value pairs to the bddmap -BDDMAP sylvan_map_add(BDDMAP map, BDDVAR key, BDD value); -BDDMAP sylvan_map_addall(BDDMAP map_1, BDDMAP map_2); -// remove key-value pairs from the bddmap -BDDMAP sylvan_map_remove(BDDMAP map, BDDVAR key); -BDDMAP sylvan_map_removeall(BDDMAP map, BDDSET toremove); -// iterate through all pairs -static inline BDDVAR sylvan_map_key(BDDMAP map) { return sylvan_var(map); } -static inline BDD sylvan_map_value(BDDMAP map) { return sylvan_high(map); } -static inline BDDMAP sylvan_map_next(BDDMAP map) { return sylvan_low(map); } -// is a key in the map -int sylvan_map_in(BDDMAP map, BDDVAR key); -// count number of keys -size_t sylvan_map_count(BDDMAP map); -// convert a BDDSET (cube of variables) to a map, with all variables pointing on the value -BDDMAP sylvan_set_to_map(BDDSET set, BDD value); - -/** - * Node creation primitive. - * Careful: does not check ordering! - * Preferably only use when debugging! - */ -BDD sylvan_makenode(BDDVAR level, BDD low, BDD high); - -/** - * Write a DOT representation of a BDD - */ -void sylvan_printdot(BDD bdd); -void sylvan_fprintdot(FILE *out, BDD bdd); - -/** - * Write a DOT representation of a BDD. - * This variant does not print complement edges. - */ -void sylvan_printdot_nc(BDD bdd); -void sylvan_fprintdot_nc(FILE *out, BDD bdd); - -void sylvan_print(BDD bdd); -void sylvan_fprint(FILE *f, BDD bdd); - -void sylvan_printsha(BDD bdd); -void sylvan_fprintsha(FILE *f, BDD bdd); -void sylvan_getsha(BDD bdd, char *target); // target must be at least 65 bytes... - -/** - * Calculate number of satisfying variable assignments. - * The set of variables must be >= the support of the BDD. - */ - -TASK_DECL_3(double, sylvan_satcount, BDD, BDDSET, BDDVAR); -#define sylvan_satcount(bdd, variables) CALL(sylvan_satcount, bdd, variables, 0) - -/** - * Create a BDD cube representing the conjunction of variables in their positive or negative - * form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any). - * CHANGED 2014/09/19: vars is now a BDDSET (ordered!) - */ -BDD sylvan_cube(BDDSET variables, uint8_t *cube); -TASK_DECL_3(BDD, sylvan_union_cube, BDD, BDDSET, uint8_t*); -#define sylvan_union_cube(bdd, variables, cube) CALL(sylvan_union_cube, bdd, variables, cube) - -/** - * Pick one satisfying variable assignment randomly for which is true. - * The set must include all variables in the support of . - * - * The function will set the values of str, such that - * str[index] where index is the index in the set is set to - * 0 when the variable is negative, 1 when positive, or 2 when it could be either. - * - * This implies that str[i] will be set in the variable ordering as in . - * - * Returns 1 when succesful, or 0 when no assignment is found (i.e. bdd==sylvan_false). - */ -int sylvan_sat_one(BDD bdd, BDDSET variables, uint8_t* str); - -/** - * Pick one satisfying variable assignment randomly from the given . - * Functionally equivalent to performing sylvan_cube on the result of sylvan_sat_one. - * For the result: sylvan_and(res, bdd) = res. - */ -BDD sylvan_sat_one_bdd(BDD bdd); -#define sylvan_pick_cube sylvan_sat_one_bdd - -/** - * Enumerate all satisfying variable assignments from the given using variables . - * Calls with four parameters: a user-supplied context, the array of BDD variables in , - * the cube (array of values 0 and 1 for each variables in ) and the length of the two arrays. 
- */ -LACE_TYPEDEF_CB(void, enum_cb, void*, BDDVAR*, uint8_t*, int); -VOID_TASK_DECL_4(sylvan_enum, BDD, BDDSET, enum_cb, void*); -#define sylvan_enum(bdd, vars, cb, context) CALL(sylvan_enum, bdd, vars, cb, context) -VOID_TASK_DECL_4(sylvan_enum_par, BDD, BDDSET, enum_cb, void*); -#define sylvan_enum_par(bdd, vars, cb, context) CALL(sylvan_enum_par, bdd, vars, cb, context) - -/** - * Enumerate all satisfyable variable assignments of the given using variables . - * Calls with two parameters: a user-supplied context and the cube (array of - * values 0 and 1 for each variable in ). - * The BDD that returns is pair-wise merged (using or) and returned. - */ -LACE_TYPEDEF_CB(BDD, sylvan_collect_cb, void*, uint8_t*); -TASK_DECL_4(BDD, sylvan_collect, BDD, BDDSET, sylvan_collect_cb, void*); -#define sylvan_collect(bdd, vars, cb, context) CALL(sylvan_collect, bdd, vars, cb, context) - -/** - * Compute the number of distinct paths to sylvan_true in the BDD - */ -TASK_DECL_2(double, sylvan_pathcount, BDD, BDDVAR); -#define sylvan_pathcount(bdd) (CALL(sylvan_pathcount, bdd, 0)) - -/** - * Compute the number of BDD nodes in the BDD - */ -size_t sylvan_nodecount(BDD a); - -/** - * SAVING: - * use sylvan_serialize_add on every BDD you want to store - * use sylvan_serialize_get to retrieve the key of every stored BDD - * use sylvan_serialize_tofile - * - * LOADING: - * use sylvan_serialize_fromfile (implies sylvan_serialize_reset) - * use sylvan_serialize_get_reversed for every key - * - * MISC: - * use sylvan_serialize_reset to free all allocated structures - * use sylvan_serialize_totext to write a textual list of tuples of all BDDs. - * format: [(,,,,),...] - * - * for the old sylvan_print functions, use sylvan_serialize_totext - */ -size_t sylvan_serialize_add(BDD bdd); -size_t sylvan_serialize_get(BDD bdd); -BDD sylvan_serialize_get_reversed(size_t value); -void sylvan_serialize_reset(); -void sylvan_serialize_totext(FILE *out); -void sylvan_serialize_tofile(FILE *out); -void sylvan_serialize_fromfile(FILE *in); - -/** - * For debugging - * if (part of) the BDD is not 'marked' in the nodes table, assertion fails - * if the BDD is not ordered, returns 0 - * if nicely ordered, returns 1 - */ -TASK_DECL_1(int, sylvan_test_isbdd, BDD); -#define sylvan_test_isbdd(bdd) CALL(sylvan_test_isbdd, bdd) - -/* Infrastructure for internal markings */ -typedef struct bdd_refs_internal -{ - size_t r_size, r_count; - size_t s_size, s_count; - BDD *results; - Task **spawns; -} *bdd_refs_internal_t; - -extern DECLARE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); - -static inline BDD -bdd_refs_push(BDD bdd) -{ - LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); - if (bdd_refs_key->r_count >= bdd_refs_key->r_size) { - bdd_refs_key->r_size *= 2; - bdd_refs_key->results = (BDD*)realloc(bdd_refs_key->results, sizeof(BDD) * bdd_refs_key->r_size); - } - bdd_refs_key->results[bdd_refs_key->r_count++] = bdd; - return bdd; -} - -static inline void -bdd_refs_pop(int amount) -{ - LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); - bdd_refs_key->r_count-=amount; -} - -static inline void -bdd_refs_spawn(Task *t) -{ - LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); - if (bdd_refs_key->s_count >= bdd_refs_key->s_size) { - bdd_refs_key->s_size *= 2; - bdd_refs_key->spawns = (Task**)realloc(bdd_refs_key->spawns, sizeof(Task*) * bdd_refs_key->s_size); - } - bdd_refs_key->spawns[bdd_refs_key->s_count++] = t; -} - -static inline BDD -bdd_refs_sync(BDD result) -{ - LOCALIZE_THREAD_LOCAL(bdd_refs_key, 
bdd_refs_internal_t); - bdd_refs_key->s_count--; - return result; -} - -#include "sylvan_bdd_storm.h" - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_bdd_storm.h b/resources/3rdparty/sylvan/src/sylvan_bdd_storm.h deleted file mode 100644 index 5806fba5d..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_bdd_storm.h +++ /dev/null @@ -1,3 +0,0 @@ -#define bdd_isnegated(dd) ((dd & sylvan_complement) ? 1 : 0) -#define bdd_regular(dd) (dd & ~sylvan_complement) -#define bdd_isterminal(dd) (dd == sylvan_false || dd == sylvan_true) \ No newline at end of file diff --git a/resources/3rdparty/sylvan/src/sylvan_cache.c b/resources/3rdparty/sylvan/src/sylvan_cache.c deleted file mode 100644 index 55f29f555..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_cache.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include // for fprintf -#include // for uint32_t etc -#include // for exit -#include // for mmap - -#include - -#ifndef MAP_ANONYMOUS -#define MAP_ANONYMOUS MAP_ANON -#endif - -#ifndef compiler_barrier -#define compiler_barrier() { asm volatile("" ::: "memory"); } -#endif - -#ifndef cas -#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) -#endif - -/** - * This cache is designed to store a,b,c->res, with a,b,c,res 64-bit integers. - * - * Each cache bucket takes 32 bytes, 2 per cache line. - * Each cache status bucket takes 4 bytes, 16 per cache line. - * Therefore, size 2^N = 36*(2^N) bytes. 
- */ - -struct __attribute__((packed)) cache_entry { - uint64_t a; - uint64_t b; - uint64_t c; - uint64_t res; -}; - -static size_t cache_size; // power of 2 -static size_t cache_max; // power of 2 -#if CACHE_MASK -static size_t cache_mask; // cache_size-1 -#endif -static cache_entry_t cache_table; -static uint32_t* cache_status; - -static uint64_t next_opid; - -uint64_t -cache_next_opid() -{ - return __sync_fetch_and_add(&next_opid, 1LL<<40); -} - -// status: 0x80000000 - bitlock -// 0x7fff0000 - hash (part of the 64-bit hash not used to position) -// 0x0000ffff - tag (every put increases tag field) - -/* Rotating 64-bit FNV-1a hash */ -static uint64_t -cache_hash(uint64_t a, uint64_t b, uint64_t c) -{ - const uint64_t prime = 1099511628211; - uint64_t hash = 14695981039346656037LLU; - hash = (hash ^ (a>>32)); - hash = (hash ^ a) * prime; - hash = (hash ^ b) * prime; - hash = (hash ^ c) * prime; - return hash; -} - -int -cache_get(uint64_t a, uint64_t b, uint64_t c, uint64_t *res) -{ - const uint64_t hash = cache_hash(a, b, c); -#if CACHE_MASK - volatile uint32_t *s_bucket = cache_status + (hash & cache_mask); - cache_entry_t bucket = cache_table + (hash & cache_mask); -#else - volatile uint32_t *s_bucket = cache_status + (hash % cache_size); - cache_entry_t bucket = cache_table + (hash % cache_size); -#endif - const uint32_t s = *s_bucket; - compiler_barrier(); - // abort if locked - if (s & 0x80000000) return 0; - // abort if different hash - if ((s ^ (hash>>32)) & 0x7fff0000) return 0; - // abort if key different - if (bucket->a != a || bucket->b != b || bucket->c != c) return 0; - *res = bucket->res; - compiler_barrier(); - // abort if status field changed after compiler_barrier() - return *s_bucket == s ? 1 : 0; -} - -int -cache_put(uint64_t a, uint64_t b, uint64_t c, uint64_t res) -{ - const uint64_t hash = cache_hash(a, b, c); -#if CACHE_MASK - volatile uint32_t *s_bucket = cache_status + (hash & cache_mask); - cache_entry_t bucket = cache_table + (hash & cache_mask); -#else - volatile uint32_t *s_bucket = cache_status + (hash % cache_size); - cache_entry_t bucket = cache_table + (hash % cache_size); -#endif - const uint32_t s = *s_bucket; - // abort if locked - if (s & 0x80000000) return 0; - // abort if hash identical -> no: in iscasmc this occasionally causes timeouts?! 
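/* Claim protocol used below: compose a new status word from the 15-bit hash
 * fragment and an incremented 16-bit tag, then set the lock bit with a single
 * compare-and-swap.  A reader that sampled the old status word will notice
 * the changed tag on its re-check and discard its (possibly torn) read. */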
- const uint32_t hash_mask = (hash>>32) & 0x7fff0000; - // if ((s & 0x7fff0000) == hash_mask) return 0; - // use cas to claim bucket - const uint32_t new_s = ((s+1) & 0x0000ffff) | hash_mask; - if (!cas(s_bucket, s, new_s | 0x80000000)) return 0; - // cas succesful: write data - bucket->a = a; - bucket->b = b; - bucket->c = c; - bucket->res = res; - compiler_barrier(); - // after compiler_barrier(), unlock status field - *s_bucket = new_s; - return 1; -} - -void -cache_create(size_t _cache_size, size_t _max_size) -{ -#if CACHE_MASK - // Cache size must be a power of 2 - if (__builtin_popcountll(_cache_size) != 1 || __builtin_popcountll(_max_size) != 1) { - fprintf(stderr, "cache_create: Table size must be a power of 2!\n"); - exit(1); - } -#endif - - cache_size = _cache_size; - cache_max = _max_size; -#if CACHE_MASK - cache_mask = cache_size - 1; -#endif - - if (cache_size > cache_max) { - fprintf(stderr, "cache_create: Table size must be <= max size!\n"); - exit(1); - } - - cache_table = (cache_entry_t)mmap(0, cache_max * sizeof(struct cache_entry), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); - cache_status = (uint32_t*)mmap(0, cache_max * sizeof(uint32_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); - - if (cache_table == (cache_entry_t)-1 || cache_status == (uint32_t*)-1) { - fprintf(stderr, "cache_create: Unable to allocate memory!\n"); - exit(1); - } - - next_opid = 512LL << 40; -} - -void -cache_free() -{ - munmap(cache_table, cache_max * sizeof(struct cache_entry)); - munmap(cache_status, cache_max * sizeof(uint32_t)); -} - -void -cache_clear() -{ - // a bit silly, but this works just fine, and does not require writing 0 everywhere... - cache_free(); - cache_create(cache_size, cache_max); -} - -void -cache_setsize(size_t size) -{ - // easy solution - cache_free(); - cache_create(size, cache_max); -} - -size_t -cache_getsize() -{ - return cache_size; -} - -size_t -cache_getused() -{ - size_t result = 0; - for (size_t i=0;i // for uint32_t etc - -#include "sylvan_config.h" - -#ifndef CACHE_H -#define CACHE_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#ifndef CACHE_MASK -#define CACHE_MASK 1 -#endif - -/** - * Operation cache - * - * Use cache_next_opid() at initialization time to generate unique "operation identifiers". - * You should store these operation identifiers as static globals in your implementation .C/.CPP file. 
- * - * For custom operations, just use the following functions: - * - cache_get3/cache_put3 for any operation with 1 BDD and 2 other values (that can be BDDs) - * int success = cache_get3(opid, dd1, value2, value3, &result); - * int success = cache_put3(opid, dd1, value2, value3, result); - * - cache_get4/cache_put4 for any operation with 4 BDDs - * int success = cache_get4(opid, dd1, dd2, dd3, dd4, &result); - * int success = cache_get4(opid, dd1, dd2, dd3, dd4, result); - * - * Notes: - * - The "result" is any 64-bit value - * - Use "0" for unused parameters - */ - -typedef struct cache_entry *cache_entry_t; - -/** - * Primitives for cache get/put - */ -int cache_get(uint64_t a, uint64_t b, uint64_t c, uint64_t *res); -int cache_put(uint64_t a, uint64_t b, uint64_t c, uint64_t res); - -/** - * Helper function to get next 'operation id' (during initialization of modules) - */ -uint64_t cache_next_opid(); - -/** - * dd must be MTBDD, d2/d3 can be anything - */ -static inline int __attribute__((unused)) -cache_get3(uint64_t opid, uint64_t dd, uint64_t d2, uint64_t d3, uint64_t *res) -{ - return cache_get(dd | opid, d2, d3, res); -} - -/** - * dd/dd2/dd3/dd4 must be MTBDDs - */ -static inline int __attribute__((unused)) -cache_get4(uint64_t opid, uint64_t dd, uint64_t dd2, uint64_t dd3, uint64_t dd4, uint64_t *res) -{ - uint64_t p2 = dd2 | ((dd4 & 0x00000000000fffff) << 40); // 20 bits and complement bit - if (dd4 & 0x8000000000000000) p2 |= 0x4000000000000000; - uint64_t p3 = dd3 | ((dd4 & 0x000000fffff00000) << 20); // 20 bits - - return cache_get3(opid, dd, p2, p3, res); -} - -/** - * dd must be MTBDD, d2/d3 can be anything - */ -static inline int __attribute__((unused)) -cache_put3(uint64_t opid, uint64_t dd, uint64_t d2, uint64_t d3, uint64_t res) -{ - return cache_put(dd | opid, d2, d3, res); -} - -/** - * dd/dd2/dd3/dd4 must be MTBDDs - */ -static inline int __attribute__((unused)) -cache_put4(uint64_t opid, uint64_t dd, uint64_t dd2, uint64_t dd3, uint64_t dd4, uint64_t res) -{ - uint64_t p2 = dd2 | ((dd4 & 0x00000000000fffff) << 40); // 20 bits and complement bit - if (dd4 & 0x8000000000000000) p2 |= 0x4000000000000000; - uint64_t p3 = dd3 | ((dd4 & 0x000000fffff00000) << 20); // 20 bits - - return cache_put3(opid, dd, p2, p3, res); -} -/** - * Functions for Sylvan for cache management - */ - -void cache_create(size_t _cache_size, size_t _max_size); - -void cache_free(); - -void cache_clear(); - -void cache_setsize(size_t size); - -size_t cache_getused(); - -size_t cache_getsize(); - -size_t cache_getmaxsize(); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_common.c b/resources/3rdparty/sylvan/src/sylvan_common.c deleted file mode 100644 index aa0374a7b..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_common.c +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include - -#include -#include - -#ifndef cas -#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) -#endif - -/** - * Static global variables - */ - -llmsset_t nodes; - -/** - * Retrieve nodes - */ - -llmsset_t -__sylvan_get_internal_data() -{ - return nodes; -} - -/** - * Calculate table usage (in parallel) - */ -VOID_TASK_IMPL_2(sylvan_table_usage, size_t*, filled, size_t*, total) -{ - size_t tot = llmsset_get_size(nodes); - if (filled != NULL) *filled = llmsset_count_marked(nodes); - if (total != NULL) *total = tot; -} - -/** - * Implementation of garbage collection - */ -static int gc_enabled = 1; -static volatile int gc; // variable used in cas switch to ensure only one gc at a time - -struct reg_gc_mark_entry -{ - struct reg_gc_mark_entry *next; - gc_mark_cb cb; - int order; -}; - -static struct reg_gc_mark_entry *gc_mark_register = NULL; - -void -sylvan_gc_add_mark(int order, gc_mark_cb cb) -{ - struct reg_gc_mark_entry *e = (struct reg_gc_mark_entry*)malloc(sizeof(struct reg_gc_mark_entry)); - e->cb = cb; - e->order = order; - if (gc_mark_register == NULL || gc_mark_register->order>order) { - e->next = gc_mark_register; - gc_mark_register = e; - return; - } - struct reg_gc_mark_entry *f = gc_mark_register; - for (;;) { - if (f->next == NULL) { - e->next = NULL; - f->next = e; - return; - } - if (f->next->order > order) { - e->next = f->next; - f->next = e; - return; - } - f = f->next; - } -} - -static gc_hook_cb gc_hook; - -void -sylvan_gc_set_hook(gc_hook_cb new_hook) -{ - gc_hook = new_hook; -} - -void -sylvan_gc_enable() -{ - gc_enabled = 1; -} - -void -sylvan_gc_disable() -{ - gc_enabled = 0; -} - -/* Mark hook for cache */ -VOID_TASK_0(sylvan_gc_mark_cache) -{ - /* We simply clear the cache. - * Alternatively, we could implement for example some strategy - * where part of the cache is cleared and part is marked - */ - cache_clear(); -} - -/* Default hook */ - -size_t -next_size(size_t n) -{ -#if SYLVAN_SIZE_FIBONACCI - size_t f1=1, f2=1; - for (;;) { - f2 += f1; - if (f2 > n) return f2; - f1 += f2; - if (f1 > n) return f1; - } -#else - return n*2; -#endif -} - -VOID_TASK_IMPL_0(sylvan_gc_aggressive_resize) -{ - /** - * Always resize when gc called - */ - size_t max_size = llmsset_get_max_size(nodes); - size_t size = llmsset_get_size(nodes); - if (size < max_size) { - size_t new_size = next_size(size); - if (new_size > max_size) new_size = max_size; - llmsset_set_size(nodes, new_size); - size_t cache_size = cache_getsize(); - size_t cache_max = cache_getmaxsize(); - if (cache_size < cache_max) { - new_size = next_size(cache_size); - if (new_size > cache_max) new_size = cache_max; - cache_setsize(new_size); - } - } -} - -VOID_TASK_IMPL_0(sylvan_gc_default_hook) -{ - /** - * Default behavior: - * if we can resize the nodes set, and if we use more than 50%, then increase size - */ - size_t max_size = llmsset_get_max_size(nodes); - size_t size = llmsset_get_size(nodes); - if (size < max_size) { - size_t marked = llmsset_count_marked(nodes); - if (marked*2 > size) { - size_t new_size = next_size(size); - if (new_size > max_size) new_size = max_size; - llmsset_set_size(nodes, new_size); - size_t cache_size = cache_getsize(); - size_t cache_max = cache_getmaxsize(); - if (cache_size < cache_max) { - new_size = next_size(cache_size); - if (new_size > cache_max) new_size = cache_max; - cache_setsize(new_size); - } - } - } -} - -VOID_TASK_0(sylvan_gc_call_hook) -{ - // call hook function (resizing, reordering, etc) - WRAP(gc_hook); -} - 
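/* Illustrative sketch, not part of the deleted sources: how client code
 * might combine the primitives above to install its own resize hook and an
 * extra mark callback.  The names "my_resize_hook", "my_mark_roots",
 * "my_sylvan_setup" and the table sizes are hypothetical; the Lace worker
 * pool is assumed to be running before my_sylvan_setup is called. */
VOID_TASK_0(my_resize_hook)
{
    /* grow the tables eagerly on every collection, like the aggressive strategy */
    CALL(sylvan_gc_aggressive_resize);
}

VOID_TASK_0(my_mark_roots)
{
    /* mark application-held roots here, e.g. CALL(sylvan_gc_mark_rec, some_bdd) */
}

static void
my_sylvan_setup(void)
{
    sylvan_init_package(1ULL<<22, 1ULL<<26, 1ULL<<22, 1ULL<<26);
    sylvan_gc_set_hook(TASK(my_resize_hook));    /* replaces the default hook */
    sylvan_gc_add_mark(15, TASK(my_mark_roots)); /* before unmarked nodes are destroyed (order 19) */
}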
-VOID_TASK_0(sylvan_gc_rehash) -{ - // rehash marked nodes - llmsset_rehash(nodes); -} - -VOID_TASK_0(sylvan_gc_destroy_unmarked) -{ - llmsset_destroy_unmarked(nodes); -} - -VOID_TASK_0(sylvan_gc_go) -{ - sylvan_stats_count(SYLVAN_GC_COUNT); - sylvan_timer_start(SYLVAN_GC); - - // clear hash array - llmsset_clear(nodes); - - // call mark functions, hook and rehash - struct reg_gc_mark_entry *e = gc_mark_register; - while (e != NULL) { - WRAP(e->cb); - e = e->next; - } - - sylvan_timer_stop(SYLVAN_GC); -} - -/* Perform garbage collection */ -VOID_TASK_IMPL_0(sylvan_gc) -{ - if (!gc_enabled) return; - if (cas(&gc, 0, 1)) { - NEWFRAME(sylvan_gc_go); - gc = 0; - } else { - /* wait for new frame to appear */ - while (*(Task* volatile*)&(lace_newframe.t) == 0) {} - lace_yield(__lace_worker, __lace_dq_head); - } -} - -/** - * Package init and quit functions - */ -void -sylvan_init_package(size_t tablesize, size_t maxsize, size_t cachesize, size_t max_cachesize) -{ - if (tablesize > maxsize) tablesize = maxsize; - if (cachesize > max_cachesize) cachesize = max_cachesize; - - if (maxsize > 0x000003ffffffffff) { - fprintf(stderr, "sylvan_init_package error: tablesize must be <= 42 bits!\n"); - exit(1); - } - - nodes = llmsset_create(tablesize, maxsize); - cache_create(cachesize, max_cachesize); - - gc = 0; -#if SYLVAN_AGGRESSIVE_RESIZE - gc_hook = TASK(sylvan_gc_aggressive_resize); -#else - gc_hook = TASK(sylvan_gc_default_hook); -#endif - sylvan_gc_add_mark(10, TASK(sylvan_gc_mark_cache)); - sylvan_gc_add_mark(19, TASK(sylvan_gc_destroy_unmarked)); - sylvan_gc_add_mark(20, TASK(sylvan_gc_call_hook)); - sylvan_gc_add_mark(30, TASK(sylvan_gc_rehash)); - - LACE_ME; - sylvan_stats_init(); -} - -struct reg_quit_entry -{ - struct reg_quit_entry *next; - quit_cb cb; -}; - -static struct reg_quit_entry *quit_register = NULL; - -void -sylvan_register_quit(quit_cb cb) -{ - struct reg_quit_entry *e = (struct reg_quit_entry*)malloc(sizeof(struct reg_quit_entry)); - e->next = quit_register; - e->cb = cb; - quit_register = e; -} - -void -sylvan_quit() -{ - while (quit_register != NULL) { - struct reg_quit_entry *e = quit_register; - quit_register = e->next; - e->cb(); - free(e); - } - - while (gc_mark_register != NULL) { - struct reg_gc_mark_entry *e = gc_mark_register; - gc_mark_register = e->next; - free(e); - } - - cache_free(); - llmsset_free(nodes); -} diff --git a/resources/3rdparty/sylvan/src/sylvan_common.h b/resources/3rdparty/sylvan/src/sylvan_common.h deleted file mode 100644 index 7f512a904..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_common.h +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef SYLVAN_COMMON_H -#define SYLVAN_COMMON_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/* Garbage collection test task - t */ -#define sylvan_gc_test() YIELD_NEWFRAME() - -// BDD operations -#define CACHE_BDD_ITE (0LL<<40) -#define CACHE_BDD_AND (1LL<<40) -#define CACHE_BDD_XOR (2LL<<40) -#define CACHE_BDD_EXISTS (3LL<<40) -#define CACHE_BDD_AND_EXISTS (4LL<<40) -#define CACHE_BDD_RELNEXT (5LL<<40) -#define CACHE_BDD_RELPREV (6LL<<40) -#define CACHE_BDD_SATCOUNT (7LL<<40) -#define CACHE_BDD_COMPOSE (8LL<<40) -#define CACHE_BDD_RESTRICT (9LL<<40) -#define CACHE_BDD_CONSTRAIN (10LL<<40) -#define CACHE_BDD_CLOSURE (11LL<<40) -#define CACHE_BDD_ISBDD (12LL<<40) -#define CACHE_BDD_SUPPORT (13LL<<40) -#define CACHE_BDD_PATHCOUNT (14LL<<40) - -// MDD operations -#define CACHE_MDD_RELPROD (20LL<<40) -#define CACHE_MDD_MINUS (21LL<<40) -#define CACHE_MDD_UNION (22LL<<40) -#define CACHE_MDD_INTERSECT (23LL<<40) -#define CACHE_MDD_PROJECT (24LL<<40) -#define CACHE_MDD_JOIN (25LL<<40) -#define CACHE_MDD_MATCH (26LL<<40) -#define CACHE_MDD_RELPREV (27LL<<40) -#define CACHE_MDD_SATCOUNT (28LL<<40) -#define CACHE_MDD_SATCOUNTL1 (29LL<<40) -#define CACHE_MDD_SATCOUNTL2 (30LL<<40) - -// MTBDD operations -#define CACHE_MTBDD_APPLY (40LL<<40) -#define CACHE_MTBDD_UAPPLY (41LL<<40) -#define CACHE_MTBDD_ABSTRACT (42LL<<40) -#define CACHE_MTBDD_ITE (43LL<<40) -#define CACHE_MTBDD_AND_EXISTS (44LL<<40) -#define CACHE_MTBDD_SUPPORT (45LL<<40) -#define CACHE_MTBDD_COMPOSE (46LL<<40) -#define CACHE_MTBDD_EQUAL_NORM (47LL<<40) -#define CACHE_MTBDD_EQUAL_NORM_REL (48LL<<40) -#define CACHE_MTBDD_MINIMUM (49LL<<40) -#define CACHE_MTBDD_MAXIMUM (50LL<<40) -#define CACHE_MTBDD_LEQ (51LL<<40) -#define CACHE_MTBDD_LESS (52LL<<40) -#define CACHE_MTBDD_GEQ (53LL<<40) -#define CACHE_MTBDD_GREATER (54LL<<40) -#define CACHE_MTBDD_NONZERO_COUNT (55LL<<40) - -/** - * Registration of quit functions - */ -typedef void (*quit_cb)(); -void sylvan_register_quit(quit_cb cb); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_config.h b/resources/3rdparty/sylvan/src/sylvan_config.h deleted file mode 100644 index cb234227d..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_config.h +++ /dev/null @@ -1,30 +0,0 @@ -/* Operation cache: use bitmasks for module (size must be power of 2!) */ -#ifndef CACHE_MASK -#define CACHE_MASK 1 -#endif - -/* Nodes table: use bitmasks for module (size must be power of 2!) */ -#ifndef LLMSSET_MASK -#define LLMSSET_MASK 1 -#endif - -/** - * Use Fibonacci sequence as resizing strategy. - * This MAY result in more conservative memory consumption, but is not - * great for performance. - * By default, powers of 2 should be used. - * If you set this, then set CACHE_MASK and LLMSSET_MASK to 0. 
- */ -#ifndef SYLVAN_SIZE_FIBONACCI -#define SYLVAN_SIZE_FIBONACCI 0 -#endif - -/* Enable/disable counters and timers */ -#ifndef SYLVAN_STATS -#define SYLVAN_STATS 0 -#endif - -/* Aggressive or conservative resizing strategy */ -#ifndef SYLVAN_AGGRESSIVE_RESIZE -#define SYLVAN_AGGRESSIVE_RESIZE 1 -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_gmp.c b/resources/3rdparty/sylvan/src/sylvan_gmp.c deleted file mode 100644 index 0437b1be8..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_gmp.c +++ /dev/null @@ -1,595 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - - -/** - * helper function for hash - */ -#ifndef rotl64 -static inline uint64_t -rotl64(uint64_t x, int8_t r) -{ - return ((x<>(64-r))); -} -#endif - -static uint64_t -gmp_hash(const uint64_t v, const uint64_t seed) -{ - /* Hash the mpq in pointer v - * A simpler way would be to hash the result of mpq_get_d. - * We just hash on the contents of the memory */ - - mpq_ptr x = (mpq_ptr)(size_t)v; - - const uint64_t prime = 1099511628211; - uint64_t hash = seed; - mp_limb_t *limbs; - - // hash "numerator" limbs - limbs = x[0]._mp_num._mp_d; - for (int i=0; i> 32); -} - -static int -gmp_equals(const uint64_t left, const uint64_t right) -{ - /* This function is called by the unique table when comparing a new - leaf with an existing leaf */ - mpq_ptr x = (mpq_ptr)(size_t)left; - mpq_ptr y = (mpq_ptr)(size_t)right; - - /* Just compare x and y */ - return mpq_equal(x, y) ? 1 : 0; -} - -static void -gmp_create(uint64_t *val) -{ - /* This function is called by the unique table when a leaf does not yet exist. - We make a copy, which will be stored in the hash table. */ - mpq_ptr x = (mpq_ptr)malloc(sizeof(__mpq_struct)); - mpq_init(x); - mpq_set(x, *(mpq_ptr*)val); - *(mpq_ptr*)val = x; -} - -static void -gmp_destroy(uint64_t val) -{ - /* This function is called by the unique table - when a leaf is removed during garbage collection. 
*/ - mpq_clear((mpq_ptr)val); - free((void*)val); -} - -static uint32_t gmp_type; -static uint64_t CACHE_GMP_AND_EXISTS; - -/** - * Initialize gmp custom leaves - */ -void -gmp_init() -{ - /* Register custom leaf 3 */ - gmp_type = mtbdd_register_custom_leaf(gmp_hash, gmp_equals, gmp_create, gmp_destroy); - CACHE_GMP_AND_EXISTS = cache_next_opid(); -} - -/** - * Create GMP mpq leaf - */ -MTBDD -mtbdd_gmp(mpq_t val) -{ - mpq_canonicalize(val); - return mtbdd_makeleaf(gmp_type, (size_t)val); -} - -/** - * Operation "plus" for two mpq MTBDDs - * Interpret partial function as "0" - */ -TASK_IMPL_2(MTBDD, gmp_op_plus, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - /* Check for partial functions */ - if (a == mtbdd_false) return b; - if (b == mtbdd_false) return a; - - /* If both leaves, compute plus */ - if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); - - mpq_t mres; - mpq_init(mres); - mpq_add(mres, ma, mb); - MTBDD res = mtbdd_gmp(mres); - mpq_clear(mres); - return res; - } - - /* Commutative, so swap a,b for better cache performance */ - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -/** - * Operation "minus" for two mpq MTBDDs - * Interpret partial function as "0" - */ -TASK_IMPL_2(MTBDD, gmp_op_minus, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - /* Check for partial functions */ - if (a == mtbdd_false) return gmp_neg(b); - if (b == mtbdd_false) return a; - - /* If both leaves, compute plus */ - if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); - - mpq_t mres; - mpq_init(mres); - mpq_sub(mres, ma, mb); - MTBDD res = mtbdd_gmp(mres); - mpq_clear(mres); - return res; - } - - return mtbdd_invalid; -} - -/** - * Operation "times" for two mpq MTBDDs. - * One of the parameters can be a BDD, then it is interpreted as a filter. - * For partial functions, domain is intersection - */ -TASK_IMPL_2(MTBDD, gmp_op_times, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - /* Check for partial functions and for Boolean (filter) */ - if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false; - - /* If one of Boolean, interpret as filter */ - if (a == mtbdd_true) return b; - if (b == mtbdd_true) return a; - - /* Handle multiplication of leaves */ - if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); - - // compute result - mpq_t mres; - mpq_init(mres); - mpq_mul(mres, ma, mb); - MTBDD res = mtbdd_gmp(mres); - mpq_clear(mres); - return res; - } - - /* Commutative, so make "a" the lowest for better cache performance */ - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -/** - * Operation "divide" for two mpq MTBDDs. - * For partial functions, domain is intersection - */ -TASK_IMPL_2(MTBDD, gmp_op_divide, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - /* Check for partial functions */ - if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false; - - /* Handle division of leaves */ - if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); - - // compute result - mpq_t mres; - mpq_init(mres); - mpq_div(mres, ma, mb); - MTBDD res = mtbdd_gmp(mres); - mpq_clear(mres); - return res; - } - - return mtbdd_invalid; -} - -/** - * Operation "min" for two mpq MTBDDs. 
- */ -TASK_IMPL_2(MTBDD, gmp_op_min, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - /* Handle partial functions */ - if (a == mtbdd_false) return b; - if (b == mtbdd_false) return a; - - /* Handle trivial case */ - if (a == b) return a; - - /* Compute result for leaves */ - if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); - int cmp = mpq_cmp(ma, mb); - return cmp < 0 ? a : b; - } - - /* For cache performance */ - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -/** - * Operation "max" for two mpq MTBDDs. - */ -TASK_IMPL_2(MTBDD, gmp_op_max, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - /* Handle partial functions */ - if (a == mtbdd_false) return b; - if (b == mtbdd_false) return a; - - /* Handle trivial case */ - if (a == b) return a; - - /* Compute result for leaves */ - if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); - int cmp = mpq_cmp(ma, mb); - return cmp > 0 ? a : b; - } - - /* For cache performance */ - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -/** - * Operation "neg" for one mpq MTBDD - */ -TASK_IMPL_2(MTBDD, gmp_op_neg, MTBDD, dd, size_t, p) -{ - /* Handle partial functions */ - if (dd == mtbdd_false) return mtbdd_false; - - /* Compute result for leaf */ - if (mtbdd_isleaf(dd)) { - mpq_ptr m = (mpq_ptr)mtbdd_getvalue(dd); - - mpq_t mres; - mpq_init(mres); - mpq_neg(mres, m); - MTBDD res = mtbdd_gmp(mres); - mpq_clear(mres); - return res; - } - - return mtbdd_invalid; - (void)p; -} - -/** - * Operation "abs" for one mpq MTBDD - */ -TASK_IMPL_2(MTBDD, gmp_op_abs, MTBDD, dd, size_t, p) -{ - /* Handle partial functions */ - if (dd == mtbdd_false) return mtbdd_false; - - /* Compute result for leaf */ - if (mtbdd_isleaf(dd)) { - mpq_ptr m = (mpq_ptr)mtbdd_getvalue(dd); - - mpq_t mres; - mpq_init(mres); - mpq_abs(mres, m); - MTBDD res = mtbdd_gmp(mres); - mpq_clear(mres); - return res; - } - - return mtbdd_invalid; - (void)p; -} - -/** - * The abstraction operators are called in either of two ways: - * - with k=0, then just calculate "a op b" - * - with k<>0, then just calculate "a := a op a", k times - */ - -TASK_IMPL_3(MTBDD, gmp_abstract_op_plus, MTBDD, a, MTBDD, b, int, k) -{ - if (k==0) { - return mtbdd_apply(a, b, TASK(gmp_op_plus)); - } else { - MTBDD res = a; - for (int i=0; i= value (double) to True, or False otherwise. - */ -TASK_2(MTBDD, gmp_op_threshold_d, MTBDD, a, size_t, svalue) -{ - /* Handle partial function */ - if (a == mtbdd_false) return mtbdd_false; - - /* Compute result */ - if (mtbdd_isleaf(a)) { - double value = *(double*)&svalue; - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - return mpq_get_d(ma) >= value ? mtbdd_true : mtbdd_false; - } - - return mtbdd_invalid; -} - -/** - * Convert to Boolean MTBDD, terminals > value (double) to True, or False otherwise. - */ -TASK_2(MTBDD, gmp_op_strict_threshold_d, MTBDD, a, size_t, svalue) -{ - /* Handle partial function */ - if (a == mtbdd_false) return mtbdd_false; - - /* Compute result */ - if (mtbdd_isleaf(a)) { - double value = *(double*)&svalue; - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - return mpq_get_d(ma) > value ? 
mtbdd_true : mtbdd_false; - } - - return mtbdd_invalid; -} - -TASK_IMPL_2(MTBDD, gmp_threshold_d, MTBDD, dd, double, d) -{ - return mtbdd_uapply(dd, TASK(gmp_op_threshold_d), *(size_t*)&d); -} - -TASK_IMPL_2(MTBDD, gmp_strict_threshold_d, MTBDD, dd, double, d) -{ - return mtbdd_uapply(dd, TASK(gmp_op_strict_threshold_d), *(size_t*)&d); -} - -/** - * Operation "threshold" for mpq MTBDDs. - * The second parameter must be a mpq leaf. - */ -TASK_IMPL_2(MTBDD, gmp_op_threshold, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - /* Check for partial functions */ - if (a == mtbdd_false) return mtbdd_false; - - /* Handle comparison of leaves */ - if (mtbdd_isleaf(a)) { - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); - int cmp = mpq_cmp(ma, mb); - return cmp >= 0 ? mtbdd_true : mtbdd_false; - } - - return mtbdd_invalid; -} - -/** - * Operation "strict threshold" for mpq MTBDDs. - * The second parameter must be a mpq leaf. - */ -TASK_IMPL_2(MTBDD, gmp_op_strict_threshold, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - /* Check for partial functions */ - if (a == mtbdd_false) return mtbdd_false; - - /* Handle comparison of leaves */ - if (mtbdd_isleaf(a)) { - mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); - mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); - int cmp = mpq_cmp(ma, mb); - return cmp > 0 ? mtbdd_true : mtbdd_false; - } - - return mtbdd_invalid; -} - -/** - * Multiply and , and abstract variables using summation. - * This is similar to the "and_exists" operation in BDDs. - */ -TASK_IMPL_3(MTBDD, gmp_and_exists, MTBDD, a, MTBDD, b, MTBDD, v) -{ - /* Check terminal cases */ - - /* If v == true, then is an empty set */ - if (v == mtbdd_true) return mtbdd_apply(a, b, TASK(gmp_op_times)); - - /* Try the times operator on a and b */ - MTBDD result = CALL(gmp_op_times, &a, &b); - if (result != mtbdd_invalid) { - /* Times operator successful, store reference (for garbage collection) */ - mtbdd_refs_push(result); - /* ... and perform abstraction */ - result = mtbdd_abstract(result, v, TASK(gmp_abstract_op_plus)); - mtbdd_refs_pop(1); - /* Note that the operation cache is used in mtbdd_abstract */ - return result; - } - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache. Note that we do this now, since the times operator might swap a and b (commutative) */ - if (cache_get3(CACHE_GMP_AND_EXISTS, a, b, v, &result)) return result; - - /* Now, v is not a constant, and either a or b is not a constant */ - - /* Get top variable */ - int la = mtbdd_isleaf(a); - int lb = mtbdd_isleaf(b); - mtbddnode_t na = la ? 0 : GETNODE(a); - mtbddnode_t nb = lb ? 0 : GETNODE(b); - uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); - uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); - uint32_t var = va < vb ? va : vb; - - mtbddnode_t nv = GETNODE(v); - uint32_t vv = mtbddnode_getvariable(nv); - - if (vv < var) { - /* Recursive, then abstract result */ - result = CALL(gmp_and_exists, a, b, node_gethigh(v, nv)); - mtbdd_refs_push(result); - result = mtbdd_apply(result, result, TASK(gmp_op_plus)); - mtbdd_refs_pop(1); - } else { - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - alow = (!la && va == var) ? node_getlow(a, na) : a; - ahigh = (!la && va == var) ? node_gethigh(a, na) : a; - blow = (!lb && vb == var) ? node_getlow(b, nb) : b; - bhigh = (!lb && vb == var) ? 
node_gethigh(b, nb) : b; - - if (vv == var) { - /* Recursive, then abstract result */ - mtbdd_refs_spawn(SPAWN(gmp_and_exists, ahigh, bhigh, node_gethigh(v, nv))); - MTBDD low = mtbdd_refs_push(CALL(gmp_and_exists, alow, blow, node_gethigh(v, nv))); - MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(gmp_and_exists))); - result = CALL(mtbdd_apply, low, high, TASK(gmp_op_plus)); - mtbdd_refs_pop(2); - } else /* vv > v */ { - /* Recursive, then create node */ - mtbdd_refs_spawn(SPAWN(gmp_and_exists, ahigh, bhigh, v)); - MTBDD low = mtbdd_refs_push(CALL(gmp_and_exists, alow, blow, v)); - MTBDD high = mtbdd_refs_sync(SYNC(gmp_and_exists)); - mtbdd_refs_pop(1); - result = mtbdd_makenode(var, low, high); - } - } - - /* Store in cache */ - cache_put3(CACHE_GMP_AND_EXISTS, a, b, v, result); - return result; -} diff --git a/resources/3rdparty/sylvan/src/sylvan_gmp.h b/resources/3rdparty/sylvan/src/sylvan_gmp.h deleted file mode 100644 index 9650c56d3..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_gmp.h +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * This is an implementation of GMP mpq custom leaves of MTBDDs - */ - -#ifndef SYLVAN_GMP_H -#define SYLVAN_GMP_H - -#include "sylvan.h" -#include - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/** - * Initialize GMP custom leaves - */ -void gmp_init(); - -/** - * Create MPQ leaf - */ -MTBDD mtbdd_gmp(mpq_t val); - -/** - * Operation "plus" for two mpq MTBDDs - */ -TASK_DECL_2(MTBDD, gmp_op_plus, MTBDD*, MTBDD*); -TASK_DECL_3(MTBDD, gmp_abstract_op_plus, MTBDD, MTBDD, int); - -/** - * Operation "minus" for two mpq MTBDDs - */ -TASK_DECL_2(MTBDD, gmp_op_minus, MTBDD*, MTBDD*); - -/** - * Operation "times" for two mpq MTBDDs - */ -TASK_DECL_2(MTBDD, gmp_op_times, MTBDD*, MTBDD*); -TASK_DECL_3(MTBDD, gmp_abstract_op_times, MTBDD, MTBDD, int); - -/** - * Operation "divide" for two mpq MTBDDs - */ -TASK_DECL_2(MTBDD, gmp_op_divide, MTBDD*, MTBDD*); - -/** - * Operation "min" for two mpq MTBDDs - */ -TASK_DECL_2(MTBDD, gmp_op_min, MTBDD*, MTBDD*); -TASK_DECL_3(MTBDD, gmp_abstract_op_min, MTBDD, MTBDD, int); - -/** - * Operation "max" for two mpq MTBDDs - */ -TASK_DECL_2(MTBDD, gmp_op_max, MTBDD*, MTBDD*); -TASK_DECL_3(MTBDD, gmp_abstract_op_max, MTBDD, MTBDD, int); - -/** - * Operation "negate" for one mpq MTBDD - */ -TASK_DECL_2(MTBDD, gmp_op_neg, MTBDD, size_t); - -/** - * Operation "abs" for one mpq MTBDD - */ -TASK_DECL_2(MTBDD, gmp_op_abs, MTBDD, size_t); - -/** - * Compute a + b - */ -#define gmp_plus(a, b) mtbdd_apply(a, b, TASK(gmp_op_plus)) - -/** - * Compute a + b - */ -#define gmp_minus(a, b) mtbdd_apply(a, b, TASK(gmp_op_minus)) - -/** - * Compute a * b - */ -#define gmp_times(a, b) mtbdd_apply(a, b, TASK(gmp_op_times)) - -/** - * Compute a * b - */ -#define gmp_divide(a, b) mtbdd_apply(a, b, TASK(gmp_op_divide)) - -/** - * Compute min(a, b) - */ -#define gmp_min(a, b) mtbdd_apply(a, b, 
TASK(gmp_op_min)) - -/** - * Compute max(a, b) - */ -#define gmp_max(a, b) mtbdd_apply(a, b, TASK(gmp_op_max)) - -/** - * Compute -a - */ -#define gmp_neg(a) mtbdd_uapply(a, TASK(gmp_op_neg), 0); - -/** - * Compute abs(a) - */ -#define gmp_abs(a) mtbdd_uapply(a, TASK(gmp_op_abs), 0); - -/** - * Abstract the variables in from by taking the sum of all values - */ -#define gmp_abstract_plus(dd, v) mtbdd_abstract(dd, v, TASK(gmp_abstract_op_plus)) - -/** - * Abstract the variables in from by taking the product of all values - */ -#define gmp_abstract_times(dd, v) mtbdd_abstract(dd, v, TASK(gmp_abstract_op_times)) - -/** - * Abstract the variables in from by taking the minimum of all values - */ -#define gmp_abstract_min(dd, v) mtbdd_abstract(dd, v, TASK(gmp_abstract_op_min)) - -/** - * Abstract the variables in from by taking the maximum of all values - */ -#define gmp_abstract_max(dd, v) mtbdd_abstract(dd, v, TASK(gmp_abstract_op_max)) - -/** - * Multiply and , and abstract variables using summation. - * This is similar to the "and_exists" operation in BDDs. - */ -TASK_DECL_3(MTBDD, gmp_and_exists, MTBDD, MTBDD, MTBDD); -#define gmp_and_exists(a, b, vars) CALL(gmp_and_exists, a, b, vars) - -/** - * Convert to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; - * Parameter
    is the MTBDD to convert; parameter is an GMP mpq leaf - */ -TASK_DECL_2(MTBDD, gmp_op_threshold, MTBDD*, MTBDD*); -#define gmp_threshold(dd, value) mtbdd_apply(dd, value, TASK(gmp_op_threshold)); - -/** - * Convert to a Boolean MTBDD, translate terminals > value to 1 and to 0 otherwise; - * Parameter
    is the MTBDD to convert; parameter is an GMP mpq leaf - */ -TASK_DECL_2(MTBDD, gmp_op_strict_threshold, MTBDD*, MTBDD*); -#define gmp_strict_threshold(dd, value) mtbdd_apply(dd, value, TASK(gmp_op_strict_threshold)); - -/** - * Convert to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, gmp_threshold_d, MTBDD, double); -#define gmp_threshold_d(dd, value) CALL(gmp_threshold_d, dd, value) - -/** - * Convert to a Boolean MTBDD, translate terminals > value to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, gmp_strict_threshold_d, MTBDD, double); -#define gmp_strict_threshold_d(dd, value) CALL(gmp_strict_threshold_d, dd, value) - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_ldd.c b/resources/3rdparty/sylvan/src/sylvan_ldd.c deleted file mode 100644 index 2bd10c5ba..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_ldd.c +++ /dev/null @@ -1,2558 +0,0 @@ -/* - * Copyright 2011-2014 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/** - * MDD node structure - */ -typedef struct __attribute__((packed)) mddnode { - uint64_t a, b; -} * mddnode_t; // 16 bytes - -// RmRR RRRR RRRR VVVV | VVVV DcDD DDDD DDDD (little endian - in memory) -// VVVV RRRR RRRR RRRm | DDDD DDDD DDDc VVVV (big endian) - -// Ensure our mddnode is 16 bytes -typedef char __lddmc_check_mddnode_t_is_16_bytes[(sizeof(struct mddnode)==16) ? 1 : -1]; - -inline uint32_t -mddnode_getvalue(mddnode_t n) -{ - return *(uint32_t*)((uint8_t*)n+6); -} - -inline uint8_t -mddnode_getmark(mddnode_t n) -{ - return n->a & 1; -} - -inline uint8_t -mddnode_getcopy(mddnode_t n) -{ - return n->b & 0x10000 ? 1 : 0; -} - -inline uint64_t -mddnode_getright(mddnode_t n) -{ - return (n->a & 0x0000ffffffffffff) >> 1; -} - -inline uint64_t -mddnode_getdown(mddnode_t n) -{ - return n->b >> 17; -} - -inline void -mddnode_setvalue(mddnode_t n, uint32_t value) -{ - *(uint32_t*)((uint8_t*)n+6) = value; -} - -inline void -mddnode_setmark(mddnode_t n, uint8_t mark) -{ - n->a = (n->a & 0xfffffffffffffffe) | (mark ? 
1 : 0); -} - -inline void -mddnode_setright(mddnode_t n, uint64_t right) -{ - n->a = (n->a & 0xffff000000000001) | (right << 1); -} - -inline void -mddnode_setdown(mddnode_t n, uint64_t down) -{ - n->b = (n->b & 0x000000000001ffff) | (down << 16); -} - -inline void -mddnode_make(mddnode_t n, uint32_t value, uint64_t right, uint64_t down) -{ - n->a = right << 1; - n->b = down << 17; - *(uint32_t*)((uint8_t*)n+6) = value; -} - -inline void -mddnode_makecopy(mddnode_t n, uint64_t right, uint64_t down) -{ - n->a = right << 1; - n->b = ((down << 1) | 1) << 16; -} - -#define GETNODE(mdd) ((mddnode_t)llmsset_index_to_ptr(nodes, mdd)) - -/** - * Implementation of garbage collection - */ - -/* Recursively mark MDD nodes as 'in use' */ -VOID_TASK_IMPL_1(lddmc_gc_mark_rec, MDD, mdd) -{ - if (mdd <= lddmc_true) return; - - if (llmsset_mark(nodes, mdd)) { - mddnode_t n = GETNODE(mdd); - SPAWN(lddmc_gc_mark_rec, mddnode_getright(n)); - CALL(lddmc_gc_mark_rec, mddnode_getdown(n)); - SYNC(lddmc_gc_mark_rec); - } -} - -/** - * External references - */ - -refs_table_t mdd_refs; - -MDD -lddmc_ref(MDD a) -{ - if (a == lddmc_true || a == lddmc_false) return a; - refs_up(&mdd_refs, a); - return a; -} - -void -lddmc_deref(MDD a) -{ - if (a == lddmc_true || a == lddmc_false) return; - refs_down(&mdd_refs, a); -} - -size_t -lddmc_count_refs() -{ - return refs_count(&mdd_refs); -} - -/* Called during garbage collection */ -VOID_TASK_0(lddmc_gc_mark_external_refs) -{ - // iterate through refs hash table, mark all found - size_t count=0; - uint64_t *it = refs_iter(&mdd_refs, 0, mdd_refs.refs_size); - while (it != NULL) { - SPAWN(lddmc_gc_mark_rec, refs_next(&mdd_refs, &it, mdd_refs.refs_size)); - count++; - } - while (count--) { - SYNC(lddmc_gc_mark_rec); - } -} - -/* Infrastructure for internal markings */ -DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); - -VOID_TASK_0(lddmc_refs_mark_task) -{ - LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); - size_t i, j=0; - for (i=0; ir_count; i++) { - if (j >= 40) { - while (j--) SYNC(lddmc_gc_mark_rec); - j=0; - } - SPAWN(lddmc_gc_mark_rec, lddmc_refs_key->results[i]); - j++; - } - for (i=0; is_count; i++) { - Task *t = lddmc_refs_key->spawns[i]; - if (!TASK_IS_STOLEN(t)) break; - if (TASK_IS_COMPLETED(t)) { - if (j >= 40) { - while (j--) SYNC(lddmc_gc_mark_rec); - j=0; - } - SPAWN(lddmc_gc_mark_rec, *(BDD*)TASK_RESULT(t)); - j++; - } - } - while (j--) SYNC(lddmc_gc_mark_rec); -} - -VOID_TASK_0(lddmc_refs_mark) -{ - TOGETHER(lddmc_refs_mark_task); -} - -VOID_TASK_0(lddmc_refs_init_task) -{ - lddmc_refs_internal_t s = (lddmc_refs_internal_t)malloc(sizeof(struct lddmc_refs_internal)); - s->r_size = 128; - s->r_count = 0; - s->s_size = 128; - s->s_count = 0; - s->results = (BDD*)malloc(sizeof(BDD) * 128); - s->spawns = (Task**)malloc(sizeof(Task*) * 128); - SET_THREAD_LOCAL(lddmc_refs_key, s); -} - -VOID_TASK_0(lddmc_refs_init) -{ - INIT_THREAD_LOCAL(lddmc_refs_key); - TOGETHER(lddmc_refs_init_task); - sylvan_gc_add_mark(10, TASK(lddmc_refs_mark)); -} - -/** - * Initialize and quit functions - */ - -static void -lddmc_quit() -{ - refs_free(&mdd_refs); -} - -void -sylvan_init_ldd() -{ - sylvan_register_quit(lddmc_quit); - sylvan_gc_add_mark(10, TASK(lddmc_gc_mark_external_refs)); - - // Sanity check - if (sizeof(struct mddnode) != 16) { - fprintf(stderr, "Invalid size of mdd nodes: %ld\n", sizeof(struct mddnode)); - exit(1); - } - - refs_create(&mdd_refs, 1024); - - LACE_ME; - CALL(lddmc_refs_init); -} - -/** - * Primitives - */ - -MDD 
-lddmc_makenode(uint32_t value, MDD ifeq, MDD ifneq) -{ - if (ifeq == lddmc_false) return ifneq; - - // check if correct (should be false, or next in value) - assert(ifneq != lddmc_true); - if (ifneq != lddmc_false) assert(value < mddnode_getvalue(GETNODE(ifneq))); - - struct mddnode n; - mddnode_make(&n, value, ifneq, ifeq); - - int created; - uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - lddmc_refs_push(ifeq); - lddmc_refs_push(ifneq); - LACE_ME; - sylvan_gc(); - lddmc_refs_pop(1); - - index = llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - fprintf(stderr, "MDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); - exit(1); - } - } - - if (created) sylvan_stats_count(LDD_NODES_CREATED); - else sylvan_stats_count(LDD_NODES_REUSED); - - return (MDD)index; -} - -MDD -lddmc_make_copynode(MDD ifeq, MDD ifneq) -{ - struct mddnode n; - mddnode_makecopy(&n, ifneq, ifeq); - - int created; - uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - lddmc_refs_push(ifeq); - lddmc_refs_push(ifneq); - LACE_ME; - sylvan_gc(); - lddmc_refs_pop(1); - - index = llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - fprintf(stderr, "MDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); - exit(1); - } - } - - if (created) sylvan_stats_count(LDD_NODES_CREATED); - else sylvan_stats_count(LDD_NODES_REUSED); - - return (MDD)index; -} - -MDD -lddmc_extendnode(MDD mdd, uint32_t value, MDD ifeq) -{ - if (mdd <= lddmc_true) return lddmc_makenode(value, ifeq, mdd); - - mddnode_t n = GETNODE(mdd); - if (mddnode_getcopy(n)) return lddmc_make_copynode(mddnode_getdown(n), lddmc_extendnode(mddnode_getright(n), value, ifeq)); - uint32_t n_value = mddnode_getvalue(n); - if (n_value < value) return lddmc_makenode(n_value, mddnode_getdown(n), lddmc_extendnode(mddnode_getright(n), value, ifeq)); - if (n_value == value) return lddmc_makenode(value, ifeq, mddnode_getright(n)); - /* (n_value > value) */ return lddmc_makenode(value, ifeq, mdd); -} - -uint32_t -lddmc_getvalue(MDD mdd) -{ - return mddnode_getvalue(GETNODE(mdd)); -} - -MDD -lddmc_getdown(MDD mdd) -{ - return mddnode_getdown(GETNODE(mdd)); -} - -MDD -lddmc_getright(MDD mdd) -{ - return mddnode_getright(GETNODE(mdd)); -} - -MDD -lddmc_follow(MDD mdd, uint32_t value) -{ - for (;;) { - if (mdd <= lddmc_true) return mdd; - const mddnode_t n = GETNODE(mdd); - if (!mddnode_getcopy(n)) { - const uint32_t v = mddnode_getvalue(n); - if (v == value) return mddnode_getdown(n); - if (v > value) return lddmc_false; - } - mdd = mddnode_getright(n); - } -} - -int -lddmc_iscopy(MDD mdd) -{ - if (mdd <= lddmc_true) return 0; - - mddnode_t n = GETNODE(mdd); - return mddnode_getcopy(n) ? 
1 : 0; -} - -MDD -lddmc_followcopy(MDD mdd) -{ - if (mdd <= lddmc_true) return lddmc_false; - - mddnode_t n = GETNODE(mdd); - if (mddnode_getcopy(n)) return mddnode_getdown(n); - else return lddmc_false; -} - -/** - * MDD operations - */ -static inline int -match_ldds(MDD *one, MDD *two) -{ - MDD m1 = *one, m2 = *two; - if (m1 == lddmc_false || m2 == lddmc_false) return 0; - mddnode_t n1 = GETNODE(m1), n2 = GETNODE(m2); - uint32_t v1 = mddnode_getvalue(n1), v2 = mddnode_getvalue(n2); - while (v1 != v2) { - if (v1 < v2) { - m1 = mddnode_getright(n1); - if (m1 == lddmc_false) return 0; - n1 = GETNODE(m1); - v1 = mddnode_getvalue(n1); - } else if (v1 > v2) { - m2 = mddnode_getright(n2); - if (m2 == lddmc_false) return 0; - n2 = GETNODE(m2); - v2 = mddnode_getvalue(n2); - } - } - *one = m1; - *two = m2; - return 1; -} - -TASK_IMPL_2(MDD, lddmc_union, MDD, a, MDD, b) -{ - /* Terminal cases */ - if (a == b) return a; - if (a == lddmc_false) return b; - if (b == lddmc_false) return a; - assert(a != lddmc_true && b != lddmc_true); // expecting same length - - /* Test gc */ - sylvan_gc_test(); - - sylvan_stats_count(LDD_UNION); - - /* Improve cache behavior */ - if (a < b) { MDD tmp=b; b=a; a=tmp; } - - /* Access cache */ - MDD result; - if (cache_get3(CACHE_MDD_UNION, a, b, 0, &result)) { - sylvan_stats_count(LDD_UNION_CACHED); - return result; - } - - /* Get nodes */ - mddnode_t na = GETNODE(a); - mddnode_t nb = GETNODE(b); - - const int na_copy = mddnode_getcopy(na) ? 1 : 0; - const int nb_copy = mddnode_getcopy(nb) ? 1 : 0; - const uint32_t na_value = mddnode_getvalue(na); - const uint32_t nb_value = mddnode_getvalue(nb); - - /* Perform recursive calculation */ - if (na_copy && nb_copy) { - lddmc_refs_spawn(SPAWN(lddmc_union, mddnode_getdown(na), mddnode_getdown(nb))); - MDD right = CALL(lddmc_union, mddnode_getright(na), mddnode_getright(nb)); - lddmc_refs_push(right); - MDD down = lddmc_refs_sync(SYNC(lddmc_union)); - lddmc_refs_pop(1); - result = lddmc_make_copynode(down, right); - } else if (na_copy) { - MDD right = CALL(lddmc_union, mddnode_getright(na), b); - result = lddmc_make_copynode(mddnode_getdown(na), right); - } else if (nb_copy) { - MDD right = CALL(lddmc_union, a, mddnode_getright(nb)); - result = lddmc_make_copynode(mddnode_getdown(nb), right); - } else if (na_value < nb_value) { - MDD right = CALL(lddmc_union, mddnode_getright(na), b); - result = lddmc_makenode(na_value, mddnode_getdown(na), right); - } else if (na_value == nb_value) { - lddmc_refs_spawn(SPAWN(lddmc_union, mddnode_getdown(na), mddnode_getdown(nb))); - MDD right = CALL(lddmc_union, mddnode_getright(na), mddnode_getright(nb)); - lddmc_refs_push(right); - MDD down = lddmc_refs_sync(SYNC(lddmc_union)); - lddmc_refs_pop(1); - result = lddmc_makenode(na_value, down, right); - } else /* na_value > nb_value */ { - MDD right = CALL(lddmc_union, a, mddnode_getright(nb)); - result = lddmc_makenode(nb_value, mddnode_getdown(nb), right); - } - - /* Write to cache */ - if (cache_put3(CACHE_MDD_UNION, a, b, 0, result)) sylvan_stats_count(LDD_UNION_CACHEDPUT); - - return result; -} - -TASK_IMPL_2(MDD, lddmc_minus, MDD, a, MDD, b) -{ - /* Terminal cases */ - if (a == b) return lddmc_false; - if (a == lddmc_false) return lddmc_false; - if (b == lddmc_false) return a; - assert(b != lddmc_true); - assert(a != lddmc_true); // Universe is unknown!! // Possibly depth issue? 
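  /*
   * Note: lddmc_minus computes the set difference a \ b level by level:
   * values that occur only in a are kept together with their subtrees,
   * values that occur in both recurse downwards, and values that occur
   * only in b are skipped. Illustrative example: with a = {<1,2>, <1,3>}
   * and b = {<1,3>}, the result is {<1,2>}.
   */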
- - /* Test gc */ - sylvan_gc_test(); - - sylvan_stats_count(LDD_MINUS); - - /* Access cache */ - MDD result; - if (cache_get3(CACHE_MDD_MINUS, a, b, 0, &result)) { - sylvan_stats_count(LDD_MINUS_CACHED); - return result; - } - - /* Get nodes */ - mddnode_t na = GETNODE(a); - mddnode_t nb = GETNODE(b); - uint32_t na_value = mddnode_getvalue(na); - uint32_t nb_value = mddnode_getvalue(nb); - - /* Perform recursive calculation */ - if (na_value < nb_value) { - MDD right = CALL(lddmc_minus, mddnode_getright(na), b); - result = lddmc_makenode(na_value, mddnode_getdown(na), right); - } else if (na_value == nb_value) { - lddmc_refs_spawn(SPAWN(lddmc_minus, mddnode_getright(na), mddnode_getright(nb))); - MDD down = CALL(lddmc_minus, mddnode_getdown(na), mddnode_getdown(nb)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_minus)); - lddmc_refs_pop(1); - result = lddmc_makenode(na_value, down, right); - } else /* na_value > nb_value */ { - result = CALL(lddmc_minus, a, mddnode_getright(nb)); - } - - /* Write to cache */ - if (cache_put3(CACHE_MDD_MINUS, a, b, 0, result)) sylvan_stats_count(LDD_MINUS_CACHEDPUT); - - return result; -} - -/* result: a plus b; res2: b minus a */ -TASK_IMPL_3(MDD, lddmc_zip, MDD, a, MDD, b, MDD*, res2) -{ - /* Terminal cases */ - if (a == b) { - *res2 = lddmc_false; - return a; - } - if (a == lddmc_false) { - *res2 = b; - return b; - } - if (b == lddmc_false) { - *res2 = lddmc_false; - return a; - } - - assert(a != lddmc_true && b != lddmc_true); // expecting same length - - /* Test gc */ - sylvan_gc_test(); - - /* Maybe not the ideal way */ - sylvan_stats_count(LDD_ZIP); - - /* Access cache */ - MDD result; - if (cache_get3(CACHE_MDD_UNION, a, b, 0, &result) && - cache_get3(CACHE_MDD_MINUS, b, a, 0, res2)) { - sylvan_stats_count(LDD_ZIP); - return result; - } - - /* Get nodes */ - mddnode_t na = GETNODE(a); - mddnode_t nb = GETNODE(b); - uint32_t na_value = mddnode_getvalue(na); - uint32_t nb_value = mddnode_getvalue(nb); - - /* Perform recursive calculation */ - if (na_value < nb_value) { - MDD right = CALL(lddmc_zip, mddnode_getright(na), b, res2); - result = lddmc_makenode(na_value, mddnode_getdown(na), right); - } else if (na_value == nb_value) { - MDD down2, right2; - lddmc_refs_spawn(SPAWN(lddmc_zip, mddnode_getdown(na), mddnode_getdown(nb), &down2)); - MDD right = CALL(lddmc_zip, mddnode_getright(na), mddnode_getright(nb), &right2); - lddmc_refs_push(right); - lddmc_refs_push(right2); - MDD down = lddmc_refs_sync(SYNC(lddmc_zip)); - lddmc_refs_pop(2); - result = lddmc_makenode(na_value, down, right); - *res2 = lddmc_makenode(na_value, down2, right2); - } else /* na_value > nb_value */ { - MDD right2; - MDD right = CALL(lddmc_zip, a, mddnode_getright(nb), &right2); - result = lddmc_makenode(nb_value, mddnode_getdown(nb), right); - *res2 = lddmc_makenode(nb_value, mddnode_getdown(nb), right2); - } - - /* Write to cache */ - int c1 = cache_put3(CACHE_MDD_UNION, a, b, 0, result); - int c2 = cache_put3(CACHE_MDD_MINUS, b, a, 0, *res2); - if (c1 && c2) sylvan_stats_count(LDD_ZIP_CACHEDPUT); - - return result; -} - -TASK_IMPL_2(MDD, lddmc_intersect, MDD, a, MDD, b) -{ - /* Terminal cases */ - if (a == b) return a; - if (a == lddmc_false || b == lddmc_false) return lddmc_false; - assert(a != lddmc_true && b != lddmc_true); - - /* Test gc */ - sylvan_gc_test(); - - sylvan_stats_count(LDD_INTERSECT); - - /* Get nodes */ - mddnode_t na = GETNODE(a); - mddnode_t nb = GETNODE(b); - uint32_t na_value = mddnode_getvalue(na); - uint32_t nb_value = 
mddnode_getvalue(nb); - - /* Skip nodes if possible */ - while (na_value != nb_value) { - if (na_value < nb_value) { - a = mddnode_getright(na); - if (a == lddmc_false) return lddmc_false; - na = GETNODE(a); - na_value = mddnode_getvalue(na); - } - if (nb_value < na_value) { - b = mddnode_getright(nb); - if (b == lddmc_false) return lddmc_false; - nb = GETNODE(b); - nb_value = mddnode_getvalue(nb); - } - } - - /* Access cache */ - MDD result; - if (cache_get3(CACHE_MDD_INTERSECT, a, b, 0, &result)) { - sylvan_stats_count(LDD_INTERSECT_CACHED); - return result; - } - - /* Perform recursive calculation */ - lddmc_refs_spawn(SPAWN(lddmc_intersect, mddnode_getright(na), mddnode_getright(nb))); - MDD down = CALL(lddmc_intersect, mddnode_getdown(na), mddnode_getdown(nb)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_intersect)); - lddmc_refs_pop(1); - result = lddmc_makenode(na_value, down, right); - - /* Write to cache */ - if (cache_put3(CACHE_MDD_INTERSECT, a, b, 0, result)) sylvan_stats_count(LDD_INTERSECT_CACHEDPUT); - - return result; -} - -// proj: -1 (rest 0), 0 (no match), 1 (match) -TASK_IMPL_3(MDD, lddmc_match, MDD, a, MDD, b, MDD, proj) -{ - if (a == b) return a; - if (a == lddmc_false || b == lddmc_false) return lddmc_false; - - mddnode_t p_node = GETNODE(proj); - uint32_t p_val = mddnode_getvalue(p_node); - if (p_val == (uint32_t)-1) return a; - - assert(a != lddmc_true); - if (p_val == 1) assert(b != lddmc_true); - - /* Test gc */ - sylvan_gc_test(); - - /* Skip nodes if possible */ - if (p_val == 1) { - if (!match_ldds(&a, &b)) return lddmc_false; - } - - sylvan_stats_count(LDD_MATCH); - - /* Access cache */ - MDD result; - if (cache_get3(CACHE_MDD_MATCH, a, b, proj, &result)) { - sylvan_stats_count(LDD_MATCH_CACHED); - return result; - } - - /* Perform recursive calculation */ - mddnode_t na = GETNODE(a); - MDD down; - if (p_val == 1) { - mddnode_t nb = GETNODE(b); - /* right = */ lddmc_refs_spawn(SPAWN(lddmc_match, mddnode_getright(na), mddnode_getright(nb), proj)); - down = CALL(lddmc_match, mddnode_getdown(na), mddnode_getdown(nb), mddnode_getdown(p_node)); - } else { - /* right = */ lddmc_refs_spawn(SPAWN(lddmc_match, mddnode_getright(na), b, proj)); - down = CALL(lddmc_match, mddnode_getdown(na), b, mddnode_getdown(p_node)); - } - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_match)); - lddmc_refs_pop(1); - result = lddmc_makenode(mddnode_getvalue(na), down, right); - - /* Write to cache */ - if (cache_put3(CACHE_MDD_MATCH, a, b, proj, result)) sylvan_stats_count(LDD_MATCH_CACHEDPUT); - - return result; -} - -TASK_4(MDD, lddmc_relprod_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj) -{ - return lddmc_makenode(val, CALL(lddmc_relprod, set, rel, proj), lddmc_false); -} - -// meta: -1 (end; rest not in rel), 0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write) -TASK_IMPL_3(MDD, lddmc_relprod, MDD, set, MDD, rel, MDD, meta) -{ - if (set == lddmc_false) return lddmc_false; - if (rel == lddmc_false) return lddmc_false; - - mddnode_t n_meta = GETNODE(meta); - uint32_t m_val = mddnode_getvalue(n_meta); - if (m_val == (uint32_t)-1) return set; - if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true); - - /* Skip nodes if possible */ - if (!mddnode_getcopy(GETNODE(rel))) { - if (m_val == 1 || m_val == 3) { - if (!match_ldds(&set, &rel)) return lddmc_false; - } - } - - /* Test gc */ - sylvan_gc_test(); - - sylvan_stats_count(LDD_RELPROD); - - /* Access cache */ - MDD result; - if (cache_get3(CACHE_MDD_RELPROD, set, rel, 
meta, &result)) { - sylvan_stats_count(LDD_RELPROD_CACHED); - return result; - } - - mddnode_t n_set = GETNODE(set); - mddnode_t n_rel = GETNODE(rel); - - /* Recursive operations */ - if (m_val == 0) { // not in rel - lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), rel, meta)); - MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_relprod)); - lddmc_refs_pop(1); - result = lddmc_makenode(mddnode_getvalue(n_set), down, right); - } else if (m_val == 1) { // read - // read layer: if not copy, then set&rel are already matched - lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getright(n_rel), meta)); // spawn next read in list - - // for this read, either it is copy ('for all') or it is normal match - if (mddnode_getcopy(n_rel)) { - // spawn for every value to copy (set) - int count = 0; - for (;;) { - // stay same level of set (for write) - lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta))); - count++; - set = mddnode_getright(n_set); - if (set == lddmc_false) break; - n_set = GETNODE(set); - } - - // sync+union (one by one) - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - } else { - // stay same level of set (for write) - result = CALL(lddmc_relprod, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta)); - } - - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); // sync next read in list - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } else if (m_val == 3) { // only-read - if (mddnode_getcopy(n_rel)) { - // copy on read ('for any value') - // result = union(result_with_copy, result_without_copy) - lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getright(n_rel), meta)); // spawn without_copy - - // spawn for every value to copy (set) - int count = 0; - for (;;) { - lddmc_refs_spawn(SPAWN(lddmc_relprod_help, mddnode_getvalue(n_set), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta))); - count++; - set = mddnode_getright(n_set); - if (set == lddmc_false) break; - n_set = GETNODE(set); - } - - // sync+union (one by one) - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_help)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - - // add result from without_copy - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } else { - // only-read, without copy - lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), mddnode_getright(n_rel), meta)); - MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_relprod)); - lddmc_refs_pop(1); - result = lddmc_makenode(mddnode_getvalue(n_set), down, right); - } - } else if (m_val == 2 || m_val == 4) { // write, only-write - if (m_val == 4) { - // only-write, so we need to include 'for all variables' - lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), rel, meta)); // next in set - } - - // spawn for every value to write (rel) - int count = 0; - for (;;) 
{ - uint32_t value; - if (mddnode_getcopy(n_rel)) value = mddnode_getvalue(n_set); - else value = mddnode_getvalue(n_rel); - lddmc_refs_spawn(SPAWN(lddmc_relprod_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta))); - count++; - rel = mddnode_getright(n_rel); - if (rel == lddmc_false) break; - n_rel = GETNODE(rel); - } - - // sync+union (one by one) - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_help)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - - if (m_val == 4) { - // sync+union with other variables - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - } - - /* Write to cache */ - if (cache_put3(CACHE_MDD_RELPROD, set, rel, meta, result)) sylvan_stats_count(LDD_RELPROD_CACHEDPUT); - - return result; -} - -TASK_5(MDD, lddmc_relprod_union_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj, MDD, un) -{ - return lddmc_makenode(val, CALL(lddmc_relprod_union, set, rel, proj, un), lddmc_false); -} - -// meta: -1 (end; rest not in rel), 0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write) -TASK_IMPL_4(MDD, lddmc_relprod_union, MDD, set, MDD, rel, MDD, meta, MDD, un) -{ - if (set == lddmc_false) return un; - if (rel == lddmc_false) return un; - if (un == lddmc_false) return CALL(lddmc_relprod, set, rel, meta); - - mddnode_t n_meta = GETNODE(meta); - uint32_t m_val = mddnode_getvalue(n_meta); - if (m_val == (uint32_t)-1) return CALL(lddmc_union, set, un); - - // check depths (this triggers on logic error) - if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true && un != lddmc_true); - - /* Skip nodes if possible */ - if (!mddnode_getcopy(GETNODE(rel))) { - if (m_val == 1 || m_val == 3) { - if (!match_ldds(&set, &rel)) return un; - } - } - - mddnode_t n_set = GETNODE(set); - mddnode_t n_rel = GETNODE(rel); - mddnode_t n_un = GETNODE(un); - - // in some cases, we know un.value < result.value - if (m_val == 0 || m_val == 3) { - // if m_val == 0, no read/write, then un.value < set.value? - // if m_val == 3, only read (write same), then un.value < set.value? 
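  /*
   * In those cases any prefix of 'un' whose values are smaller than anything
   * this level of the relational product can produce is copied to the result
   * unchanged (see the block below), so only the remaining right siblings
   * need the recursive union.
   *
   * Reminder of the 'meta' encoding shared with lddmc_relprod: one
   * illustrative (not prescriptive) way to build it for a relation that reads
   * and writes its first variable and leaves the second untouched would be
   *     uint32_t m[] = { 1, 2, 0, (uint32_t)-1 };
   *     MDD meta = lddmc_cube(m, 4);
   * the exact construction is up to the caller.
   */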
- uint32_t set_value = mddnode_getvalue(n_set); - uint32_t un_value = mddnode_getvalue(n_un); - if (un_value < set_value) { - MDD right = CALL(lddmc_relprod_union, set, rel, meta, mddnode_getright(n_un)); - if (right == mddnode_getright(n_un)) return un; - else return lddmc_makenode(mddnode_getvalue(n_un), mddnode_getdown(n_un), right); - } - } else if (m_val == 2 || m_val == 4) { - // if we write, then we only know for certain that un.value < result.value if - // the root of rel is not a copy node - if (!mddnode_getcopy(n_rel)) { - uint32_t rel_value = mddnode_getvalue(n_rel); - uint32_t un_value = mddnode_getvalue(n_un); - if (un_value < rel_value) { - MDD right = CALL(lddmc_relprod_union, set, rel, meta, mddnode_getright(n_un)); - if (right == mddnode_getright(n_un)) return un; - else return lddmc_makenode(mddnode_getvalue(n_un), mddnode_getdown(n_un), right); - } - } - } - - /* Test gc */ - sylvan_gc_test(); - - sylvan_stats_count(LDD_RELPROD_UNION); - - /* Access cache */ - MDD result; - if (cache_get4(CACHE_MDD_RELPROD, set, rel, meta, un, &result)) { - sylvan_stats_count(LDD_RELPROD_UNION_CACHED); - return result; - } - - /* Recursive operations */ - if (m_val == 0) { // not in rel - uint32_t set_value = mddnode_getvalue(n_set); - uint32_t un_value = mddnode_getvalue(n_un); - // set_value > un_value already checked above - if (set_value < un_value) { - lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, un)); - // going down, we don't need _union, since un does not contain this subtree - MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union)); - lddmc_refs_pop(1); - if (down == lddmc_false) result = right; - else result = lddmc_makenode(mddnode_getvalue(n_set), down, right); - } else /* set_value == un_value */ { - lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, mddnode_getright(n_un))); - MDD down = CALL(lddmc_relprod_union, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta), mddnode_getdown(n_un)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union)); - lddmc_refs_pop(1); - if (right == mddnode_getright(n_un) && down == mddnode_getdown(n_un)) result = un; - else result = lddmc_makenode(mddnode_getvalue(n_set), down, right); - } - } else if (m_val == 1) { // read - // read layer: if not copy, then set&rel are already matched - lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getright(n_rel), meta, un)); // spawn next read in list - - // for this read, either it is copy ('for all') or it is normal match - if (mddnode_getcopy(n_rel)) { - // spawn for every value in set (copy = for all) - int count = 0; - for (;;) { - // stay same level of set and un (for write) - lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), un)); - count++; - set = mddnode_getright(n_set); - if (set == lddmc_false) break; - n_set = GETNODE(set); - } - - // sync+union (one by one) - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - } else { - // stay same level of set and un (for write) - result = CALL(lddmc_relprod_union, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), un); - } - - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); // sync 
next read in list - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } else if (m_val == 3) { // only-read - // un < set already checked above - if (mddnode_getcopy(n_rel)) { - // copy on read ('for any value') - // result = union(result_with_copy, result_without_copy) - lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getright(n_rel), meta, un)); // spawn without_copy - - // spawn for every value to copy (set) - int count = 0; - result = lddmc_false; - for (;;) { - uint32_t set_value = mddnode_getvalue(n_set); - uint32_t un_value = mddnode_getvalue(n_un); - if (un_value < set_value) { - // this is a bit tricky - // the result of this will simply be "un_value, mddnode_getdown(n_un), false" which is intended - lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, un_value, lddmc_false, lddmc_false, mddnode_getdown(n_meta), mddnode_getdown(n_un))); - count++; - un = mddnode_getright(n_un); - if (un == lddmc_false) { - result = CALL(lddmc_relprod, set, rel, meta); - break; - } - n_un = GETNODE(un); - } else if (un_value > set_value) { - // tricky again. the result of this is a normal relprod - lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, set_value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), lddmc_false)); - count++; - set = mddnode_getright(n_set); - if (set == lddmc_false) { - result = un; - break; - } - n_set = GETNODE(set); - } else /* un_value == set_value */ { - lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, set_value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un))); - count++; - set = mddnode_getright(n_set); - un = mddnode_getright(n_un); - if (set == lddmc_false) { - result = un; - break; - } else if (un == lddmc_false) { - result = CALL(lddmc_relprod, set, rel, meta); - break; - } - n_set = GETNODE(set); - n_un = GETNODE(un); - } - } - - // sync+union (one by one) - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union_help)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - - // add result from without_copy - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } else { - // only-read, not a copy node - uint32_t set_value = mddnode_getvalue(n_set); - uint32_t un_value = mddnode_getvalue(n_un); - - // already did un_value < set_value - if (un_value > set_value) { - lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), mddnode_getright(n_rel), meta, un)); - MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union)); - lddmc_refs_pop(1); - result = lddmc_makenode(mddnode_getvalue(n_set), down, right); - } else /* un_value == set_value */ { - lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), mddnode_getright(n_rel), meta, mddnode_getright(n_un))); - MDD down = CALL(lddmc_relprod_union, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union)); - lddmc_refs_pop(1); - result = lddmc_makenode(mddnode_getvalue(n_set), down, right); - } - } - } else if (m_val == 2 || m_val == 4) { // write, only-write - if (m_val == 4) { - // only-write, so we need to 
include 'for all variables' - lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, un)); // next in set - } - - // spawn for every value to write (rel) - int count = 0; - for (;;) { - uint32_t value; - if (mddnode_getcopy(n_rel)) value = mddnode_getvalue(n_set); - else value = mddnode_getvalue(n_rel); - uint32_t un_value = mddnode_getvalue(n_un); - if (un_value < value) { - // the result of this will simply be "un_value, mddnode_getdown(n_un), false" which is intended - lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, un_value, lddmc_false, lddmc_false, mddnode_getdown(n_meta), mddnode_getdown(n_un))); - count++; - un = mddnode_getright(n_un); - if (un == lddmc_false) { - result = CALL(lddmc_relprod, set, rel, meta); - break; - } - n_un = GETNODE(un); - } else if (un_value > value) { - lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), lddmc_false)); - count++; - rel = mddnode_getright(n_rel); - if (rel == lddmc_false) { - result = un; - break; - } - n_rel = GETNODE(rel); - } else /* un_value == value */ { - lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un))); - count++; - rel = mddnode_getright(n_rel); - un = mddnode_getright(n_un); - if (rel == lddmc_false) { - result = un; - break; - } else if (un == lddmc_false) { - result = CALL(lddmc_relprod, set, rel, meta); - break; - } - n_rel = GETNODE(rel); - n_un = GETNODE(un); - } - } - - // sync+union (one by one) - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union_help)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - - if (m_val == 4) { - // sync+union with other variables - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - } - - /* Write to cache */ - if (cache_put4(CACHE_MDD_RELPROD, set, rel, meta, un, result)) sylvan_stats_count(LDD_RELPROD_UNION_CACHEDPUT); - - return result; -} - -TASK_5(MDD, lddmc_relprev_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj, MDD, uni) -{ - return lddmc_makenode(val, CALL(lddmc_relprev, set, rel, proj, uni), lddmc_false); -} - -/** - * Calculate all predecessors to a in uni according to rel[meta] - * follows the same semantics as relprod - * i.e. 
0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write), -1 (end; rest=0) - */ -TASK_IMPL_4(MDD, lddmc_relprev, MDD, set, MDD, rel, MDD, meta, MDD, uni) -{ - if (set == lddmc_false) return lddmc_false; - if (rel == lddmc_false) return lddmc_false; - if (uni == lddmc_false) return lddmc_false; - - mddnode_t n_meta = GETNODE(meta); - uint32_t m_val = mddnode_getvalue(n_meta); - if (m_val == (uint32_t)-1) { - if (set == uni) return set; - else return lddmc_intersect(set, uni); - } - - if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true && uni != lddmc_true); - - /* Skip nodes if possible */ - if (m_val == 0) { - // not in rel: match set and uni ('intersect') - if (!match_ldds(&set, &uni)) return lddmc_false; - } else if (mddnode_getcopy(GETNODE(rel))) { - // read+copy: no matching (pre is everything in uni) - // write+copy: no matching (match after split: set and uni) - // only-read+copy: match set and uni - // only-write+copy: no matching (match after split: set and uni) - if (m_val == 3) { - if (!match_ldds(&set, &uni)) return lddmc_false; - } - } else if (m_val == 1) { - // read: match uni and rel - if (!match_ldds(&uni, &rel)) return lddmc_false; - } else if (m_val == 2) { - // write: match set and rel - if (!match_ldds(&set, &rel)) return lddmc_false; - } else if (m_val == 3) { - // only-read: match uni and set and rel - mddnode_t n_set = GETNODE(set); - mddnode_t n_rel = GETNODE(rel); - mddnode_t n_uni = GETNODE(uni); - uint32_t n_set_value = mddnode_getvalue(n_set); - uint32_t n_rel_value = mddnode_getvalue(n_rel); - uint32_t n_uni_value = mddnode_getvalue(n_uni); - while (n_uni_value != n_rel_value || n_rel_value != n_set_value) { - if (n_uni_value < n_rel_value || n_uni_value < n_set_value) { - uni = mddnode_getright(n_uni); - if (uni == lddmc_false) return lddmc_false; - n_uni = GETNODE(uni); - n_uni_value = mddnode_getvalue(n_uni); - } - if (n_set_value < n_rel_value || n_set_value < n_uni_value) { - set = mddnode_getright(n_set); - if (set == lddmc_false) return lddmc_false; - n_set = GETNODE(set); - n_set_value = mddnode_getvalue(n_set); - } - if (n_rel_value < n_set_value || n_rel_value < n_uni_value) { - rel = mddnode_getright(n_rel); - if (rel == lddmc_false) return lddmc_false; - n_rel = GETNODE(rel); - n_rel_value = mddnode_getvalue(n_rel); - } - } - } else if (m_val == 4) { - // only-write: match set and rel (then use whole universe) - if (!match_ldds(&set, &rel)) return lddmc_false; - } - - /* Test gc */ - sylvan_gc_test(); - - sylvan_stats_count(LDD_RELPREV); - - /* Access cache */ - MDD result; - if (cache_get4(CACHE_MDD_RELPREV, set, rel, meta, uni, &result)) { - sylvan_stats_count(LDD_RELPREV_CACHED); - return result; - } - - mddnode_t n_set = GETNODE(set); - mddnode_t n_rel = GETNODE(rel); - mddnode_t n_uni = GETNODE(uni); - - /* Recursive operations */ - if (m_val == 0) { // not in rel - // m_val == 0 : not in rel (intersection set and universe) - lddmc_refs_spawn(SPAWN(lddmc_relprev, mddnode_getright(n_set), rel, meta, mddnode_getright(n_uni))); - MDD down = CALL(lddmc_relprev, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta), mddnode_getdown(n_uni)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_relprev)); - lddmc_refs_pop(1); - result = lddmc_makenode(mddnode_getvalue(n_set), down, right); - } else if (m_val == 1) { // read level - // result value is in case of copy: everything in uni! - // result value is in case of not-copy: match uni and rel! 
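  /*
   * That is: a copy node on the read level matches any value, so every value
   * still present in 'uni' is enumerated as a candidate predecessor value;
   * without a copy node, 'uni' and 'rel' have already been matched above and
   * only the single shared value can appear in the result at this level.
   */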
- lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); // next in rel - if (mddnode_getcopy(n_rel)) { - // result is everything in uni - // spawn for every value to have been read (uni) - int count = 0; - for (;;) { - lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), uni)); - count++; - uni = mddnode_getright(n_uni); - if (uni == lddmc_false) break; - n_uni = GETNODE(uni); - } - - // sync+union (one by one) - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - } else { - // already matched - MDD down = CALL(lddmc_relprev, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), uni); - result = lddmc_makenode(mddnode_getvalue(n_uni), down, lddmc_false); - } - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } else if (m_val == 3) { // only-read level - // result value is in case of copy: match set and uni! (already done first match) - // result value is in case of not-copy: match set and uni and rel! - lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); // next in rel - if (mddnode_getcopy(n_rel)) { - // spawn for every matching set+uni - int count = 0; - for (;;) { - lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni))); - count++; - uni = mddnode_getright(n_uni); - if (!match_ldds(&set, &uni)) break; - n_set = GETNODE(set); - n_uni = GETNODE(uni); - } - - // sync+union (one by one) - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - } else { - // already matched - MDD down = CALL(lddmc_relprev, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)); - result = lddmc_makenode(mddnode_getvalue(n_uni), down, lddmc_false); - } - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } else if (m_val == 2) { // write level - // note: the read level has already matched the uni that was read. - // write+copy: only for the one set equal to uni... 
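  /*
   * (Copy on a write level leaves the value unchanged, so only the branch of
   * 'set' whose value equals the current 'uni' value can contribute; the copy
   * case below therefore selects that branch with lddmc_follow(set, ...).)
   */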
- // write: match set and rel (already done) - lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); - if (mddnode_getcopy(n_rel)) { - MDD down = lddmc_follow(set, mddnode_getvalue(n_uni)); - if (down != lddmc_false) { - result = CALL(lddmc_relprev, down, mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)); - } else { - result = lddmc_false; - } - } else { - result = CALL(lddmc_relprev, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)); - } - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } else if (m_val == 4) { // only-write level - // only-write+copy: match set and uni after spawn - // only-write: match set and rel (already done) - lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); - if (mddnode_getcopy(n_rel)) { - // spawn for every matching set+uni - int count = 0; - for (;;) { - if (!match_ldds(&set, &uni)) break; - n_set = GETNODE(set); - n_uni = GETNODE(uni); - lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni))); - count++; - uni = mddnode_getright(n_uni); - } - - // sync+union (one by one) - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - } else { - // spawn for every value in universe!! - int count = 0; - for (;;) { - lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni))); - count++; - uni = mddnode_getright(n_uni); - if (uni == lddmc_false) break; - n_uni = GETNODE(uni); - } - - // sync+union (one by one) - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - } - lddmc_refs_push(result); - MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); - lddmc_refs_push(result2); - result = CALL(lddmc_union, result, result2); - lddmc_refs_pop(2); - } - - /* Write to cache */ - if (cache_put4(CACHE_MDD_RELPREV, set, rel, meta, uni, result)) sylvan_stats_count(LDD_RELPREV_CACHEDPUT); - - return result; -} - -// Same 'proj' as project. 
So: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep) -TASK_IMPL_4(MDD, lddmc_join, MDD, a, MDD, b, MDD, a_proj, MDD, b_proj) -{ - if (a == lddmc_false || b == lddmc_false) return lddmc_false; - - /* Test gc */ - sylvan_gc_test(); - - mddnode_t n_a_proj = GETNODE(a_proj); - mddnode_t n_b_proj = GETNODE(b_proj); - uint32_t a_proj_val = mddnode_getvalue(n_a_proj); - uint32_t b_proj_val = mddnode_getvalue(n_b_proj); - - while (a_proj_val == 0 && b_proj_val == 0) { - a_proj = mddnode_getdown(n_a_proj); - b_proj = mddnode_getdown(n_b_proj); - n_a_proj = GETNODE(a_proj); - n_b_proj = GETNODE(b_proj); - a_proj_val = mddnode_getvalue(n_a_proj); - b_proj_val = mddnode_getvalue(n_b_proj); - } - - if (a_proj_val == (uint32_t)-2) return b; // no a left - if (b_proj_val == (uint32_t)-2) return a; // no b left - if (a_proj_val == (uint32_t)-1 && b_proj_val == (uint32_t)-1) return CALL(lddmc_intersect, a, b); - - // At this point, only proj_val {-1, 0, 1}; max one with -1; max one with 0. - const int keep_a = a_proj_val != 0; - const int keep_b = b_proj_val != 0; - - if (keep_a && keep_b) { - // If both 'keep', then match values - if (!match_ldds(&a, &b)) return lddmc_false; - } - - sylvan_stats_count(LDD_JOIN); - - /* Access cache */ - MDD result; - if (cache_get4(CACHE_MDD_JOIN, a, b, a_proj, b_proj, &result)) { - sylvan_stats_count(LDD_JOIN_CACHED); - return result; - } - - /* Perform recursive calculation */ - const mddnode_t na = GETNODE(a); - const mddnode_t nb = GETNODE(b); - uint32_t val; - MDD down; - - if (keep_a) { - if (keep_b) { - val = mddnode_getvalue(nb); - lddmc_refs_spawn(SPAWN(lddmc_join, mddnode_getright(na), mddnode_getright(nb), a_proj, b_proj)); - if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj); - if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj); - down = CALL(lddmc_join, mddnode_getdown(na), mddnode_getdown(nb), a_proj, b_proj); - } else { - val = mddnode_getvalue(na); - lddmc_refs_spawn(SPAWN(lddmc_join, mddnode_getright(na), b, a_proj, b_proj)); - if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj); - if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj); - down = CALL(lddmc_join, mddnode_getdown(na), b, a_proj, b_proj); - } - } else { - val = mddnode_getvalue(nb); - lddmc_refs_spawn(SPAWN(lddmc_join, a, mddnode_getright(nb), a_proj, b_proj)); - if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj); - if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj); - down = CALL(lddmc_join, a, mddnode_getdown(nb), a_proj, b_proj); - } - - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_join)); - lddmc_refs_pop(1); - result = lddmc_makenode(val, down, right); - - /* Write to cache */ - if (cache_put4(CACHE_MDD_JOIN, a, b, a_proj, b_proj, result)) sylvan_stats_count(LDD_JOIN_CACHEDPUT); - - return result; -} - -// so: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep) -TASK_IMPL_2(MDD, lddmc_project, const MDD, mdd, const MDD, proj) -{ - if (mdd == lddmc_false) return lddmc_false; // projection of empty is empty - if (mdd == lddmc_true) return lddmc_true; // projection of universe is universe... - - mddnode_t p_node = GETNODE(proj); - uint32_t p_val = mddnode_getvalue(p_node); - if (p_val == (uint32_t)-1) return mdd; - if (p_val == (uint32_t)-2) return lddmc_true; // because we always end with true. 
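  /*
   * Projection keeps the levels marked 1 in 'proj' and existentially
   * quantifies the levels marked 0. Illustrative example: projecting the
   * single vector <5,7,9> onto proj = (1, 0, -1) keeps the first level,
   * removes the second and keeps the rest, yielding <5,9>.
   */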
- - sylvan_gc_test(); - - sylvan_stats_count(LDD_PROJECT); - - MDD result; - if (cache_get3(CACHE_MDD_PROJECT, mdd, proj, 0, &result)) { - sylvan_stats_count(LDD_PROJECT_CACHED); - return result; - } - - mddnode_t n = GETNODE(mdd); - - if (p_val == 1) { // keep - lddmc_refs_spawn(SPAWN(lddmc_project, mddnode_getright(n), proj)); - MDD down = CALL(lddmc_project, mddnode_getdown(n), mddnode_getdown(p_node)); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_project)); - lddmc_refs_pop(1); - result = lddmc_makenode(mddnode_getvalue(n), down, right); - } else { // quantify - if (mddnode_getdown(n) == lddmc_true) { // assume lowest level - result = lddmc_true; - } else { - int count = 0; - MDD p_down = mddnode_getdown(p_node), _mdd=mdd; - while (1) { - lddmc_refs_spawn(SPAWN(lddmc_project, mddnode_getdown(n), p_down)); - count++; - _mdd = mddnode_getright(n); - assert(_mdd != lddmc_true); - if (_mdd == lddmc_false) break; - n = GETNODE(_mdd); - } - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD down = lddmc_refs_sync(SYNC(lddmc_project)); - lddmc_refs_push(down); - result = CALL(lddmc_union, result, down); - lddmc_refs_pop(2); - } - } - } - - if (cache_put3(CACHE_MDD_PROJECT, mdd, proj, 0, result)) sylvan_stats_count(LDD_PROJECT_CACHEDPUT); - - return result; -} - -// so: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep) -TASK_IMPL_3(MDD, lddmc_project_minus, const MDD, mdd, const MDD, proj, MDD, avoid) -{ - // This implementation assumed "avoid" has correct depth - if (avoid == lddmc_true) return lddmc_false; - if (mdd == avoid) return lddmc_false; - if (mdd == lddmc_false) return lddmc_false; // projection of empty is empty - if (mdd == lddmc_true) return lddmc_true; // avoid != lddmc_true - - mddnode_t p_node = GETNODE(proj); - uint32_t p_val = mddnode_getvalue(p_node); - if (p_val == (uint32_t)-1) return lddmc_minus(mdd, avoid); - if (p_val == (uint32_t)-2) return lddmc_true; - - sylvan_gc_test(); - - sylvan_stats_count(LDD_PROJECT_MINUS); - - MDD result; - if (cache_get3(CACHE_MDD_PROJECT, mdd, proj, avoid, &result)) { - sylvan_stats_count(LDD_PROJECT_MINUS_CACHED); - return result; - } - - mddnode_t n = GETNODE(mdd); - - if (p_val == 1) { // keep - // move 'avoid' until it matches - uint32_t val = mddnode_getvalue(n); - MDD a_down = lddmc_false; - while (avoid != lddmc_false) { - mddnode_t a_node = GETNODE(avoid); - uint32_t a_val = mddnode_getvalue(a_node); - if (a_val > val) { - break; - } else if (a_val == val) { - a_down = mddnode_getdown(a_node); - break; - } - avoid = mddnode_getright(a_node); - } - lddmc_refs_spawn(SPAWN(lddmc_project_minus, mddnode_getright(n), proj, avoid)); - MDD down = CALL(lddmc_project_minus, mddnode_getdown(n), mddnode_getdown(p_node), a_down); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_project_minus)); - lddmc_refs_pop(1); - result = lddmc_makenode(val, down, right); - } else { // quantify - if (mddnode_getdown(n) == lddmc_true) { // assume lowest level - result = lddmc_true; - } else { - int count = 0; - MDD p_down = mddnode_getdown(p_node), _mdd=mdd; - while (1) { - lddmc_refs_spawn(SPAWN(lddmc_project_minus, mddnode_getdown(n), p_down, avoid)); - count++; - _mdd = mddnode_getright(n); - assert(_mdd != lddmc_true); - if (_mdd == lddmc_false) break; - n = GETNODE(_mdd); - } - result = lddmc_false; - while (count--) { - lddmc_refs_push(result); - MDD down = lddmc_refs_sync(SYNC(lddmc_project_minus)); - lddmc_refs_push(down); - result = CALL(lddmc_union, result, 
down); - lddmc_refs_pop(2); - } - } - } - - if (cache_put3(CACHE_MDD_PROJECT, mdd, proj, avoid, result)) sylvan_stats_count(LDD_PROJECT_MINUS_CACHEDPUT); - - return result; -} - -MDD -lddmc_union_cube(MDD a, uint32_t* values, size_t count) -{ - if (a == lddmc_false) return lddmc_cube(values, count); - if (a == lddmc_true) { - assert(count == 0); - return lddmc_true; - } - assert(count != 0); - - mddnode_t na = GETNODE(a); - uint32_t na_value = mddnode_getvalue(na); - - /* Only create a new node if something actually changed */ - - if (na_value < *values) { - MDD right = lddmc_union_cube(mddnode_getright(na), values, count); - if (right == mddnode_getright(na)) return a; // no actual change - return lddmc_makenode(na_value, mddnode_getdown(na), right); - } else if (na_value == *values) { - MDD down = lddmc_union_cube(mddnode_getdown(na), values+1, count-1); - if (down == mddnode_getdown(na)) return a; // no actual change - return lddmc_makenode(na_value, down, mddnode_getright(na)); - } else /* na_value > *values */ { - return lddmc_makenode(*values, lddmc_cube(values+1, count-1), a); - } -} - -MDD -lddmc_union_cube_copy(MDD a, uint32_t* values, int* copy, size_t count) -{ - if (a == lddmc_false) return lddmc_cube_copy(values, copy, count); - if (a == lddmc_true) { - assert(count == 0); - return lddmc_true; - } - assert(count != 0); - - mddnode_t na = GETNODE(a); - - /* Only create a new node if something actually changed */ - - int na_copy = mddnode_getcopy(na); - if (na_copy && *copy) { - MDD down = lddmc_union_cube_copy(mddnode_getdown(na), values+1, copy+1, count-1); - if (down == mddnode_getdown(na)) return a; // no actual change - return lddmc_make_copynode(down, mddnode_getright(na)); - } else if (na_copy) { - MDD right = lddmc_union_cube_copy(mddnode_getright(na), values, copy, count); - if (right == mddnode_getright(na)) return a; // no actual change - return lddmc_make_copynode(mddnode_getdown(na), right); - } else if (*copy) { - return lddmc_make_copynode(lddmc_cube_copy(values+1, copy+1, count-1), a); - } - - uint32_t na_value = mddnode_getvalue(na); - if (na_value < *values) { - MDD right = lddmc_union_cube_copy(mddnode_getright(na), values, copy, count); - if (right == mddnode_getright(na)) return a; // no actual change - return lddmc_makenode(na_value, mddnode_getdown(na), right); - } else if (na_value == *values) { - MDD down = lddmc_union_cube_copy(mddnode_getdown(na), values+1, copy+1, count-1); - if (down == mddnode_getdown(na)) return a; // no actual change - return lddmc_makenode(na_value, down, mddnode_getright(na)); - } else /* na_value > *values */ { - return lddmc_makenode(*values, lddmc_cube_copy(values+1, copy+1, count-1), a); - } -} - -int -lddmc_member_cube(MDD a, uint32_t* values, size_t count) -{ - while (1) { - if (a == lddmc_false) return 0; - if (a == lddmc_true) return 1; - assert(count > 0); // size mismatch - - a = lddmc_follow(a, *values); - values++; - count--; - } -} - -int -lddmc_member_cube_copy(MDD a, uint32_t* values, int* copy, size_t count) -{ - while (1) { - if (a == lddmc_false) return 0; - if (a == lddmc_true) return 1; - assert(count > 0); // size mismatch - - if (*copy) a = lddmc_followcopy(a); - else a = lddmc_follow(a, *values); - values++; - count--; - } -} - -MDD -lddmc_cube(uint32_t* values, size_t count) -{ - if (count == 0) return lddmc_true; - return lddmc_makenode(*values, lddmc_cube(values+1, count-1), lddmc_false); -} - -MDD -lddmc_cube_copy(uint32_t* values, int* copy, size_t count) -{ - if (count == 0) return lddmc_true; - if 
(*copy) return lddmc_make_copynode(lddmc_cube_copy(values+1, copy+1, count-1), lddmc_false); - else return lddmc_makenode(*values, lddmc_cube_copy(values+1, copy+1, count-1), lddmc_false); -} - -/** - * Count number of nodes for each level - */ - -static void -lddmc_nodecount_levels_mark(MDD mdd, size_t *variables) -{ - if (mdd <= lddmc_true) return; - mddnode_t n = GETNODE(mdd); - if (!mddnode_getmark(n)) { - mddnode_setmark(n, 1); - (*variables) += 1; - lddmc_nodecount_levels_mark(mddnode_getright(n), variables); - lddmc_nodecount_levels_mark(mddnode_getdown(n), variables+1); - } -} - -static void -lddmc_nodecount_levels_unmark(MDD mdd) -{ - if (mdd <= lddmc_true) return; - mddnode_t n = GETNODE(mdd); - if (mddnode_getmark(n)) { - mddnode_setmark(n, 0); - lddmc_nodecount_levels_unmark(mddnode_getright(n)); - lddmc_nodecount_levels_unmark(mddnode_getdown(n)); - } -} - -void -lddmc_nodecount_levels(MDD mdd, size_t *variables) -{ - lddmc_nodecount_levels_mark(mdd, variables); - lddmc_nodecount_levels_unmark(mdd); -} - -/** - * Count number of nodes in MDD - */ - -static size_t -lddmc_nodecount_mark(MDD mdd) -{ - if (mdd <= lddmc_true) return 0; - mddnode_t n = GETNODE(mdd); - if (mddnode_getmark(n)) return 0; - mddnode_setmark(n, 1); - return 1 + lddmc_nodecount_mark(mddnode_getdown(n)) + lddmc_nodecount_mark(mddnode_getright(n)); -} - -static void -lddmc_nodecount_unmark(MDD mdd) -{ - if (mdd <= lddmc_true) return; - mddnode_t n = GETNODE(mdd); - if (mddnode_getmark(n)) { - mddnode_setmark(n, 0); - lddmc_nodecount_unmark(mddnode_getright(n)); - lddmc_nodecount_unmark(mddnode_getdown(n)); - } -} - -size_t -lddmc_nodecount(MDD mdd) -{ - size_t result = lddmc_nodecount_mark(mdd); - lddmc_nodecount_unmark(mdd); - return result; -} - -/** - * CALCULATE NUMBER OF VAR ASSIGNMENTS THAT YIELD TRUE - */ - -TASK_IMPL_1(lddmc_satcount_double_t, lddmc_satcount_cached, MDD, mdd) -{ - if (mdd == lddmc_false) return 0.0; - if (mdd == lddmc_true) return 1.0; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - union { - lddmc_satcount_double_t d; - uint64_t s; - } hack; - - sylvan_stats_count(LDD_SATCOUNT); - - if (cache_get3(CACHE_MDD_SATCOUNT, mdd, 0, 0, &hack.s)) { - sylvan_stats_count(LDD_SATCOUNT_CACHED); - return hack.d; - } - - mddnode_t n = GETNODE(mdd); - - SPAWN(lddmc_satcount_cached, mddnode_getdown(n)); - lddmc_satcount_double_t right = CALL(lddmc_satcount_cached, mddnode_getright(n)); - hack.d = right + SYNC(lddmc_satcount_cached); - - if (cache_put3(CACHE_MDD_SATCOUNT, mdd, 0, 0, hack.s)) sylvan_stats_count(LDD_SATCOUNT_CACHEDPUT); - - return hack.d; -} - -TASK_IMPL_1(long double, lddmc_satcount, MDD, mdd) -{ - if (mdd == lddmc_false) return 0.0; - if (mdd == lddmc_true) return 1.0; - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - sylvan_stats_count(LDD_SATCOUNTL); - - union { - long double d; - struct { - uint64_t s1; - uint64_t s2; - } s; - } hack; - - if (cache_get3(CACHE_MDD_SATCOUNTL1, mdd, 0, 0, &hack.s.s1) && - cache_get3(CACHE_MDD_SATCOUNTL2, mdd, 0, 0, &hack.s.s2)) { - sylvan_stats_count(LDD_SATCOUNTL_CACHED); - return hack.d; - } - - mddnode_t n = GETNODE(mdd); - - SPAWN(lddmc_satcount, mddnode_getdown(n)); - long double right = CALL(lddmc_satcount, mddnode_getright(n)); - hack.d = right + SYNC(lddmc_satcount); - - int c1 = cache_put3(CACHE_MDD_SATCOUNTL1, mdd, 0, 0, hack.s.s1); - int c2 = cache_put3(CACHE_MDD_SATCOUNTL2, mdd, 0, 0, hack.s.s2); - if (c1 && c2) sylvan_stats_count(LDD_SATCOUNTL_CACHEDPUT); - - return hack.d; -} - -TASK_IMPL_5(MDD, 
lddmc_collect, MDD, mdd, lddmc_collect_cb, cb, void*, context, uint32_t*, values, size_t, count) -{ - if (mdd == lddmc_false) return lddmc_false; - if (mdd == lddmc_true) { - return WRAP(cb, values, count, context); - } - - mddnode_t n = GETNODE(mdd); - - lddmc_refs_spawn(SPAWN(lddmc_collect, mddnode_getright(n), cb, context, values, count)); - - uint32_t newvalues[count+1]; - if (count > 0) memcpy(newvalues, values, sizeof(uint32_t)*count); - newvalues[count] = mddnode_getvalue(n); - MDD down = CALL(lddmc_collect, mddnode_getdown(n), cb, context, newvalues, count+1); - - if (down == lddmc_false) { - MDD result = lddmc_refs_sync(SYNC(lddmc_collect)); - return result; - } - - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_collect)); - - if (right == lddmc_false) { - lddmc_refs_pop(1); - return down; - } else { - lddmc_refs_push(right); - MDD result = CALL(lddmc_union, down, right); - lddmc_refs_pop(2); - return result; - } -} - -VOID_TASK_5(_lddmc_sat_all_nopar, MDD, mdd, lddmc_enum_cb, cb, void*, context, uint32_t*, values, size_t, count) -{ - if (mdd == lddmc_false) return; - if (mdd == lddmc_true) { - WRAP(cb, values, count, context); - return; - } - - mddnode_t n = GETNODE(mdd); - values[count] = mddnode_getvalue(n); - CALL(_lddmc_sat_all_nopar, mddnode_getdown(n), cb, context, values, count+1); - CALL(_lddmc_sat_all_nopar, mddnode_getright(n), cb, context, values, count); -} - -VOID_TASK_IMPL_3(lddmc_sat_all_nopar, MDD, mdd, lddmc_enum_cb, cb, void*, context) -{ - // determine depth - size_t count=0; - MDD _mdd = mdd; - while (_mdd > lddmc_true) { - _mdd = mddnode_getdown(GETNODE(_mdd)); - assert(_mdd != lddmc_false); - count++; - } - - uint32_t values[count]; - CALL(_lddmc_sat_all_nopar, mdd, cb, context, values, 0); -} - -VOID_TASK_IMPL_5(lddmc_sat_all_par, MDD, mdd, lddmc_enum_cb, cb, void*, context, uint32_t*, values, size_t, count) -{ - if (mdd == lddmc_false) return; - if (mdd == lddmc_true) { - WRAP(cb, values, count, context); - return; - } - - mddnode_t n = GETNODE(mdd); - - SPAWN(lddmc_sat_all_par, mddnode_getright(n), cb, context, values, count); - - uint32_t newvalues[count+1]; - if (count > 0) memcpy(newvalues, values, sizeof(uint32_t)*count); - newvalues[count] = mddnode_getvalue(n); - CALL(lddmc_sat_all_par, mddnode_getdown(n), cb, context, newvalues, count+1); - - SYNC(lddmc_sat_all_par); -} - -struct lddmc_match_sat_info -{ - MDD mdd; - MDD match; - MDD proj; - size_t count; - uint32_t values[0]; -}; - -// proj: -1 (rest 0), 0 (no match), 1 (match) -VOID_TASK_3(lddmc_match_sat, struct lddmc_match_sat_info *, info, lddmc_enum_cb, cb, void*, context) -{ - MDD a = info->mdd, b = info->match, proj = info->proj; - - if (a == lddmc_false || b == lddmc_false) return; - - if (a == lddmc_true) { - assert(b == lddmc_true); - WRAP(cb, info->values, info->count, context); - return; - } - - mddnode_t p_node = GETNODE(proj); - uint32_t p_val = mddnode_getvalue(p_node); - if (p_val == (uint32_t)-1) { - assert(b == lddmc_true); - CALL(lddmc_sat_all_par, a, cb, context, info->values, info->count); - return; - } - - /* Get nodes */ - mddnode_t na = GETNODE(a); - mddnode_t nb = GETNODE(b); - uint32_t na_value = mddnode_getvalue(na); - uint32_t nb_value = mddnode_getvalue(nb); - - /* Skip nodes if possible */ - if (p_val == 1) { - while (na_value != nb_value) { - if (na_value < nb_value) { - a = mddnode_getright(na); - if (a == lddmc_false) return; - na = GETNODE(a); - na_value = mddnode_getvalue(na); - } - if (nb_value < na_value) { - b = mddnode_getright(nb); - if (b == 
lddmc_false) return; - nb = GETNODE(b); - nb_value = mddnode_getvalue(nb); - } - } - } - - struct lddmc_match_sat_info *ri = (struct lddmc_match_sat_info*)alloca(sizeof(struct lddmc_match_sat_info)+sizeof(uint32_t[info->count])); - struct lddmc_match_sat_info *di = (struct lddmc_match_sat_info*)alloca(sizeof(struct lddmc_match_sat_info)+sizeof(uint32_t[info->count+1])); - - ri->mdd = mddnode_getright(na); - di->mdd = mddnode_getdown(na); - ri->match = b; - di->match = mddnode_getdown(nb); - ri->proj = proj; - di->proj = mddnode_getdown(p_node); - ri->count = info->count; - di->count = info->count+1; - if (ri->count > 0) memcpy(ri->values, info->values, sizeof(uint32_t[info->count])); - if (di->count > 0) memcpy(di->values, info->values, sizeof(uint32_t[info->count])); - di->values[info->count] = na_value; - - SPAWN(lddmc_match_sat, ri, cb, context); - CALL(lddmc_match_sat, di, cb, context); - SYNC(lddmc_match_sat); -} - -VOID_TASK_IMPL_5(lddmc_match_sat_par, MDD, mdd, MDD, match, MDD, proj, lddmc_enum_cb, cb, void*, context) -{ - struct lddmc_match_sat_info i; - i.mdd = mdd; - i.match = match; - i.proj = proj; - i.count = 0; - CALL(lddmc_match_sat, &i, cb, context); -} - -int -lddmc_sat_one(MDD mdd, uint32_t* values, size_t count) -{ - if (mdd == lddmc_false) return 0; - if (mdd == lddmc_true) return 1; - assert(count != 0); - mddnode_t n = GETNODE(mdd); - *values = mddnode_getvalue(n); - return lddmc_sat_one(mddnode_getdown(n), values+1, count-1); -} - -MDD -lddmc_sat_one_mdd(MDD mdd) -{ - if (mdd == lddmc_false) return lddmc_false; - if (mdd == lddmc_true) return lddmc_true; - mddnode_t n = GETNODE(mdd); - MDD down = lddmc_sat_one_mdd(mddnode_getdown(n)); - return lddmc_makenode(mddnode_getvalue(n), down, lddmc_false); -} - -TASK_IMPL_4(MDD, lddmc_compose, MDD, mdd, lddmc_compose_cb, cb, void*, context, int, depth) -{ - if (depth == 0 || mdd == lddmc_false || mdd == lddmc_true) { - return WRAP(cb, mdd, context); - } else { - mddnode_t n = GETNODE(mdd); - lddmc_refs_spawn(SPAWN(lddmc_compose, mddnode_getright(n), cb, context, depth)); - MDD down = lddmc_compose(mddnode_getdown(n), cb, context, depth-1); - lddmc_refs_push(down); - MDD right = lddmc_refs_sync(SYNC(lddmc_compose)); - lddmc_refs_pop(1); - return lddmc_makenode(mddnode_getvalue(n), down, right); - } -} - -VOID_TASK_IMPL_4(lddmc_visit_seq, MDD, mdd, lddmc_visit_callbacks_t*, cbs, size_t, ctx_size, void*, context) -{ - if (WRAP(cbs->lddmc_visit_pre, mdd, context) == 0) return; - - void* context_down = alloca(ctx_size); - void* context_right = alloca(ctx_size); - WRAP(cbs->lddmc_visit_init_context, context_down, context, 1); - WRAP(cbs->lddmc_visit_init_context, context_right, context, 0); - - CALL(lddmc_visit_seq, mddnode_getdown(GETNODE(mdd)), cbs, ctx_size, context_down); - CALL(lddmc_visit_seq, mddnode_getright(GETNODE(mdd)), cbs, ctx_size, context_right); - - WRAP(cbs->lddmc_visit_post, mdd, context); -} - -VOID_TASK_IMPL_4(lddmc_visit_par, MDD, mdd, lddmc_visit_callbacks_t*, cbs, size_t, ctx_size, void*, context) -{ - if (WRAP(cbs->lddmc_visit_pre, mdd, context) == 0) return; - - void* context_down = alloca(ctx_size); - void* context_right = alloca(ctx_size); - WRAP(cbs->lddmc_visit_init_context, context_down, context, 1); - WRAP(cbs->lddmc_visit_init_context, context_right, context, 0); - - SPAWN(lddmc_visit_par, mddnode_getdown(GETNODE(mdd)), cbs, ctx_size, context_down); - CALL(lddmc_visit_par, mddnode_getright(GETNODE(mdd)), cbs, ctx_size, context_right); - SYNC(lddmc_visit_par); - - WRAP(cbs->lddmc_visit_post, mdd, 
context); -} - -/** - * GENERIC MARK/UNMARK METHODS - */ - -static inline int -lddmc_mark(mddnode_t node) -{ - if (mddnode_getmark(node)) return 0; - mddnode_setmark(node, 1); - return 1; -} - -static inline int -lddmc_unmark(mddnode_t node) -{ - if (mddnode_getmark(node)) { - mddnode_setmark(node, 0); - return 1; - } else { - return 0; - } -} - -static void -lddmc_unmark_rec(mddnode_t node) -{ - if (lddmc_unmark(node)) { - MDD node_right = mddnode_getright(node); - if (node_right > lddmc_true) lddmc_unmark_rec(GETNODE(node_right)); - MDD node_down = mddnode_getdown(node); - if (node_down > lddmc_true) lddmc_unmark_rec(GETNODE(node_down)); - } -} - -/************* - * DOT OUTPUT -*************/ - -static void -lddmc_fprintdot_rec(FILE* out, MDD mdd) -{ - // assert(mdd > lddmc_true); - - // check mark - mddnode_t n = GETNODE(mdd); - if (mddnode_getmark(n)) return; - mddnode_setmark(n, 1); - - // print the node - uint32_t val = mddnode_getvalue(n); - fprintf(out, "%" PRIu64 " [shape=record, label=\"", mdd); - if (mddnode_getcopy(n)) fprintf(out, " *"); - else fprintf(out, "<%u> %u", val, val); - MDD right = mddnode_getright(n); - while (right != lddmc_false) { - mddnode_t n2 = GETNODE(right); - uint32_t val2 = mddnode_getvalue(n2); - fprintf(out, "|<%u> %u", val2, val2); - right = mddnode_getright(n2); - // assert(right != lddmc_true); - } - fprintf(out, "\"];\n"); - - // recurse and print the edges - for (;;) { - MDD down = mddnode_getdown(n); - // assert(down != lddmc_false); - if (down > lddmc_true) { - lddmc_fprintdot_rec(out, down); - if (mddnode_getcopy(n)) { - fprintf(out, "%" PRIu64 ":c -> ", mdd); - } else { - fprintf(out, "%" PRIu64 ":%u -> ", mdd, mddnode_getvalue(n)); - } - if (mddnode_getcopy(GETNODE(down))) { - fprintf(out, "%" PRIu64 ":c [style=solid];\n", down); - } else { - fprintf(out, "%" PRIu64 ":%u [style=solid];\n", down, mddnode_getvalue(GETNODE(down))); - } - } - MDD right = mddnode_getright(n); - if (right == lddmc_false) break; - n = GETNODE(right); - } -} - -static void -lddmc_fprintdot_unmark(MDD mdd) -{ - if (mdd <= lddmc_true) return; - mddnode_t n = GETNODE(mdd); - if (mddnode_getmark(n)) { - mddnode_setmark(n, 0); - for (;;) { - lddmc_fprintdot_unmark(mddnode_getdown(n)); - mdd = mddnode_getright(n); - if (mdd == lddmc_false) return; - n = GETNODE(mdd); - } - } -} - -void -lddmc_fprintdot(FILE *out, MDD mdd) -{ - fprintf(out, "digraph \"DD\" {\n"); - fprintf(out, "graph [dpi = 300];\n"); - fprintf(out, "center = true;\n"); - fprintf(out, "edge [dir = forward];\n"); - - // Special case: false - if (mdd == lddmc_false) { - fprintf(out, "0 [shape=record, label=\"False\"];\n"); - fprintf(out, "}\n"); - return; - } - - // Special case: true - if (mdd == lddmc_true) { - fprintf(out, "1 [shape=record, label=\"True\"];\n"); - fprintf(out, "}\n"); - return; - } - - lddmc_fprintdot_rec(out, mdd); - lddmc_fprintdot_unmark(mdd); - - fprintf(out, "}\n"); -} - -void -lddmc_printdot(MDD mdd) -{ - lddmc_fprintdot(stdout, mdd); -} - -/** - * Some debug stuff - */ -void -lddmc_fprint(FILE *f, MDD mdd) -{ - lddmc_serialize_reset(); - size_t v = lddmc_serialize_add(mdd); - fprintf(f, "%zu,", v); - lddmc_serialize_totext(f); -} - -void -lddmc_print(MDD mdd) -{ - lddmc_fprint(stdout, mdd); -} - -/** - * SERIALIZATION - */ - -struct lddmc_ser { - MDD mdd; - size_t assigned; -}; - -// Define a AVL tree type with prefix 'lddmc_ser' holding -// nodes of struct lddmc_ser with the following compare() function... 
-AVL(lddmc_ser, struct lddmc_ser) -{ - if (left->mdd > right->mdd) return 1; - if (left->mdd < right->mdd) return -1; - return 0; -} - -// Define a AVL tree type with prefix 'lddmc_ser_reversed' holding -// nodes of struct lddmc_ser with the following compare() function... -AVL(lddmc_ser_reversed, struct lddmc_ser) -{ - if (left->assigned > right->assigned) return 1; - if (left->assigned < right->assigned) return -1; - return 0; -} - -// Initially, both sets are empty -static avl_node_t *lddmc_ser_set = NULL; -static avl_node_t *lddmc_ser_reversed_set = NULL; - -// Start counting (assigning numbers to MDDs) at 2 -static volatile size_t lddmc_ser_counter = 2; -static size_t lddmc_ser_done = 0; - -// Given a MDD, assign unique numbers to all nodes -static size_t -lddmc_serialize_assign_rec(MDD mdd) -{ - if (mdd <= lddmc_true) return mdd; - - mddnode_t n = GETNODE(mdd); - - struct lddmc_ser s, *ss; - s.mdd = mdd; - ss = lddmc_ser_search(lddmc_ser_set, &s); - if (ss == NULL) { - // assign dummy value - s.assigned = 0; - ss = lddmc_ser_put(&lddmc_ser_set, &s, NULL); - - // first assign recursively - lddmc_serialize_assign_rec(mddnode_getright(n)); - lddmc_serialize_assign_rec(mddnode_getdown(n)); - - // assign real value - ss->assigned = lddmc_ser_counter++; - - // put a copy in the reversed table - lddmc_ser_reversed_insert(&lddmc_ser_reversed_set, ss); - } - - return ss->assigned; -} - -size_t -lddmc_serialize_add(MDD mdd) -{ - return lddmc_serialize_assign_rec(mdd); -} - -void -lddmc_serialize_reset() -{ - lddmc_ser_free(&lddmc_ser_set); - lddmc_ser_free(&lddmc_ser_reversed_set); - lddmc_ser_counter = 2; - lddmc_ser_done = 0; -} - -size_t -lddmc_serialize_get(MDD mdd) -{ - if (mdd <= lddmc_true) return mdd; - struct lddmc_ser s, *ss; - s.mdd = mdd; - ss = lddmc_ser_search(lddmc_ser_set, &s); - assert(ss != NULL); - return ss->assigned; -} - -MDD -lddmc_serialize_get_reversed(size_t value) -{ - if ((MDD)value <= lddmc_true) return (MDD)value; - struct lddmc_ser s, *ss; - s.assigned = value; - ss = lddmc_ser_reversed_search(lddmc_ser_reversed_set, &s); - assert(ss != NULL); - return ss->mdd; -} - -void -lddmc_serialize_totext(FILE *out) -{ - avl_iter_t *it = lddmc_ser_reversed_iter(lddmc_ser_reversed_set); - struct lddmc_ser *s; - - fprintf(out, "["); - while ((s=lddmc_ser_reversed_iter_next(it))) { - MDD mdd = s->mdd; - mddnode_t n = GETNODE(mdd); - fprintf(out, "(%zu,v=%u,d=%zu,r=%zu),", s->assigned, - mddnode_getvalue(n), - lddmc_serialize_get(mddnode_getdown(n)), - lddmc_serialize_get(mddnode_getright(n))); - } - fprintf(out, "]"); - - lddmc_ser_reversed_iter_free(it); -} - -void -lddmc_serialize_tofile(FILE *out) -{ - size_t count = avl_count(lddmc_ser_reversed_set); - assert(count >= lddmc_ser_done); - assert(count == lddmc_ser_counter-2); - count -= lddmc_ser_done; - fwrite(&count, sizeof(size_t), 1, out); - - struct lddmc_ser *s; - avl_iter_t *it = lddmc_ser_reversed_iter(lddmc_ser_reversed_set); - - /* Skip already written entries */ - size_t index = 0; - while (index < lddmc_ser_done && (s=lddmc_ser_reversed_iter_next(it))) { - assert(s->assigned == index+2); - index++; - } - - while ((s=lddmc_ser_reversed_iter_next(it))) { - assert(s->assigned == index+2); - index++; - - mddnode_t n = GETNODE(s->mdd); - - struct mddnode node; - uint64_t right = lddmc_serialize_get(mddnode_getright(n)); - uint64_t down = lddmc_serialize_get(mddnode_getdown(n)); - if (mddnode_getcopy(n)) mddnode_makecopy(&node, right, down); - else mddnode_make(&node, mddnode_getvalue(n), right, down); - - 
assert(right <= index); - assert(down <= index); - - fwrite(&node, sizeof(struct mddnode), 1, out); - } - - lddmc_ser_done = lddmc_ser_counter-2; - lddmc_ser_reversed_iter_free(it); -} - -void -lddmc_serialize_fromfile(FILE *in) -{ - size_t count, i; - if (fread(&count, sizeof(size_t), 1, in) != 1) { - // TODO FIXME return error - printf("sylvan_serialize_fromfile: file format error, giving up\n"); - exit(-1); - } - - for (i=1; i<=count; i++) { - struct mddnode node; - if (fread(&node, sizeof(struct mddnode), 1, in) != 1) { - // TODO FIXME return error - printf("sylvan_serialize_fromfile: file format error, giving up\n"); - exit(-1); - } - - assert(mddnode_getright(&node) <= lddmc_ser_done+1); - assert(mddnode_getdown(&node) <= lddmc_ser_done+1); - - MDD right = lddmc_serialize_get_reversed(mddnode_getright(&node)); - MDD down = lddmc_serialize_get_reversed(mddnode_getdown(&node)); - - struct lddmc_ser s; - if (mddnode_getcopy(&node)) s.mdd = lddmc_make_copynode(down, right); - else s.mdd = lddmc_makenode(mddnode_getvalue(&node), down, right); - s.assigned = lddmc_ser_done+2; // starts at 0 but we want 2-based... - lddmc_ser_done++; - - lddmc_ser_insert(&lddmc_ser_set, &s); - lddmc_ser_reversed_insert(&lddmc_ser_reversed_set, &s); - } -} - -static void -lddmc_sha2_rec(MDD mdd, SHA256_CTX *ctx) -{ - if (mdd <= lddmc_true) { - SHA256_Update(ctx, (void*)&mdd, sizeof(uint64_t)); - return; - } - - mddnode_t node = GETNODE(mdd); - if (lddmc_mark(node)) { - uint32_t val = mddnode_getvalue(node); - SHA256_Update(ctx, (void*)&val, sizeof(uint32_t)); - lddmc_sha2_rec(mddnode_getdown(node), ctx); - lddmc_sha2_rec(mddnode_getright(node), ctx); - } -} - -void -lddmc_printsha(MDD mdd) -{ - lddmc_fprintsha(stdout, mdd); -} - -void -lddmc_fprintsha(FILE *out, MDD mdd) -{ - char buf[80]; - lddmc_getsha(mdd, buf); - fprintf(out, "%s", buf); -} - -void -lddmc_getsha(MDD mdd, char *target) -{ - SHA256_CTX ctx; - SHA256_Init(&ctx); - lddmc_sha2_rec(mdd, &ctx); - if (mdd > lddmc_true) lddmc_unmark_rec(GETNODE(mdd)); - SHA256_End(&ctx, target); -} - -#ifndef NDEBUG -size_t -lddmc_test_ismdd(MDD mdd) -{ - if (mdd == lddmc_true) return 1; - if (mdd == lddmc_false) return 0; - - int first = 1; - size_t depth = 0; - - if (mdd != lddmc_false) { - mddnode_t n = GETNODE(mdd); - if (mddnode_getcopy(n)) { - mdd = mddnode_getright(n); - depth = lddmc_test_ismdd(mddnode_getdown(n)); - assert(depth >= 1); - } - } - - uint32_t value = 0; - while (mdd != lddmc_false) { - assert(llmsset_is_marked(nodes, mdd)); - - mddnode_t n = GETNODE(mdd); - uint32_t next_value = mddnode_getvalue(n); - assert(mddnode_getcopy(n) == 0); - if (first) { - first = 0; - depth = lddmc_test_ismdd(mddnode_getdown(n)); - assert(depth >= 1); - } else { - assert(value < next_value); - assert(depth == lddmc_test_ismdd(mddnode_getdown(n))); - } - - value = next_value; - mdd = mddnode_getright(n); - } - - return 1 + depth; -} -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_ldd.h b/resources/3rdparty/sylvan/src/sylvan_ldd.h deleted file mode 100644 index f104309b2..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_ldd.h +++ /dev/null @@ -1,288 +0,0 @@ -/* - * Copyright 2011-2014 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* Do not include this file directly. Instead, include sylvan.h */ - -#ifndef SYLVAN_LDD_H -#define SYLVAN_LDD_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - - -typedef uint64_t MDD; // Note: low 40 bits only - -#define lddmc_false ((MDD)0) -#define lddmc_true ((MDD)1) - -/* Initialize LDD functionality */ -void sylvan_init_ldd(); - -/* Primitives */ -MDD lddmc_makenode(uint32_t value, MDD ifeq, MDD ifneq); -MDD lddmc_extendnode(MDD mdd, uint32_t value, MDD ifeq); -uint32_t lddmc_getvalue(MDD mdd); -MDD lddmc_getdown(MDD mdd); -MDD lddmc_getright(MDD mdd); -MDD lddmc_follow(MDD mdd, uint32_t value); - -/** - * Copy nodes in relations. - * A copy node represents 'read x, then write x' for every x. - * In a read-write relation, use copy nodes twice, once on read level, once on write level. - * Copy nodes are only supported by relprod, relprev and union. - */ - -/* Primitive for special 'copy node' (for relprod/relprev) */ -MDD lddmc_make_copynode(MDD ifeq, MDD ifneq); -int lddmc_iscopy(MDD mdd); -MDD lddmc_followcopy(MDD mdd); - -/* Add or remove external reference to MDD */ -MDD lddmc_ref(MDD a); -void lddmc_deref(MDD a); - -/* For use in custom mark functions */ -VOID_TASK_DECL_1(lddmc_gc_mark_rec, MDD) -#define lddmc_gc_mark_rec(mdd) CALL(lddmc_gc_mark_rec, mdd) - -/* Return the number of external references */ -size_t lddmc_count_refs(); - -/* Mark MDD for "notify on dead" */ -#define lddmc_notify_ondead(mdd) llmsset_notify_ondead(nodes, mdd) - -/* Sanity check - returns depth of MDD including 'true' terminal or 0 for empty set */ -#ifndef NDEBUG -size_t lddmc_test_ismdd(MDD mdd); -#endif - -/* Operations for model checking */ -TASK_DECL_2(MDD, lddmc_union, MDD, MDD); -#define lddmc_union(a, b) CALL(lddmc_union, a, b) - -TASK_DECL_2(MDD, lddmc_minus, MDD, MDD); -#define lddmc_minus(a, b) CALL(lddmc_minus, a, b) - -TASK_DECL_3(MDD, lddmc_zip, MDD, MDD, MDD*); -#define lddmc_zip(a, b, res) CALL(lddmc_zip, a, b, res) - -TASK_DECL_2(MDD, lddmc_intersect, MDD, MDD); -#define lddmc_intersect(a, b) CALL(lddmc_intersect, a, b) - -TASK_DECL_3(MDD, lddmc_match, MDD, MDD, MDD); -#define lddmc_match(a, b, proj) CALL(lddmc_match, a, b, proj) - -MDD lddmc_union_cube(MDD a, uint32_t* values, size_t count); -int lddmc_member_cube(MDD a, uint32_t* values, size_t count); -MDD lddmc_cube(uint32_t* values, size_t count); - -MDD lddmc_union_cube_copy(MDD a, uint32_t* values, int* copy, size_t count); -int lddmc_member_cube_copy(MDD a, uint32_t* values, int* copy, size_t count); -MDD lddmc_cube_copy(uint32_t* values, int* copy, size_t count); - -TASK_DECL_3(MDD, lddmc_relprod, MDD, MDD, MDD); -#define lddmc_relprod(a, b, proj) CALL(lddmc_relprod, a, b, proj) - -TASK_DECL_4(MDD, lddmc_relprod_union, MDD, MDD, MDD, MDD); -#define lddmc_relprod_union(a, b, meta, un) CALL(lddmc_relprod_union, a, b, meta, un) - -/** - * Calculate all predecessors to a in uni according to rel[proj] - * follows the same semantics as relprod - * i.e. 
0 (not in rel), 1 (read+write), 2 (read), 3 (write), -1 (end; rest=0) - */ -TASK_DECL_4(MDD, lddmc_relprev, MDD, MDD, MDD, MDD); -#define lddmc_relprev(a, rel, proj, uni) CALL(lddmc_relprev, a, rel, proj, uni) - -// so: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep) -TASK_DECL_2(MDD, lddmc_project, MDD, MDD); -#define lddmc_project(mdd, proj) CALL(lddmc_project, mdd, proj) - -TASK_DECL_3(MDD, lddmc_project_minus, MDD, MDD, MDD); -#define lddmc_project_minus(mdd, proj, avoid) CALL(lddmc_project_minus, mdd, proj, avoid) - -TASK_DECL_4(MDD, lddmc_join, MDD, MDD, MDD, MDD); -#define lddmc_join(a, b, a_proj, b_proj) CALL(lddmc_join, a, b, a_proj, b_proj) - -/* Write a DOT representation */ -void lddmc_printdot(MDD mdd); -void lddmc_fprintdot(FILE *out, MDD mdd); - -void lddmc_fprint(FILE *out, MDD mdd); -void lddmc_print(MDD mdd); - -void lddmc_printsha(MDD mdd); -void lddmc_fprintsha(FILE *out, MDD mdd); -void lddmc_getsha(MDD mdd, char *target); // at least 65 bytes... - -/** - * Calculate number of satisfying variable assignments. - * The set of variables must be >= the support of the MDD. - * (i.e. all variables in the MDD must be in variables) - * - * The cached version uses the operation cache, but is limited to 64-bit floating point numbers. - */ - -typedef double lddmc_satcount_double_t; -// if this line below gives an error, modify the above typedef until fixed ;) -typedef char __lddmc_check_float_is_8_bytes[(sizeof(lddmc_satcount_double_t) == sizeof(uint64_t))?1:-1]; - -TASK_DECL_1(lddmc_satcount_double_t, lddmc_satcount_cached, MDD); -#define lddmc_satcount_cached(mdd) CALL(lddmc_satcount_cached, mdd) - -TASK_DECL_1(long double, lddmc_satcount, MDD); -#define lddmc_satcount(mdd) CALL(lddmc_satcount, mdd) - -/** - * A callback for enumerating functions like sat_all_par, collect and match - * Example: - * TASK_3(void*, my_function, uint32_t*, values, size_t, count, void*, context) ... - * For collect, use: - * TASK_3(MDD, ...) - */ -LACE_TYPEDEF_CB(void, lddmc_enum_cb, uint32_t*, size_t, void*); -LACE_TYPEDEF_CB(MDD, lddmc_collect_cb, uint32_t*, size_t, void*); - -VOID_TASK_DECL_5(lddmc_sat_all_par, MDD, lddmc_enum_cb, void*, uint32_t*, size_t); -#define lddmc_sat_all_par(mdd, cb, context) CALL(lddmc_sat_all_par, mdd, cb, context, 0, 0) - -VOID_TASK_DECL_3(lddmc_sat_all_nopar, MDD, lddmc_enum_cb, void*); -#define lddmc_sat_all_nopar(mdd, cb, context) CALL(lddmc_sat_all_nopar, mdd, cb, context) - -TASK_DECL_5(MDD, lddmc_collect, MDD, lddmc_collect_cb, void*, uint32_t*, size_t); -#define lddmc_collect(mdd, cb, context) CALL(lddmc_collect, mdd, cb, context, 0, 0) - -VOID_TASK_DECL_5(lddmc_match_sat_par, MDD, MDD, MDD, lddmc_enum_cb, void*); -#define lddmc_match_sat_par(mdd, match, proj, cb, context) CALL(lddmc_match_sat_par, mdd, match, proj, cb, context) - -int lddmc_sat_one(MDD mdd, uint32_t *values, size_t count); -MDD lddmc_sat_one_mdd(MDD mdd); -#define lddmc_pick_cube lddmc_sat_one_mdd - -/** - * Callback functions for visiting nodes. - * lddmc_visit_seq sequentially visits nodes, down first, then right. 
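/*
 * Illustrative sketch (not part of the original sources): one way the
 * lddmc_enum_cb documented above can be instantiated and handed to
 * lddmc_sat_all_nopar. Assumes an already initialised Lace/Sylvan instance
 * and that the callback is defined with the Lace VOID_TASK_3 macro, as the
 * typedef above suggests; all names here are hypothetical.
 */
#include <stdio.h>
#include <sylvan.h>

/* Callback: print one satisfying tuple of the LDD set. */
VOID_TASK_3(print_tuple, uint32_t*, values, size_t, count, void*, context)
{
    for (size_t i = 0; i < count; i++) printf("%u ", values[i]);
    printf("\n");
    (void)context;
}

/* Enumerate every tuple in a set, e.g. one built with lddmc_cube. */
static void print_set(MDD set)
{
    LACE_ME;
    lddmc_sat_all_nopar(set, TASK(print_tuple), NULL);
}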
- * lddmc_visit_par visits nodes in parallel (down || right) - */ -LACE_TYPEDEF_CB(int, lddmc_visit_pre_cb, MDD, void*); // int pre(MDD, context) -LACE_TYPEDEF_CB(void, lddmc_visit_post_cb, MDD, void*); // void post(MDD, context) -LACE_TYPEDEF_CB(void, lddmc_visit_init_context_cb, void*, void*, int); // void init_context(context, parent, is_down) - -typedef struct lddmc_visit_node_callbacks { - lddmc_visit_pre_cb lddmc_visit_pre; - lddmc_visit_post_cb lddmc_visit_post; - lddmc_visit_init_context_cb lddmc_visit_init_context; -} lddmc_visit_callbacks_t; - -VOID_TASK_DECL_4(lddmc_visit_par, MDD, lddmc_visit_callbacks_t*, size_t, void*); -#define lddmc_visit_par(mdd, cbs, ctx_size, context) CALL(lddmc_visit_par, mdd, cbs, ctx_size, context); - -VOID_TASK_DECL_4(lddmc_visit_seq, MDD, lddmc_visit_callbacks_t*, size_t, void*); -#define lddmc_visit_seq(mdd, cbs, ctx_size, context) CALL(lddmc_visit_seq, mdd, cbs, ctx_size, context); - -size_t lddmc_nodecount(MDD mdd); -void lddmc_nodecount_levels(MDD mdd, size_t *variables); - -/** - * Functional composition - * For every node at depth , call function cb (MDD -> MDD). - * and replace the node by the result of the function - */ -LACE_TYPEDEF_CB(MDD, lddmc_compose_cb, MDD, void*); -TASK_DECL_4(MDD, lddmc_compose, MDD, lddmc_compose_cb, void*, int); -#define lddmc_compose(mdd, cb, context, depth) CALL(lddmc_compose, mdd, cb, context, depth) - -/** - * SAVING: - * use lddmc_serialize_add on every MDD you want to store - * use lddmc_serialize_get to retrieve the key of every stored MDD - * use lddmc_serialize_tofile - * - * LOADING: - * use lddmc_serialize_fromfile (implies lddmc_serialize_reset) - * use lddmc_serialize_get_reversed for every key - * - * MISC: - * use lddmc_serialize_reset to free all allocated structures - * use lddmc_serialize_totext to write a textual list of tuples of all MDDs. - * format: [(,,,,),...] 
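/*
 * Illustrative sketch (not part of the original sources): the SAVING /
 * LOADING recipe described above, applied to a single root MDD. Assumes an
 * open FILE* and an initialised Sylvan instance; error handling is omitted
 * and the helper names are hypothetical.
 */
#include <stdio.h>
#include <sylvan.h>

static void save_mdd(FILE *f, MDD root)
{
    lddmc_serialize_reset();                   /* start with empty tables        */
    size_t key = lddmc_serialize_add(root);    /* number all reachable nodes     */
    fwrite(&key, sizeof(key), 1, f);           /* remember the root's key        */
    lddmc_serialize_tofile(f);                 /* write the numbered node table  */
}

static MDD load_mdd(FILE *f)
{
    size_t key;
    if (fread(&key, sizeof(key), 1, f) != 1) return lddmc_false;
    lddmc_serialize_fromfile(f);               /* implies lddmc_serialize_reset  */
    return lddmc_serialize_get_reversed(key);  /* map the stored key back to MDD */
}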
- * - * for the old lddmc_print functions, use lddmc_serialize_totext - */ -size_t lddmc_serialize_add(MDD mdd); -size_t lddmc_serialize_get(MDD mdd); -MDD lddmc_serialize_get_reversed(size_t value); -void lddmc_serialize_reset(); -void lddmc_serialize_totext(FILE *out); -void lddmc_serialize_tofile(FILE *out); -void lddmc_serialize_fromfile(FILE *in); - -/* Infrastructure for internal markings */ -typedef struct lddmc_refs_internal -{ - size_t r_size, r_count; - size_t s_size, s_count; - MDD *results; - Task **spawns; -} *lddmc_refs_internal_t; - -extern DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); - -static inline MDD -lddmc_refs_push(MDD ldd) -{ - LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); - if (lddmc_refs_key->r_count >= lddmc_refs_key->r_size) { - lddmc_refs_key->r_size *= 2; - lddmc_refs_key->results = (MDD*)realloc(lddmc_refs_key->results, sizeof(MDD) * lddmc_refs_key->r_size); - } - lddmc_refs_key->results[lddmc_refs_key->r_count++] = ldd; - return ldd; -} - -static inline void -lddmc_refs_pop(int amount) -{ - LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); - lddmc_refs_key->r_count-=amount; -} - -static inline void -lddmc_refs_spawn(Task *t) -{ - LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); - if (lddmc_refs_key->s_count >= lddmc_refs_key->s_size) { - lddmc_refs_key->s_size *= 2; - lddmc_refs_key->spawns = (Task**)realloc(lddmc_refs_key->spawns, sizeof(Task*) * lddmc_refs_key->s_size); - } - lddmc_refs_key->spawns[lddmc_refs_key->s_count++] = t; -} - -static inline MDD -lddmc_refs_sync(MDD result) -{ - LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); - lddmc_refs_key->s_count--; - return result; -} - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd.c b/resources/3rdparty/sylvan/src/sylvan_mtbdd.c deleted file mode 100644 index 87db4c84b..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_mtbdd.c +++ /dev/null @@ -1,2543 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* Primitives */ -int -mtbdd_isleaf(MTBDD bdd) -{ - if (bdd == mtbdd_true || bdd == mtbdd_false) return 1; - return mtbddnode_isleaf(GETNODE(bdd)); -} - -// for nodes -uint32_t -mtbdd_getvar(MTBDD node) -{ - return mtbddnode_getvariable(GETNODE(node)); -} - -MTBDD -mtbdd_getlow(MTBDD mtbdd) -{ - return node_getlow(mtbdd, GETNODE(mtbdd)); -} - -MTBDD -mtbdd_gethigh(MTBDD mtbdd) -{ - return node_gethigh(mtbdd, GETNODE(mtbdd)); -} - -// for leaves -uint32_t -mtbdd_gettype(MTBDD leaf) -{ - return mtbddnode_gettype(GETNODE(leaf)); -} - -uint64_t -mtbdd_getvalue(MTBDD leaf) -{ - return mtbddnode_getvalue(GETNODE(leaf)); -} - -// for leaf type 0 (integer) -int64_t -mtbdd_getint64(MTBDD leaf) -{ - uint64_t value = mtbdd_getvalue(leaf); - return *(int64_t*)&value; -} - -// for leaf type 1 (double) -double -mtbdd_getdouble(MTBDD leaf) -{ - uint64_t value = mtbdd_getvalue(leaf); - return *(double*)&value; -} - -/** - * Implementation of garbage collection - */ - -/* Recursively mark MDD nodes as 'in use' */ -VOID_TASK_IMPL_1(mtbdd_gc_mark_rec, MDD, mtbdd) -{ - if (mtbdd == mtbdd_true) return; - if (mtbdd == mtbdd_false) return; - - if (llmsset_mark(nodes, mtbdd&(~mtbdd_complement))) { - mtbddnode_t n = GETNODE(mtbdd); - if (!mtbddnode_isleaf(n)) { - SPAWN(mtbdd_gc_mark_rec, mtbddnode_getlow(n)); - CALL(mtbdd_gc_mark_rec, mtbddnode_gethigh(n)); - SYNC(mtbdd_gc_mark_rec); - } - } -} - -/** - * External references - */ - -refs_table_t mtbdd_refs; -refs_table_t mtbdd_protected; -static int mtbdd_protected_created = 0; - -MDD -mtbdd_ref(MDD a) -{ - if (a == mtbdd_true || a == mtbdd_false) return a; - refs_up(&mtbdd_refs, a); - return a; -} - -void -mtbdd_deref(MDD a) -{ - if (a == mtbdd_true || a == mtbdd_false) return; - refs_down(&mtbdd_refs, a); -} - -size_t -mtbdd_count_refs() -{ - return refs_count(&mtbdd_refs); -} - -void -mtbdd_protect(MTBDD *a) -{ - if (!mtbdd_protected_created) { - // In C++, sometimes mtbdd_protect is called before Sylvan is initialized. Just create a table. 
- protect_create(&mtbdd_protected, 4096); - mtbdd_protected_created = 1; - } - protect_up(&mtbdd_protected, (size_t)a); -} - -void -mtbdd_unprotect(MTBDD *a) -{ - protect_down(&mtbdd_protected, (size_t)a); -} - -size_t -mtbdd_count_protected() -{ - return protect_count(&mtbdd_protected); -} - -/* Called during garbage collection */ -VOID_TASK_0(mtbdd_gc_mark_external_refs) -{ - // iterate through refs hash table, mark all found - size_t count=0; - uint64_t *it = refs_iter(&mtbdd_refs, 0, mtbdd_refs.refs_size); - while (it != NULL) { - SPAWN(mtbdd_gc_mark_rec, refs_next(&mtbdd_refs, &it, mtbdd_refs.refs_size)); - count++; - } - while (count--) { - SYNC(mtbdd_gc_mark_rec); - } -} - -VOID_TASK_0(mtbdd_gc_mark_protected) -{ - // iterate through refs hash table, mark all found - size_t count=0; - uint64_t *it = protect_iter(&mtbdd_protected, 0, mtbdd_protected.refs_size); - while (it != NULL) { - BDD *to_mark = (BDD*)protect_next(&mtbdd_protected, &it, mtbdd_protected.refs_size); - SPAWN(mtbdd_gc_mark_rec, *to_mark); - count++; - } - while (count--) { - SYNC(mtbdd_gc_mark_rec); - } -} - -/* Infrastructure for internal markings */ -DECLARE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); - -VOID_TASK_0(mtbdd_refs_mark_task) -{ - LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); - size_t i, j=0; - for (i=0; ir_count; i++) { - if (j >= 40) { - while (j--) SYNC(mtbdd_gc_mark_rec); - j=0; - } - SPAWN(mtbdd_gc_mark_rec, mtbdd_refs_key->results[i]); - j++; - } - for (i=0; is_count; i++) { - Task *t = mtbdd_refs_key->spawns[i]; - if (!TASK_IS_STOLEN(t)) break; - if (TASK_IS_COMPLETED(t)) { - if (j >= 40) { - while (j--) SYNC(mtbdd_gc_mark_rec); - j=0; - } - SPAWN(mtbdd_gc_mark_rec, *(BDD*)TASK_RESULT(t)); - j++; - } - } - while (j--) SYNC(mtbdd_gc_mark_rec); -} - -VOID_TASK_0(mtbdd_refs_mark) -{ - TOGETHER(mtbdd_refs_mark_task); -} - -VOID_TASK_0(mtbdd_refs_init_task) -{ - mtbdd_refs_internal_t s = (mtbdd_refs_internal_t)malloc(sizeof(struct mtbdd_refs_internal)); - s->r_size = 128; - s->r_count = 0; - s->s_size = 128; - s->s_count = 0; - s->results = (BDD*)malloc(sizeof(BDD) * 128); - s->spawns = (Task**)malloc(sizeof(Task*) * 128); - SET_THREAD_LOCAL(mtbdd_refs_key, s); -} - -VOID_TASK_0(mtbdd_refs_init) -{ - INIT_THREAD_LOCAL(mtbdd_refs_key); - TOGETHER(mtbdd_refs_init_task); - sylvan_gc_add_mark(10, TASK(mtbdd_refs_mark)); -} - -/** - * Handling of custom leaves "registry" - */ - -typedef struct -{ - mtbdd_hash_cb hash_cb; - mtbdd_equals_cb equals_cb; - mtbdd_create_cb create_cb; - mtbdd_destroy_cb destroy_cb; -} customleaf_t; - -static customleaf_t *cl_registry; -static size_t cl_registry_count; - -static void -_mtbdd_create_cb(uint64_t *a, uint64_t *b) -{ - // for leaf - if ((*a & 0x4000000000000000) == 0) return; // huh? - uint32_t type = *a & 0xffffffff; - if (type >= cl_registry_count) return; // not in registry - customleaf_t *c = cl_registry + type; - if (c->create_cb == NULL) return; // not in registry - c->create_cb(b); -} - -static void -_mtbdd_destroy_cb(uint64_t a, uint64_t b) -{ - // for leaf - if ((a & 0x4000000000000000) == 0) return; // huh? 
- uint32_t type = a & 0xffffffff; - if (type >= cl_registry_count) return; // not in registry - customleaf_t *c = cl_registry + type; - if (c->destroy_cb == NULL) return; // not in registry - c->destroy_cb(b); -} - -static uint64_t -_mtbdd_hash_cb(uint64_t a, uint64_t b, uint64_t seed) -{ - // for leaf - if ((a & 0x4000000000000000) == 0) return llmsset_hash(a, b, seed); - uint32_t type = a & 0xffffffff; - if (type >= cl_registry_count) return llmsset_hash(a, b, seed); - customleaf_t *c = cl_registry + type; - if (c->hash_cb == NULL) return llmsset_hash(a, b, seed); - return c->hash_cb(b, seed ^ a); -} - -static int -_mtbdd_equals_cb(uint64_t a, uint64_t b, uint64_t aa, uint64_t bb) -{ - // for leaf - if (a != aa) return 0; - if ((a & 0x4000000000000000) == 0) return b == bb ? 1 : 0; - if ((aa & 0x4000000000000000) == 0) return b == bb ? 1 : 0; - uint32_t type = a & 0xffffffff; - if (type >= cl_registry_count) return b == bb ? 1 : 0; - customleaf_t *c = cl_registry + type; - if (c->equals_cb == NULL) return b == bb ? 1 : 0; - return c->equals_cb(b, bb); -} - -uint32_t -mtbdd_register_custom_leaf(mtbdd_hash_cb hash_cb, mtbdd_equals_cb equals_cb, mtbdd_create_cb create_cb, mtbdd_destroy_cb destroy_cb) -{ - uint32_t type = cl_registry_count; - if (type == 0) type = 3; - if (cl_registry == NULL) { - cl_registry = (customleaf_t *)calloc(sizeof(customleaf_t), (type+1)); - cl_registry_count = type+1; - llmsset_set_custom(nodes, _mtbdd_hash_cb, _mtbdd_equals_cb, _mtbdd_create_cb, _mtbdd_destroy_cb); - } else if (cl_registry_count <= type) { - cl_registry = (customleaf_t *)realloc(cl_registry, sizeof(customleaf_t) * (type+1)); - memset(cl_registry + cl_registry_count, 0, sizeof(customleaf_t) * (type+1-cl_registry_count)); - cl_registry_count = type+1; - } - customleaf_t *c = cl_registry + type; - c->hash_cb = hash_cb; - c->equals_cb = equals_cb; - c->create_cb = create_cb; - c->destroy_cb = destroy_cb; - return type; -} - -/** - * Initialize and quit functions - */ - -static void -mtbdd_quit() -{ - refs_free(&mtbdd_refs); - if (mtbdd_protected_created) { - protect_free(&mtbdd_protected); - mtbdd_protected_created = 0; - } - if (cl_registry != NULL) { - free(cl_registry); - cl_registry = NULL; - cl_registry_count = 0; - } -} - -void -sylvan_init_mtbdd() -{ - sylvan_register_quit(mtbdd_quit); - sylvan_gc_add_mark(10, TASK(mtbdd_gc_mark_external_refs)); - sylvan_gc_add_mark(10, TASK(mtbdd_gc_mark_protected)); - - // Sanity check - if (sizeof(struct mtbddnode) != 16) { - fprintf(stderr, "Invalid size of mtbdd nodes: %ld\n", sizeof(struct mtbddnode)); - exit(1); - } - - refs_create(&mtbdd_refs, 1024); - if (!mtbdd_protected_created) { - protect_create(&mtbdd_protected, 4096); - mtbdd_protected_created = 1; - } - - LACE_ME; - CALL(mtbdd_refs_init); - - cl_registry = NULL; - cl_registry_count = 0; -} - -/** - * Primitives - */ -MTBDD -mtbdd_makeleaf(uint32_t type, uint64_t value) -{ - struct mtbddnode n; - mtbddnode_makeleaf(&n, type, value); - - int custom = type < cl_registry_count && cl_registry[type].hash_cb != NULL ? 1 : 0; - - int created; - uint64_t index = custom ? llmsset_lookupc(nodes, n.a, n.b, &created) : llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - LACE_ME; - - sylvan_gc(); - - index = custom ? 
llmsset_lookupc(nodes, n.a, n.b, &created) : llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - fprintf(stderr, "BDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); - exit(1); - } - } - - return (MTBDD)index; -} - -MTBDD -mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high) -{ - if (low == high) return low; - - // Normalization to keep canonicity - // low will have no mark - - struct mtbddnode n; - int mark, created; - - if (MTBDD_HASMARK(low)) { - mark = 1; - low = MTBDD_TOGGLEMARK(low); - high = MTBDD_TOGGLEMARK(high); - } else { - mark = 0; - } - - mtbddnode_makenode(&n, var, low, high); - - MTBDD result; - uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - LACE_ME; - - mtbdd_refs_push(low); - mtbdd_refs_push(high); - sylvan_gc(); - mtbdd_refs_pop(2); - - index = llmsset_lookup(nodes, n.a, n.b, &created); - if (index == 0) { - fprintf(stderr, "BDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); - exit(1); - } - } - - result = index; - return mark ? result | mtbdd_complement : result; -} - -/* Operations */ - -/** - * Calculate greatest common divisor - * Source: http://lemire.me/blog/archives/2013/12/26/fastest-way-to-compute-the-greatest-common-divisor/ - */ -uint32_t -gcd(uint32_t u, uint32_t v) -{ - int shift; - if (u == 0) return v; - if (v == 0) return u; - shift = __builtin_ctz(u | v); - u >>= __builtin_ctz(u); - do { - v >>= __builtin_ctz(v); - if (u > v) { - unsigned int t = v; - v = u; - u = t; - } - v = v - u; - } while (v != 0); - return u << shift; -} - -/** - * Create leaves of unsigned/signed integers and doubles - */ - -MTBDD -mtbdd_int64(int64_t value) -{ - return mtbdd_makeleaf(0, *(uint64_t*)&value); -} - -MTBDD -mtbdd_double(double value) -{ - return mtbdd_makeleaf(1, *(uint64_t*)&value); -} - -MTBDD -mtbdd_fraction(int64_t nom, uint64_t denom) -{ - if (nom == 0) return mtbdd_makeleaf(2, 1); - uint32_t c = gcd(nom < 0 ? -nom : nom, denom); - nom /= c; - denom /= c; - if (nom > 2147483647 || nom < -2147483647 || denom > 4294967295) fprintf(stderr, "mtbdd_fraction: fraction overflow\n"); - return mtbdd_makeleaf(2, (nom<<32)|denom); -} - -/** - * Create the cube of variables in arr. - */ -MTBDD -mtbdd_fromarray(uint32_t* arr, size_t length) -{ - if (length == 0) return mtbdd_true; - else if (length == 1) return mtbdd_makenode(*arr, mtbdd_false, mtbdd_true); - else return mtbdd_makenode(*arr, mtbdd_false, mtbdd_fromarray(arr+1, length-1)); -} - -/** - * Create a MTBDD cube representing the conjunction of variables in their positive or negative - * form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any). 
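/*
 * Illustrative sketch (not part of the original sources): building a cube
 * over variables 0, 2 and 4 with the encoding described above (0 = negative
 * literal, 1 = positive literal, 2 = don't care). Assumes an initialised
 * Sylvan instance; the function name is hypothetical.
 */
#include <stdint.h>
#include <sylvan.h>

static MTBDD example_cube(void)
{
    uint32_t vars_arr[3] = { 0, 2, 4 };
    MTBDD vars = mtbdd_fromarray(vars_arr, 3);  /* variable set {0, 2, 4}    */
    uint8_t cube[3] = { 1, 0, 2 };              /* x0 = 1, x2 = 0, x4 = any  */
    /* Assignments with x0=1 and x2=0 map to the leaf 1.0, the rest to false. */
    return mtbdd_cube(vars, cube, mtbdd_double(1.0));
}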
- * Use cube[idx]==3 for "s=s'" in interleaved variables (matches with next variable) - * is the cube of variables - */ -MTBDD -mtbdd_cube(MTBDD variables, uint8_t *cube, MTBDD terminal) -{ - if (variables == mtbdd_true) return terminal; - mtbddnode_t n = GETNODE(variables); - - BDD result; - switch (*cube) { - case 0: - result = mtbdd_cube(node_gethigh(variables, n), cube+1, terminal); - result = mtbdd_makenode(mtbddnode_getvariable(n), result, mtbdd_false); - return result; - case 1: - result = mtbdd_cube(node_gethigh(variables, n), cube+1, terminal); - result = mtbdd_makenode(mtbddnode_getvariable(n), mtbdd_false, result); - return result; - case 2: - return mtbdd_cube(node_gethigh(variables, n), cube+1, terminal); - case 3: - { - MTBDD variables2 = node_gethigh(variables, n); - mtbddnode_t n2 = GETNODE(variables2); - uint32_t var2 = mtbddnode_getvariable(n2); - result = mtbdd_cube(node_gethigh(variables2, n2), cube+2, terminal); - BDD low = mtbdd_makenode(var2, result, mtbdd_false); - mtbdd_refs_push(low); - BDD high = mtbdd_makenode(var2, mtbdd_false, result); - mtbdd_refs_pop(1); - result = mtbdd_makenode(mtbddnode_getvariable(n), low, high); - return result; - } - default: - return mtbdd_false; // ? - } -} - -/** - * Same as mtbdd_cube, but also performs "or" with existing MTBDD, - * effectively adding an item to the set - */ -TASK_IMPL_4(MTBDD, mtbdd_union_cube, MTBDD, mtbdd, MTBDD, vars, uint8_t*, cube, MTBDD, terminal) -{ - /* Terminal cases */ - if (mtbdd == terminal) return terminal; - if (mtbdd == mtbdd_false) return mtbdd_cube(vars, cube, terminal); - if (vars == mtbdd_true) return terminal; - - sylvan_gc_test(); - - mtbddnode_t nv = GETNODE(vars); - uint32_t v = mtbddnode_getvariable(nv); - - mtbddnode_t na = GETNODE(mtbdd); - uint32_t va = mtbddnode_getvariable(na); - - if (va < v) { - MTBDD low = node_getlow(mtbdd, na); - MTBDD high = node_gethigh(mtbdd, na); - SPAWN(mtbdd_union_cube, high, vars, cube, terminal); - BDD new_low = mtbdd_union_cube(low, vars, cube, terminal); - mtbdd_refs_push(new_low); - BDD new_high = SYNC(mtbdd_union_cube); - mtbdd_refs_pop(1); - if (new_low != low || new_high != high) return mtbdd_makenode(va, new_low, new_high); - else return mtbdd; - } else if (va == v) { - MTBDD low = node_getlow(mtbdd, na); - MTBDD high = node_gethigh(mtbdd, na); - switch (*cube) { - case 0: - { - MTBDD new_low = mtbdd_union_cube(low, node_gethigh(vars, nv), cube+1, terminal); - if (new_low != low) return mtbdd_makenode(v, new_low, high); - else return mtbdd; - } - case 1: - { - MTBDD new_high = mtbdd_union_cube(high, node_gethigh(vars, nv), cube+1, terminal); - if (new_high != high) return mtbdd_makenode(v, low, new_high); - return mtbdd; - } - case 2: - { - SPAWN(mtbdd_union_cube, high, node_gethigh(vars, nv), cube+1, terminal); - MTBDD new_low = mtbdd_union_cube(low, node_gethigh(vars, nv), cube+1, terminal); - mtbdd_refs_push(new_low); - MTBDD new_high = SYNC(mtbdd_union_cube); - mtbdd_refs_pop(1); - if (new_low != low || new_high != high) return mtbdd_makenode(v, new_low, new_high); - return mtbdd; - } - case 3: - { - return mtbdd_false; // currently not implemented - } - default: - return mtbdd_false; - } - } else /* va > v */ { - switch (*cube) { - case 0: - { - MTBDD new_low = mtbdd_union_cube(mtbdd, node_gethigh(vars, nv), cube+1, terminal); - return mtbdd_makenode(v, new_low, mtbdd_false); - } - case 1: - { - MTBDD new_high = mtbdd_union_cube(mtbdd, node_gethigh(vars, nv), cube+1, terminal); - return mtbdd_makenode(v, mtbdd_false, new_high); - } - case 2: - 
{ - SPAWN(mtbdd_union_cube, mtbdd, node_gethigh(vars, nv), cube+1, terminal); - MTBDD new_low = mtbdd_union_cube(mtbdd, node_gethigh(vars, nv), cube+1, terminal); - mtbdd_refs_push(new_low); - MTBDD new_high = SYNC(mtbdd_union_cube); - mtbdd_refs_pop(1); - return mtbdd_makenode(v, new_low, new_high); - } - case 3: - { - return mtbdd_false; // currently not implemented - } - default: - return mtbdd_false; - } - } -} - -/** - * Apply a binary operation to and . - */ -TASK_IMPL_3(MTBDD, mtbdd_apply, MTBDD, a, MTBDD, b, mtbdd_apply_op, op) -{ - /* Check terminal case */ - MTBDD result = WRAP(op, &a, &b); - if (result != mtbdd_invalid) return result; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - if (cache_get3(CACHE_MTBDD_APPLY, a, b, (size_t)op, &result)) return result; - - /* Get top variable */ - int la = mtbdd_isleaf(a); - int lb = mtbdd_isleaf(b); - mtbddnode_t na, nb; - uint32_t va, vb; - if (!la) { - na = GETNODE(a); - va = mtbddnode_getvariable(na); - } else { - na = 0; - va = 0xffffffff; - } - if (!lb) { - nb = GETNODE(b); - vb = mtbddnode_getvariable(nb); - } else { - nb = 0; - vb = 0xffffffff; - } - uint32_t v = va < vb ? va : vb; - - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - if (!la && va == v) { - alow = node_getlow(a, na); - ahigh = node_gethigh(a, na); - } else { - alow = a; - ahigh = a; - } - if (!lb && vb == v) { - blow = node_getlow(b, nb); - bhigh = node_gethigh(b, nb); - } else { - blow = b; - bhigh = b; - } - - /* Recursive */ - mtbdd_refs_spawn(SPAWN(mtbdd_apply, ahigh, bhigh, op)); - MTBDD low = mtbdd_refs_push(CALL(mtbdd_apply, alow, blow, op)); - MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_apply)); - mtbdd_refs_pop(1); - result = mtbdd_makenode(v, low, high); - - /* Store in cache */ - cache_put3(CACHE_MTBDD_APPLY, a, b, (size_t)op, result); - return result; -} - -/** - * Apply a binary operation to and with parameter
<p>
    - */ -TASK_IMPL_5(MTBDD, mtbdd_applyp, MTBDD, a, MTBDD, b, size_t, p, mtbdd_applyp_op, op, uint64_t, opid) -{ - /* Check terminal case */ - MTBDD result = WRAP(op, &a, &b, p); - if (result != mtbdd_invalid) return result; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - if (cache_get3(opid, a, b, p, &result)) return result; - - /* Get top variable */ - int la = mtbdd_isleaf(a); - int lb = mtbdd_isleaf(b); - mtbddnode_t na, nb; - uint32_t va, vb; - if (!la) { - na = GETNODE(a); - va = mtbddnode_getvariable(na); - } else { - na = 0; - va = 0xffffffff; - } - if (!lb) { - nb = GETNODE(b); - vb = mtbddnode_getvariable(nb); - } else { - nb = 0; - vb = 0xffffffff; - } - uint32_t v = va < vb ? va : vb; - - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - if (!la && va == v) { - alow = node_getlow(a, na); - ahigh = node_gethigh(a, na); - } else { - alow = a; - ahigh = a; - } - if (!lb && vb == v) { - blow = node_getlow(b, nb); - bhigh = node_gethigh(b, nb); - } else { - blow = b; - bhigh = b; - } - - /* Recursive */ - mtbdd_refs_spawn(SPAWN(mtbdd_applyp, ahigh, bhigh, p, op, opid)); - MTBDD low = mtbdd_refs_push(CALL(mtbdd_applyp, alow, blow, p, op, opid)); - MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_applyp)); - mtbdd_refs_pop(1); - result = mtbdd_makenode(v, low, high); - - /* Store in cache */ - cache_put3(opid, a, b, p, result); - return result; -} - -/** - * Apply a unary operation to
<dd>
    . - */ -TASK_IMPL_3(MTBDD, mtbdd_uapply, MTBDD, dd, mtbdd_uapply_op, op, size_t, param) -{ - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_UAPPLY, dd, (size_t)op, param, &result)) return result; - - /* Check terminal case */ - result = WRAP(op, dd, param); - if (result != mtbdd_invalid) { - /* Store in cache */ - cache_put3(CACHE_MTBDD_UAPPLY, dd, (size_t)op, param, result); - return result; - } - - /* Get cofactors */ - mtbddnode_t ndd = GETNODE(dd); - MTBDD ddlow = node_getlow(dd, ndd); - MTBDD ddhigh = node_gethigh(dd, ndd); - - /* Recursive */ - mtbdd_refs_spawn(SPAWN(mtbdd_uapply, ddhigh, op, param)); - MTBDD low = mtbdd_refs_push(CALL(mtbdd_uapply, ddlow, op, param)); - MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_uapply)); - mtbdd_refs_pop(1); - result = mtbdd_makenode(mtbddnode_getvariable(ndd), low, high); - - /* Store in cache */ - cache_put3(CACHE_MTBDD_UAPPLY, dd, (size_t)op, param, result); - return result; -} - -TASK_2(MTBDD, mtbdd_uop_times_uint, MTBDD, a, size_t, k) -{ - if (a == mtbdd_false) return mtbdd_false; - if (a == mtbdd_true) return mtbdd_true; - - // a != constant - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - if (mtbddnode_gettype(na) == 0) { - int64_t v = mtbdd_getint64(a); - return mtbdd_int64(v*k); - } else if (mtbddnode_gettype(na) == 1) { - double d = mtbdd_getdouble(a); - return mtbdd_double(d*k); - } else if (mtbddnode_gettype(na) == 2) { - uint64_t v = mtbddnode_getvalue(na); - int64_t n = (int32_t)(v>>32); - uint32_t d = v; - uint32_t c = gcd(d, (uint32_t)k); - return mtbdd_fraction(n*(k/c), d/c); - } - } - - return mtbdd_invalid; -} - -TASK_2(MTBDD, mtbdd_uop_pow_uint, MTBDD, a, size_t, k) -{ - if (a == mtbdd_false) return mtbdd_false; - if (a == mtbdd_true) return mtbdd_true; - - // a != constant - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - if (mtbddnode_gettype(na) == 0) { - int64_t v = mtbdd_getint64(a); - return mtbdd_int64(pow(v, k)); - } else if (mtbddnode_gettype(na) == 1) { - double d = mtbdd_getdouble(a); - return mtbdd_double(pow(d, k)); - } else if (mtbddnode_gettype(na) == 2) { - uint64_t v = mtbddnode_getvalue(na); - return mtbdd_fraction(pow((int32_t)(v>>32), k), (uint32_t)v); - } - } - - return mtbdd_invalid; -} - -TASK_IMPL_3(MTBDD, mtbdd_abstract_op_plus, MTBDD, a, MTBDD, b, int, k) -{ - if (k==0) { - return mtbdd_apply(a, b, TASK(mtbdd_op_plus)); - } else { - uint64_t factor = 1ULL< from using the operation - */ -TASK_IMPL_3(MTBDD, mtbdd_abstract, MTBDD, a, MTBDD, v, mtbdd_abstract_op, op) -{ - /* Check terminal case */ - if (a == mtbdd_false) return mtbdd_false; - if (a == mtbdd_true) return mtbdd_true; - if (v == mtbdd_true) return a; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* a != constant, v != constant */ - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - /* Count number of variables */ - uint64_t k = 0; - while (v != mtbdd_true) { - k++; - v = node_gethigh(v, GETNODE(v)); - } - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_ABSTRACT, a, v | (k << 40), (size_t)op, &result)) return result; - - /* Compute result */ - result = WRAP(op, a, a, k); - - /* Store in cache */ - cache_put3(CACHE_MTBDD_ABSTRACT, a, v | (k << 40), (size_t)op, result); - return result; - } - - /* Possibly skip k variables */ - mtbddnode_t nv = GETNODE(v); - uint32_t var_a = mtbddnode_getvariable(na); - uint32_t var_v = mtbddnode_getvariable(nv); - uint64_t k = 0; - while (var_v < var_a) 
{ - k++; - v = node_gethigh(v, nv); - if (v == mtbdd_true) break; - nv = GETNODE(v); - var_v = mtbddnode_getvariable(nv); - } - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_ABSTRACT, a, v | (k << 40), (size_t)op, &result)) return result; - - /* Recursive */ - if (v == mtbdd_true) { - result = a; - } else if (var_a < var_v) { - mtbdd_refs_spawn(SPAWN(mtbdd_abstract, node_gethigh(a, na), v, op)); - MTBDD low = mtbdd_refs_push(CALL(mtbdd_abstract, node_getlow(a, na), v, op)); - MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_abstract)); - mtbdd_refs_pop(1); - result = mtbdd_makenode(var_a, low, high); - } else /* var_a == var_v */ { - mtbdd_refs_spawn(SPAWN(mtbdd_abstract, node_gethigh(a, na), node_gethigh(v, nv), op)); - MTBDD low = mtbdd_refs_push(CALL(mtbdd_abstract, node_getlow(a, na), node_gethigh(v, nv), op)); - MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(mtbdd_abstract))); - result = WRAP(op, low, high, 0); - mtbdd_refs_pop(2); - } - - if (k) { - mtbdd_refs_push(result); - result = WRAP(op, result, result, k); - mtbdd_refs_pop(1); - } - - /* Store in cache */ - cache_put3(CACHE_MTBDD_ABSTRACT, a, v | (k << 40), (size_t)op, result); - return result; -} - -/** - * Binary operation Plus (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDDs, mtbdd_false is interpreted as "0" or "0.0". - */ -TASK_IMPL_2(MTBDD, mtbdd_op_plus, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_false) return b; - if (b == mtbdd_false) return a; - - // Handle Boolean MTBDDs: interpret as Or - if (a == mtbdd_true) return mtbdd_true; - if (b == mtbdd_true) return mtbdd_true; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // both integer - return mtbdd_int64(*(int64_t*)(&val_a) + *(int64_t*)(&val_b)); - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - return mtbdd_double(*(double*)(&val_a) + *(double*)(&val_b)); - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - int64_t nom_a = (int32_t)(val_a>>32); - int64_t nom_b = (int32_t)(val_b>>32); - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - // common cases - if (nom_a == 0) return b; - if (nom_b == 0) return a; - // equalize denominators - uint32_t c = gcd(denom_a, denom_b); - nom_a *= denom_b/c; - nom_b *= denom_a/c; - denom_a *= denom_b/c; - // add - return mtbdd_fraction(nom_a + nom_b, denom_a); - } - } - - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -/** - * Binary operation Minus (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDDs, mtbdd_false is interpreted as "0" or "0.0". 
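/*
 * Illustrative sketch (not part of the original sources): combining two
 * numeric MTBDDs pointwise with the Plus operator above, in the same way
 * mtbdd_abstract_op_plus uses it. Assumes an initialised Lace/Sylvan
 * instance; the function name is hypothetical.
 */
#include <sylvan.h>

static MTBDD pointwise_plus(MTBDD a, MTBDD b)
{
    LACE_ME;
    /* mtbdd_false in either operand is read as 0 / 0.0 by mtbdd_op_plus. */
    return mtbdd_apply(a, b, TASK(mtbdd_op_plus));
}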
- */ -TASK_IMPL_2(MTBDD, mtbdd_op_minus, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_false) return mtbdd_negate(b); - if (b == mtbdd_false) return a; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // both integer - return mtbdd_int64(*(int64_t*)(&val_a) - *(int64_t*)(&val_b)); - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - return mtbdd_double(*(double*)(&val_a) - *(double*)(&val_b)); - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - int64_t nom_a = (int32_t)(val_a>>32); - int64_t nom_b = (int32_t)(val_b>>32); - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - // common cases - if (nom_b == 0) return a; - // equalize denominators - uint32_t c = gcd(denom_a, denom_b); - nom_a *= denom_b/c; - nom_b *= denom_a/c; - denom_a *= denom_b/c; - // subtract - return mtbdd_fraction(nom_a - nom_b, denom_a); - } - } - - return mtbdd_invalid; -} - -/** - * Binary operation Times (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). - */ -TASK_IMPL_2(MTBDD, mtbdd_op_times, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false; - - // Handle Boolean MTBDDs: interpret as And - if (a == mtbdd_true) return b; - if (b == mtbdd_true) return a; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // both integer - return mtbdd_int64(*(int64_t*)(&val_a) * *(int64_t*)(&val_b)); - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - return mtbdd_double(*(double*)(&val_a) * *(double*)(&val_b)); - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - int64_t nom_a = (int32_t)(val_a>>32); - int64_t nom_b = (int32_t)(val_b>>32); - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - // multiply! - uint32_t c = gcd(nom_b < 0 ? -nom_b : nom_b, denom_a); - uint32_t d = gcd(nom_a < 0 ? -nom_a : nom_a, denom_b); - nom_a /= d; - denom_a /= c; - nom_a *= (nom_b/c); - denom_a *= (denom_b/d); - return mtbdd_fraction(nom_a, denom_a); - } - } - - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -/** - * Binary operation Minimum (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. 
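/*
 * Illustrative sketch (not part of the original sources): the Minimum
 * operator above treats mtbdd_false as "undefined" rather than as 0, so the
 * defined operand wins. Assumes an initialised Lace/Sylvan instance; names
 * and values are hypothetical.
 */
#include <assert.h>
#include <sylvan.h>

static void min_with_partial_operand(void)
{
    LACE_ME;
    MTBDD defined = mtbdd_double(5.0);
    /* min(5.0, undefined) yields the 5.0 leaf, not 0.0. */
    MTBDD m = mtbdd_apply(defined, mtbdd_false, TASK(mtbdd_op_min));
    assert(m == defined);
    (void)m;
}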
- */ -TASK_IMPL_2(MTBDD, mtbdd_op_min, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_true) return b; - if (b == mtbdd_true) return a; - if (a == b) return a; - - // Special case where "false" indicates a partial function - if (a == mtbdd_false && b != mtbdd_false && mtbddnode_isleaf(GETNODE(b))) return b; - if (b == mtbdd_false && a != mtbdd_false && mtbddnode_isleaf(GETNODE(a))) return a; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // both integer - int64_t va = *(int64_t*)(&val_a); - int64_t vb = *(int64_t*)(&val_b); - return va < vb ? a : b; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double va = *(double*)&val_a; - double vb = *(double*)&val_b; - return va < vb ? a : b; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - int64_t nom_a = (int32_t)(val_a>>32); - int64_t nom_b = (int32_t)(val_b>>32); - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - // equalize denominators - uint32_t c = gcd(denom_a, denom_b); - nom_a *= denom_b/c; - nom_b *= denom_a/c; - // compute lowest - return nom_a < nom_b ? a : b; - } - } - - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -/** - * Binary operation Maximum (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. - */ -TASK_IMPL_2(MTBDD, mtbdd_op_max, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_true) return a; - if (b == mtbdd_true) return b; - if (a == mtbdd_false) return b; - if (b == mtbdd_false) return a; - if (a == b) return a; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // both integer - int64_t va = *(int64_t*)(&val_a); - int64_t vb = *(int64_t*)(&val_b); - return va > vb ? a : b; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double vval_a = *(double*)&val_a; - double vval_b = *(double*)&val_b; - return vval_a > vval_b ? a : b; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - int64_t nom_a = (int32_t)(val_a>>32); - int64_t nom_b = (int32_t)(val_b>>32); - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - // equalize denominators - uint32_t c = gcd(denom_a, denom_b); - nom_a *= denom_b/c; - nom_b *= denom_a/c; - // compute highest - return nom_a > nom_b ? a : b; - } - } - - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -TASK_IMPL_2(MTBDD, mtbdd_op_negate, MTBDD, a, size_t, k) -{ - // if a is false, then it is a partial function. Keep partial! 
- if (a == mtbdd_false) return mtbdd_false; - - // a != constant - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - if (mtbddnode_gettype(na) == 0) { - int64_t v = mtbdd_getint64(a); - return mtbdd_int64(-v); - } else if (mtbddnode_gettype(na) == 1) { - double d = mtbdd_getdouble(a); - return mtbdd_double(-d); - } else if (mtbddnode_gettype(na) == 2) { - uint64_t v = mtbddnode_getvalue(na); - return mtbdd_fraction(-(int32_t)(v>>32), (uint32_t)v); - } - } - - return mtbdd_invalid; - (void)k; // unused variable -} - -/** - * Compute IF THEN ELSE . - * must be a Boolean MTBDD (or standard BDD). - */ -TASK_IMPL_3(MTBDD, mtbdd_ite, MTBDD, f, MTBDD, g, MTBDD, h) -{ - /* Terminal cases */ - if (f == mtbdd_true) return g; - if (f == mtbdd_false) return h; - if (g == h) return g; - if (g == mtbdd_true && h == mtbdd_false) return f; - if (h == mtbdd_true && g == mtbdd_false) return MTBDD_TOGGLEMARK(f); - - // If all MTBDD's are Boolean, then there could be further optimizations (see sylvan_bdd.c) - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_ITE, f, g, h, &result)) return result; - - /* Get top variable */ - int lg = mtbdd_isleaf(g); - int lh = mtbdd_isleaf(h); - mtbddnode_t nf = GETNODE(f); - mtbddnode_t ng = lg ? 0 : GETNODE(g); - mtbddnode_t nh = lh ? 0 : GETNODE(h); - uint32_t vf = mtbddnode_getvariable(nf); - uint32_t vg = lg ? 0 : mtbddnode_getvariable(ng); - uint32_t vh = lh ? 0 : mtbddnode_getvariable(nh); - uint32_t v = vf; - if (!lg && vg < v) v = vg; - if (!lh && vh < v) v = vh; - - /* Get cofactors */ - MTBDD flow, fhigh, glow, ghigh, hlow, hhigh; - flow = (vf == v) ? node_getlow(f, nf) : f; - fhigh = (vf == v) ? node_gethigh(f, nf) : f; - glow = (!lg && vg == v) ? node_getlow(g, ng) : g; - ghigh = (!lg && vg == v) ? node_gethigh(g, ng) : g; - hlow = (!lh && vh == v) ? node_getlow(h, nh) : h; - hhigh = (!lh && vh == v) ? node_gethigh(h, nh) : h; - - /* Recursive calls */ - mtbdd_refs_spawn(SPAWN(mtbdd_ite, fhigh, ghigh, hhigh)); - MTBDD low = mtbdd_refs_push(CALL(mtbdd_ite, flow, glow, hlow)); - MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_ite)); - mtbdd_refs_pop(1); - result = mtbdd_makenode(v, low, high); - - /* Store in cache */ - cache_put3(CACHE_MTBDD_ITE, f, g, h, result); - return result; -} - -/** - * Monad that converts double/fraction to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; - */ -TASK_IMPL_2(MTBDD, mtbdd_op_threshold_double, MTBDD, a, size_t, svalue) -{ - /* We only expect "double" terminals, or false */ - if (a == mtbdd_false) return mtbdd_false; - if (a == mtbdd_true) return mtbdd_invalid; - - // a != constant - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - double value = *(double*)&svalue; - if (mtbddnode_gettype(na) == 1) { - return mtbdd_getdouble(a) >= value ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 2) { - double d = (double)mtbdd_getnumer(a); - d /= mtbdd_getdenom(a); - return d >= value ? 
mtbdd_true : mtbdd_false; - } - } - - return mtbdd_invalid; -} - -/** - * Monad that converts double/fraction to a Boolean BDD, translate terminals > value to 1 and to 0 otherwise; - */ -TASK_IMPL_2(MTBDD, mtbdd_op_strict_threshold_double, MTBDD, a, size_t, svalue) -{ - /* We only expect "double" terminals, or false */ - if (a == mtbdd_false) return mtbdd_false; - if (a == mtbdd_true) return mtbdd_invalid; - - // a != constant - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - double value = *(double*)&svalue; - if (mtbddnode_gettype(na) == 1) { - return mtbdd_getdouble(a) > value ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 2) { - double d = (double)mtbdd_getnumer(a); - d /= mtbdd_getdenom(a); - return d > value ? mtbdd_true : mtbdd_false; - } - } - - return mtbdd_invalid; -} - -TASK_IMPL_2(MTBDD, mtbdd_threshold_double, MTBDD, dd, double, d) -{ - return mtbdd_uapply(dd, TASK(mtbdd_op_threshold_double), *(size_t*)&d); -} - -TASK_IMPL_2(MTBDD, mtbdd_strict_threshold_double, MTBDD, dd, double, d) -{ - return mtbdd_uapply(dd, TASK(mtbdd_op_strict_threshold_double), *(size_t*)&d); -} - -/** - * Compare two Double MTBDDs, returns Boolean True if they are equal within some value epsilon - */ -TASK_4(MTBDD, mtbdd_equal_norm_d2, MTBDD, a, MTBDD, b, size_t, svalue, int*, shortcircuit) -{ - /* Check short circuit */ - if (*shortcircuit) return mtbdd_false; - - /* Check terminal case */ - if (a == b) return mtbdd_true; - if (a == mtbdd_false) return mtbdd_false; - if (b == mtbdd_false) return mtbdd_false; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - int la = mtbddnode_isleaf(na); - int lb = mtbddnode_isleaf(nb); - - if (la && lb) { - // assume Double MTBDD - double va = mtbdd_getdouble(a); - double vb = mtbdd_getdouble(b); - va -= vb; - if (va < 0) va = -va; - return (va < *(double*)&svalue) ? mtbdd_true : mtbdd_false; - } - - if (b < a) { - MTBDD t = a; - a = b; - b = t; - } - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_EQUAL_NORM, a, b, svalue, &result)) return result; - - /* Get top variable */ - uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); - uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); - uint32_t var = va < vb ? va : vb; - - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - alow = va == var ? node_getlow(a, na) : a; - ahigh = va == var ? node_gethigh(a, na) : a; - blow = vb == var ? node_getlow(b, nb) : b; - bhigh = vb == var ? node_gethigh(b, nb) : b; - - SPAWN(mtbdd_equal_norm_d2, ahigh, bhigh, svalue, shortcircuit); - result = CALL(mtbdd_equal_norm_d2, alow, blow, svalue, shortcircuit); - if (result == mtbdd_false) *shortcircuit = 1; - if (result != SYNC(mtbdd_equal_norm_d2)) result = mtbdd_false; - if (result == mtbdd_false) *shortcircuit = 1; - - /* Store in cache (only if we are not short circuiting) */ - if (!*shortcircuit) cache_put3(CACHE_MTBDD_EQUAL_NORM, a, b, svalue, result); - return result; -} - -TASK_IMPL_3(MTBDD, mtbdd_equal_norm_d, MTBDD, a, MTBDD, b, double, d) -{ - /* the implementation checks shortcircuit in every task and if the two - MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ - int shortcircuit = 0; - return CALL(mtbdd_equal_norm_d2, a, b, *(size_t*)&d, &shortcircuit); -} - -/** - * Compare two Double MTBDDs, returns Boolean True if they are equal within some value epsilon - * This version computes the relative difference vs the value in a. 
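/*
 * Illustrative sketch (not part of the original sources): absolute versus
 * relative epsilon comparison with the tasks above. For double leaves
 * 1000.0 and 1000.4 the relative error is 4e-4, so the pair passes
 * equal_norm_rel with epsilon 1e-3 but fails plain equal_norm with the same
 * epsilon. Assumes an initialised Lace/Sylvan instance; the helper names
 * are hypothetical.
 */
#include <sylvan.h>

static int approx_equal_abs(MTBDD a, MTBDD b, double eps)
{
    LACE_ME;
    return CALL(mtbdd_equal_norm_d, a, b, eps) == mtbdd_true;      /* |a-b| < eps     */
}

static int approx_equal_rel(MTBDD a, MTBDD b, double eps)
{
    LACE_ME;
    return CALL(mtbdd_equal_norm_rel_d, a, b, eps) == mtbdd_true;  /* |(a-b)/a| < eps */
}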
- */ -TASK_4(MTBDD, mtbdd_equal_norm_rel_d2, MTBDD, a, MTBDD, b, size_t, svalue, int*, shortcircuit) -{ - /* Check short circuit */ - if (*shortcircuit) return mtbdd_false; - - /* Check terminal case */ - if (a == b) return mtbdd_true; - if (a == mtbdd_false) return mtbdd_false; - if (b == mtbdd_false) return mtbdd_false; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - int la = mtbddnode_isleaf(na); - int lb = mtbddnode_isleaf(nb); - - if (la && lb) { - // assume Double MTBDD - double va = mtbdd_getdouble(a); - double vb = mtbdd_getdouble(b); - if (va == 0.0 || va == -0.0) return mtbdd_false; - va = (va - vb) / va; - if (va < 0) va = -va; - return (va < *(double*)&svalue) ? mtbdd_true : mtbdd_false; - } - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_EQUAL_NORM_REL, a, b, svalue, &result)) return result; - - /* Get top variable */ - uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); - uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); - uint32_t var = va < vb ? va : vb; - - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - alow = va == var ? node_getlow(a, na) : a; - ahigh = va == var ? node_gethigh(a, na) : a; - blow = vb == var ? node_getlow(b, nb) : b; - bhigh = vb == var ? node_gethigh(b, nb) : b; - - SPAWN(mtbdd_equal_norm_rel_d2, ahigh, bhigh, svalue, shortcircuit); - result = CALL(mtbdd_equal_norm_rel_d2, alow, blow, svalue, shortcircuit); - if (result == mtbdd_false) *shortcircuit = 1; - if (result != SYNC(mtbdd_equal_norm_rel_d2)) result = mtbdd_false; - if (result == mtbdd_false) *shortcircuit = 1; - - /* Store in cache (only if we are not short circuiting) */ - if (!*shortcircuit) cache_put3(CACHE_MTBDD_EQUAL_NORM_REL, a, b, svalue, result); - return result; -} - -TASK_IMPL_3(MTBDD, mtbdd_equal_norm_rel_d, MTBDD, a, MTBDD, b, double, d) -{ - /* the implementation checks shortcircuit in every task and if the two - MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ - int shortcircuit = 0; - return CALL(mtbdd_equal_norm_rel_d2, a, b, *(size_t*)&d, &shortcircuit); -} - -/** - * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) <= b(s), mtbdd_false otherwise. - * For domains not in a / b, assume True. - */ -TASK_3(MTBDD, mtbdd_leq_rec, MTBDD, a, MTBDD, b, int*, shortcircuit) -{ - /* Check short circuit */ - if (*shortcircuit) return mtbdd_false; - - /* Check terminal case */ - if (a == b) return mtbdd_true; - - /* For partial functions, just return true */ - if (a == mtbdd_false) return mtbdd_true; - if (b == mtbdd_false) return mtbdd_true; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_LEQ, a, b, 0, &result)) return result; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - int la = mtbddnode_isleaf(na); - int lb = mtbddnode_isleaf(nb); - - if (la && lb) { - uint64_t va = mtbddnode_getvalue(na); - uint64_t vb = mtbddnode_getvalue(nb); - - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // type 0 = integer - result = *(int64_t*)(&va) <= *(int64_t*)(&vb) ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // type 1 = double - double vva = *(double*)&va; - double vvb = *(double*)&vb; - result = vva <= vvb ? 
mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // type 2 = fraction - int64_t nom_a = (int32_t)(va>>32); - int64_t nom_b = (int32_t)(vb>>32); - uint64_t da = va&0xffffffff; - uint64_t db = vb&0xffffffff; - // equalize denominators - uint32_t c = gcd(da, db); - nom_a *= db/c; - nom_b *= da/c; - result = nom_a <= nom_b ? mtbdd_true : mtbdd_false; - } - } else { - /* Get top variable */ - uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); - uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); - uint32_t var = va < vb ? va : vb; - - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - alow = va == var ? node_getlow(a, na) : a; - ahigh = va == var ? node_gethigh(a, na) : a; - blow = vb == var ? node_getlow(b, nb) : b; - bhigh = vb == var ? node_gethigh(b, nb) : b; - - SPAWN(mtbdd_leq_rec, ahigh, bhigh, shortcircuit); - result = CALL(mtbdd_leq_rec, alow, blow, shortcircuit); - if (result != SYNC(mtbdd_leq_rec)) result = mtbdd_false; - } - - if (result == mtbdd_false) *shortcircuit = 1; - - /* Store in cache */ - cache_put3(CACHE_MTBDD_LEQ, a, b, 0, result); - return result; -} - -TASK_IMPL_2(MTBDD, mtbdd_leq, MTBDD, a, MTBDD, b) -{ - /* the implementation checks shortcircuit in every task and if the two - MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ - int shortcircuit = 0; - return CALL(mtbdd_leq_rec, a, b, &shortcircuit); -} - -/** - * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) < b(s), mtbdd_false otherwise. - * For domains not in a / b, assume True. - */ -TASK_3(MTBDD, mtbdd_less_rec, MTBDD, a, MTBDD, b, int*, shortcircuit) -{ - /* Check short circuit */ - if (*shortcircuit) return mtbdd_false; - - /* Check terminal case */ - if (a == b) return mtbdd_false; - - /* For partial functions, just return true */ - if (a == mtbdd_false) return mtbdd_true; - if (b == mtbdd_false) return mtbdd_true; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_LESS, a, b, 0, &result)) return result; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - int la = mtbddnode_isleaf(na); - int lb = mtbddnode_isleaf(nb); - - if (la && lb) { - uint64_t va = mtbddnode_getvalue(na); - uint64_t vb = mtbddnode_getvalue(nb); - - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // type 0 = integer - result = *(int64_t*)(&va) < *(int64_t*)(&vb) ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // type 1 = double - double vva = *(double*)&va; - double vvb = *(double*)&vb; - result = vva < vvb ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // type 2 = fraction - int64_t nom_a = (int32_t)(va>>32); - int64_t nom_b = (int32_t)(vb>>32); - uint64_t da = va&0xffffffff; - uint64_t db = vb&0xffffffff; - // equalize denominators - uint32_t c = gcd(da, db); - nom_a *= db/c; - nom_b *= da/c; - result = nom_a < nom_b ? mtbdd_true : mtbdd_false; - } - } else { - /* Get top variable */ - uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); - uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); - uint32_t var = va < vb ? va : vb; - - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - alow = va == var ? node_getlow(a, na) : a; - ahigh = va == var ? node_gethigh(a, na) : a; - blow = vb == var ? node_getlow(b, nb) : b; - bhigh = vb == var ? 
node_gethigh(b, nb) : b; - - SPAWN(mtbdd_less_rec, ahigh, bhigh, shortcircuit); - result = CALL(mtbdd_less_rec, alow, blow, shortcircuit); - if (result != SYNC(mtbdd_less_rec)) result = mtbdd_false; - } - - if (result == mtbdd_false) *shortcircuit = 1; - - /* Store in cache */ - cache_put3(CACHE_MTBDD_LESS, a, b, 0, result); - return result; -} - -TASK_IMPL_2(MTBDD, mtbdd_less, MTBDD, a, MTBDD, b) -{ - /* the implementation checks shortcircuit in every task and if the two - MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ - int shortcircuit = 0; - return CALL(mtbdd_less_rec, a, b, &shortcircuit); -} - -/** - * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) >= b(s), mtbdd_false otherwise. - * For domains not in a / b, assume True. - */ -TASK_3(MTBDD, mtbdd_geq_rec, MTBDD, a, MTBDD, b, int*, shortcircuit) -{ - /* Check short circuit */ - if (*shortcircuit) return mtbdd_false; - - /* Check terminal case */ - if (a == b) return mtbdd_true; - - /* For partial functions, just return true */ - if (a == mtbdd_false) return mtbdd_true; - if (b == mtbdd_false) return mtbdd_true; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_GEQ, a, b, 0, &result)) return result; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - int la = mtbddnode_isleaf(na); - int lb = mtbddnode_isleaf(nb); - - if (la && lb) { - uint64_t va = mtbddnode_getvalue(na); - uint64_t vb = mtbddnode_getvalue(nb); - - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // type 0 = integer - result = *(int64_t*)(&va) >= *(int64_t*)(&vb) ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // type 1 = double - double vva = *(double*)&va; - double vvb = *(double*)&vb; - result = vva >= vvb ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // type 2 = fraction - int64_t nom_a = (int32_t)(va>>32); - int64_t nom_b = (int32_t)(vb>>32); - uint64_t da = va&0xffffffff; - uint64_t db = vb&0xffffffff; - // equalize denominators - uint32_t c = gcd(da, db); - nom_a *= db/c; - nom_b *= da/c; - result = nom_a >= nom_b ? mtbdd_true : mtbdd_false; - } - } else { - /* Get top variable */ - uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); - uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); - uint32_t var = va < vb ? va : vb; - - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - alow = va == var ? node_getlow(a, na) : a; - ahigh = va == var ? node_gethigh(a, na) : a; - blow = vb == var ? node_getlow(b, nb) : b; - bhigh = vb == var ? node_gethigh(b, nb) : b; - - SPAWN(mtbdd_geq_rec, ahigh, bhigh, shortcircuit); - result = CALL(mtbdd_geq_rec, alow, blow, shortcircuit); - if (result != SYNC(mtbdd_geq_rec)) result = mtbdd_false; - } - - if (result == mtbdd_false) *shortcircuit = 1; - - /* Store in cache */ - cache_put3(CACHE_MTBDD_GEQ, a, b, 0, result); - return result; -} - -TASK_IMPL_2(MTBDD, mtbdd_geq, MTBDD, a, MTBDD, b) -{ - /* the implementation checks shortcircuit in every task and if the two - MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ - int shortcircuit = 0; - return CALL(mtbdd_geq_rec, a, b, &shortcircuit); -} - -/** - * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) > b(s), mtbdd_false otherwise. - * For domains not in a / b, assume True. 
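
In the leaf case of these comparison tasks, fractions are compared by scaling both numerators to the common denominator through the gcd of the two denominators, which keeps the intermediate products small. A standalone sketch of that step (the helper names are illustrative, not part of the library):

#include <assert.h>
#include <stdint.h>

static uint32_t gcd_u32(uint32_t a, uint32_t b)
{
    while (b != 0) { uint32_t t = a % b; a = b; b = t; }
    return a;
}

/* Returns 1 iff a_num/a_den >= b_num/b_den, using the same scaling as mtbdd_geq_rec. */
static int frac_geq(int64_t a_num, uint32_t a_den, int64_t b_num, uint32_t b_den)
{
    uint32_t c = gcd_u32(a_den, b_den);
    a_num *= b_den / c;   /* scale both numerators to the common denominator */
    b_num *= a_den / c;
    return a_num >= b_num;
}

/* 3/4 >= 2/3 because 9 >= 8 once both are expressed in twelfths. */
static void frac_geq_example(void) { assert(frac_geq(3, 4, 2, 3)); }
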
- */ -TASK_3(MTBDD, mtbdd_greater_rec, MTBDD, a, MTBDD, b, int*, shortcircuit) -{ - /* Check short circuit */ - if (*shortcircuit) return mtbdd_false; - - /* Check terminal case */ - if (a == b) return mtbdd_false; - - /* For partial functions, just return true */ - if (a == mtbdd_false) return mtbdd_true; - if (b == mtbdd_false) return mtbdd_true; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_GREATER, a, b, 0, &result)) return result; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - int la = mtbddnode_isleaf(na); - int lb = mtbddnode_isleaf(nb); - - if (la && lb) { - uint64_t va = mtbddnode_getvalue(na); - uint64_t vb = mtbddnode_getvalue(nb); - - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - // type 0 = integer - result = *(int64_t*)(&va) > *(int64_t*)(&vb) ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // type 1 = double - double vva = *(double*)&va; - double vvb = *(double*)&vb; - result = vva > vvb ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // type 2 = fraction - int64_t nom_a = (int32_t)(va>>32); - int64_t nom_b = (int32_t)(vb>>32); - uint64_t da = va&0xffffffff; - uint64_t db = vb&0xffffffff; - // equalize denominators - uint32_t c = gcd(da, db); - nom_a *= db/c; - nom_b *= da/c; - result = nom_a > nom_b ? mtbdd_true : mtbdd_false; - } - } else { - /* Get top variable */ - uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); - uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); - uint32_t var = va < vb ? va : vb; - - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - alow = va == var ? node_getlow(a, na) : a; - ahigh = va == var ? node_gethigh(a, na) : a; - blow = vb == var ? node_getlow(b, nb) : b; - bhigh = vb == var ? node_gethigh(b, nb) : b; - - SPAWN(mtbdd_greater_rec, ahigh, bhigh, shortcircuit); - result = CALL(mtbdd_greater_rec, alow, blow, shortcircuit); - if (result != SYNC(mtbdd_greater_rec)) result = mtbdd_false; - } - - if (result == mtbdd_false) *shortcircuit = 1; - - /* Store in cache */ - cache_put3(CACHE_MTBDD_GREATER, a, b, 0, result); - return result; -} - -TASK_IMPL_2(MTBDD, mtbdd_greater, MTBDD, a, MTBDD, b) -{ - /* the implementation checks shortcircuit in every task and if the two - MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ - int shortcircuit = 0; - return CALL(mtbdd_greater_rec, a, b, &shortcircuit); -} - -/** - * Multiply and , and abstract variables using summation. - * This is similar to the "and_exists" operation in BDDs. - */ -TASK_IMPL_3(MTBDD, mtbdd_and_exists, MTBDD, a, MTBDD, b, MTBDD, v) -{ - /* Check terminal case */ - if (v == mtbdd_true) return mtbdd_apply(a, b, TASK(mtbdd_op_times)); - MTBDD result = CALL(mtbdd_op_times, &a, &b); - if (result != mtbdd_invalid) { - mtbdd_refs_push(result); - result = mtbdd_abstract(result, v, TASK(mtbdd_abstract_op_plus)); - mtbdd_refs_pop(1); - return result; - } - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - if (cache_get3(CACHE_MTBDD_AND_EXISTS, a, b, v, &result)) return result; - - /* Now, v is not a constant, and either a or b is not a constant */ - - /* Get top variable */ - int la = mtbdd_isleaf(a); - int lb = mtbdd_isleaf(b); - mtbddnode_t na = la ? 0 : GETNODE(a); - mtbddnode_t nb = lb ? 0 : GETNODE(b); - uint32_t va = la ? 
0xffffffff : mtbddnode_getvariable(na); - uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); - uint32_t var = va < vb ? va : vb; - - mtbddnode_t nv = GETNODE(v); - uint32_t vv = mtbddnode_getvariable(nv); - - if (vv < var) { - /* Recursive, then abstract result */ - result = CALL(mtbdd_and_exists, a, b, node_gethigh(v, nv)); - mtbdd_refs_push(result); - result = mtbdd_apply(result, result, TASK(mtbdd_op_plus)); - mtbdd_refs_pop(1); - } else { - /* Get cofactors */ - MTBDD alow, ahigh, blow, bhigh; - alow = (!la && va == var) ? node_getlow(a, na) : a; - ahigh = (!la && va == var) ? node_gethigh(a, na) : a; - blow = (!lb && vb == var) ? node_getlow(b, nb) : b; - bhigh = (!lb && vb == var) ? node_gethigh(b, nb) : b; - - if (vv == var) { - /* Recursive, then abstract result */ - mtbdd_refs_spawn(SPAWN(mtbdd_and_exists, ahigh, bhigh, node_gethigh(v, nv))); - MTBDD low = mtbdd_refs_push(CALL(mtbdd_and_exists, alow, blow, node_gethigh(v, nv))); - MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(mtbdd_and_exists))); - result = CALL(mtbdd_apply, low, high, TASK(mtbdd_op_plus)); - mtbdd_refs_pop(2); - } else /* vv > v */ { - /* Recursive, then create node */ - mtbdd_refs_spawn(SPAWN(mtbdd_and_exists, ahigh, bhigh, v)); - MTBDD low = mtbdd_refs_push(CALL(mtbdd_and_exists, alow, blow, v)); - MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_and_exists)); - mtbdd_refs_pop(1); - result = mtbdd_makenode(var, low, high); - } - } - - /* Store in cache */ - cache_put3(CACHE_MTBDD_AND_EXISTS, a, b, v, result); - return result; -} - -/** - * Calculate the support of a MTBDD, i.e. the cube of all variables that appear in the MTBDD nodes. - */ -TASK_IMPL_1(MTBDD, mtbdd_support, MTBDD, dd) -{ - /* Terminal case */ - if (mtbdd_isleaf(dd)) return mtbdd_true; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_SUPPORT, dd, 0, 0, &result)) return result; - - /* Recursive calls */ - mtbddnode_t n = GETNODE(dd); - mtbdd_refs_spawn(SPAWN(mtbdd_support, node_getlow(dd, n))); - MTBDD high = mtbdd_refs_push(CALL(mtbdd_support, node_gethigh(dd, n))); - MTBDD low = mtbdd_refs_push(mtbdd_refs_sync(SYNC(mtbdd_support))); - - /* Compute result */ - result = mtbdd_makenode(mtbddnode_getvariable(n), mtbdd_false, mtbdd_times(low, high)); - mtbdd_refs_pop(2); - - /* Write to cache */ - cache_put3(CACHE_MTBDD_SUPPORT, dd, 0, 0, result); - return result; -} - -/** - * Function composition, for each node with variable which has a pair in , - * replace the node by the result of mtbdd_ite(, , ). - * Each in must be a Boolean MTBDD. - */ -TASK_IMPL_2(MTBDD, mtbdd_compose, MTBDD, a, MTBDDMAP, map) -{ - /* Terminal case */ - if (mtbdd_isleaf(a) || mtbdd_map_isempty(map)) return a; - - /* Determine top level */ - mtbddnode_t n = GETNODE(a); - uint32_t v = mtbddnode_getvariable(n); - - /* Find in map */ - while (mtbdd_map_key(map) < v) { - map = mtbdd_map_next(map); - if (mtbdd_map_isempty(map)) return a; - } - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_COMPOSE, a, map, 0, &result)) return result; - - /* Recursive calls */ - mtbdd_refs_spawn(SPAWN(mtbdd_compose, node_getlow(a, n), map)); - MTBDD high = mtbdd_refs_push(CALL(mtbdd_compose, node_gethigh(a, n), map)); - MTBDD low = mtbdd_refs_push(mtbdd_refs_sync(SYNC(mtbdd_compose))); - - /* Calculate result */ - MTBDD r = mtbdd_map_key(map) == v ? 
mtbdd_map_value(map) : mtbdd_makenode(v, mtbdd_false, mtbdd_true); - mtbdd_refs_push(r); - result = CALL(mtbdd_ite, r, high, low); - mtbdd_refs_pop(3); - - /* Store in cache */ - cache_put3(CACHE_MTBDD_COMPOSE, a, map, 0, result); - return result; -} - -/** - * Compute minimum leaf in the MTBDD (for Integer, Double, Rational MTBDDs) - */ -TASK_IMPL_1(MTBDD, mtbdd_minimum, MTBDD, a) -{ - /* Check terminal case */ - if (a == mtbdd_false) return mtbdd_false; - mtbddnode_t na = GETNODE(a); - if (mtbddnode_isleaf(na)) return a; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_MINIMUM, a, 0, 0, &result)) return result; - - /* Call recursive */ - SPAWN(mtbdd_minimum, node_getlow(a, na)); - MTBDD high = CALL(mtbdd_minimum, node_gethigh(a, na)); - MTBDD low = SYNC(mtbdd_minimum); - - /* Determine lowest */ - mtbddnode_t nl = GETNODE(low); - mtbddnode_t nh = GETNODE(high); - - if (mtbddnode_gettype(nl) == 0 && mtbddnode_gettype(nh) == 0) { - result = mtbdd_getint64(low) < mtbdd_getint64(high) ? low : high; - } else if (mtbddnode_gettype(nl) == 1 && mtbddnode_gettype(nh) == 1) { - result = mtbdd_getdouble(low) < mtbdd_getdouble(high) ? low : high; - } else if (mtbddnode_gettype(nl) == 2 && mtbddnode_gettype(nh) == 2) { - // type 2 = fraction - int64_t nom_l = mtbdd_getnumer(low); - int64_t nom_h = mtbdd_getnumer(high); - uint64_t denom_l = mtbdd_getdenom(low); - uint64_t denom_h = mtbdd_getdenom(high); - // equalize denominators - uint32_t c = gcd(denom_l, denom_h); - nom_l *= denom_h/c; - nom_h *= denom_l/c; - result = nom_l < nom_h ? low : high; - } - - /* Store in cache */ - cache_put3(CACHE_MTBDD_MINIMUM, a, 0, 0, result); - return result; -} - -/** - * Compute maximum leaf in the MTBDD (for Integer, Double, Rational MTBDDs) - */ -TASK_IMPL_1(MTBDD, mtbdd_maximum, MTBDD, a) -{ - /* Check terminal case */ - if (a == mtbdd_false) return mtbdd_false; - mtbddnode_t na = GETNODE(a); - if (mtbddnode_isleaf(na)) return a; - - /* Maybe perform garbage collection */ - sylvan_gc_test(); - - /* Check cache */ - MTBDD result; - if (cache_get3(CACHE_MTBDD_MAXIMUM, a, 0, 0, &result)) return result; - - /* Call recursive */ - SPAWN(mtbdd_maximum, node_getlow(a, na)); - MTBDD high = CALL(mtbdd_maximum, node_gethigh(a, na)); - MTBDD low = SYNC(mtbdd_maximum); - - /* Determine highest */ - mtbddnode_t nl = GETNODE(low); - mtbddnode_t nh = GETNODE(high); - - if (mtbddnode_gettype(nl) == 0 && mtbddnode_gettype(nh) == 0) { - result = mtbdd_getint64(low) > mtbdd_getint64(high) ? low : high; - } else if (mtbddnode_gettype(nl) == 1 && mtbddnode_gettype(nh) == 1) { - result = mtbdd_getdouble(low) > mtbdd_getdouble(high) ? low : high; - } else if (mtbddnode_gettype(nl) == 2 && mtbddnode_gettype(nh) == 2) { - // type 2 = fraction - int64_t nom_l = mtbdd_getnumer(low); - int64_t nom_h = mtbdd_getnumer(high); - uint64_t denom_l = mtbdd_getdenom(low); - uint64_t denom_h = mtbdd_getdenom(high); - // equalize denominators - uint32_t c = gcd(denom_l, denom_h); - nom_l *= denom_h/c; - nom_h *= denom_l/c; - result = nom_l > nom_h ? low : high; - } - - /* Store in cache */ - cache_put3(CACHE_MTBDD_MAXIMUM, a, 0, 0, result); - return result; -} - -/** - * Calculate the number of satisfying variable assignments according to . 
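
mtbdd_satcount, implemented below, counts assignments over exactly nvars variables: every non-false leaf reached after k decisions contributes 2^(nvars-k), because the variables skipped on that path remain free. A small sketch of the expected count for the Boolean function x0 over a 3-variable domain, assuming an initialized runtime and a LACE_ME worker context:

#include <assert.h>
#include <sylvan.h>

static void satcount_sketch(void)
{
    LACE_ME;                                   /* assumed Lace worker context */

    /* Boolean MTBDD for variable 0, built the way the compose code above builds it. */
    MTBDD x0 = mtbdd_makenode(0, mtbdd_false, mtbdd_true);

    /* x0 = 1 is fixed, x1 and x2 are free: 1 * 2 * 2 = 4 assignments. */
    assert(mtbdd_satcount(x0, 3) == 4.0);
}
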
- */ -TASK_IMPL_2(double, mtbdd_satcount, MTBDD, dd, size_t, nvars) -{ - /* Trivial cases */ - if (dd == mtbdd_false) return 0.0; - if (mtbdd_isleaf(dd)) return powl(2.0L, nvars); - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - union { - double d; - uint64_t s; - } hack; - - /* Consult cache */ - if (cache_get3(CACHE_BDD_SATCOUNT, dd, 0, nvars, &hack.s)) { - sylvan_stats_count(BDD_SATCOUNT_CACHED); - return hack.d; - } - - SPAWN(mtbdd_satcount, mtbdd_gethigh(dd), nvars-1); - double low = CALL(mtbdd_satcount, mtbdd_getlow(dd), nvars-1); - hack.d = low + SYNC(mtbdd_satcount); - - cache_put3(CACHE_BDD_SATCOUNT, dd, 0, nvars, hack.s); - return hack.d; -} - -MTBDD -mtbdd_enum_first(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb) -{ - if (dd == mtbdd_false) { - // the leaf dd is skipped - return mtbdd_false; - } else if (mtbdd_isleaf(dd)) { - // a leaf for which the filter returns 0 is skipped - if (filter_cb != NULL && filter_cb(dd) == 0) return mtbdd_false; - // ok, we have a leaf that is not skipped, go for it! - while (variables != mtbdd_true) { - *arr++ = 2; - variables = mtbdd_gethigh(variables); - } - return dd; - } else { - // if variables == true, then dd must be a leaf. But then this line is unreachable. - // if this assertion fails, then is not the support of
    . - assert(variables != mtbdd_true); - - // get next variable from - uint32_t v = mtbdd_getvar(variables); - variables = mtbdd_gethigh(variables); - - // check if MTBDD is on this variable - mtbddnode_t n = GETNODE(dd); - if (mtbddnode_getvariable(n) != v) { - *arr = 2; - return mtbdd_enum_first(dd, variables, arr+1, filter_cb); - } - - // first maybe follow low - MTBDD res = mtbdd_enum_first(node_getlow(dd, n), variables, arr+1, filter_cb); - if (res != mtbdd_false) { - *arr = 0; - return res; - } - - // if not low, try following high - res = mtbdd_enum_first(node_gethigh(dd, n), variables, arr+1, filter_cb); - if (res != mtbdd_false) { - *arr = 1; - return res; - } - - // we've tried low and high, return false - return mtbdd_false; - } -} - -MTBDD -mtbdd_enum_next(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb) -{ - if (mtbdd_isleaf(dd)) { - // we find the leaf in 'enum_next', then we've seen it before... - return mtbdd_false; - } else { - // if variables == true, then dd must be a leaf. But then this line is unreachable. - // if this assertion fails, then is not the support of
    . - assert(variables != mtbdd_true); - - variables = mtbdd_gethigh(variables); - - if (*arr == 0) { - // previous was low - mtbddnode_t n = GETNODE(dd); - MTBDD res = mtbdd_enum_next(node_getlow(dd, n), variables, arr+1, filter_cb); - if (res != mtbdd_false) { - return res; - } else { - // try to find new in high branch - res = mtbdd_enum_first(node_gethigh(dd, n), variables, arr+1, filter_cb); - if (res != mtbdd_false) { - *arr = 1; - return res; - } else { - return mtbdd_false; - } - } - } else if (*arr == 1) { - // previous was high - mtbddnode_t n = GETNODE(dd); - return mtbdd_enum_next(node_gethigh(dd, n), variables, arr+1, filter_cb); - } else { - // previous was either - return mtbdd_enum_next(dd, variables, arr+1, filter_cb); - } - } -} - -/** - * Helper function for recursive unmarking - */ -static void -mtbdd_unmark_rec(MTBDD mtbdd) -{ - mtbddnode_t n = GETNODE(mtbdd); - if (!mtbddnode_getmark(n)) return; - mtbddnode_setmark(n, 0); - if (mtbddnode_isleaf(n)) return; - mtbdd_unmark_rec(mtbddnode_getlow(n)); - mtbdd_unmark_rec(mtbddnode_gethigh(n)); -} - -/** - * Count number of leaves in MTBDD - */ - -static size_t -mtbdd_leafcount_mark(MTBDD mtbdd) -{ - if (mtbdd == mtbdd_true) return 0; // do not count true/false leaf - if (mtbdd == mtbdd_false) return 0; // do not count true/false leaf - mtbddnode_t n = GETNODE(mtbdd); - if (mtbddnode_getmark(n)) return 0; - mtbddnode_setmark(n, 1); - if (mtbddnode_isleaf(n)) return 1; // count leaf as 1 - return mtbdd_leafcount_mark(mtbddnode_getlow(n)) + mtbdd_leafcount_mark(mtbddnode_gethigh(n)); -} - -size_t -mtbdd_leafcount(MTBDD mtbdd) -{ - size_t result = mtbdd_leafcount_mark(mtbdd); - mtbdd_unmark_rec(mtbdd); - return result; -} - -/** - * Count number of nodes in MTBDD - */ - -static size_t -mtbdd_nodecount_mark(MTBDD mtbdd) -{ - if (mtbdd == mtbdd_true) return 0; // do not count true/false leaf - if (mtbdd == mtbdd_false) return 0; // do not count true/false leaf - mtbddnode_t n = GETNODE(mtbdd); - if (mtbddnode_getmark(n)) return 0; - mtbddnode_setmark(n, 1); - if (mtbddnode_isleaf(n)) return 1; // count leaf as 1 - return 1 + mtbdd_nodecount_mark(mtbddnode_getlow(n)) + mtbdd_nodecount_mark(mtbddnode_gethigh(n)); -} - -size_t -mtbdd_nodecount(MTBDD mtbdd) -{ - size_t result = mtbdd_nodecount_mark(mtbdd); - mtbdd_unmark_rec(mtbdd); - return result; -} - -TASK_2(int, mtbdd_test_isvalid_rec, MTBDD, dd, uint32_t, parent_var) -{ - // check if True/False leaf - if (dd == mtbdd_true || dd == mtbdd_false) return 1; - - // check if index is in array - uint64_t index = dd & (~mtbdd_complement); - assert(index > 1 && index < nodes->table_size); - if (index <= 1 || index >= nodes->table_size) return 0; - - // check if marked - int marked = llmsset_is_marked(nodes, index); - assert(marked); - if (marked == 0) return 0; - - // check if leaf - mtbddnode_t n = GETNODE(dd); - if (mtbddnode_isleaf(n)) return 1; // we're fine - - // check variable order - uint32_t var = mtbddnode_getvariable(n); - assert(var > parent_var); - if (var <= parent_var) return 0; - - // check cache - uint64_t result; - if (cache_get3(CACHE_BDD_ISBDD, dd, 0, 0, &result)) { - return result; - } - - // check recursively - SPAWN(mtbdd_test_isvalid_rec, node_getlow(dd, n), var); - result = (uint64_t)CALL(mtbdd_test_isvalid_rec, node_gethigh(dd, n), var); - if (!SYNC(mtbdd_test_isvalid_rec)) result = 0; - - // put in cache and return result - cache_put3(CACHE_BDD_ISBDD, dd, 0, 0, result); - return result; -} - -TASK_IMPL_1(int, mtbdd_test_isvalid, MTBDD, dd) -{ - // check if 
True/False leaf - if (dd == mtbdd_true || dd == mtbdd_false) return 1; - - // check if index is in array - uint64_t index = dd & (~mtbdd_complement); - assert(index > 1 && index < nodes->table_size); - if (index <= 1 || index >= nodes->table_size) return 0; - - // check if marked - int marked = llmsset_is_marked(nodes, index); - assert(marked); - if (marked == 0) return 0; - - // check if leaf - mtbddnode_t n = GETNODE(dd); - if (mtbddnode_isleaf(n)) return 1; // we're fine - - // check recursively - uint32_t var = mtbddnode_getvariable(n); - SPAWN(mtbdd_test_isvalid_rec, node_getlow(dd, n), var); - int result = CALL(mtbdd_test_isvalid_rec, node_gethigh(dd, n), var); - if (!SYNC(mtbdd_test_isvalid_rec)) result = 0; - return result; -} - -/** - * Export to .dot file - */ - -static void -mtbdd_fprintdot_rec(FILE *out, MTBDD mtbdd, print_terminal_label_cb cb) -{ - mtbddnode_t n = GETNODE(mtbdd); // also works for mtbdd_false - if (mtbddnode_getmark(n)) return; - mtbddnode_setmark(n, 1); - - if (mtbdd == mtbdd_true || mtbdd == mtbdd_false) { - fprintf(out, "0 [shape=box, style=filled, label=\"F\"];\n"); - } else if (mtbddnode_isleaf(n)) { - uint32_t type = mtbddnode_gettype(n); - uint64_t value = mtbddnode_getvalue(n); - fprintf(out, "%" PRIu64 " [shape=box, style=filled, label=\"", MTBDD_STRIPMARK(mtbdd)); - switch (type) { - case 0: - fprintf(out, "%" PRIu64, value); - break; - case 1: - fprintf(out, "%f", *(double*)&value); - break; - case 2: - fprintf(out, "%u/%u", (uint32_t)(value>>32), (uint32_t)value); - break; - default: - cb(out, type, value); - break; - } - fprintf(out, "\"];\n"); - } else { - fprintf(out, "%" PRIu64 " [label=\"%" PRIu32 "\"];\n", - MTBDD_STRIPMARK(mtbdd), mtbddnode_getvariable(n)); - - mtbdd_fprintdot_rec(out, mtbddnode_getlow(n), cb); - mtbdd_fprintdot_rec(out, mtbddnode_gethigh(n), cb); - - fprintf(out, "%" PRIu64 " -> %" PRIu64 " [style=dashed];\n", - MTBDD_STRIPMARK(mtbdd), mtbddnode_getlow(n)); - fprintf(out, "%" PRIu64 " -> %" PRIu64 " [style=solid dir=both arrowtail=%s];\n", - MTBDD_STRIPMARK(mtbdd), MTBDD_STRIPMARK(mtbddnode_gethigh(n)), - mtbddnode_getcomp(n) ? "dot" : "none"); - } -} - -void -mtbdd_fprintdot(FILE *out, MTBDD mtbdd, print_terminal_label_cb cb) -{ - fprintf(out, "digraph \"DD\" {\n"); - fprintf(out, "graph [dpi = 300];\n"); - fprintf(out, "center = true;\n"); - fprintf(out, "edge [dir = forward];\n"); - fprintf(out, "root [style=invis];\n"); - fprintf(out, "root -> %" PRIu64 " [style=solid dir=both arrowtail=%s];\n", - MTBDD_STRIPMARK(mtbdd), MTBDD_HASMARK(mtbdd) ? "dot" : "none"); - - mtbdd_fprintdot_rec(out, mtbdd, cb); - mtbdd_unmark_rec(mtbdd); - - fprintf(out, "}\n"); -} - -/** - * Return 1 if the map contains the key, 0 otherwise. - */ -int -mtbdd_map_contains(MTBDDMAP map, uint32_t key) -{ - while (!mtbdd_map_isempty(map)) { - mtbddnode_t n = GETNODE(map); - uint32_t k = mtbddnode_getvariable(n); - if (k == key) return 1; - if (k > key) return 0; - map = node_getlow(map, n); - } - - return 0; -} - -/** - * Retrieve the number of keys in the map. - */ -size_t -mtbdd_map_count(MTBDDMAP map) -{ - size_t r = 0; - - while (!mtbdd_map_isempty(map)) { - r++; - map = mtbdd_map_next(map); - } - - return r; -} - -/** - * Add the pair to the map, overwrites if key already in map. 
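
mtbdd_map_add (directly below) and the compose operation above are typically used together: the map assigns a replacement function to each variable index, and mtbdd_compose substitutes all of them in one traversal. A minimal sketch, assuming an initialized runtime; reference counting against garbage collection is omitted for brevity:

#include <sylvan.h>

/* Sketch: replace variable 2 in f by the Boolean function x0 AND x1. */
static MTBDD substitute_var2(MTBDD f)
{
    LACE_ME;                                   /* assumed Lace worker context */

    MTBDD x0 = mtbdd_makenode(0, mtbdd_false, mtbdd_true);
    MTBDD x1 = mtbdd_makenode(1, mtbdd_false, mtbdd_true);
    MTBDD x0_and_x1 = mtbdd_ite(x0, x1, mtbdd_false);   /* AND via ITE */

    MTBDDMAP map = mtbdd_map_empty();
    map = mtbdd_map_add(map, 2, x0_and_x1);              /* key 2 -> replacement */

    return mtbdd_compose(f, map);
}
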
- */ -MTBDDMAP -mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value) -{ - if (mtbdd_map_isempty(map)) return mtbdd_makenode(key, mtbdd_map_empty(), value); - - mtbddnode_t n = GETNODE(map); - uint32_t k = mtbddnode_getvariable(n); - - if (k < key) { - // add recursively and rebuild tree - MTBDDMAP low = mtbdd_map_add(node_getlow(map, n), key, value); - return mtbdd_makenode(k, low, node_gethigh(map, n)); - } else if (k > key) { - return mtbdd_makenode(key, map, value); - } else { - // replace old - return mtbdd_makenode(key, node_getlow(map, n), value); - } -} - -/** - * Add all values from map2 to map1, overwrites if key already in map1. - */ -MTBDDMAP -mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2) -{ - if (mtbdd_map_isempty(map1)) return map2; - if (mtbdd_map_isempty(map2)) return map1; - - mtbddnode_t n1 = GETNODE(map1); - mtbddnode_t n2 = GETNODE(map2); - uint32_t k1 = mtbddnode_getvariable(n1); - uint32_t k2 = mtbddnode_getvariable(n2); - - MTBDDMAP result; - if (k1 < k2) { - MTBDDMAP low = mtbdd_map_addall(node_getlow(map1, n1), map2); - result = mtbdd_makenode(k1, low, node_gethigh(map1, n1)); - } else if (k1 > k2) { - MTBDDMAP low = mtbdd_map_addall(map1, node_getlow(map2, n2)); - result = mtbdd_makenode(k2, low, node_gethigh(map2, n2)); - } else { - MTBDDMAP low = mtbdd_map_addall(node_getlow(map1, n1), node_getlow(map2, n2)); - result = mtbdd_makenode(k2, low, node_gethigh(map2, n2)); - } - - return result; -} - -/** - * Remove the key from the map and return the result - */ -MTBDDMAP -mtbdd_map_remove(MTBDDMAP map, uint32_t key) -{ - if (mtbdd_map_isempty(map)) return map; - - mtbddnode_t n = GETNODE(map); - uint32_t k = mtbddnode_getvariable(n); - - if (k < key) { - MTBDDMAP low = mtbdd_map_remove(node_getlow(map, n), key); - return mtbdd_makenode(k, low, node_gethigh(map, n)); - } else if (k > key) { - return map; - } else { - return node_getlow(map, n); - } -} - -/** - * Remove all keys in the cube from the map and return the result - */ -MTBDDMAP -mtbdd_map_removeall(MTBDDMAP map, MTBDD variables) -{ - if (mtbdd_map_isempty(map)) return map; - if (variables == mtbdd_true) return map; - - mtbddnode_t n1 = GETNODE(map); - mtbddnode_t n2 = GETNODE(variables); - uint32_t k1 = mtbddnode_getvariable(n1); - uint32_t k2 = mtbddnode_getvariable(n2); - - if (k1 < k2) { - MTBDDMAP low = mtbdd_map_removeall(node_getlow(map, n1), variables); - return mtbdd_makenode(k1, low, node_gethigh(map, n1)); - } else if (k1 > k2) { - return mtbdd_map_removeall(map, node_gethigh(variables, n2)); - } else { - return mtbdd_map_removeall(node_getlow(map, n1), node_gethigh(variables, n2)); - } -} - -#include "sylvan_mtbdd_storm.c" - diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd.h b/resources/3rdparty/sylvan/src/sylvan_mtbdd.h deleted file mode 100644 index 1a7de3f57..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_mtbdd.h +++ /dev/null @@ -1,608 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * This is an implementation of Multi-Terminal Binary Decision Diagrams. - * They encode functions on Boolean variables to any domain. - * - * Three domains are supported by default: Boolean, Integer and Real. - * Boolean MTBDDs are identical to BDDs (as supported by the bdd subpackage). - * Integer MTBDDs are encoded using "int64_t" terminals. - * Real MTBDDs are encoded using "double" terminals. - * - * Labels of Boolean variables of MTBDD nodes are 24-bit integers. - * - * Custom terminals are supported. - * - * Terminal type "0" is the Integer type, type "1" is the Real type. - * Type "2" is the Fraction type, consisting of two 32-bit integers (numerator and denominator) - * For non-Boolean MTBDDs, mtbdd_false is used for partial functions, i.e. mtbdd_false - * indicates that the function is not defined for a certain input. - */ - -/* Do not include this file directly. Instead, include sylvan.h */ - -#ifndef SYLVAN_MTBDD_H -#define SYLVAN_MTBDD_H - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/** - * An MTBDD is a 64-bit value. The low 40 bits are an index into the unique table. - * The highest 1 bit is the complement edge, indicating negation. - * For Boolean MTBDDs, this means "not X", for Integer and Real MTBDDs, this means "-X". - */ -typedef uint64_t MTBDD; -typedef MTBDD MTBDDMAP; - -/** - * mtbdd_true is only used in Boolean MTBDDs. mtbdd_false has multiple roles (see above). - */ -#define mtbdd_complement ((MTBDD)0x8000000000000000LL) -#define mtbdd_false ((MTBDD)0) -#define mtbdd_true (mtbdd_false|mtbdd_complement) -#define mtbdd_invalid ((MTBDD)0xffffffffffffffffLL) - -/** - * Initialize MTBDD functionality. - * This initializes internal and external referencing datastructures, - * and registers them in the garbage collection framework. - */ -void sylvan_init_mtbdd(); - -/** - * Create a MTBDD terminal of type and value . - * For custom types, the value could be a pointer to some external struct. - */ -MTBDD mtbdd_makeleaf(uint32_t type, uint64_t value); - -/** - * Create an internal MTBDD node of Boolean variable , with low edge and high edge . - * is a 24-bit integer. - */ -MTBDD mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high); - -/** - * Returns 1 is the MTBDD is a terminal, or 0 otherwise. - */ -int mtbdd_isleaf(MTBDD mtbdd); -#define mtbdd_isnode(mtbdd) (mtbdd_isleaf(mtbdd) ? 0 : 1) - -/** - * For MTBDD terminals, returns and - */ -uint32_t mtbdd_gettype(MTBDD terminal); -uint64_t mtbdd_getvalue(MTBDD terminal); - -/** - * For internal MTBDD nodes, returns , and - */ -uint32_t mtbdd_getvar(MTBDD node); -MTBDD mtbdd_getlow(MTBDD node); -MTBDD mtbdd_gethigh(MTBDD node); - -/** - * Compute the complement of the MTBDD. - * For Boolean MTBDDs, this means "not X". - */ -#define mtbdd_hascomp(dd) ((dd & mtbdd_complement) ? 
1 : 0) -#define mtbdd_comp(dd) (dd ^ mtbdd_complement) -#define mtbdd_not(dd) (dd ^ mtbdd_complement) - -/** - * Create terminals representing int64_t (type 0), double (type 1), or fraction (type 2) values - */ -MTBDD mtbdd_int64(int64_t value); -MTBDD mtbdd_double(double value); -MTBDD mtbdd_fraction(int64_t numer, uint64_t denom); - -/** - * Get the value of a terminal (for Integer, Real and Fraction terminals, types 0, 1 and 2) - */ -int64_t mtbdd_getint64(MTBDD terminal); -double mtbdd_getdouble(MTBDD terminal); -#define mtbdd_getnumer(terminal) ((int32_t)(mtbdd_getvalue(terminal)>>32)) -#define mtbdd_getdenom(terminal) ((uint32_t)(mtbdd_getvalue(terminal)&0xffffffff)) - -/** - * Create the conjunction of variables in arr. - * I.e. arr[0] \and arr[1] \and ... \and arr[length-1] - */ -MTBDD mtbdd_fromarray(uint32_t* arr, size_t length); - -/** - * Create a MTBDD cube representing the conjunction of variables in their positive or negative - * form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any). - * Use cube[idx]==3 for "s=s'" in interleaved variables (matches with next variable) - * is the cube of variables (var1 \and var2 \and ... \and varn) - */ -MTBDD mtbdd_cube(MTBDD variables, uint8_t *cube, MTBDD terminal); - -/** - * Same as mtbdd_cube, but extends with the assignment \to . - * If already assigns a value to the cube, the new value is taken. - * Does not support cube[idx]==3. - */ -#define mtbdd_union_cube(mtbdd, variables, cube, terminal) CALL(mtbdd_union_cube, mtbdd, variables, cube, terminal) -TASK_DECL_4(BDD, mtbdd_union_cube, MTBDD, MTBDD, uint8_t*, MTBDD); - -/** - * Count the number of satisfying assignments (minterms) leading to a non-false leaf - */ -TASK_DECL_2(double, mtbdd_satcount, MTBDD, size_t); -#define mtbdd_satcount(dd, nvars) CALL(mtbdd_satcount, dd, nvars) - -/** - * Count the number of MTBDD leaves (excluding mtbdd_false and mtbdd_true) in the MTBDD - */ -size_t mtbdd_leafcount(MTBDD mtbdd); - -/** - * Count the number of MTBDD nodes and terminals (excluding mtbdd_false and mtbdd_true) in a MTBDD - */ -size_t mtbdd_nodecount(MTBDD mtbdd); - -/** - * Callback function types for binary ("dyadic") and unary ("monadic") operations. - * The callback function returns either the MTBDD that is the result of applying op to the MTBDDs, - * or mtbdd_invalid if op cannot be applied. - * The binary function may swap the two parameters (if commutative) to improve caching. - * The unary function is allowed an extra parameter (be careful of caching) - */ -LACE_TYPEDEF_CB(MTBDD, mtbdd_apply_op, MTBDD*, MTBDD*); -LACE_TYPEDEF_CB(MTBDD, mtbdd_applyp_op, MTBDD*, MTBDD*, size_t); -LACE_TYPEDEF_CB(MTBDD, mtbdd_uapply_op, MTBDD, size_t); - -/** - * Apply a binary operation to and . - * Callback is consulted before the cache, thus the application to terminals is not cached. - */ -TASK_DECL_3(MTBDD, mtbdd_apply, MTBDD, MTBDD, mtbdd_apply_op); -#define mtbdd_apply(a, b, op) CALL(mtbdd_apply, a, b, op) - -/** - * Apply a binary operation with id to and with parameter
    - * Callback is consulted before the cache, thus the application to terminals is not cached. - */ -TASK_DECL_5(MTBDD, mtbdd_applyp, MTBDD, MTBDD, size_t, mtbdd_applyp_op, uint64_t); -#define mtbdd_applyp(a, b, p, op, opid) CALL(mtbdd_applyp, a, b, p, op, opid) - -/** - * Apply a unary operation to
    . - * Callback is consulted after the cache, thus the application to a terminal is cached. - */ -TASK_DECL_3(MTBDD, mtbdd_uapply, MTBDD, mtbdd_uapply_op, size_t); -#define mtbdd_uapply(dd, op, param) CALL(mtbdd_uapply, dd, op, param) - -/** - * Callback function types for abstraction. - * MTBDD mtbdd_abstract_op(MTBDD a, MTBDD b, int k). - * The function is either called with k==0 (apply to two arguments) or k>0 (k skipped BDD variables) - * k == 0 => res := apply op to a and b - * k > 0 => res := apply op to op(a, a, k-1) and op(a, a, k-1) - */ -LACE_TYPEDEF_CB(MTBDD, mtbdd_abstract_op, MTBDD, MTBDD, int); - -/** - * Abstract the variables in from using the binary operation . - */ -TASK_DECL_3(MTBDD, mtbdd_abstract, MTBDD, MTBDD, mtbdd_abstract_op); -#define mtbdd_abstract(a, v, op) CALL(mtbdd_abstract, a, v, op) - -/** - * Unary operation Negate. - * Supported domains: Integer, Real, Fraction - */ -TASK_DECL_2(MTBDD, mtbdd_op_negate, MTBDD, size_t); - -/** - * Binary operation Plus (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDDs, mtbdd_false is interpreted as "0" or "0.0". - */ -TASK_DECL_2(MTBDD, mtbdd_op_plus, MTBDD*, MTBDD*); -TASK_DECL_3(MTBDD, mtbdd_abstract_op_plus, MTBDD, MTBDD, int); - -/** - * Binary operation Minus (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDDs, mtbdd_false is interpreted as "0" or "0.0". - */ -TASK_DECL_2(MTBDD, mtbdd_op_minus, MTBDD*, MTBDD*); - -/** - * Binary operation Times (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). - */ -TASK_DECL_2(MTBDD, mtbdd_op_times, MTBDD*, MTBDD*); -TASK_DECL_3(MTBDD, mtbdd_abstract_op_times, MTBDD, MTBDD, int); - -/** - * Binary operation Minimum (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. - */ -TASK_DECL_2(MTBDD, mtbdd_op_min, MTBDD*, MTBDD*); -TASK_DECL_3(MTBDD, mtbdd_abstract_op_min, MTBDD, MTBDD, int); - -/** - * Binary operation Maximum (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. 
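
Each of the operator declarations above pairs with mtbdd_apply; the convenience macros that follow (mtbdd_plus, mtbdd_times, and so on) are thin wrappers around exactly these calls. A small sketch with Double terminals, assuming an initialized runtime:

#include <assert.h>
#include <sylvan.h>

static void arithmetic_sketch(void)
{
    LACE_ME;                                   /* assumed Lace worker context */

    MTBDD two  = mtbdd_double(2.0);
    MTBDD five = mtbdd_double(5.0);

    MTBDD sum  = mtbdd_apply(two, five, TASK(mtbdd_op_plus));
    MTBDD prod = mtbdd_apply(two, five, TASK(mtbdd_op_times));

    assert(mtbdd_getdouble(sum)  == 7.0);
    assert(mtbdd_getdouble(prod) == 10.0);
}
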
- */ -TASK_DECL_2(MTBDD, mtbdd_op_max, MTBDD*, MTBDD*); -TASK_DECL_3(MTBDD, mtbdd_abstract_op_max, MTBDD, MTBDD, int); - -/** - * Compute -a - */ -#define mtbdd_negate(a) mtbdd_uapply(a, TASK(mtbdd_op_negate), 0) - -/** - * Compute a + b - */ -#define mtbdd_plus(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_plus)) - -/** - * Compute a - b - */ -#define mtbdd_minus(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_minus)) - -/** - * Compute a * b - */ -#define mtbdd_times(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_times)) - -/** - * Compute min(a, b) - */ -#define mtbdd_min(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_min)) - -/** - * Compute max(a, b) - */ -#define mtbdd_max(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_max)) - -/** - * Abstract the variables in from by taking the sum of all values - */ -#define mtbdd_abstract_plus(dd, v) mtbdd_abstract(dd, v, TASK(mtbdd_abstract_op_plus)) - -/** - * Abstract the variables in from by taking the product of all values - */ -#define mtbdd_abstract_times(dd, v) mtbdd_abstract(dd, v, TASK(mtbdd_abstract_op_times)) - -/** - * Abstract the variables in from by taking the minimum of all values - */ -#define mtbdd_abstract_min(dd, v) mtbdd_abstract(dd, v, TASK(mtbdd_abstract_op_min)) - -/** - * Abstract the variables in from by taking the maximum of all values - */ -#define mtbdd_abstract_max(dd, v) mtbdd_abstract(dd, v, TASK(mtbdd_abstract_op_max)) - -/** - * Compute IF THEN ELSE . - * must be a Boolean MTBDD (or standard BDD). - */ -TASK_DECL_3(MTBDD, mtbdd_ite, MTBDD, MTBDD, MTBDD); -#define mtbdd_ite(f, g, h) CALL(mtbdd_ite, f, g, h); - -/** - * Multiply and , and abstract variables using summation. - * This is similar to the "and_exists" operation in BDDs. - */ -TASK_DECL_3(MTBDD, mtbdd_and_exists, MTBDD, MTBDD, MTBDD); -#define mtbdd_and_exists(a, b, vars) CALL(mtbdd_and_exists, a, b, vars) - -/** - * Monad that converts double to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, mtbdd_op_threshold_double, MTBDD, size_t) - -/** - * Monad that converts double to a Boolean MTBDD, translate terminals > value to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, mtbdd_op_strict_threshold_double, MTBDD, size_t) - -/** - * Convert double to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, mtbdd_threshold_double, MTBDD, double); -#define mtbdd_threshold_double(dd, value) CALL(mtbdd_threshold_double, dd, value) - -/** - * Convert double to a Boolean MTBDD, translate terminals > value to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, mtbdd_strict_threshold_double, MTBDD, double); -#define mtbdd_strict_threshold_double(dd, value) CALL(mtbdd_strict_threshold_double, dd, value) - -/** - * For two Double MTBDDs, calculate whether they are equal module some value epsilon - * i.e. abs(a-b) < e - */ -TASK_DECL_3(MTBDD, mtbdd_equal_norm_d, MTBDD, MTBDD, double); -#define mtbdd_equal_norm_d(a, b, epsilon) CALL(mtbdd_equal_norm_d, a, b, epsilon) - -/** - * For two Double MTBDDs, calculate whether they are equal modulo some value epsilon - * This version computes the relative difference vs the value in a. - * i.e. abs((a-b)/a) < e - */ -TASK_DECL_3(MTBDD, mtbdd_equal_norm_rel_d, MTBDD, MTBDD, double); -#define mtbdd_equal_norm_rel_d(a, b, epsilon) CALL(mtbdd_equal_norm_rel_d, a, b, epsilon) - -/** - * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) <= b(s), mtbdd_false otherwise. - * For domains not in a / b, assume True. 
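
mtbdd_and_exists fuses pointwise multiplication with summation over a set of variables, which is what symbolic matrix-vector products need; it computes the same function as multiplying first and abstracting afterwards (with Double leaves, the summation order can in principle differ). A hedged sketch in which the variable roles are illustrative only:

#include <sylvan.h>

/* Sketch: sum over the y variables of M(x,y) * v(y), for Double MTBDDs
 * M (over x and y) and v (over y); y_vars is the cube of the y variables. */
static MTBDD matvec_sketch(MTBDD M, MTBDD v, MTBDD y_vars)
{
    LACE_ME;                                   /* assumed Lace worker context */

    /* One pass: multiply and abstract within the same recursion. */
    MTBDD direct = mtbdd_and_exists(M, v, y_vars);

    /* Two passes computing the same function: multiply, then sum out y. */
    MTBDD prod     = mtbdd_times(M, v);
    MTBDD two_pass = mtbdd_abstract_plus(prod, y_vars);
    (void)two_pass;

    return direct;
}
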
- */ -TASK_DECL_2(MTBDD, mtbdd_leq, MTBDD, MTBDD); -#define mtbdd_leq(a, b) CALL(mtbdd_leq, a, b) - -/** - * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) < b(s), mtbdd_false otherwise. - * For domains not in a / b, assume True. - */ -TASK_DECL_2(MTBDD, mtbdd_less, MTBDD, MTBDD); -#define mtbdd_less(a, b) CALL(mtbdd_less, a, b) - -/** - * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) >= b(s), mtbdd_false otherwise. - * For domains not in a / b, assume True. - */ -TASK_DECL_2(MTBDD, mtbdd_geq, MTBDD, MTBDD); -#define mtbdd_geq(a, b) CALL(mtbdd_geq, a, b) - -/** - * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) > b(s), mtbdd_false otherwise. - * For domains not in a / b, assume True. - */ -TASK_DECL_2(MTBDD, mtbdd_greater, MTBDD, MTBDD); -#define mtbdd_greater(a, b) CALL(mtbdd_greater, a, b) - -/** - * Calculate the support of a MTBDD, i.e. the cube of all variables that appear in the MTBDD nodes. - */ -TASK_DECL_1(MTBDD, mtbdd_support, MTBDD); -#define mtbdd_support(dd) CALL(mtbdd_support, dd) - -/** - * Function composition, for each node with variable which has a pair in , - * replace the node by the result of mtbdd_ite(, , ). - * Each in must be a Boolean MTBDD. - */ -TASK_DECL_2(MTBDD, mtbdd_compose, MTBDD, MTBDDMAP); -#define mtbdd_compose(dd, map) CALL(mtbdd_compose, dd, map) - -/** - * Compute minimal leaf in the MTBDD (for Integer, Double, Rational MTBDDs) - */ -TASK_DECL_1(MTBDD, mtbdd_minimum, MTBDD); -#define mtbdd_minimum(dd) CALL(mtbdd_minimum, dd) - -/** - * Compute maximal leaf in the MTBDD (for Integer, Double, Rational MTBDDs) - */ -TASK_DECL_1(MTBDD, mtbdd_maximum, MTBDD); -#define mtbdd_maximum(dd) CALL(mtbdd_maximum, dd) - -/** - * Given a MTBDD
    <dd> and a cube of variables <variables> expected in <dd>
    , - * mtbdd_enum_first and mtbdd_enum_next enumerate the unique paths in <dd>
    that lead to a non-False leaf. - * - * The function returns the leaf (or mtbdd_false if no new path is found) and encodes the path - * in the supplied array : 0 for a low edge, 1 for a high edge, and 2 if the variable is skipped. - * - * The supplied array must be large enough for all variables in . - * - * Usage: - * MTBDD leaf = mtbdd_enum_first(dd, variables, arr, NULL); - * while (leaf != mtbdd_false) { - * .... // do something with arr/leaf - * leaf = mtbdd_enum_next(dd, variables, arr, NULL); - * } - * - * The callback is an optional function that returns 0 when the given terminal node should be skipped. - */ -typedef int (*mtbdd_enum_filter_cb)(MTBDD); -MTBDD mtbdd_enum_first(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb); -MTBDD mtbdd_enum_next(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb); - -/** - * For debugging. - * Tests if all nodes in the MTBDD are correctly ``marked'' in the nodes table. - * Tests if variables in the internal nodes appear in-order. - * In Debug mode, this will cause assertion failures instead of returning 0. - * Returns 1 if all is fine, or 0 otherwise. - */ -TASK_DECL_1(int, mtbdd_test_isvalid, MTBDD); -#define mtbdd_test_isvalid(mtbdd) CALL(mtbdd_test_isvalid, mtbdd) - -/** - * Write a DOT representation of a MTBDD - * The callback function is required for custom terminals. - */ -typedef void (*print_terminal_label_cb)(FILE *out, uint32_t type, uint64_t value); -void mtbdd_fprintdot(FILE *out, MTBDD mtbdd, print_terminal_label_cb cb); -#define mtbdd_printdot(mtbdd, cb) mtbdd_fprintdot(stdout, mtbdd, cb) - -/** - * MTBDDMAP, maps uint32_t variables to MTBDDs. - * A MTBDDMAP node has variable level, low edge going to the next MTBDDMAP, high edge to the mapped MTBDD - */ -#define mtbdd_map_empty() mtbdd_false -#define mtbdd_map_isempty(map) (map == mtbdd_false ? 1 : 0) -#define mtbdd_map_key(map) mtbdd_getvar(map) -#define mtbdd_map_value(map) mtbdd_gethigh(map) -#define mtbdd_map_next(map) mtbdd_getlow(map) - -/** - * Return 1 if the map contains the key, 0 otherwise. - */ -int mtbdd_map_contains(MTBDDMAP map, uint32_t key); - -/** - * Retrieve the number of keys in the map. - */ -size_t mtbdd_map_count(MTBDDMAP map); - -/** - * Add the pair to the map, overwrites if key already in map. - */ -MTBDDMAP mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value); - -/** - * Add all values from map2 to map1, overwrites if key already in map1. - */ -MTBDDMAP mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2); - -/** - * Remove the key from the map and return the result - */ -MTBDDMAP mtbdd_map_remove(MTBDDMAP map, uint32_t key); - -/** - * Remove all keys in the cube from the map and return the result - */ -MTBDDMAP mtbdd_map_removeall(MTBDDMAP map, MTBDD variables); - -/** - * Custom node types - * Overrides standard hash/equality/notify_on_dead behavior - * hash(value, seed) return hash version - * equals(value1, value2) return 1 if equal, 0 if not equal - * create(&value) replace value by new value for object allocation - * destroy(value) - * NOTE: equals(value1, value2) must imply: hash(value1, seed) == hash(value2,seed) - * NOTE: new value of create must imply: equals(old, new) - */ -typedef uint64_t (*mtbdd_hash_cb)(uint64_t, uint64_t); -typedef int (*mtbdd_equals_cb)(uint64_t, uint64_t); -typedef void (*mtbdd_create_cb)(uint64_t*); -typedef void (*mtbdd_destroy_cb)(uint64_t); - -/** - * Registry callback handlers for . 
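
Registering a custom leaf type supplies the four callbacks above so the unique table can hash, compare, intern and reclaim the custom values; equal values must hash equally, and the value written back by the create callback must compare equal to the original. A sketch for leaves holding heap-allocated strings; all helper names are illustrative, only mtbdd_register_custom_leaf and mtbdd_makeleaf come from this header, and strdup is the POSIX function:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sylvan.h>

static uint64_t str_hash(uint64_t value, uint64_t seed)
{
    const char *s = (const char*)value;
    uint64_t h = seed;
    while (*s) h = h * 1099511628211ULL + (uint64_t)(unsigned char)*s++;  /* FNV-style mix */
    return h;
}

static int str_equals(uint64_t v1, uint64_t v2)
{
    return strcmp((const char*)v1, (const char*)v2) == 0;
}

static void str_create(uint64_t *value)        /* intern: copy into table-owned storage */
{
    *value = (uint64_t)strdup((const char*)*value);
}

static void str_destroy(uint64_t value)        /* called once the leaf is dead */
{
    free((void*)value);
}

static uint32_t string_type;

static void register_string_leaves(void)
{
    string_type = mtbdd_register_custom_leaf(str_hash, str_equals, str_create, str_destroy);
    /* Afterwards: MTBDD leaf = mtbdd_makeleaf(string_type, (uint64_t)"hello"); */
}
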
- */ -uint32_t mtbdd_register_custom_leaf(mtbdd_hash_cb hash_cb, mtbdd_equals_cb equals_cb, mtbdd_create_cb create_cb, mtbdd_destroy_cb destroy_cb); - -/** - * Garbage collection - * Sylvan supplies two default methods to handle references to nodes, but the user - * is encouraged to implement custom handling. Simply add a handler using sylvan_gc_add_mark - * and let the handler call mtbdd_gc_mark_rec for every MTBDD that should be saved - * during garbage collection. - */ - -/** - * Call mtbdd_gc_mark_rec for every mtbdd you want to keep in your custom mark functions. - */ -VOID_TASK_DECL_1(mtbdd_gc_mark_rec, MTBDD); -#define mtbdd_gc_mark_rec(mtbdd) CALL(mtbdd_gc_mark_rec, mtbdd) - -/** - * Default external referencing. During garbage collection, MTBDDs marked with mtbdd_ref will - * be kept in the forest. - * It is recommended to prefer mtbdd_protect and mtbdd_unprotect. - */ -MTBDD mtbdd_ref(MTBDD a); -void mtbdd_deref(MTBDD a); -size_t mtbdd_count_refs(); - -/** - * Default external pointer referencing. During garbage collection, the pointers are followed and the MTBDD - * that they refer to are kept in the forest. - */ -void mtbdd_protect(MTBDD* ptr); -void mtbdd_unprotect(MTBDD* ptr); -size_t mtbdd_count_protected(); - -/** - * If sylvan_set_ondead is set to a callback, then this function marks MTBDDs (terminals). - * When they are dead after the mark phase in garbage collection, the callback is called for marked MTBDDs. - * The ondead callback can either perform cleanup or resurrect dead terminals. - */ -#define mtbdd_notify_ondead(dd) llmsset_notify_ondead(nodes, dd&~mtbdd_complement) - -/** - * Infrastructure for internal references (per-thread, e.g. during MTBDD operations) - * Use mtbdd_refs_push and mtbdd_refs_pop to put MTBDDs on a thread-local reference stack. - * Use mtbdd_refs_spawn and mtbdd_refs_sync around SPAWN and SYNC operations when the result - * of the spawned Task is a MTBDD that must be kept during garbage collection. 
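
The per-thread stack defined below is what the internal operations above (mtbdd_and_exists, mtbdd_support, mtbdd_compose) use to keep intermediate results alive across garbage collection. A sketch of the same discipline in a user-defined Lace task; the operation itself (combining the two cofactors of every node) is purely illustrative, the refs calls are the point, and the operation cache and sylvan_gc_test are omitted:

#include <sylvan.h>

TASK_1(MTBDD, my_cofactor_product, MTBDD, dd)
{
    if (mtbdd_isleaf(dd)) return dd;

    /* Pin the spawned branch and each local result before the next operation,
     * so a garbage collection triggered below cannot reclaim them. */
    mtbdd_refs_spawn(SPAWN(my_cofactor_product, mtbdd_gethigh(dd)));
    MTBDD low  = mtbdd_refs_push(CALL(my_cofactor_product, mtbdd_getlow(dd)));
    MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(my_cofactor_product)));

    MTBDD result = mtbdd_apply(low, high, TASK(mtbdd_op_times));
    mtbdd_refs_pop(2);
    return result;
}
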
- */ -typedef struct mtbdd_refs_internal -{ - size_t r_size, r_count; - size_t s_size, s_count; - MTBDD *results; - Task **spawns; -} *mtbdd_refs_internal_t; - -extern DECLARE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); - -static inline MTBDD -mtbdd_refs_push(MTBDD mtbdd) -{ - LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); - if (mtbdd_refs_key->r_count >= mtbdd_refs_key->r_size) { - mtbdd_refs_key->r_size *= 2; - mtbdd_refs_key->results = (MTBDD*)realloc(mtbdd_refs_key->results, sizeof(MTBDD) * mtbdd_refs_key->r_size); - } - mtbdd_refs_key->results[mtbdd_refs_key->r_count++] = mtbdd; - return mtbdd; -} - -static inline void -mtbdd_refs_pop(int amount) -{ - LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); - mtbdd_refs_key->r_count-=amount; -} - -static inline void -mtbdd_refs_spawn(Task *t) -{ - LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); - if (mtbdd_refs_key->s_count >= mtbdd_refs_key->s_size) { - mtbdd_refs_key->s_size *= 2; - mtbdd_refs_key->spawns = (Task**)realloc(mtbdd_refs_key->spawns, sizeof(Task*) * mtbdd_refs_key->s_size); - } - mtbdd_refs_key->spawns[mtbdd_refs_key->s_count++] = t; -} - -static inline MTBDD -mtbdd_refs_sync(MTBDD result) -{ - LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); - mtbdd_refs_key->s_count--; - return result; -} - -#include "sylvan_mtbdd_storm.h" - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h b/resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h deleted file mode 100644 index 940250b9a..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Internals for MTBDDs - */ - -#ifndef SYLVAN_MTBDD_INT_H -#define SYLVAN_MTBDD_INT_H - -/** - * MTBDD node structure - */ -typedef struct __attribute__((packed)) mtbddnode { - uint64_t a, b; -} * mtbddnode_t; // 16 bytes - -#define GETNODE(mtbdd) ((mtbddnode_t)llmsset_index_to_ptr(nodes, mtbdd&0x000000ffffffffff)) - -/** - * Complement handling macros - */ -#define MTBDD_HASMARK(s) (s&mtbdd_complement?1:0) -#define MTBDD_TOGGLEMARK(s) (s^mtbdd_complement) -#define MTBDD_STRIPMARK(s) (s&~mtbdd_complement) -#define MTBDD_TRANSFERMARK(from, to) (to ^ (from & mtbdd_complement)) -// Equal under mark -#define MTBDD_EQUALM(a, b) ((((a)^(b))&(~mtbdd_complement))==0) - -// Leaf: a = L=1, M, type; b = value -// Node: a = L=0, C, M, high; b = variable, low -// Only complement edge on "high" - -static inline int -mtbddnode_isleaf(mtbddnode_t n) -{ - return n->a & 0x4000000000000000 ? 1 : 0; -} - -static inline uint32_t -mtbddnode_gettype(mtbddnode_t n) -{ - return n->a & 0x00000000ffffffff; -} - -static inline uint64_t -mtbddnode_getvalue(mtbddnode_t n) -{ - return n->b; -} - -static inline int -mtbddnode_getcomp(mtbddnode_t n) -{ - return n->a & 0x8000000000000000 ? 
1 : 0; -} - -static inline uint64_t -mtbddnode_getlow(mtbddnode_t n) -{ - return n->b & 0x000000ffffffffff; // 40 bits -} - -static inline uint64_t -mtbddnode_gethigh(mtbddnode_t n) -{ - return n->a & 0x800000ffffffffff; // 40 bits plus high bit of first -} - -static inline uint32_t -mtbddnode_getvariable(mtbddnode_t n) -{ - return (uint32_t)(n->b >> 40); -} - -static inline int -mtbddnode_getmark(mtbddnode_t n) -{ - return n->a & 0x2000000000000000 ? 1 : 0; -} - -static inline void -mtbddnode_setmark(mtbddnode_t n, int mark) -{ - if (mark) n->a |= 0x2000000000000000; - else n->a &= 0xdfffffffffffffff; -} - -static inline void -mtbddnode_makeleaf(mtbddnode_t n, uint32_t type, uint64_t value) -{ - n->a = 0x4000000000000000 | (uint64_t)type; - n->b = value; -} - -static inline void -mtbddnode_makenode(mtbddnode_t n, uint32_t var, uint64_t low, uint64_t high) -{ - n->a = high; - n->b = ((uint64_t)var)<<40 | low; -} - -static MTBDD -node_getlow(MTBDD mtbdd, mtbddnode_t node) -{ - return MTBDD_TRANSFERMARK(mtbdd, mtbddnode_getlow(node)); -} - -static MTBDD -node_gethigh(MTBDD mtbdd, mtbddnode_t node) -{ - return MTBDD_TRANSFERMARK(mtbdd, mtbddnode_gethigh(node)); -} - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c b/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c deleted file mode 100644 index dab4860c0..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c +++ /dev/null @@ -1,514 +0,0 @@ -/** - * Generate SHA2 structural hashes. - * Hashes are independent of location. - * Mainly useful for debugging purposes. - */ -static void -mtbdd_sha2_rec(MTBDD mtbdd, SHA256_CTX *ctx) -{ - if (mtbdd == sylvan_true || mtbdd == sylvan_false) { - SHA256_Update(ctx, (void*)&mtbdd, sizeof(MTBDD)); - return; - } - - mtbddnode_t node = GETNODE(mtbdd); - if (mtbddnode_isleaf(node)) { - uint64_t val = mtbddnode_getvalue(node); - SHA256_Update(ctx, (void*)&val, sizeof(uint64_t)); - } else if (mtbddnode_getmark(node) == 0) { - mtbddnode_setmark(node, 1); - uint32_t level = mtbddnode_getvariable(node); - if (MTBDD_STRIPMARK(mtbddnode_gethigh(node))) level |= 0x80000000; - SHA256_Update(ctx, (void*)&level, sizeof(uint32_t)); - mtbdd_sha2_rec(mtbddnode_gethigh(node), ctx); - mtbdd_sha2_rec(mtbddnode_getlow(node), ctx); - } -} - -void -mtbdd_getsha(MTBDD mtbdd, char *target) -{ - SHA256_CTX ctx; - SHA256_Init(&ctx); - mtbdd_sha2_rec(mtbdd, &ctx); - if (mtbdd != sylvan_true && mtbdd != sylvan_false) mtbdd_unmark_rec(mtbdd); - SHA256_End(&ctx, target); -} - -/** - * Binary operation Times (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Integer or Double. - * If either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). - */ -TASK_IMPL_2(MTBDD, mtbdd_op_divide, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false; - - // Do not handle Boolean MTBDDs... 
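
These Storm extension operators read the raw 64-bit leaf value and reinterpret it according to the type field, exactly as the accessors in sylvan_mtbdd_int.h above define it: type 0 is int64_t, type 1 is double, and type 2 packs a 32-bit numerator into the upper half and a 32-bit denominator into the lower half. A sketch that decodes a node and a fraction leaf by hand; it assumes the internal header can be included (it is not pulled in by sylvan.h):

#include <assert.h>
#include <stdint.h>
#include <sylvan.h>
#include "sylvan_mtbdd_int.h"   /* internal header providing the accessors used here */

static void node_layout_sketch(void)
{
    struct mtbddnode n;                 /* the packed {a, b} 16-byte pair */

    /* Internal node: variable 7, low edge 3, high edge 5 (unique-table indices). */
    mtbddnode_makenode(&n, 7, 3, 5);
    assert(!mtbddnode_isleaf(&n));
    assert(mtbddnode_getvariable(&n) == 7);
    assert(mtbddnode_getlow(&n) == 3 && mtbddnode_gethigh(&n) == 5);

    /* Fraction leaf 3/4: numerator in the upper 32 bits, denominator below. */
    uint64_t value = ((uint64_t)3 << 32) | 4u;
    mtbddnode_makeleaf(&n, 2, value);
    assert(mtbddnode_isleaf(&n) && mtbddnode_gettype(&n) == 2);
    assert((uint32_t)(mtbddnode_getvalue(&n) >> 32) == 3);
    assert((uint32_t)(mtbddnode_getvalue(&n) & 0xffffffff) == 4);
}
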
- - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - int64_t va = *(int64_t*)(&val_a); - int64_t vb = *(int64_t*)(&val_b); - - if (va == 0) return a; - else if (vb == 0) return b; - else { - MTBDD result; - if (va == 1) result = b; - else if (vb == 1) result = a; - else result = mtbdd_int64(va*vb); - return result; - } - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double vval_a = *(double*)&val_a; - double vval_b = *(double*)&val_b; - if (vval_a == 0.0) return a; - else if (vval_b == 0.0) return b; - else { - MTBDD result; - if (vval_a == 0.0 || vval_b == 1.0) result = a; - result = mtbdd_double(vval_a / vval_b); - return result; - } - } - else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - uint64_t nom_a = val_a>>32; - uint64_t nom_b = val_b>>32; - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - // multiply! - uint32_t c = gcd(denom_b, denom_a); - uint32_t d = gcd(nom_a, nom_b); - nom_a /= d; - denom_a /= c; - nom_a *= (denom_b/c); - denom_a *= (nom_b/d); - // compute result - MTBDD result = mtbdd_fraction(nom_a, denom_a); - return result; - } - } - - return mtbdd_invalid; -} - -/** - * Binary operation Equals (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). - */ -TASK_IMPL_2(MTBDD, mtbdd_op_equals, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_false && b == mtbdd_false) return mtbdd_true; - if (a == mtbdd_true && b == mtbdd_true) return mtbdd_true; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - int64_t va = *(int64_t*)(&val_a); - int64_t vb = *(int64_t*)(&val_b); - if (va == vb) return mtbdd_true; - return mtbdd_false; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double vval_a = *(double*)&val_a; - double vval_b = *(double*)&val_b; - if (vval_a == vval_b) return mtbdd_true; - return mtbdd_false; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - uint64_t nom_a = val_a>>32; - uint64_t nom_b = val_b>>32; - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - if (nom_a == nom_b && denom_a == denom_b) return mtbdd_true; - return mtbdd_false; - } - } - - if (a < b) { - *pa = b; - *pb = a; - } - - return mtbdd_invalid; -} - -/** - * Binary operation Equals (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). 
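
When mtbdd_op_equals above cannot decide at a leaf, it also normalizes its operands (swapping so that the larger index comes first); since the operator is commutative, apply(a, b) and apply(b, a) then share a single cache entry. A usage sketch producing a pointwise Boolean comparison of two Double MTBDDs, assuming an initialized runtime:

#include <sylvan.h>

/* Sketch: Boolean MTBDD that is true exactly where a and b carry equal leaves. */
static MTBDD pointwise_equals(MTBDD a, MTBDD b)
{
    LACE_ME;                                   /* assumed Lace worker context */
    return mtbdd_apply(a, b, TASK(mtbdd_op_equals));
}
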
- */ -TASK_IMPL_2(MTBDD, mtbdd_op_less, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_false && b == mtbdd_false) return mtbdd_true; - if (a == mtbdd_true && b == mtbdd_true) return mtbdd_true; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - int64_t va = *(int64_t*)(&val_a); - int64_t vb = *(int64_t*)(&val_b); - if (va < vb) return mtbdd_true; - return mtbdd_false; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double vval_a = *(double*)&val_a; - double vval_b = *(double*)&val_b; - if (vval_a < vval_b) return mtbdd_true; - return mtbdd_false; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - uint64_t nom_a = val_a>>32; - uint64_t nom_b = val_b>>32; - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - return nom_a * denom_b < nom_b * denom_a ? mtbdd_true : mtbdd_false; - } - } - - return mtbdd_invalid; -} - -/** - * Binary operation Equals (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). - */ -TASK_IMPL_2(MTBDD, mtbdd_op_less_or_equal, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - if (a == mtbdd_false && b == mtbdd_false) return mtbdd_true; - if (a == mtbdd_true && b == mtbdd_true) return mtbdd_true; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - int64_t va = *(int64_t*)(&val_a); - int64_t vb = *(int64_t*)(&val_b); - return va <= vb ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double vval_a = *(double*)&val_a; - double vval_b = *(double*)&val_b; - if (vval_a <= vval_b) return mtbdd_true; - return mtbdd_false; - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - // both fraction - uint64_t nom_a = val_a>>32; - uint64_t nom_b = val_b>>32; - uint64_t denom_a = val_a&0xffffffff; - uint64_t denom_b = val_b&0xffffffff; - nom_a *= denom_b; - nom_b *= denom_a; - return nom_a <= nom_b ? mtbdd_true : mtbdd_false; - } - } - - return mtbdd_invalid; -} - -/** - * Binary operation Pow (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). 
- */ -TASK_IMPL_2(MTBDD, mtbdd_op_pow, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - assert(0); - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double vval_a = *(double*)&val_a; - double vval_b = *(double*)&val_b; - return mtbdd_double(pow(vval_a, vval_b)); - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - assert(0); - } - } - - return mtbdd_invalid; -} - -/** - * Binary operation Mod (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). - */ -TASK_IMPL_2(MTBDD, mtbdd_op_mod, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - assert(0); - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double vval_a = *(double*)&val_a; - double vval_b = *(double*)&val_b; - return mtbdd_double(fmod(vval_a, vval_b)); - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - assert(0); - } - } - - return mtbdd_invalid; -} - -/** - * Binary operation Log (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). - */ -TASK_IMPL_2(MTBDD, mtbdd_op_logxy, MTBDD*, pa, MTBDD*, pb) -{ - MTBDD a = *pa, b = *pb; - - mtbddnode_t na = GETNODE(a); - mtbddnode_t nb = GETNODE(b); - - if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { - uint64_t val_a = mtbddnode_getvalue(na); - uint64_t val_b = mtbddnode_getvalue(nb); - if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { - assert(0); - } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { - // both double - double vval_a = *(double*)&val_a; - double vval_b = *(double*)&val_b; - return mtbdd_double(log(vval_a) / log(vval_b)); - } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { - assert(0); - } - } - - return mtbdd_invalid; -} - -TASK_IMPL_2(MTBDD, mtbdd_op_not_zero, MTBDD, a, size_t, v) -{ - /* We only expect "double" terminals, or false */ - if (a == mtbdd_false) return mtbdd_false; - if (a == mtbdd_true) return mtbdd_true; - - // a != constant - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - if (mtbddnode_gettype(na) == 0) { - return mtbdd_getint64(a) != 0 ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 1) { - return mtbdd_getdouble(a) != 0.0 ? mtbdd_true : mtbdd_false; - } else if (mtbddnode_gettype(na) == 2) { - return mtbdd_getnumer(a) != 0 ? mtbdd_true : mtbdd_false; - } - } - - // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). 
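/*
 * Illustrative usage sketch (not part of the original sylvan sources): the leaf
 * handlers in this file are meant to be driven through mtbdd_apply/mtbdd_uapply,
 * via the wrapper macros declared in sylvan_mtbdd_storm.h further down in this
 * patch (mtbdd_divide, mtbdd_equals, mtbdd_not_zero, ...). It assumes Lace and
 * the MTBDD module were already initialized by the caller; the function name is
 * hypothetical.
 */
static void demo_apply_usage(void)
{
    LACE_ME;                              /* bind a Lace worker, as the C++ wrappers do */
    MTBDD six  = mtbdd_double(6.0);
    MTBDD two  = mtbdd_double(2.0);
    MTBDD quot = mtbdd_divide(six, two);  /* mtbdd_apply(six, two, TASK(mtbdd_op_divide)) -> 3.0 leaf */
    MTBDD eq   = mtbdd_equals(quot, mtbdd_double(3.0)); /* mtbdd_true for equal double leaves */
    MTBDD nz   = mtbdd_not_zero(quot);    /* unary op, driven through mtbdd_uapply */
    (void)eq; (void)nz;
}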
- (void)v; - - return mtbdd_invalid; -} - -TASK_IMPL_1(MTBDD, mtbdd_not_zero, MTBDD, dd) -{ - return mtbdd_uapply(dd, TASK(mtbdd_op_not_zero), 0); -} - -TASK_IMPL_2(MTBDD, mtbdd_op_floor, MTBDD, a, size_t, v) -{ - /* We only expect "double" terminals, or false */ - if (a == mtbdd_false) return mtbdd_false; - if (a == mtbdd_true) return mtbdd_true; - - // a != constant - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - if (mtbddnode_gettype(na) == 0) { - return a; - } else if (mtbddnode_gettype(na) == 1) { - MTBDD result = mtbdd_double(floor(mtbdd_getdouble(a))); - return result; - } else if (mtbddnode_gettype(na) == 2) { - MTBDD result = mtbdd_fraction(mtbdd_getnumer(a) / mtbdd_getdenom(a), 1); - return result; - } - } - - // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). - (void)v; - - return mtbdd_invalid; -} - -TASK_IMPL_1(MTBDD, mtbdd_floor, MTBDD, dd) -{ - return mtbdd_uapply(dd, TASK(mtbdd_op_floor), 0); -} - -TASK_IMPL_2(MTBDD, mtbdd_op_ceil, MTBDD, a, size_t, v) -{ - /* We only expect "double" terminals, or false */ - if (a == mtbdd_false) return mtbdd_false; - if (a == mtbdd_true) return mtbdd_true; - - // a != constant - mtbddnode_t na = GETNODE(a); - - if (mtbddnode_isleaf(na)) { - if (mtbddnode_gettype(na) == 0) { - return a; - } else if (mtbddnode_gettype(na) == 1) { - MTBDD result = mtbdd_double(ceil(mtbdd_getdouble(a))); - return result; - } else if (mtbddnode_gettype(na) == 2) { - MTBDD result = mtbdd_fraction(mtbdd_getnumer(a) / mtbdd_getdenom(a) + 1, 1); - return result; - } - } - - // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). - (void)v; - - return mtbdd_invalid; -} - -TASK_IMPL_1(MTBDD, mtbdd_ceil, MTBDD, dd) -{ - return mtbdd_uapply(dd, TASK(mtbdd_op_ceil), 0); -} - -TASK_IMPL_2(MTBDD, mtbdd_op_bool_to_double, MTBDD, a, size_t, v) -{ - /* We only expect "double" terminals, or false */ - if (a == mtbdd_false) return mtbdd_double(0); - if (a == mtbdd_true) return mtbdd_double(1.0); - - // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). - (void)v; - - return mtbdd_invalid; -} - -TASK_IMPL_1(MTBDD, mtbdd_bool_to_double, MTBDD, dd) -{ - return mtbdd_uapply(dd, TASK(mtbdd_op_bool_to_double), 0); -} - -TASK_IMPL_2(MTBDD, mtbdd_op_bool_to_int64, MTBDD, a, size_t, v) -{ - /* We only expect "double" terminals, or false */ - if (a == mtbdd_false) return mtbdd_int64(0); - if (a == mtbdd_true) return mtbdd_int64(1); - - // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). - (void)v; - - return mtbdd_invalid; -} - -TASK_IMPL_1(MTBDD, mtbdd_bool_to_int64, MTBDD, dd) -{ - return mtbdd_uapply(dd, TASK(mtbdd_op_bool_to_int64), 0); -} - -/** - * Calculate the number of satisfying variable assignments according to . - */ -TASK_IMPL_2(double, mtbdd_non_zero_count, MTBDD, dd, size_t, nvars) -{ - /* Trivial cases */ - if (dd == mtbdd_false) return 0.0; - - mtbddnode_t na = GETNODE(dd); - - if (mtbdd_isleaf(dd)) { - if (mtbddnode_gettype(na) == 0) { - return mtbdd_getint64(dd) != 0 ? powl(2.0L, nvars) : 0.0; - } else if (mtbddnode_gettype(na) == 1) { - return mtbdd_getdouble(dd) != 0 ? powl(2.0L, nvars) : 0.0; - } else if (mtbddnode_gettype(na) == 2) { - return mtbdd_getnumer(dd) != 0 ? 
powl(2.0L, nvars) : 0.0; - } - } - - /* Perhaps execute garbage collection */ - sylvan_gc_test(); - - union { - double d; - uint64_t s; - } hack; - - /* Consult cache */ - if (cache_get3(CACHE_MTBDD_NONZERO_COUNT, dd, 0, nvars, &hack.s)) { - sylvan_stats_count(CACHE_MTBDD_NONZERO_COUNT); - return hack.d; - } - - SPAWN(mtbdd_non_zero_count, mtbdd_gethigh(dd), nvars-1); - double low = CALL(mtbdd_non_zero_count, mtbdd_getlow(dd), nvars-1); - hack.d = low + SYNC(mtbdd_non_zero_count); - - cache_put3(CACHE_MTBDD_NONZERO_COUNT, dd, 0, nvars, hack.s); - return hack.d; -} - -int mtbdd_iszero(MTBDD dd) { - if (mtbdd_gettype(dd) == 0) { - return mtbdd_getint64(dd) == 0; - } else if (mtbdd_gettype(dd) == 1) { - return mtbdd_getdouble(dd) == 0; - } else if (mtbdd_gettype(dd) == 2) { - return mtbdd_getnumer(dd) == 0; - } - return 0; -} - -int mtbdd_isnonzero(MTBDD dd) { - return mtbdd_iszero(dd) ? 0 : 1; -} \ No newline at end of file diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h b/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h deleted file mode 100644 index 38fa6668b..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h +++ /dev/null @@ -1,111 +0,0 @@ -void mtbdd_getsha(MTBDD mtbdd, char *target); // target must be at least 65 bytes... - -/** - * Binary operation Divide (for MTBDDs of same type) - * Only for MTBDDs where all leaves are Integer or Double. - * If either operand is mtbdd_false (not defined), - * then the result is mtbdd_false (i.e. not defined). - */ -TASK_DECL_2(MTBDD, mtbdd_op_divide, MTBDD*, MTBDD*); -#define mtbdd_divide(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_divide)) - -/** - * Binary operation equals (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. - */ -TASK_DECL_2(MTBDD, mtbdd_op_equals, MTBDD*, MTBDD*); -#define mtbdd_equals(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_equals)) - -/** - * Binary operation Less (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. - */ -TASK_DECL_2(MTBDD, mtbdd_op_less, MTBDD*, MTBDD*); -#define mtbdd_less_as_bdd(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_less)) - -/** - * Binary operation Less (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. - */ -TASK_DECL_2(MTBDD, mtbdd_op_less_or_equal, MTBDD*, MTBDD*); -#define mtbdd_less_or_equal_as_bdd(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_less_or_equal)) - -/** - * Binary operation Pow (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Integer, Double or a Fraction. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. - */ -TASK_DECL_2(MTBDD, mtbdd_op_pow, MTBDD*, MTBDD*); -#define mtbdd_pow(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_pow)) - -/** - * Binary operation Mod (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Integer, Double or a Fraction. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. 
- */ -TASK_DECL_2(MTBDD, mtbdd_op_mod, MTBDD*, MTBDD*); -#define mtbdd_mod(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_mod)) - -/** - * Binary operation Log (for MTBDDs of same type) - * Only for MTBDDs where either all leaves are Double or a Fraction. - * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), - * then the result is the other operand. - */ -TASK_DECL_2(MTBDD, mtbdd_op_logxy, MTBDD*, MTBDD*); -#define mtbdd_logxy(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_logxy)) - -/** - * Monad that converts double to a Boolean MTBDD, translate terminals != 0 to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, mtbdd_op_not_zero, MTBDD, size_t) -TASK_DECL_1(MTBDD, mtbdd_not_zero, MTBDD) -#define mtbdd_not_zero(dd) CALL(mtbdd_not_zero, dd) - -/** - * Monad that floors all values Double and Fraction values. - */ -TASK_DECL_2(MTBDD, mtbdd_op_floor, MTBDD, size_t) -TASK_DECL_1(MTBDD, mtbdd_floor, MTBDD) -#define mtbdd_floor(dd) CALL(mtbdd_floor, dd) - -/** - * Monad that ceils all values Double and Fraction values. - */ -TASK_DECL_2(MTBDD, mtbdd_op_ceil, MTBDD, size_t) -TASK_DECL_1(MTBDD, mtbdd_ceil, MTBDD) -#define mtbdd_ceil(dd) CALL(mtbdd_ceil, dd) - -/** - * Monad that converts Boolean to a Double MTBDD, translate terminals true to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, mtbdd_op_bool_to_double, MTBDD, size_t) -TASK_DECL_1(MTBDD, mtbdd_bool_to_double, MTBDD) -#define mtbdd_bool_to_double(dd) CALL(mtbdd_bool_to_double, dd) - -/** - * Monad that converts Boolean to a uint MTBDD, translate terminals true to 1 and to 0 otherwise; - */ -TASK_DECL_2(MTBDD, mtbdd_op_bool_to_int64, MTBDD, size_t) -TASK_DECL_1(MTBDD, mtbdd_bool_to_int64, MTBDD) -#define mtbdd_bool_to_int64(dd) CALL(mtbdd_bool_to_int64, dd) - -/** - * Count the number of assignments (minterms) leading to a non-zero - */ -TASK_DECL_2(double, mtbdd_non_zero_count, MTBDD, size_t); -#define mtbdd_non_zero_count(dd, nvars) CALL(mtbdd_non_zero_count, dd, nvars) - -// Checks whether the given MTBDD (does represents a zero leaf. -int mtbdd_iszero(MTBDD); -int mtbdd_isnonzero(MTBDD); - -#define mtbdd_regular(dd) (dd & ~mtbdd_complement) diff --git a/resources/3rdparty/sylvan/src/sylvan_obj.cpp b/resources/3rdparty/sylvan/src/sylvan_obj.cpp deleted file mode 100644 index 07a232626..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_obj.cpp +++ /dev/null @@ -1,1040 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include - -using namespace sylvan; - -/*** - * Implementation of class Bdd - */ - -int -Bdd::operator==(const Bdd& other) const -{ - return bdd == other.bdd; -} - -int -Bdd::operator!=(const Bdd& other) const -{ - return bdd != other.bdd; -} - -Bdd -Bdd::operator=(const Bdd& right) -{ - bdd = right.bdd; - return *this; -} - -int -Bdd::operator<=(const Bdd& other) const -{ - // TODO: better implementation, since we are not interested in the BDD result - LACE_ME; - BDD r = sylvan_ite(this->bdd, sylvan_not(other.bdd), sylvan_false); - return r == sylvan_false; -} - -int -Bdd::operator>=(const Bdd& other) const -{ - // TODO: better implementation, since we are not interested in the BDD result - return other <= *this; -} - -int -Bdd::operator<(const Bdd& other) const -{ - return bdd != other.bdd && *this <= other; -} - -int -Bdd::operator>(const Bdd& other) const -{ - return bdd != other.bdd && *this >= other; -} - -Bdd -Bdd::operator!() const -{ - return Bdd(sylvan_not(bdd)); -} - -Bdd -Bdd::operator~() const -{ - return Bdd(sylvan_not(bdd)); -} - -Bdd -Bdd::operator*(const Bdd& other) const -{ - LACE_ME; - return Bdd(sylvan_and(bdd, other.bdd)); -} - -Bdd -Bdd::operator*=(const Bdd& other) -{ - LACE_ME; - bdd = sylvan_and(bdd, other.bdd); - return *this; -} - -Bdd -Bdd::operator&(const Bdd& other) const -{ - LACE_ME; - return Bdd(sylvan_and(bdd, other.bdd)); -} - -Bdd -Bdd::operator&=(const Bdd& other) -{ - LACE_ME; - bdd = sylvan_and(bdd, other.bdd); - return *this; -} - -Bdd -Bdd::operator+(const Bdd& other) const -{ - LACE_ME; - return Bdd(sylvan_or(bdd, other.bdd)); -} - -Bdd -Bdd::operator+=(const Bdd& other) -{ - LACE_ME; - bdd = sylvan_or(bdd, other.bdd); - return *this; -} - -Bdd -Bdd::operator|(const Bdd& other) const -{ - LACE_ME; - return Bdd(sylvan_or(bdd, other.bdd)); -} - -Bdd -Bdd::operator|=(const Bdd& other) -{ - LACE_ME; - bdd = sylvan_or(bdd, other.bdd); - return *this; -} - -Bdd -Bdd::operator^(const Bdd& other) const -{ - LACE_ME; - return Bdd(sylvan_xor(bdd, other.bdd)); -} - -Bdd -Bdd::operator^=(const Bdd& other) -{ - LACE_ME; - bdd = sylvan_xor(bdd, other.bdd); - return *this; -} - -Bdd -Bdd::operator-(const Bdd& other) const -{ - LACE_ME; - return Bdd(sylvan_and(bdd, sylvan_not(other.bdd))); -} - -Bdd -Bdd::operator-=(const Bdd& other) -{ - LACE_ME; - bdd = sylvan_and(bdd, sylvan_not(other.bdd)); - return *this; -} - -Bdd -Bdd::AndAbstract(const Bdd &g, const BddSet &cube) const -{ - LACE_ME; - return sylvan_and_exists(bdd, g.bdd, cube.set.bdd); -} - -Bdd -Bdd::ExistAbstract(const BddSet &cube) const -{ - LACE_ME; - return sylvan_exists(bdd, cube.set.bdd); -} - -Bdd -Bdd::UnivAbstract(const BddSet &cube) const -{ - LACE_ME; - return sylvan_forall(bdd, cube.set.bdd); -} - -Bdd -Bdd::Ite(const Bdd &g, const Bdd &h) const -{ - LACE_ME; - return sylvan_ite(bdd, g.bdd, h.bdd); -} - -Bdd -Bdd::And(const Bdd &g) const -{ - LACE_ME; - return sylvan_and(bdd, g.bdd); -} - -Bdd -Bdd::Or(const Bdd &g) const -{ - LACE_ME; - return sylvan_or(bdd, g.bdd); -} - -Bdd -Bdd::Nand(const Bdd &g) const -{ - LACE_ME; - return sylvan_nand(bdd, g.bdd); -} - -Bdd -Bdd::Nor(const Bdd &g) const -{ - LACE_ME; - return sylvan_nor(bdd, g.bdd); -} - -Bdd -Bdd::Xor(const Bdd &g) const -{ - LACE_ME; - return sylvan_xor(bdd, g.bdd); -} - -Bdd -Bdd::Xnor(const Bdd &g) const -{ - LACE_ME; - return sylvan_equiv(bdd, g.bdd); -} - -int -Bdd::Leq(const Bdd &g) const -{ - // TODO: better implementation, since we are not interested in the BDD result - LACE_ME; - BDD r = sylvan_ite(bdd, 
sylvan_not(g.bdd), sylvan_false); - return r == sylvan_false; -} - -Bdd -Bdd::RelPrev(const Bdd& relation, const BddSet& cube) const -{ - LACE_ME; - return sylvan_relprev(relation.bdd, bdd, cube.set.bdd); -} - -Bdd -Bdd::RelNext(const Bdd &relation, const BddSet &cube) const -{ - LACE_ME; - return sylvan_relnext(bdd, relation.bdd, cube.set.bdd); -} - -Bdd -Bdd::Closure() const -{ - LACE_ME; - return sylvan_closure(bdd); -} - -Bdd -Bdd::Constrain(const Bdd &c) const -{ - LACE_ME; - return sylvan_constrain(bdd, c.bdd); -} - -Bdd -Bdd::Restrict(const Bdd &c) const -{ - LACE_ME; - return sylvan_restrict(bdd, c.bdd); -} - -Bdd -Bdd::Compose(const BddMap &m) const -{ - LACE_ME; - return sylvan_compose(bdd, m.bdd); -} - -Bdd -Bdd::Permute(const std::vector& from, const std::vector& to) const -{ - LACE_ME; - - /* Create a map */ - BddMap map; - for (int i=from.size()-1; i>=0; i--) { - map.put(from[i], Bdd::bddVar(to[i])); - } - - return sylvan_compose(bdd, map.bdd); -} - -Bdd -Bdd::Support() const -{ - LACE_ME; - return sylvan_support(bdd); -} - -BDD -Bdd::GetBDD() const -{ - return bdd; -} - -void -Bdd::PrintDot(FILE *out) const -{ - sylvan_fprintdot(out, bdd); -} - -void -Bdd::GetShaHash(char *string) const -{ - sylvan_getsha(bdd, string); -} - -std::string -Bdd::GetShaHash() const -{ - char buf[65]; - sylvan_getsha(bdd, buf); - return std::string(buf); -} - -double -Bdd::SatCount(const BddSet &variables) const -{ - LACE_ME; - return sylvan_satcount(bdd, variables.set.bdd); -} - -double -Bdd::SatCount(size_t nvars) const -{ - LACE_ME; - // Note: the mtbdd_satcount can be called without initializing the MTBDD module. - return mtbdd_satcount(bdd, nvars); -} - -void -Bdd::PickOneCube(const BddSet &variables, uint8_t *values) const -{ - LACE_ME; - sylvan_sat_one(bdd, variables.set.bdd, values); -} - -std::vector -Bdd::PickOneCube(const BddSet &variables) const -{ - std::vector result = std::vector(); - - BDD bdd = this->bdd; - BDD vars = variables.set.bdd; - - if (bdd == sylvan_false) return result; - - for (; !sylvan_set_isempty(vars); vars = sylvan_set_next(vars)) { - uint32_t var = sylvan_set_var(vars); - if (bdd == sylvan_true) { - // pick 0 - result.push_back(false); - } else { - if (sylvan_var(bdd) != var) { - // pick 0 - result.push_back(false); - } else { - if (sylvan_low(bdd) == sylvan_false) { - // pick 1 - result.push_back(true); - bdd = sylvan_high(bdd); - } else { - // pick 0 - result.push_back(false); - bdd = sylvan_low(bdd); - } - } - } - } - - return result; -} - -Bdd -Bdd::PickOneCube() const -{ - LACE_ME; - return Bdd(sylvan_sat_one_bdd(bdd)); -} - -Bdd -Bdd::UnionCube(const BddSet &variables, uint8_t *values) const -{ - LACE_ME; - return sylvan_union_cube(bdd, variables.set.bdd, values); -} - -Bdd -Bdd::UnionCube(const BddSet &variables, std::vector values) const -{ - LACE_ME; - uint8_t *data = values.data(); - return sylvan_union_cube(bdd, variables.set.bdd, data); -} - -/** - * @brief Generate a cube representing a set of variables - */ -Bdd -Bdd::VectorCube(const std::vector variables) -{ - Bdd result = Bdd::bddOne(); - for (int i=variables.size()-1; i>=0; i--) { - result *= variables[i]; - } - return result; -} - -/** - * @brief Generate a cube representing a set of variables - */ -Bdd -Bdd::VariablesCube(std::vector variables) -{ - BDD result = sylvan_true; - for (int i=variables.size()-1; i>=0; i--) { - result = sylvan_makenode(variables[i], sylvan_false, result); - } - return result; -} - -size_t -Bdd::NodeCount() const -{ - return sylvan_nodecount(bdd); -} - -Bdd 
-Bdd::bddOne() -{ - return sylvan_true; -} - -Bdd -Bdd::bddZero() -{ - return sylvan_false; -} - -Bdd -Bdd::bddVar(uint32_t index) -{ - LACE_ME; - return sylvan_ithvar(index); -} - -Bdd -Bdd::bddCube(const BddSet &variables, uint8_t *values) -{ - LACE_ME; - return sylvan_cube(variables.set.bdd, values); -} - -Bdd -Bdd::bddCube(const BddSet &variables, std::vector values) -{ - LACE_ME; - uint8_t *data = values.data(); - return sylvan_cube(variables.set.bdd, data); -} - -int -Bdd::isConstant() const -{ - return bdd == sylvan_true || bdd == sylvan_false; -} - -int -Bdd::isTerminal() const -{ - return bdd == sylvan_true || bdd == sylvan_false; -} - -int -Bdd::isOne() const -{ - return bdd == sylvan_true; -} - -int -Bdd::isZero() const -{ - return bdd == sylvan_false; -} - -uint32_t -Bdd::TopVar() const -{ - return sylvan_var(bdd); -} - -Bdd -Bdd::Then() const -{ - return Bdd(sylvan_high(bdd)); -} - -Bdd -Bdd::Else() const -{ - return Bdd(sylvan_low(bdd)); -} - -/*** - * Implementation of class BddMap - */ - -BddMap::BddMap(uint32_t key_variable, const Bdd value) -{ - bdd = sylvan_map_add(sylvan_map_empty(), key_variable, value.bdd); -} - - -BddMap -BddMap::operator+(const Bdd& other) const -{ - return BddMap(sylvan_map_addall(bdd, other.bdd)); -} - -BddMap -BddMap::operator+=(const Bdd& other) -{ - bdd = sylvan_map_addall(bdd, other.bdd); - return *this; -} - -BddMap -BddMap::operator-(const Bdd& other) const -{ - return BddMap(sylvan_map_removeall(bdd, other.bdd)); -} - -BddMap -BddMap::operator-=(const Bdd& other) -{ - bdd = sylvan_map_removeall(bdd, other.bdd); - return *this; -} - -void -BddMap::put(uint32_t key, Bdd value) -{ - bdd = sylvan_map_add(bdd, key, value.bdd); -} - -void -BddMap::removeKey(uint32_t key) -{ - bdd = sylvan_map_remove(bdd, key); -} - -size_t -BddMap::size() const -{ - return sylvan_map_count(bdd); -} - -int -BddMap::isEmpty() const -{ - return sylvan_map_isempty(bdd); -} - - -/*** - * Implementation of class Mtbdd - */ - -Mtbdd -Mtbdd::int64Terminal(int64_t value) -{ - return mtbdd_int64(value); -} - -Mtbdd -Mtbdd::doubleTerminal(double value) -{ - return mtbdd_double(value); -} - -Mtbdd -Mtbdd::fractionTerminal(int64_t nominator, uint64_t denominator) -{ - return mtbdd_fraction(nominator, denominator); -} - -Mtbdd -Mtbdd::terminal(uint32_t type, uint64_t value) -{ - return mtbdd_makeleaf(type, value); -} - -Mtbdd -Mtbdd::mtbddVar(uint32_t variable) -{ - return mtbdd_makenode(variable, mtbdd_false, mtbdd_true); -} - -Mtbdd -Mtbdd::mtbddOne() -{ - return mtbdd_true; -} - -Mtbdd -Mtbdd::mtbddZero() -{ - return mtbdd_false; -} - -Mtbdd -Mtbdd::mtbddCube(const BddSet &variables, uint8_t *values, const Mtbdd &terminal) -{ - LACE_ME; - return mtbdd_cube(variables.set.bdd, values, terminal.mtbdd); -} - -Mtbdd -Mtbdd::mtbddCube(const BddSet &variables, std::vector values, const Mtbdd &terminal) -{ - LACE_ME; - uint8_t *data = values.data(); - return mtbdd_cube(variables.set.bdd, data, terminal.mtbdd); -} - -int -Mtbdd::isTerminal() const -{ - return mtbdd_isleaf(mtbdd); -} - -int -Mtbdd::isLeaf() const -{ - return mtbdd_isleaf(mtbdd); -} - -int -Mtbdd::isOne() const -{ - return mtbdd == mtbdd_true; -} - -int -Mtbdd::isZero() const -{ - return mtbdd == mtbdd_false; -} - -uint32_t -Mtbdd::TopVar() const -{ - return mtbdd_getvar(mtbdd); -} - -Mtbdd -Mtbdd::Then() const -{ - return mtbdd_isnode(mtbdd) ? mtbdd_gethigh(mtbdd) : mtbdd; -} - -Mtbdd -Mtbdd::Else() const -{ - return mtbdd_isnode(mtbdd) ? 
mtbdd_getlow(mtbdd) : mtbdd; -} - -Mtbdd -Mtbdd::Negate() const -{ - LACE_ME; - return mtbdd_negate(mtbdd); -} - -Mtbdd -Mtbdd::Apply(const Mtbdd &other, mtbdd_apply_op op) const -{ - LACE_ME; - return mtbdd_apply(mtbdd, other.mtbdd, op); -} - -Mtbdd -Mtbdd::UApply(mtbdd_uapply_op op, size_t param) const -{ - LACE_ME; - return mtbdd_uapply(mtbdd, op, param); -} - -Mtbdd -Mtbdd::Abstract(const BddSet &variables, mtbdd_abstract_op op) const -{ - LACE_ME; - return mtbdd_abstract(mtbdd, variables.set.bdd, op); -} - -Mtbdd -Mtbdd::Ite(const Mtbdd &g, const Mtbdd &h) const -{ - LACE_ME; - return mtbdd_ite(mtbdd, g.mtbdd, h.mtbdd); -} - -Mtbdd -Mtbdd::Plus(const Mtbdd &other) const -{ - LACE_ME; - return mtbdd_plus(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::Times(const Mtbdd &other) const -{ - LACE_ME; - return mtbdd_times(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::Min(const Mtbdd &other) const -{ - LACE_ME; - return mtbdd_min(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::Max(const Mtbdd &other) const -{ - LACE_ME; - return mtbdd_max(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::AbstractPlus(const BddSet &variables) const -{ - LACE_ME; - return mtbdd_abstract_plus(mtbdd, variables.set.bdd); -} - -Mtbdd -Mtbdd::AbstractTimes(const BddSet &variables) const -{ - LACE_ME; - return mtbdd_abstract_times(mtbdd, variables.set.bdd); -} - -Mtbdd -Mtbdd::AbstractMin(const BddSet &variables) const -{ - LACE_ME; - return mtbdd_abstract_min(mtbdd, variables.set.bdd); -} - -Mtbdd -Mtbdd::AbstractMax(const BddSet &variables) const -{ - LACE_ME; - return mtbdd_abstract_max(mtbdd, variables.set.bdd); -} - -Mtbdd -Mtbdd::AndExists(const Mtbdd &other, const BddSet &variables) const -{ - LACE_ME; - return mtbdd_and_exists(mtbdd, other.mtbdd, variables.set.bdd); -} - -int -Mtbdd::operator==(const Mtbdd& other) const -{ - return mtbdd == other.mtbdd; -} - -int -Mtbdd::operator!=(const Mtbdd& other) const -{ - return mtbdd != other.mtbdd; -} - -Mtbdd -Mtbdd::operator=(const Mtbdd& right) -{ - mtbdd = right.mtbdd; - return *this; -} - -Mtbdd -Mtbdd::operator!() const -{ - return mtbdd_not(mtbdd); -} - -Mtbdd -Mtbdd::operator~() const -{ - return mtbdd_not(mtbdd); -} - -Mtbdd -Mtbdd::operator*(const Mtbdd& other) const -{ - LACE_ME; - return mtbdd_times(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::operator*=(const Mtbdd& other) -{ - LACE_ME; - mtbdd = mtbdd_times(mtbdd, other.mtbdd); - return *this; -} - -Mtbdd -Mtbdd::operator+(const Mtbdd& other) const -{ - LACE_ME; - return mtbdd_plus(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::operator+=(const Mtbdd& other) -{ - LACE_ME; - mtbdd = mtbdd_plus(mtbdd, other.mtbdd); - return *this; -} - -Mtbdd -Mtbdd::operator-(const Mtbdd& other) const -{ - LACE_ME; - return mtbdd_minus(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::operator-=(const Mtbdd& other) -{ - LACE_ME; - mtbdd = mtbdd_minus(mtbdd, other.mtbdd); - return *this; -} - -Mtbdd -Mtbdd::MtbddThreshold(double value) const -{ - LACE_ME; - return mtbdd_threshold_double(mtbdd, value); -} - -Mtbdd -Mtbdd::MtbddStrictThreshold(double value) const -{ - LACE_ME; - return mtbdd_strict_threshold_double(mtbdd, value); -} - -Bdd -Mtbdd::BddThreshold(double value) const -{ - LACE_ME; - return mtbdd_threshold_double(mtbdd, value); -} - -Bdd -Mtbdd::BddStrictThreshold(double value) const -{ - LACE_ME; - return mtbdd_strict_threshold_double(mtbdd, value); -} - -Mtbdd -Mtbdd::Support() const -{ - LACE_ME; - return mtbdd_support(mtbdd); -} - -MTBDD -Mtbdd::GetMTBDD() const -{ - return mtbdd; -} - -Mtbdd -Mtbdd::Compose(MtbddMap &m) const -{ - LACE_ME; - return 
mtbdd_compose(mtbdd, m.mtbdd); -} - -Mtbdd -Mtbdd::Permute(const std::vector& from, const std::vector& to) const -{ - LACE_ME; - - /* Create a map */ - MtbddMap map; - for (int i=from.size()-1; i>=0; i--) { - map.put(from[i], Bdd::bddVar(to[i])); - } - - return mtbdd_compose(mtbdd, map.mtbdd); -} - -double -Mtbdd::SatCount(size_t nvars) const -{ - LACE_ME; - return mtbdd_satcount(mtbdd, nvars); -} - -double -Mtbdd::SatCount(const BddSet &variables) const -{ - return SatCount(sylvan_set_count(variables.set.bdd)); -} - -size_t -Mtbdd::NodeCount() const -{ - LACE_ME; - return mtbdd_nodecount(mtbdd); -} - - -/*** - * Implementation of class MtbddMap - */ - -MtbddMap::MtbddMap(uint32_t key_variable, Mtbdd value) -{ - mtbdd = mtbdd_map_add(mtbdd_map_empty(), key_variable, value.mtbdd); -} - -MtbddMap -MtbddMap::operator+(const Mtbdd& other) const -{ - return MtbddMap(mtbdd_map_addall(mtbdd, other.mtbdd)); -} - -MtbddMap -MtbddMap::operator+=(const Mtbdd& other) -{ - mtbdd = mtbdd_map_addall(mtbdd, other.mtbdd); - return *this; -} - -MtbddMap -MtbddMap::operator-(const Mtbdd& other) const -{ - return MtbddMap(mtbdd_map_removeall(mtbdd, other.mtbdd)); -} - -MtbddMap -MtbddMap::operator-=(const Mtbdd& other) -{ - mtbdd = mtbdd_map_removeall(mtbdd, other.mtbdd); - return *this; -} - -void -MtbddMap::put(uint32_t key, Mtbdd value) -{ - mtbdd = mtbdd_map_add(mtbdd, key, value.mtbdd); -} - -void -MtbddMap::removeKey(uint32_t key) -{ - mtbdd = mtbdd_map_remove(mtbdd, key); -} - -size_t -MtbddMap::size() -{ - return mtbdd_map_count(mtbdd); -} - -int -MtbddMap::isEmpty() -{ - return mtbdd_map_isempty(mtbdd); -} - - -/*** - * Implementation of class Sylvan - */ - -void -Sylvan::initPackage(size_t initialTableSize, size_t maxTableSize, size_t initialCacheSize, size_t maxCacheSize) -{ - sylvan_init_package(initialTableSize, maxTableSize, initialCacheSize, maxCacheSize); -} - -void -Sylvan::initBdd(int granularity) -{ - sylvan_init_bdd(granularity); -} - -void -Sylvan::initMtbdd() -{ - sylvan_init_mtbdd(); -} - -void -Sylvan::quitPackage() -{ - sylvan_quit(); -} - -#include "sylvan_obj_storm.cpp" - diff --git a/resources/3rdparty/sylvan/src/sylvan_obj.hpp b/resources/3rdparty/sylvan/src/sylvan_obj.hpp deleted file mode 100644 index 4ae849dad..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_obj.hpp +++ /dev/null @@ -1,855 +0,0 @@ -/* - * Copyright 2011-2015 Formal Methods and Tools, University of Twente - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef SYLVAN_OBJ_H -#define SYLVAN_OBJ_H - -#include -#include - -#include -#include - -namespace sylvan { - -class BddSet; -class BddMap; -class Mtbdd; - -class Bdd { - friend class Sylvan; - friend class BddSet; - friend class BddMap; - friend class Mtbdd; - -public: - Bdd() { bdd = sylvan_false; sylvan_protect(&bdd); } - Bdd(const BDD from) : bdd(from) { sylvan_protect(&bdd); } - Bdd(const Bdd &from) : bdd(from.bdd) { sylvan_protect(&bdd); } - Bdd(const uint32_t var) { bdd = sylvan_ithvar(var); sylvan_protect(&bdd); } - ~Bdd() { sylvan_unprotect(&bdd); } - - /** - * @brief Creates a Bdd representing just the variable index in its positive form - * The variable index must be a 0<=index<=2^23 (we use 24 bits internally) - */ - static Bdd bddVar(uint32_t index); - - /** - * @brief Returns the Bdd representing "True" - */ - static Bdd bddOne(); - - /** - * @brief Returns the Bdd representing "False" - */ - static Bdd bddZero(); - - /** - * @brief Returns the Bdd representing a cube of variables, according to the given values. - * @param variables the variables that will be in the cube in their positive or negative form - * @param values a character array describing how the variables will appear in the result - * The length of string must be equal to the number of variables in the cube. - * For every ith char in string, if it is 0, the corresponding variable will appear in its negative form, - * if it is 1, it will appear in its positive form, and if it is 2, it will appear as "any", thus it will - * be skipped. - */ - static Bdd bddCube(const BddSet &variables, unsigned char *values); - - /** - * @brief Returns the Bdd representing a cube of variables, according to the given values. - * @param variables the variables that will be in the cube in their positive or negative form - * @param string a character array describing how the variables will appear in the result - * The length of string must be equal to the number of variables in the cube. - * For every ith char in string, if it is 0, the corresponding variable will appear in its negative form, - * if it is 1, it will appear in its positive form, and if it is 2, it will appear as "any", thus it will - * be skipped. 
- */ - static Bdd bddCube(const BddSet &variables, std::vector values); - - int operator==(const Bdd& other) const; - int operator!=(const Bdd& other) const; - Bdd operator=(const Bdd& right); - int operator<=(const Bdd& other) const; - int operator>=(const Bdd& other) const; - int operator<(const Bdd& other) const; - int operator>(const Bdd& other) const; - Bdd operator!() const; - Bdd operator~() const; - Bdd operator*(const Bdd& other) const; - Bdd operator*=(const Bdd& other); - Bdd operator&(const Bdd& other) const; - Bdd operator&=(const Bdd& other); - Bdd operator+(const Bdd& other) const; - Bdd operator+=(const Bdd& other); - Bdd operator|(const Bdd& other) const; - Bdd operator|=(const Bdd& other); - Bdd operator^(const Bdd& other) const; - Bdd operator^=(const Bdd& other); - Bdd operator-(const Bdd& other) const; - Bdd operator-=(const Bdd& other); - - /** - * @brief Returns non-zero if this Bdd is bddOne() or bddZero() - */ - int isConstant() const; - - /** - * @brief Returns non-zero if this Bdd is bddOne() or bddZero() - */ - int isTerminal() const; - - /** - * @brief Returns non-zero if this Bdd is bddOne() - */ - int isOne() const; - - /** - * @brief Returns non-zero if this Bdd is bddZero() - */ - int isZero() const; - - /** - * @brief Returns the top variable index of this Bdd (the variable in the root node) - */ - uint32_t TopVar() const; - - /** - * @brief Follows the high edge ("then") of the root node of this Bdd - */ - Bdd Then() const; - - /** - * @brief Follows the low edge ("else") of the root node of this Bdd - */ - Bdd Else() const; - - /** - * @brief Computes \exists cube: f \and g - */ - Bdd AndAbstract(const Bdd& g, const BddSet& cube) const; - - /** - * @brief Computes \exists cube: f - */ - Bdd ExistAbstract(const BddSet& cube) const; - - /** - * @brief Computes \forall cube: f - */ - Bdd UnivAbstract(const BddSet& cube) const; - - /** - * @brief Computes if f then g else h - */ - Bdd Ite(const Bdd& g, const Bdd& h) const; - - /** - * @brief Computes f \and g - */ - Bdd And(const Bdd& g) const; - - /** - * @brief Computes f \or g - */ - Bdd Or(const Bdd& g) const; - - /** - * @brief Computes \not (f \and g) - */ - Bdd Nand(const Bdd& g) const; - - /** - * @brief Computes \not (f \or g) - */ - Bdd Nor(const Bdd& g) const; - - /** - * @brief Computes f \xor g - */ - Bdd Xor(const Bdd& g) const; - - /** - * @brief Computes \not (f \xor g), i.e. f \equiv g - */ - Bdd Xnor(const Bdd& g) const; - - /** - * @brief Returns whether all elements in f are also in g - */ - int Leq(const Bdd& g) const; - - /** - * @brief Computes the reverse application of a transition relation to this set. - * @param relation the transition relation to apply - * @param cube the variables that are in the transition relation - * This function assumes that s,t are interleaved with s even and t odd (s+1). - * Other variables in the relation are ignored (by existential quantification) - * Set cube to "false" (illegal cube) to assume all encountered variables are in s,t - * - * Use this function to concatenate two relations --> --> - * or to take the 'previous' of a set --> S - */ - Bdd RelPrev(const Bdd& relation, const BddSet& cube) const; - - /** - * @brief Computes the application of a transition relation to this set. - * @param relation the transition relation to apply - * @param cube the variables that are in the transition relation - * This function assumes that s,t are interleaved with s even and t odd (s+1). 
- * Other variables in the relation are ignored (by existential quantification) - * Set cube to "false" (illegal cube) to assume all encountered variables are in s,t - * - * Use this function to take the 'next' of a set S --> - */ - Bdd RelNext(const Bdd& relation, const BddSet& cube) const; - - /** - * @brief Computes the transitive closure by traversing the BDD recursively. - * See Y. Matsunaga, P. C. McGeer, R. K. Brayton - * On Computing the Transitive Closre of a State Transition Relation - * 30th ACM Design Automation Conference, 1993. - */ - Bdd Closure() const; - - /** - * @brief Computes the constrain f @ c - */ - Bdd Constrain(const Bdd &c) const; - - /** - * @brief Computes the BDD restrict according to Coudert and Madre's algorithm (ICCAD90). - */ - Bdd Restrict(const Bdd &c) const; - - /** - * @brief Functional composition. Whenever a variable v in the map m is found in the BDD, - * it is substituted by the associated function. - * You can also use this function to implement variable reordering. - */ - Bdd Compose(const BddMap &m) const; - - /** - * @brief Substitute all variables in the array from by the corresponding variables in to. - */ - Bdd Permute(const std::vector& from, const std::vector& to) const; - - /** - * @brief Computes the support of a Bdd. - */ - Bdd Support() const; - - /** - * @brief Gets the BDD of this Bdd (for C functions) - */ - BDD GetBDD() const; - - /** - * @brief Writes .dot file of this Bdd. Not thread-safe! - */ - void PrintDot(FILE *out) const; - - /** - * @brief Gets a SHA2 hash that describes the structure of this Bdd. - * @param string a character array of at least 65 characters (includes zero-termination) - * This hash is 64 characters long and is independent of the memory locations of BDD nodes. - */ - void GetShaHash(char *string) const; - - std::string GetShaHash() const; - - /** - * @brief Computes the number of satisfying variable assignments, using variables in cube. - */ - double SatCount(const BddSet &cube) const; - - /** - * @brief Compute the number of satisfying variable assignments, using the given number of variables. - */ - double SatCount(const size_t nvars) const; - - /** - * @brief Gets one satisfying assignment according to the variables. - * @param variables The set of variables to be assigned, must include the support of the Bdd. - */ - void PickOneCube(const BddSet &variables, uint8_t *string) const; - - /** - * @brief Gets one satisfying assignment according to the variables. - * @param variables The set of variables to be assigned, must include the support of the Bdd. - * Returns an empty vector when either this Bdd equals bddZero() or the cube is empty. - */ - std::vector PickOneCube(const BddSet &variables) const; - - /** - * @brief Gets a cube that satisfies this Bdd. - */ - Bdd PickOneCube() const; - - /** - * @brief Faster version of: *this + Sylvan::bddCube(variables, values); - */ - Bdd UnionCube(const BddSet &variables, uint8_t *values) const; - - /** - * @brief Faster version of: *this + Sylvan::bddCube(variables, values); - */ - Bdd UnionCube(const BddSet &variables, std::vector values) const; - - /** - * @brief Generate a cube representing a set of variables - */ - static Bdd VectorCube(const std::vector variables); - - /** - * @brief Generate a cube representing a set of variables - * @param variables An sorted set of variable indices - */ - static Bdd VariablesCube(const std::vector variables); - - /** - * @brief Gets the number of nodes in this Bdd. Not thread-safe! 
- */ - size_t NodeCount() const; - -#include "sylvan_obj_bdd_storm.hpp" - -private: - BDD bdd; -}; - -class BddSet -{ - friend class Bdd; - friend class Mtbdd; - Bdd set; - -public: - /** - * @brief Create a new empty set. - */ - BddSet() : set(Bdd::bddOne()) {} - - /** - * @brief Wrap the BDD cube in a set. - */ - BddSet(const Bdd &other) : set(other) {} - - /** - * @brief Create a copy of the set . - */ - BddSet(const BddSet &other) : set(other.set) {} - - /** - * @brief Add the variable to this set. - */ - void add(uint32_t variable) { - set *= Bdd::bddVar(variable); - } - - /** - * @brief Add all variables in the set to this set. - */ - void add(BddSet &other) { - set *= other.set; - } - - /** - * @brief Remove the variable from this set. - */ - void remove(uint32_t variable) { - set = set.ExistAbstract(Bdd::bddVar(variable)); - } - - /** - * @brief Remove all variables in the set from this set. - */ - void remove(BddSet &other) { - set = set.ExistAbstract(other.set); - } - - /** - * @brief Retrieve the head of the set. (The first variable.) - */ - uint32_t TopVar() const { - return set.TopVar(); - } - - /** - * @brief Retrieve the tail of the set. (The set containing all but the first variables.) - */ - BddSet Next() const { - Bdd then = set.Then(); - return BddSet(then); - } - - /** - * @brief Return true if this set is empty, or false otherwise. - */ - bool isEmpty() const { - return set.isOne(); - } - - /** - * @brief Return true if this set contains the variable , or false otherwise. - */ - bool contains(uint32_t variable) const { - if (isEmpty()) return false; - else if (TopVar() == variable) return true; - else return Next().contains(variable); - } - - /** - * @brief Return the number of variables in this set. - */ - size_t size() const { - if (isEmpty()) return 0; - else return 1 + Next().size(); - } - - /** - * @brief Create a set containing the variables in . - * It is advised to have the variables in in ascending order. - */ - static BddSet fromArray(BDDVAR *arr, size_t length) { - BddSet set; - for (size_t i = 0; i < length; i++) { - set.add(arr[length-i-1]); - } - return set; - } - - /** - * @brief Create a set containing the variables in . - * It is advised to have the variables in in ascending order. - */ - static BddSet fromVector(const std::vector variables) { - BddSet set; - for (int i=variables.size()-1; i>=0; i--) { - set.set *= variables[i]; - } - return set; - } - - /** - * @brief Create a set containing the variables in . - * It is advised to have the variables in in ascending order. - */ - static BddSet fromVector(const std::vector variables) { - BddSet set; - for (int i=variables.size()-1; i>=0; i--) { - set.add(variables[i]); - } - return set; - } - - /** - * @brief Write all variables in this set to . - * @param arr An array of at least size this.size(). - */ - void toArray(BDDVAR *arr) const { - if (!isEmpty()) { - *arr = TopVar(); - Next().toArray(arr+1); - } - } - - /** - * @brief Return the vector of all variables in this set. 
- */ - std::vector toVector() const { - std::vector result; - Bdd x = set; - while (!x.isOne()) { - result.push_back(x.TopVar()); - x = x.Then(); - } - return result; - } -}; - -class BddMap -{ - friend class Bdd; - BDD bdd; - BddMap(const BDD from) : bdd(from) { sylvan_protect(&bdd); } - BddMap(const Bdd &from) : bdd(from.bdd) { sylvan_protect(&bdd); } -public: - BddMap() : bdd(sylvan_map_empty()) { sylvan_protect(&bdd); } - ~BddMap() { sylvan_unprotect(&bdd); } - - BddMap(uint32_t key_variable, const Bdd value); - - BddMap operator+(const Bdd& other) const; - BddMap operator+=(const Bdd& other); - BddMap operator-(const Bdd& other) const; - BddMap operator-=(const Bdd& other); - - /** - * @brief Adds a key-value pair to the map - */ - void put(uint32_t key, Bdd value); - - /** - * @brief Removes a key-value pair from the map - */ - void removeKey(uint32_t key); - - /** - * @brief Returns the number of key-value pairs in this map - */ - size_t size() const; - - /** - * @brief Returns non-zero when this map is empty - */ - int isEmpty() const; -}; - -class MtbddMap; - -class Mtbdd { - friend class Sylvan; - friend class MtbddMap; - -public: - Mtbdd() { mtbdd = sylvan_false; mtbdd_protect(&mtbdd); } - Mtbdd(const MTBDD from) : mtbdd(from) { mtbdd_protect(&mtbdd); } - Mtbdd(const Mtbdd &from) : mtbdd(from.mtbdd) { mtbdd_protect(&mtbdd); } - Mtbdd(const Bdd &from) : mtbdd(from.bdd) { mtbdd_protect(&mtbdd); } - ~Mtbdd() { mtbdd_unprotect(&mtbdd); } - - /** - * @brief Creates a Mtbdd leaf representing the int64 value - */ - static Mtbdd int64Terminal(int64_t value); - - /** - * @brief Creates a Mtbdd leaf representing the floating-point value - */ - static Mtbdd doubleTerminal(double value); - - /** - * @brief Creates a Mtbdd leaf representing the fraction value / - * Internally, Sylvan uses 32-bit values and reports overflows to stderr. - */ - static Mtbdd fractionTerminal(int64_t nominator, uint64_t denominator); - - /** - * @brief Creates a Mtbdd leaf of type holding value - * This is useful for custom Mtbdd types. - */ - static Mtbdd terminal(uint32_t type, uint64_t value); - - /** - * @brief Creates a Boolean Mtbdd representing jsut the variable index in its positive form - * The variable index must be 0<=index<=2^23 (Sylvan uses 24 bits internally) - */ - static Mtbdd mtbddVar(uint32_t variable); - - /** - * @brief Returns the Boolean Mtbdd representing "True" - */ - static Mtbdd mtbddOne(); - - /** - * @brief Returns the Boolean Mtbdd representing "False" - */ - static Mtbdd mtbddZero(); - - /** - * @brief Returns the Mtbdd representing a cube of variables, according to the given values. - * @param variables the variables that will be in the cube in their positive or negative form - * @param values a character array describing how the variables will appear in the result - * @param terminal the leaf of the cube - * The length of string must be equal to the number of variables in the cube. - * For every ith char in string, if it is 0, the corresponding variable will appear in its negative form, - * if it is 1, it will appear in its positive form, and if it is 2, it will appear as "any", thus it will - * be skipped. - */ - static Mtbdd mtbddCube(const BddSet &variables, unsigned char *values, const Mtbdd &terminal); - - /** - * @brief Returns the Mtbdd representing a cube of variables, according to the given values. 
- * @param variables the variables that will be in the cube in their positive or negative form - * @param values a character array describing how the variables will appear in the result - * @param terminal the leaf of the cube - * The length of string must be equal to the number of variables in the cube. - * For every ith char in string, if it is 0, the corresponding variable will appear in its negative form, - * if it is 1, it will appear in its positive form, and if it is 2, it will appear as "any", thus it will - * be skipped. - */ - static Mtbdd mtbddCube(const BddSet &variables, std::vector values, const Mtbdd &terminal); - - int operator==(const Mtbdd& other) const; - int operator!=(const Mtbdd& other) const; - Mtbdd operator=(const Mtbdd& right); - Mtbdd operator!() const; - Mtbdd operator~() const; - Mtbdd operator*(const Mtbdd& other) const; - Mtbdd operator*=(const Mtbdd& other); - Mtbdd operator+(const Mtbdd& other) const; - Mtbdd operator+=(const Mtbdd& other); - Mtbdd operator-(const Mtbdd& other) const; - Mtbdd operator-=(const Mtbdd& other); - - // not implemented (compared to Bdd): <=, >=, <, >, &, &=, |, |=, ^, ^= - - /** - * @brief Returns non-zero if this Mtbdd is a leaf - */ - int isTerminal() const; - - /** - * @brief Returns non-zero if this Mtbdd is a leaf - */ - int isLeaf() const; - - /** - * @brief Returns non-zero if this Mtbdd is mtbddOne() - */ - int isOne() const; - - /** - * @brief Returns non-zero if this Mtbdd is mtbddZero() - */ - int isZero() const; - - /** - * @brief Returns the top variable index of this Mtbdd (the variable in the root node) - */ - uint32_t TopVar() const; - - /** - * @brief Follows the high edge ("then") of the root node of this Mtbdd - */ - Mtbdd Then() const; - - /** - * @brief Follows the low edge ("else") of the root node of this Mtbdd - */ - Mtbdd Else() const; - - /** - * @brief Returns the negation of the MTBDD (every terminal negative) - * Do not use this for Boolean MTBDDs, only for Integer/Double/Fraction MTBDDs. - */ - Mtbdd Negate() const; - - /** - * @brief Applies the binary operation - */ - Mtbdd Apply(const Mtbdd &other, mtbdd_apply_op op) const; - - /** - * @brief Applies the unary operation with parameter - */ - Mtbdd UApply(mtbdd_uapply_op op, size_t param) const; - - /** - * @brief Computers the abstraction on variables using operator . 
- * See also: AbstractPlus, AbstractTimes, AbstractMin, AbstractMax - */ - Mtbdd Abstract(const BddSet &variables, mtbdd_abstract_op op) const; - - /** - * @brief Computes if f then g else h - * This Mtbdd must be a Boolean Mtbdd - */ - Mtbdd Ite(const Mtbdd &g, const Mtbdd &h) const; - - /** - * @brief Computes f + g - */ - Mtbdd Plus(const Mtbdd &other) const; - - /** - * @brief Computes f * g - */ - Mtbdd Times(const Mtbdd &other) const; - - /** - * @brief Computes min(f, g) - */ - Mtbdd Min(const Mtbdd &other) const; - - /** - * @brief Computes max(f, g) - */ - Mtbdd Max(const Mtbdd &other) const; - - /** - * @brief Computes abstraction by summation (existential quantification) - */ - Mtbdd AbstractPlus(const BddSet &variables) const; - - /** - * @brief Computes abstraction by multiplication (universal quantification) - */ - Mtbdd AbstractTimes(const BddSet &variables) const; - - /** - * @brief Computes abstraction by minimum - */ - Mtbdd AbstractMin(const BddSet &variables) const; - - /** - * @brief Computes abstraction by maximum - */ - Mtbdd AbstractMax(const BddSet &variables) const; - - /** - * @brief Computes abstraction by summation of f \times g - */ - Mtbdd AndExists(const Mtbdd &other, const BddSet &variables) const; - - /** - * @brief Convert floating-point/fraction Mtbdd to a Boolean Mtbdd, leaf >= value ? true : false - */ - Mtbdd MtbddThreshold(double value) const; - - /** - * @brief Convert floating-point/fraction Mtbdd to a Boolean Mtbdd, leaf > value ? true : false - */ - Mtbdd MtbddStrictThreshold(double value) const; - - /** - * @brief Convert floating-point/fraction Mtbdd to a Boolean Mtbdd, leaf >= value ? true : false - * Same as MtbddThreshold (Bdd = Boolean Mtbdd) - */ - Bdd BddThreshold(double value) const; - - /** - * @brief Convert floating-point/fraction Mtbdd to a Boolean Mtbdd, leaf > value ? true : false - * Same as MtbddStrictThreshold (Bdd = Boolean Mtbdd) - */ - Bdd BddStrictThreshold(double value) const; - - /** - * @brief Computes the support of a Mtbdd. - */ - Mtbdd Support() const; - - /** - * @brief Gets the MTBDD of this Mtbdd (for C functions) - */ - MTBDD GetMTBDD() const; - - /** - * @brief Functional composition. Whenever a variable v in the map m is found in the MTBDD, - * it is substituted by the associated function (which should be a Boolean MTBDD) - * You can also use this function to implement variable reordering. - */ - Mtbdd Compose(MtbddMap &m) const; - - /** - * @brief Substitute all variables in the array from by the corresponding variables in to. - */ - Mtbdd Permute(const std::vector& from, const std::vector& to) const; - - /** - * @brief Compute the number of satisfying variable assignments, using variables in cube. - */ - double SatCount(const BddSet &variables) const; - - /** - * @brief Compute the number of satisfying variable assignments, using the given number of variables. - */ - double SatCount(const size_t nvars) const; - - /** - * @brief Gets the number of nodes in this Bdd. Not thread-safe! 
- */ - size_t NodeCount() const; - -#include "sylvan_obj_mtbdd_storm.hpp" - -private: - MTBDD mtbdd; -}; - -class MtbddMap -{ - friend class Mtbdd; - MTBDD mtbdd; - MtbddMap(MTBDD from) : mtbdd(from) { mtbdd_protect(&mtbdd); } - MtbddMap(Mtbdd &from) : mtbdd(from.mtbdd) { mtbdd_protect(&mtbdd); } -public: - MtbddMap() : mtbdd(mtbdd_map_empty()) { mtbdd_protect(&mtbdd); } - ~MtbddMap() { mtbdd_unprotect(&mtbdd); } - - MtbddMap(uint32_t key_variable, Mtbdd value); - - MtbddMap operator+(const Mtbdd& other) const; - MtbddMap operator+=(const Mtbdd& other); - MtbddMap operator-(const Mtbdd& other) const; - MtbddMap operator-=(const Mtbdd& other); - - /** - * @brief Adds a key-value pair to the map - */ - void put(uint32_t key, Mtbdd value); - - /** - * @brief Removes a key-value pair from the map - */ - void removeKey(uint32_t key); - - /** - * @brief Returns the number of key-value pairs in this map - */ - size_t size(); - - /** - * @brief Returns non-zero when this map is empty - */ - int isEmpty(); -}; - -class Sylvan { -public: - /** - * @brief Initializes the Sylvan framework, call this only once in your program. - * @param initialTableSize The initial size of the nodes table. Must be a power of two. - * @param maxTableSize The maximum size of the nodes table. Must be a power of two. - * @param initialCacheSize The initial size of the operation cache. Must be a power of two. - * @param maxCacheSize The maximum size of the operation cache. Must be a power of two. - */ - static void initPackage(size_t initialTableSize, size_t maxTableSize, size_t initialCacheSize, size_t maxCacheSize); - - /** - * @brief Initializes the BDD module of the Sylvan framework. - * @param granularity determins operation cache behavior; for higher values (2+) it will use the operation cache less often. - * Values of 3-7 may result in better performance, since occasionally not using the operation cache is fine in practice. - * A granularity of 1 means that every BDD operation will be cached at every variable level. - */ - static void initBdd(int granularity); - - /** - * @brief Initializes the MTBDD module of the Sylvan framework. - */ - static void initMtbdd(); - - /** - * @brief Frees all memory in use by Sylvan. - * Warning: if you have any Bdd objects which are not bddZero() or bddOne() after this, your program may crash! 
- */ - static void quitPackage(); -}; - -} - -#endif diff --git a/resources/3rdparty/sylvan/src/sylvan_obj_bdd_storm.hpp b/resources/3rdparty/sylvan/src/sylvan_obj_bdd_storm.hpp deleted file mode 100644 index 393ce988c..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_obj_bdd_storm.hpp +++ /dev/null @@ -1,3 +0,0 @@ - Mtbdd toDoubleMtbdd() const; - Mtbdd toInt64Mtbdd() const; - Mtbdd Ite(Mtbdd const& thenDd, Mtbdd const& elseDd) const; diff --git a/resources/3rdparty/sylvan/src/sylvan_obj_mtbdd_storm.hpp b/resources/3rdparty/sylvan/src/sylvan_obj_mtbdd_storm.hpp deleted file mode 100644 index 26e5ea4be..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_obj_mtbdd_storm.hpp +++ /dev/null @@ -1,51 +0,0 @@ - /** - * @brief Computes f - g - */ - Mtbdd Minus(const Mtbdd &other) const; - - /** - * @brief Computes f / g - */ - Mtbdd Divide(const Mtbdd &other) const; - - Bdd NotZero() const; - - Bdd Equals(const Mtbdd& other) const; - - Bdd Less(const Mtbdd& other) const; - - Bdd LessOrEqual(const Mtbdd& other) const; - - Mtbdd Minimum() const; - - Mtbdd Maximum() const; - - bool EqualNorm(const Mtbdd& other, double epsilon) const; - - bool EqualNormRel(const Mtbdd& other, double epsilon) const; - - Mtbdd Floor() const; - - Mtbdd Ceil() const; - - Mtbdd Pow(const Mtbdd& other) const; - - Mtbdd Mod(const Mtbdd& other) const; - - Mtbdd Logxy(const Mtbdd& other) const; - - size_t CountLeaves() const; - - /** - * @brief Compute the number of non-zero variable assignments, using variables in cube. - */ - double NonZeroCount(size_t variableCount) const; - - bool isValid() const; - - /** - * @brief Writes .dot file of this Bdd. Not thread-safe! - */ - void PrintDot(FILE *out) const; - - std::string GetShaHash() const; diff --git a/resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp b/resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp deleted file mode 100644 index 27706dcdd..000000000 --- a/resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp +++ /dev/null @@ -1,141 +0,0 @@ -Mtbdd -Bdd::toDoubleMtbdd() const { - LACE_ME; - return mtbdd_bool_to_double(bdd); -} - -Mtbdd -Bdd::toInt64Mtbdd() const { - LACE_ME; - return mtbdd_bool_to_int64(bdd); -} - -Mtbdd -Bdd::Ite(Mtbdd const& thenDd, Mtbdd const& elseDd) const { - LACE_ME; - return mtbdd_ite(bdd, thenDd.GetMTBDD(), elseDd.GetMTBDD()); -} - -Mtbdd -Mtbdd::Minus(const Mtbdd &other) const -{ - LACE_ME; - return mtbdd_minus(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::Divide(const Mtbdd &other) const -{ - LACE_ME; - return mtbdd_divide(mtbdd, other.mtbdd); -} - -Bdd -Mtbdd::NotZero() const -{ - LACE_ME; - return mtbdd_not_zero(mtbdd); -} - -Bdd -Mtbdd::Equals(const Mtbdd& other) const { - LACE_ME; - return mtbdd_equals(mtbdd, other.mtbdd); -} - -Bdd -Mtbdd::Less(const Mtbdd& other) const { - LACE_ME; - return mtbdd_less_as_bdd(mtbdd, other.mtbdd); -} - -Bdd -Mtbdd::LessOrEqual(const Mtbdd& other) const { - LACE_ME; - return mtbdd_less_or_equal_as_bdd(mtbdd, other.mtbdd); -} - -bool -Mtbdd::EqualNorm(const Mtbdd& other, double epsilon) const { - LACE_ME; - return mtbdd_equal_norm_d(mtbdd, other.mtbdd, epsilon); -} - -bool -Mtbdd::EqualNormRel(const Mtbdd& other, double epsilon) const { - LACE_ME; - return mtbdd_equal_norm_rel_d(mtbdd, other.mtbdd, epsilon); -} - -Mtbdd -Mtbdd::Floor() const { - LACE_ME; - return mtbdd_floor(mtbdd); -} - -Mtbdd -Mtbdd::Ceil() const { - LACE_ME; - return mtbdd_ceil(mtbdd); -} - -Mtbdd -Mtbdd::Pow(const Mtbdd& other) const { - LACE_ME; - return mtbdd_pow(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::Mod(const Mtbdd& other) const { - 
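/*
 * Illustrative sketch (not part of the original sylvan sources): every wrapper in
 * this file follows the same pattern as the surrounding ones, i.e. bind a Lace
 * worker with LACE_ME and forward to the storm-specific C task. Typical use via
 * the C++ interface, assuming Sylvan, Lace and the MTBDD module are initialized
 * (variable names hypothetical):
 *
 *   Mtbdd p     = Mtbdd::doubleTerminal(6.0);
 *   Mtbdd q     = Mtbdd::doubleTerminal(4.0);
 *   Mtbdd ratio = p.Divide(q);      // forwards to mtbdd_divide
 *   Bdd   nz    = ratio.NotZero();  // Boolean BDD of the non-zero leaves
 *   Mtbdd whole = ratio.Floor();    // floor applied to every Double/Fraction leaf
 */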
LACE_ME; - return mtbdd_mod(mtbdd, other.mtbdd); -} - -Mtbdd -Mtbdd::Logxy(const Mtbdd& other) const { - LACE_ME; - return mtbdd_logxy(mtbdd, other.mtbdd); -} - -size_t -Mtbdd::CountLeaves() const { - LACE_ME; - return mtbdd_leafcount(mtbdd); -} - -double -Mtbdd::NonZeroCount(size_t variableCount) const { - LACE_ME; - return mtbdd_non_zero_count(mtbdd, variableCount); -} - -bool -Mtbdd::isValid() const { - LACE_ME; - return mtbdd_test_isvalid(mtbdd) == 1; -} - -Mtbdd -Mtbdd::Minimum() const { - LACE_ME; - return mtbdd_minimum(mtbdd); -} - -Mtbdd -Mtbdd::Maximum() const { - LACE_ME; - return mtbdd_maximum(mtbdd); -} - -void -Mtbdd::PrintDot(FILE *out) const { - mtbdd_fprintdot(out, mtbdd, NULL); -} - -std::string -Mtbdd::GetShaHash() const { - char buf[65]; - mtbdd_getsha(mtbdd, buf); - return std::string(buf); -} - diff --git a/resources/3rdparty/sylvan/src/tls.h b/resources/3rdparty/sylvan/src/tls.h deleted file mode 100644 index 80fdfe7e5..000000000 --- a/resources/3rdparty/sylvan/src/tls.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Written by Josh Dybnis and released to the public domain, as explained at - * http://creativecommons.org/licenses/publicdomain - * - * A platform independant wrapper around thread-local storage. On platforms that don't support - * __thread variables (e.g. Mac OS X), we have to use the pthreads library for thread-local storage - */ -#include - -#ifndef TLS_H -#define TLS_H - -#ifdef __ELF__ // use gcc thread-local storage (i.e. __thread variables) -#define DECLARE_THREAD_LOCAL(name, type) __thread type name -#define INIT_THREAD_LOCAL(name) -#define SET_THREAD_LOCAL(name, value) name = value -#define LOCALIZE_THREAD_LOCAL(name, type) - -#else//!__ELF__ - -#include - -#define DECLARE_THREAD_LOCAL(name, type) pthread_key_t name##_KEY - -#define INIT_THREAD_LOCAL(name) \ - do { \ - if (pthread_key_create(&name##_KEY, NULL) != 0) { assert(0); } \ - } while (0) - -#define SET_THREAD_LOCAL(name, value) pthread_setspecific(name##_KEY, (void *)(size_t)value); - -#define LOCALIZE_THREAD_LOCAL(name, type) type name = (type)(size_t)pthread_getspecific(name##_KEY) - -#endif//__ELF__ -#endif//TLS_H diff --git a/resources/3rdparty/sylvan/test/.gitignore b/resources/3rdparty/sylvan/test/.gitignore deleted file mode 100644 index e04430786..000000000 --- a/resources/3rdparty/sylvan/test/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -test -cmake_install.cmake -CMakeFiles -*.o -.libs diff --git a/resources/3rdparty/sylvan/test/CMakeLists.txt b/resources/3rdparty/sylvan/test/CMakeLists.txt deleted file mode 100644 index 50fc616dc..000000000 --- a/resources/3rdparty/sylvan/test/CMakeLists.txt +++ /dev/null @@ -1,8 +0,0 @@ -cmake_minimum_required(VERSION 2.6) -project(sylvan C CXX) - -add_executable(sylvan_test main.c) -target_link_libraries(sylvan_test sylvan) - -add_executable(test_cxx test_cxx.cpp) -target_link_libraries(test_cxx sylvan stdc++) diff --git a/resources/3rdparty/sylvan/test/main.c b/resources/3rdparty/sylvan/test/main.c deleted file mode 100644 index 37bdee989..000000000 --- a/resources/3rdparty/sylvan/test/main.c +++ /dev/null @@ -1,654 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "llmsset.h" -#include "sylvan.h" - -#define BLACK "\33[22;30m" -#define GRAY "\33[01;30m" -#define RED "\33[22;31m" -#define LRED "\33[01;31m" -#define GREEN "\33[22;32m" -#define LGREEN "\33[01;32m" -#define BLUE "\33[22;34m" -#define LBLUE "\33[01;34m" -#define BROWN "\33[22;33m" -#define YELLOW "\33[01;33m" 
-#define CYAN "\33[22;36m" -#define LCYAN "\33[22;36m" -#define MAGENTA "\33[22;35m" -#define LMAGENTA "\33[01;35m" -#define NC "\33[0m" -#define BOLD "\33[1m" -#define ULINE "\33[4m" //underline -#define BLINK "\33[5m" -#define INVERT "\33[7m" - -__thread uint64_t seed = 1; - -uint64_t -xorshift_rand(void) -{ - uint64_t x = seed; - if (seed == 0) seed = rand(); - x ^= x >> 12; - x ^= x << 25; - x ^= x >> 27; - seed = x; - return x * 2685821657736338717LL; -} - -double -uniform_deviate(uint64_t seed) -{ - return seed * (1.0 / (0xffffffffffffffffL + 1.0)); -} - -int -rng(int low, int high) -{ - return low + uniform_deviate(xorshift_rand()) * (high-low); -} - -static inline BDD -make_random(int i, int j) -{ - if (i == j) return rng(0, 2) ? sylvan_true : sylvan_false; - - BDD yes = make_random(i+1, j); - BDD no = make_random(i+1, j); - BDD result = sylvan_invalid; - - switch(rng(0, 4)) { - case 0: - result = no; - sylvan_deref(yes); - break; - case 1: - result = yes; - sylvan_deref(no); - break; - case 2: - result = sylvan_ref(sylvan_makenode(i, yes, no)); - sylvan_deref(no); - sylvan_deref(yes); - break; - case 3: - default: - result = sylvan_ref(sylvan_makenode(i, no, yes)); - sylvan_deref(no); - sylvan_deref(yes); - break; - } - - return result; -} - - -void testFun(BDD p1, BDD p2, BDD r1, BDD r2) -{ - if (r1 == r2) return; - - printf("Parameter 1:\n"); - fflush(stdout); - sylvan_printdot(p1); - sylvan_print(p1);printf("\n"); - - printf("Parameter 2:\n"); - fflush(stdout); - sylvan_printdot(p2); - sylvan_print(p2);printf("\n"); - - printf("Result 1:\n"); - fflush(stdout); - sylvan_printdot(r1); - - printf("Result 2:\n"); - fflush(stdout); - sylvan_printdot(r2); - - assert(0); -} - -int testEqual(BDD a, BDD b) -{ - if (a == b) return 1; - - if (a == sylvan_invalid) { - printf("a is invalid!\n"); - return 0; - } - - if (b == sylvan_invalid) { - printf("b is invalid!\n"); - return 0; - } - - printf("Not Equal!\n"); - fflush(stdout); - - sylvan_print(a);printf("\n"); - sylvan_print(b);printf("\n"); - - return 0; -} - -void -test_bdd() -{ - sylvan_gc_disable(); - - assert(sylvan_makenode(sylvan_ithvar(1), sylvan_true, sylvan_true) == sylvan_not(sylvan_makenode(sylvan_ithvar(1), sylvan_false, sylvan_false))); - assert(sylvan_makenode(sylvan_ithvar(1), sylvan_false, sylvan_true) == sylvan_not(sylvan_makenode(sylvan_ithvar(1), sylvan_true, sylvan_false))); - assert(sylvan_makenode(sylvan_ithvar(1), sylvan_true, sylvan_false) == sylvan_not(sylvan_makenode(sylvan_ithvar(1), sylvan_false, sylvan_true))); - assert(sylvan_makenode(sylvan_ithvar(1), sylvan_false, sylvan_false) == sylvan_not(sylvan_makenode(sylvan_ithvar(1), sylvan_true, sylvan_true))); - - sylvan_gc_enable(); -} - -void -test_cube() -{ - LACE_ME; - BDDSET vars = sylvan_set_fromarray(((BDDVAR[]){1,2,3,4,6,8}), 6); - - uint8_t cube[6], check[6]; - int i, j; - for (i=0;i<6;i++) cube[i] = rng(0,3); - BDD bdd = sylvan_cube(vars, cube); - - sylvan_sat_one(bdd, vars, check); - for (i=0; i<6;i++) assert(cube[i] == check[i] || cube[i] == 2 && check[i] == 0); - - BDD picked = sylvan_pick_cube(bdd); - assert(testEqual(sylvan_and(picked, bdd), picked)); - - BDD t1 = sylvan_cube(vars, ((uint8_t[]){1,1,2,2,0,0})); - BDD t2 = sylvan_cube(vars, ((uint8_t[]){1,1,1,0,0,2})); - assert(testEqual(sylvan_union_cube(t1, vars, ((uint8_t[]){1,1,1,0,0,2})), sylvan_or(t1, t2))); - t2 = sylvan_cube(vars, ((uint8_t[]){2,2,2,1,1,0})); - assert(testEqual(sylvan_union_cube(t1, vars, ((uint8_t[]){2,2,2,1,1,0})), sylvan_or(t1, t2))); - t2 = sylvan_cube(vars, 
((uint8_t[]){1,1,1,0,0,0})); - assert(testEqual(sylvan_union_cube(t1, vars, ((uint8_t[]){1,1,1,0,0,0})), sylvan_or(t1, t2))); - - sylvan_gc_disable(); - bdd = make_random(1, 16); - for (j=0;j<10;j++) { - for (i=0;i<6;i++) cube[i] = rng(0,3); - BDD c = sylvan_cube(vars, cube); - assert(sylvan_union_cube(bdd, vars, cube) == sylvan_or(bdd, c)); - } - - for (i=0;i<10;i++) { - picked = sylvan_pick_cube(bdd); - assert(testEqual(sylvan_and(picked, bdd), picked)); - } - sylvan_gc_enable(); -} - -static void -test_operators() -{ - // We need to test: xor, and, or, nand, nor, imp, biimp, invimp, diff, less - sylvan_gc_disable(); - - LACE_ME; - - //int i; - BDD a = sylvan_ithvar(1); - BDD b = sylvan_ithvar(2); - BDD one = make_random(1, 12); - BDD two = make_random(6, 24); - - // Test or - assert(testEqual(sylvan_or(a, b), sylvan_makenode(1, b, sylvan_true))); - assert(testEqual(sylvan_or(a, b), sylvan_or(b, a))); - assert(testEqual(sylvan_or(one, two), sylvan_or(two, one))); - - // Test and - assert(testEqual(sylvan_and(a, b), sylvan_makenode(1, sylvan_false, b))); - assert(testEqual(sylvan_and(a, b), sylvan_and(b, a))); - assert(testEqual(sylvan_and(one, two), sylvan_and(two, one))); - - // Test xor - assert(testEqual(sylvan_xor(a, b), sylvan_makenode(1, b, sylvan_not(b)))); - assert(testEqual(sylvan_xor(a, b), sylvan_xor(a, b))); - assert(testEqual(sylvan_xor(a, b), sylvan_xor(b, a))); - assert(testEqual(sylvan_xor(one, two), sylvan_xor(two, one))); - assert(testEqual(sylvan_xor(a, b), sylvan_ite(a, sylvan_not(b), b))); - - // Test diff - assert(testEqual(sylvan_diff(a, b), sylvan_diff(a, b))); - assert(testEqual(sylvan_diff(a, b), sylvan_diff(a, sylvan_and(a, b)))); - assert(testEqual(sylvan_diff(a, b), sylvan_and(a, sylvan_not(b)))); - assert(testEqual(sylvan_diff(a, b), sylvan_ite(b, sylvan_false, a))); - assert(testEqual(sylvan_diff(one, two), sylvan_diff(one, two))); - assert(testEqual(sylvan_diff(one, two), sylvan_diff(one, sylvan_and(one, two)))); - assert(testEqual(sylvan_diff(one, two), sylvan_and(one, sylvan_not(two)))); - assert(testEqual(sylvan_diff(one, two), sylvan_ite(two, sylvan_false, one))); - - // Test biimp - assert(testEqual(sylvan_biimp(a, b), sylvan_makenode(1, sylvan_not(b), b))); - assert(testEqual(sylvan_biimp(a, b), sylvan_biimp(b, a))); - assert(testEqual(sylvan_biimp(one, two), sylvan_biimp(two, one))); - - // Test nand / and - assert(testEqual(sylvan_not(sylvan_and(a, b)), sylvan_nand(b, a))); - assert(testEqual(sylvan_not(sylvan_and(one, two)), sylvan_nand(two, one))); - - // Test nor / or - assert(testEqual(sylvan_not(sylvan_or(a, b)), sylvan_nor(b, a))); - assert(testEqual(sylvan_not(sylvan_or(one, two)), sylvan_nor(two, one))); - - // Test xor / biimp - assert(testEqual(sylvan_xor(a, b), sylvan_not(sylvan_biimp(b, a)))); - assert(testEqual(sylvan_xor(one, two), sylvan_not(sylvan_biimp(two, one)))); - - // Test imp - assert(testEqual(sylvan_imp(a, b), sylvan_ite(a, b, sylvan_true))); - assert(testEqual(sylvan_imp(one, two), sylvan_ite(one, two, sylvan_true))); - assert(testEqual(sylvan_imp(one, two), sylvan_not(sylvan_diff(one, two)))); - assert(testEqual(sylvan_invimp(one, two), sylvan_not(sylvan_less(one, two)))); - assert(testEqual(sylvan_imp(a, b), sylvan_invimp(b, a))); - assert(testEqual(sylvan_imp(one, two), sylvan_invimp(two, one))); - - // Test constrain, exists and forall - - sylvan_gc_enable(); -} - -static void -test_relprod() -{ - LACE_ME; - - sylvan_gc_disable(); - - BDDVAR vars[] = {0,2,4}; - BDDVAR all_vars[] = {0,1,2,3,4,5}; - - BDDSET vars_set = 
sylvan_set_fromarray(vars, 3); - BDDSET all_vars_set = sylvan_set_fromarray(all_vars, 6); - - BDD s, t, next, prev; - BDD zeroes, ones; - - // transition relation: 000 --> 111 and !000 --> 000 - t = sylvan_false; - t = sylvan_union_cube(t, all_vars_set, ((uint8_t[]){0,1,0,1,0,1})); - t = sylvan_union_cube(t, all_vars_set, ((uint8_t[]){1,0,2,0,2,0})); - t = sylvan_union_cube(t, all_vars_set, ((uint8_t[]){2,0,1,0,2,0})); - t = sylvan_union_cube(t, all_vars_set, ((uint8_t[]){2,0,2,0,1,0})); - - s = sylvan_cube(vars_set, (uint8_t[]){0,0,1}); - zeroes = sylvan_cube(vars_set, (uint8_t[]){0,0,0}); - ones = sylvan_cube(vars_set, (uint8_t[]){1,1,1}); - - next = sylvan_relnext(s, t, all_vars_set); - prev = sylvan_relprev(t, next, all_vars_set); - assert(next == zeroes); - assert(prev == sylvan_not(zeroes)); - - next = sylvan_relnext(next, t, all_vars_set); - prev = sylvan_relprev(t, next, all_vars_set); - assert(next == ones); - assert(prev == zeroes); - - t = sylvan_cube(all_vars_set, (uint8_t[]){0,0,0,0,0,1}); - assert(sylvan_relprev(t, s, all_vars_set) == zeroes); - assert(sylvan_relprev(t, sylvan_not(s), all_vars_set) == sylvan_false); - assert(sylvan_relnext(s, t, all_vars_set) == sylvan_false); - assert(sylvan_relnext(zeroes, t, all_vars_set) == s); - - t = sylvan_cube(all_vars_set, (uint8_t[]){0,0,0,0,0,2}); - assert(sylvan_relprev(t, s, all_vars_set) == zeroes); - assert(sylvan_relprev(t, zeroes, all_vars_set) == zeroes); - assert(sylvan_relnext(sylvan_not(zeroes), t, all_vars_set) == sylvan_false); - - sylvan_gc_enable(); -} - -static void -test_compose() -{ - sylvan_gc_disable(); - - LACE_ME; - - BDD a = sylvan_ithvar(1); - BDD b = sylvan_ithvar(2); - - BDD a_or_b = sylvan_or(a, b); - - BDD one = make_random(3, 16); - BDD two = make_random(8, 24); - - BDDMAP map = sylvan_map_empty(); - - map = sylvan_map_add(map, 1, one); - map = sylvan_map_add(map, 2, two); - - assert(sylvan_map_key(map) == 1); - assert(sylvan_map_value(map) == one); - assert(sylvan_map_key(sylvan_map_next(map)) == 2); - assert(sylvan_map_value(sylvan_map_next(map)) == two); - - assert(testEqual(one, sylvan_compose(a, map))); - assert(testEqual(two, sylvan_compose(b, map))); - - assert(testEqual(sylvan_or(one, two), sylvan_compose(a_or_b, map))); - - map = sylvan_map_add(map, 2, one); - assert(testEqual(sylvan_compose(a_or_b, map), one)); - - map = sylvan_map_add(map, 1, two); - assert(testEqual(sylvan_or(one, two), sylvan_compose(a_or_b, map))); - - assert(testEqual(sylvan_and(one, two), sylvan_compose(sylvan_and(a, b), map))); - - sylvan_gc_enable(); -} - -/** GC testing */ -VOID_TASK_2(gctest_fill, int, levels, int, width) -{ - if (levels > 1) { - int i; - for (i=0; i,<2,2,3,5,4,3>,<2,2,3,5,4,2>,<2,3,3,5,4,3>,<2,3,4,4,4,3>} - MDD proj = lddmc_cube((uint32_t[]){1,1,-2},3); - b = lddmc_cube((uint32_t[]){1,2}, 2); - b = lddmc_union_cube(b, (uint32_t[]){2,2}, 2); - b = lddmc_union_cube(b, (uint32_t[]){2,3}, 2); - assert(lddmc_project(a, proj)==b); - assert(lddmc_project_minus(a, proj, lddmc_false)==b); - assert(lddmc_project_minus(a, proj, b)==lddmc_false); - - // Test relprod - - a = lddmc_cube((uint32_t[]){1},1); - b = lddmc_cube((uint32_t[]){1,2},2); - proj = lddmc_cube((uint32_t[]){1,2,-1}, 3); - assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj)); - assert(lddmc_cube((uint32_t[]){3},1) == lddmc_relprod(a, lddmc_cube((uint32_t[]){1,3},2), proj)); - a = lddmc_union_cube(a, (uint32_t[]){2},1); - assert(lddmc_satcount(a) == 2); - assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj)); - b = 
lddmc_union_cube(b, (uint32_t[]){2,2},2); - assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj)); - b = lddmc_union_cube(b, (uint32_t[]){2,3},2); - assert(lddmc_satcount(lddmc_relprod(a, b, proj)) == 2); - assert(lddmc_union(lddmc_cube((uint32_t[]){2},1),lddmc_cube((uint32_t[]){3},1)) == lddmc_relprod(a, b, proj)); - - // Test relprev - MDD universe = lddmc_union(lddmc_cube((uint32_t[]){1},1), lddmc_cube((uint32_t[]){2},1)); - a = lddmc_cube((uint32_t[]){2},1); - b = lddmc_cube((uint32_t[]){1,2},2); - assert(lddmc_cube((uint32_t[]){1},1) == lddmc_relprev(a, b, proj, universe)); - assert(lddmc_cube((uint32_t[]){1},1) == lddmc_relprev(a, b, proj, lddmc_cube((uint32_t[]){1},1))); - a = lddmc_cube((uint32_t[]){1},1); - MDD next = lddmc_relprod(a, b, proj); - assert(lddmc_relprev(next, b, proj, a) == a); - - // Random tests - - MDD rnd1, rnd2; - - int i; - for (i=0; i<200; i++) { - int depth = rng(1, 20); - rnd1 = CALL(random_ldd, depth, rng(0, 30)); - rnd2 = CALL(random_ldd, depth, rng(0, 30)); - assert(rnd1 != lddmc_true); - assert(rnd2 != lddmc_true); - assert(lddmc_intersect(rnd1,rnd2) == lddmc_intersect(rnd2,rnd1)); - assert(lddmc_union(rnd1,rnd2) == lddmc_union(rnd2,rnd1)); - MDD tmp = lddmc_union(lddmc_minus(rnd1, rnd2), lddmc_minus(rnd2, rnd1)); - assert(lddmc_intersect(tmp, lddmc_intersect(rnd1, rnd2)) == lddmc_false); - assert(lddmc_union(tmp, lddmc_intersect(rnd1, rnd2)) == lddmc_union(rnd1, rnd2)); - assert(lddmc_minus(rnd1,rnd2) == lddmc_minus(rnd1, lddmc_intersect(rnd1,rnd2))); - } - - // Test file stuff - for (i=0; i<10; i++) { - FILE *f = fopen("__lddmc_test_bdd", "w+"); - int N = 20; - MDD rnd[N]; - size_t a[N]; - char sha[N][65]; - int j; - for (j=0;j 1) sscanf(argv[1], "%d", &threads); - - runtests(threads); - printf(NC); - exit(0); -} diff --git a/resources/3rdparty/sylvan/test/test_cxx.cpp b/resources/3rdparty/sylvan/test/test_cxx.cpp deleted file mode 100644 index 007a21e71..000000000 --- a/resources/3rdparty/sylvan/test/test_cxx.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Just a small test file to ensure that Sylvan can compile in C++ - * Suggested by Shota Soga for testing C++ compatibility - */ - -#include -#include -#include - -using namespace sylvan; - -void test() -{ - Bdd one = Bdd::bddOne(); - Bdd zero = Bdd::bddZero(); - - Bdd v1 = Bdd::bddVar(1); - Bdd v2 = Bdd::bddVar(2); - - Bdd t = v1 + v2; - - BddMap map; - map.put(2, t); - - assert(v2.Compose(map) == v1+v2); - - t *= v2; - - assert(t == v2); -} - -int main() -{ - // Standard Lace initialization - lace_init(0, 1000000); - lace_startup(0, NULL, NULL); - - // Simple Sylvan initialization, also initialize BDD and LDD support - sylvan_init_package(1LL<<16, 1LL<<16, 1LL<<16, 1LL<<16); - sylvan_init_bdd(1); - - test(); - - sylvan_quit(); - lace_exit(); -} From 5934a428980fb37cf0994a90621cc1c084e1013a Mon Sep 17 00:00:00 2001 From: dehnert Date: Fri, 6 May 2016 19:23:57 +0200 Subject: [PATCH 2/3] Squashed 'resources/3rdparty/sylvan/' content from commit d91f6ac git-subtree-dir: resources/3rdparty/sylvan git-subtree-split: d91f6acb554fd7de603ab80522507f41f2faa04b --- .gitignore | 43 + .travis.yml | 100 + CMakeLists.txt | 50 + LICENSE | 202 +++ Makefile.am | 5 + README.md | 97 + cmake/FindGMP.cmake | 20 + configure.ac | 21 + examples/CMakeLists.txt | 32 + examples/getrss.c | 68 + examples/getrss.h | 26 + examples/lddmc.c | 499 +++++ examples/mc.c | 616 +++++++ examples/simple.cpp | 121 ++ m4/.gitignore | 5 + m4/m4_ax_check_compile_flag.m4 | 72 + models/at.5.8-rgs.bdd | Bin 0 -> 50448 bytes 
models/at.6.8-rgs.bdd | Bin 0 -> 50128 bytes models/at.7.8-rgs.bdd | Bin 0 -> 59144 bytes models/blocks.2.ldd | Bin 0 -> 14616 bytes models/blocks.4.ldd | Bin 0 -> 41000 bytes models/collision.4.9-rgs.bdd | Bin 0 -> 73696 bytes models/collision.5.9-rgs.bdd | Bin 0 -> 73760 bytes models/schedule_world.2.8-rgs.bdd | Bin 0 -> 73472 bytes models/schedule_world.3.8-rgs.bdd | Bin 0 -> 97456 bytes src/CMakeLists.txt | 81 + src/Makefile.am | 39 + src/avl.h | 398 ++++ src/lace.c | 1045 +++++++++++ src/lace.h | 2743 ++++++++++++++++++++++++++++ src/llmsset.c | 564 ++++++ src/llmsset.h | 202 +++ src/refs.c | 598 ++++++ src/refs.h | 77 + src/sha2.c | 1067 +++++++++++ src/sha2.h | 197 ++ src/stats.c | 245 +++ src/stats.h | 262 +++ src/sylvan.h | 182 ++ src/sylvan_bdd.c | 2820 +++++++++++++++++++++++++++++ src/sylvan_bdd.h | 423 +++++ src/sylvan_bdd_storm.h | 3 + src/sylvan_cache.c | 220 +++ src/sylvan_cache.h | 113 ++ src/sylvan_common.c | 304 ++++ src/sylvan_common.h | 85 + src/sylvan_config.h | 30 + src/sylvan_gmp.c | 595 ++++++ src/sylvan_gmp.h | 182 ++ src/sylvan_ldd.c | 2560 ++++++++++++++++++++++++++ src/sylvan_ldd.h | 288 +++ src/sylvan_mtbdd.c | 2542 ++++++++++++++++++++++++++ src/sylvan_mtbdd.h | 608 +++++++ src/sylvan_mtbdd_int.h | 128 ++ src/sylvan_mtbdd_storm.c | 514 ++++++ src/sylvan_mtbdd_storm.h | 111 ++ src/sylvan_obj.cpp | 1039 +++++++++++ src/sylvan_obj.hpp | 855 +++++++++ src/sylvan_obj_bdd_storm.hpp | 3 + src/sylvan_obj_mtbdd_storm.hpp | 51 + src/sylvan_obj_storm.cpp | 141 ++ src/tls.h | 35 + test/.gitignore | 5 + test/CMakeLists.txt | 15 + test/main.c | 350 ++++ test/test_assert.h | 13 + test/test_basic.c | 331 ++++ test/test_cxx.cpp | 51 + 68 files changed, 24092 insertions(+) create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 CMakeLists.txt create mode 100644 LICENSE create mode 100644 Makefile.am create mode 100644 README.md create mode 100644 cmake/FindGMP.cmake create mode 100644 configure.ac create mode 100644 examples/CMakeLists.txt create mode 100644 examples/getrss.c create mode 100644 examples/getrss.h create mode 100644 examples/lddmc.c create mode 100644 examples/mc.c create mode 100644 examples/simple.cpp create mode 100644 m4/.gitignore create mode 100644 m4/m4_ax_check_compile_flag.m4 create mode 100644 models/at.5.8-rgs.bdd create mode 100644 models/at.6.8-rgs.bdd create mode 100644 models/at.7.8-rgs.bdd create mode 100644 models/blocks.2.ldd create mode 100644 models/blocks.4.ldd create mode 100644 models/collision.4.9-rgs.bdd create mode 100644 models/collision.5.9-rgs.bdd create mode 100644 models/schedule_world.2.8-rgs.bdd create mode 100644 models/schedule_world.3.8-rgs.bdd create mode 100644 src/CMakeLists.txt create mode 100644 src/Makefile.am create mode 100644 src/avl.h create mode 100644 src/lace.c create mode 100644 src/lace.h create mode 100644 src/llmsset.c create mode 100644 src/llmsset.h create mode 100644 src/refs.c create mode 100644 src/refs.h create mode 100644 src/sha2.c create mode 100644 src/sha2.h create mode 100644 src/stats.c create mode 100644 src/stats.h create mode 100644 src/sylvan.h create mode 100644 src/sylvan_bdd.c create mode 100644 src/sylvan_bdd.h create mode 100644 src/sylvan_bdd_storm.h create mode 100644 src/sylvan_cache.c create mode 100644 src/sylvan_cache.h create mode 100644 src/sylvan_common.c create mode 100644 src/sylvan_common.h create mode 100644 src/sylvan_config.h create mode 100644 src/sylvan_gmp.c create mode 100644 src/sylvan_gmp.h create mode 100644 src/sylvan_ldd.c create mode 100644 
src/sylvan_ldd.h create mode 100644 src/sylvan_mtbdd.c create mode 100644 src/sylvan_mtbdd.h create mode 100644 src/sylvan_mtbdd_int.h create mode 100644 src/sylvan_mtbdd_storm.c create mode 100644 src/sylvan_mtbdd_storm.h create mode 100644 src/sylvan_obj.cpp create mode 100644 src/sylvan_obj.hpp create mode 100644 src/sylvan_obj_bdd_storm.hpp create mode 100644 src/sylvan_obj_mtbdd_storm.hpp create mode 100644 src/sylvan_obj_storm.cpp create mode 100644 src/tls.h create mode 100644 test/.gitignore create mode 100644 test/CMakeLists.txt create mode 100644 test/main.c create mode 100644 test/test_assert.h create mode 100644 test/test_basic.c create mode 100644 test/test_cxx.cpp diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..d45dee77e --- /dev/null +++ b/.gitignore @@ -0,0 +1,43 @@ +# autotools +**/Makefile +/autom4te.cache/ +config.* +.dirstamp +aclocal.m4 +configure +m4/* +tools +Makefile.in + +# cmake +**/CMakeCache.txt +**/CMakeFiles +**/cmake_install.cmake + +# libtool +.deps/ +.libs/ +/libtool + +# object files +*.lo +*.o +*.la + +# output files +examples/mc +examples/lddmc +test/sylvan_test +test/test_cxx +src/libsylvan.a + +# MacOS file +.DS_Store + +# eclipse files +.cproject +.project +.settings + +# coverage output +coverage diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..ac0da9fb9 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,100 @@ +sudo: false + +matrix: + include: + - os: linux + env: TOOLSET=gcc CC=gcc-4.7 CXX=g++-4.7 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" + addons: + apt: + packages: ["gcc-4.7", "g++-4.7", "libstd++-4.7-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test"] + - os: linux + env: TOOLSET=gcc CC=gcc-4.8 CXX=g++-4.8 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" + addons: + apt: + packages: ["gcc-4.8", "g++-4.8", "libstd++-4.8-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test"] + - os: linux + env: TOOLSET=gcc CC=gcc-4.9 CXX=g++-4.9 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" + addons: + apt: + packages: ["gcc-4.9", "g++-4.9", "libstd++-4.9-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test"] + - os: linux + env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Debug" HWLOC="OFF" SYLVAN_STATS="OFF" + addons: + apt: + packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test"] + - os: linux + env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Debug" HWLOC="ON" SYLVAN_STATS="ON" + addons: + apt: + packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test"] + - os: linux + env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" + addons: + apt: + packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test"] + - os: linux + env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="OFF" + addons: + apt: + packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test"] + - os: linux + env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" VARIANT="coverage" + addons: + apt: + packages: ["gcc-5", "g++-5", "libstd++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test"] + - os: linux + env: TOOLSET=clang CC=/usr/local/clang-3.4/bin/clang 
CXX=/usr/local/clang-3.4/bin/clang++ BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" + addons: + apt: + packages: ["clang-3.4", "libstdc++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test", "llvm-toolchain-precise-3.4"] + - os: linux + env: TOOLSET=clang CC=clang-3.6 CXX=clang++-3.6 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" + addons: + apt: + packages: ["clang-3.6", "libstdc++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test", "llvm-toolchain-precise-3.6"] + - os: linux + env: TOOLSET=clang CC=clang-3.7 CXX=clang++-3.7 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="ON" + addons: + apt: + packages: ["clang-3.7", "libstdc++-5-dev", "libgmp-dev", "cmake", "libhwloc-dev"] + sources: ["ubuntu-toolchain-r-test", "llvm-toolchain-precise-3.7"] + - os: osx + env: TOOLSET=clang CC=clang CXX=clang++ BUILD_TYPE="Debug" HWLOC="ON" SYLVAN_STATS="ON" + - os: osx + env: TOOLSET=clang CC=clang CXX=clang++ BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="OFF" + - os: osx + env: TOOLSET=gcc CC=gcc-4.9 CXX=g++-4.9 BUILD_TYPE="Debug" HWLOC="ON" SYLVAN_STATS="OFF" + - os: osx + env: TOOLSET=gcc CC=gcc-4.9 CXX=g++-4.9 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="OFF" + - os: osx + env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Debug" HWLOC="ON" SYLVAN_STATS="OFF" + - os: osx + env: TOOLSET=gcc CC=gcc-5 CXX=g++-5 BUILD_TYPE="Release" HWLOC="ON" SYLVAN_STATS="OFF" + +install: +- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew update; brew install argp-standalone homebrew/science/hwloc; fi +- if [[ "$TRAVIS_OS_NAME" == "osx" && "$CC" == "gcc-5" ]]; then brew install homebrew/versions/gcc5; fi + +script: +- ${CC} --version +- ${CXX} --version +- cmake . -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DUSE_HWLOC=${HWLOC} -DSYLVAN_STATS=${SYLVAN_STATS} -DWITH_COVERAGE=${COVERAGE} +- make -j 2 +- make test +- examples/simple +- examples/mc models/schedule_world.2.8-rgs.bdd -w 2 | tee /dev/fd/2 | grep -q "1,570,340" +- examples/lddmc models/blocks.2.ldd -w 2 | tee /dev/fd/2 | grep -q "7057 states" + +notifications: + email: false + diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 000000000..27762655b --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,50 @@ +cmake_minimum_required(VERSION 2.6) +project(sylvan C CXX) +enable_testing() + +set(CMAKE_C_FLAGS "-O3 -Wextra -Wall -Werror -fno-strict-aliasing -std=gnu11") +set(CMAKE_CXX_FLAGS "-O3 -Wextra -Wall -Werror -fno-strict-aliasing -Wno-deprecated-register -std=gnu++11") + +option(WITH_COVERAGE "Add generation of test coverage" OFF) +if(WITH_COVERAGE) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O0 -g -coverage") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O0 -g -coverage") + + find_program(GCOV_PATH gcov) + find_program(LCOV_PATH lcov) + find_program(GENHTML_PATH genhtml) + + add_custom_target(coverage + # Cleanup lcov + ${LCOV_PATH} --directory . --zerocounters + # Run tests + COMMAND make test + # Capture counters + COMMAND ${LCOV_PATH} --gcov-tool ${GCOV_PATH} --directory . 
--capture --output-file coverage.info + COMMAND ${LCOV_PATH} --remove coverage.info 'test/*' '/usr/*' 'examples/*' 'src/sylvan_mtbdd*' 'src/lace*' 'src/sylvan_ldd*' 'src/avl.h' 'src/sha2.c' --output-file coverage.info.cleaned + COMMAND ${GENHTML_PATH} -o coverage coverage.info.cleaned + COMMAND ${CMAKE_COMMAND} -E remove coverage.info coverage.info.cleaned + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) +endif() + +set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) +find_package(GMP REQUIRED) +include_directories(${GMP_INCLUDE_DIR}) +include_directories(src) + +include_directories(src) + +add_subdirectory(src) + +option(SYLVAN_BUILD_TEST "Build test programs" ON) + +if(SYLVAN_BUILD_TEST) + add_subdirectory(test) +endif() + +option(SYLVAN_BUILD_EXAMPLES "Build example tools" OFF) + +if(SYLVAN_BUILD_EXAMPLES) + add_subdirectory(examples) +endif() diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 000000000..6e1cf8acc --- /dev/null +++ b/Makefile.am @@ -0,0 +1,5 @@ +ACLOCAL_AMFLAGS = -I m4 + +AM_CFLAGS = -g -O2 -Wall -Wextra -Werror -std=gnu11 + +SUBDIRS = src diff --git a/README.md b/README.md new file mode 100644 index 000000000..f0f88b851 --- /dev/null +++ b/README.md @@ -0,0 +1,97 @@ +Sylvan [![Build Status](https://travis-ci.org/trolando/sylvan.svg?branch=master)](https://travis-ci.org/trolando/sylvan) +====== +Sylvan is a parallel (multi-core) BDD library in C. Sylvan allows both sequential and parallel BDD-based algorithms to benefit from parallelism. Sylvan uses the work-stealing framework Lace and a scalable lockless hashtable to implement scalable multi-core BDD operations. + +Sylvan is developed (© 2011-2016) by the [Formal Methods and Tools](http://fmt.ewi.utwente.nl/) group at the University of Twente as part of the MaDriD project, which is funded by NWO. Sylvan is licensed with the Apache 2.0 license. + +You can contact the main author of Sylvan at . Please let us know if you use Sylvan in your projects. + +Sylvan is available at: https://github.com/utwente-fmt/sylvan +Java/JNI bindings: https://github.com/trolando/jsylvan +Haskell bindings: https://github.com/adamwalker/sylvan-haskell + +Publications +------------ +T. van Dijk and J. van de Pol (2015) [Sylvan: Multi-core Decision Diagrams](http://dx.doi.org/10.1007/978-3-662-46681-0_60). In: TACAS 2015, LNCS 9035. Springer. + +T. van Dijk and A.W. Laarman and J. van de Pol (2012) [Multi-Core BDD Operations for Symbolic Reachability](http://eprints.eemcs.utwente.nl/22166/). In: PDMC 2012, ENTCS. Elsevier. + +Usage +----- +Simple examples can be found in the `examples` subdirectory. The file `simple.cpp` contains a toy program that +uses the C++ objects to perform basic BDD manipulation. +The `mc.c` and `lddmc.c` programs are more advanced examples of symbolic model checking (with example models in the `models` subdirectory). + +Sylvan depends on the [work-stealing framework Lace](http://fmt.ewi.utwente.nl/tools/lace) for its implementation. Lace is embedded in the Sylvan distribution. +To use Sylvan, Lace must be initialized first. +For more details, see the comments in `src/sylvan.h`. + +### Basic functionality + +To create new BDDs, you can use: +- `sylvan_true`: representation of constant `true`. 
+- `sylvan_false`: representation of constant `false`. +- `sylvan_ithvar(var)`: representation of literal <var> (negated: `sylvan_nithvar(var)`) + +To follow the BDD edges and obtain the variable at the root of a BDD, you can use: +- `sylvan_var(bdd)`: obtain variable of the root node of <bdd> - requires that <bdd> is not constant `true` or `false`. +- `sylvan_high(bdd)`: follow high edge of <bdd>. +- `sylvan_low(bdd)`: follow low edge of <bdd>. + +You need to manually reference BDDs that you want to keep during garbage collection: +- `sylvan_ref(bdd)`: add reference to <bdd>. +- `sylvan_deref(bdd)`: remove reference to <bdd>. +- `sylvan_protect(bddptr)`: add a pointer reference to the BDD variable <bddptr> +- `sylvan_unprotect(bddptr)`: remove a pointer reference to the BDD variable <bddptr> + +It is recommended to use `sylvan_protect` and `sylvan_unprotect`. +The C++ objects handle this automatically. + +The following 'primitives' are implemented: +- `sylvan_not(bdd)`: negation of <bdd>. +- `sylvan_ite(a,b,c)`: calculate 'if <a> then <b> else <c>'. +- `sylvan_and(a, b)`: calculate a and b +- `sylvan_or(a, b)`: calculate a or b +- `sylvan_nand(a, b)`: calculate not (a and b) +- `sylvan_nor(a, b)`: calculate not (a or b) +- `sylvan_imp(a, b)`: calculate a implies b +- `sylvan_invimp(a, b)`: calculate implies a +- `sylvan_xor(a, b)`: calculate a xor b +- `sylvan_equiv(a, b)`: calculate a = b +- `sylvan_diff(a, b)`: calculate a and not b +- `sylvan_less(a, b)`: calculate b and not a +- `sylvan_exists(bdd, vars)`: existential quantification of <bdd> with respect to variables <vars>. Here, <vars> is a conjunction of literals. +- `sylvan_forall(bdd, vars)`: universal quantification of <bdd> with respect to variables <vars>. Here, <vars> is a conjunction of literals. + +### Other BDD operations + +See `src/sylvan_bdd.h`, `src/sylvan_mtbdd.h` and `src/sylvan_ldd.h` for other implemented operations. +See `src/sylvan_obj.hpp` for the C++ interface. + +### Garbage collection + +Garbage collection is triggered when trying to insert a new node and no new bucket can be found within a reasonable upper bound. +Garbage collection is stop-the-world and all workers must cooperate on garbage collection. (Beware of deadlocks if you use Sylvan operations in critical sections!) +- `sylvan_gc()`: manually trigger garbage collection. +- `sylvan_gc_enable()`: enable garbage collection. +- `sylvan_gc_disable()`: disable garbage collection. + +### Table resizing + +During garbage collection, it is possible to resize the nodes table and the cache. +Sylvan provides two default implementations: an agressive version that resizes every time garbage collection is performed, +and a less agressive version that only resizes when at least half the table is full. +This can be configured in `src/sylvan_config.h` +It is not possible to decrease the size of the nodes table and the cache. + +### Dynamic reordering + +Dynamic reordening is currently not supported. +For now, we suggest users find a good static variable ordering. + +Troubleshooting +--------------- +Sylvan may require a larger than normal program stack. You may need to increase the program stack size on your system using `ulimit -s`. Segmentation faults on large computations typically indicate a program stack overflow. + +### I am getting the error "unable to allocate memory: ...!" +Sylvan allocates virtual memory using mmap. 
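The function list above also reads as a recipe for the C API: initialize Lace and Sylvan, establish a Lace context with LACE_ME, build and combine BDDs, and protect the results that must survive garbage collection. A sketch under those assumptions (the include name and the table/cache sizes are illustrative; initialization mirrors the bundled test and example programs):

    #include <cassert>
    #include <sylvan.h>   // assumed include name for the C interface

    int main()
    {
        lace_init(0, 1000000);
        lace_startup(0, NULL, NULL);
        sylvan_init_package(1LL<<20, 1LL<<24, 1LL<<18, 1LL<<22);
        sylvan_init_bdd(1);

        LACE_ME;  // BDD operations are Lace tasks; this sets up a worker context in the current thread

        BDD a = sylvan_ithvar(1);
        BDD b = sylvan_ithvar(2);
        BDD f = sylvan_and(a, sylvan_not(b));   // f = a and not b (a and b left unprotected for brevity)
        sylvan_protect(&f);                     // keep f alive across garbage collection

        assert(f == sylvan_diff(a, b));         // "a and not b" coincides with sylvan_diff, as listed above

        BDD g = sylvan_exists(f, b);            // a single variable is itself a conjunction of one literal
        assert(g == a);                         // quantifying away variable 2 leaves just a

        sylvan_unprotect(&f);
        sylvan_quit();
        lace_exit();
        return 0;
    }
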
If you specify a combined size for the cache and node table larger than your actual available memory you may need to set `vm.overcommit_memory` to `1`. E.g. `echo 1 > /proc/sys/vm/overcommit_memory`. You can make this setting permanent with `echo "vm.overcommit_memory = 1" > /etc/sysctl.d/99-sylvan.conf`. You can verify the setting with `cat /proc/sys/vm/overcommit_memory`. It should report `1`. diff --git a/cmake/FindGMP.cmake b/cmake/FindGMP.cmake new file mode 100644 index 000000000..62c75c034 --- /dev/null +++ b/cmake/FindGMP.cmake @@ -0,0 +1,20 @@ +FIND_PATH(GMP_INCLUDE_DIR + gmp.h ) + +FIND_LIBRARY(GMP_LIBRARIES + NAMES gmp + HINTS /usr/local/lib ) + +IF (GMP_INCLUDE_DIR AND GMP_LIBRARIES) + SET(GMP_FOUND TRUE) +ENDIF (GMP_INCLUDE_DIR AND GMP_LIBRARIES) + +IF (GMP_FOUND) + IF (NOT GMP_FIND_QUIETLY) + MESSAGE(STATUS "Found GMP: ${GMP_LIBRARIES}") + ENDIF (NOT GMP_FIND_QUIETLY) +ELSE (GMP_FOUND) + IF (GMP_FIND_REQUIRED) + MESSAGE(FATAL_ERROR "Could not find GMP") + ENDIF (GMP_FIND_REQUIRED) +ENDIF (GMP_FOUND) diff --git a/configure.ac b/configure.ac new file mode 100644 index 000000000..5363e6daa --- /dev/null +++ b/configure.ac @@ -0,0 +1,21 @@ +AC_PREREQ([2.60]) +AC_INIT([sylvan], [1.0]) +AC_CONFIG_MACRO_DIR([m4]) +AC_CONFIG_AUX_DIR([tools]) +AM_INIT_AUTOMAKE([foreign]) + +AC_PROG_CC +AX_CHECK_COMPILE_FLAG([-std=c11],,[AC_MSG_FAILURE([no acceptable C11 compiler found.])]) +AC_PROG_CXX +LT_INIT + +AC_CHECKING([for any suitable hwloc installation]) +AC_CHECK_LIB([hwloc], [hwloc_topology_init], [AC_CHECK_HEADER([hwloc.h], [hwloc=yes])]) +AM_CONDITIONAL([HAVE_LIBHWLOC], [test "$hwloc" = "yes"]) + +AC_CANONICAL_HOST +AM_CONDITIONAL([DARWIN], [case $host_os in darwin*) true;; *) false;; esac]) +# test x$(uname) == "xDarwin"]) + +AC_CONFIG_FILES([Makefile src/Makefile]) +AC_OUTPUT diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt new file mode 100644 index 000000000..bb335935d --- /dev/null +++ b/examples/CMakeLists.txt @@ -0,0 +1,32 @@ +cmake_minimum_required(VERSION 2.6) +project(sylvan C CXX) + +include_directories(.) + +add_executable(mc mc.c getrss.h getrss.c) +target_link_libraries(mc sylvan) + +add_executable(lddmc lddmc.c getrss.h getrss.c) +target_link_libraries(lddmc sylvan) + +add_executable(simple simple.cpp) +target_link_libraries(simple sylvan stdc++) + +include(CheckIncludeFiles) +check_include_files("gperftools/profiler.h" HAVE_PROFILER) + +if(HAVE_PROFILER) + set_target_properties(mc PROPERTIES COMPILE_DEFINITIONS "HAVE_PROFILER") + target_link_libraries(mc profiler) + + set_target_properties(lddmc PROPERTIES COMPILE_DEFINITIONS "HAVE_PROFILER") + target_link_libraries(lddmc profiler) +endif() + +if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin") + # add argp library for OSX + target_link_libraries(mc argp) + target_link_libraries(lddmc argp) +endif() + + diff --git a/examples/getrss.c b/examples/getrss.c new file mode 100644 index 000000000..f3aa5e381 --- /dev/null +++ b/examples/getrss.c @@ -0,0 +1,68 @@ +/* + * Author: David Robert Nadeau + * Site: http://NadeauSoftware.com/ + * License: Creative Commons Attribution 3.0 Unported License + * http://creativecommons.org/licenses/by/3.0/deed.en_US + */ + +/* + * Modified by Tom van Dijk to remove WIN32 and solaris code + */ + +#if defined(__APPLE__) && defined(__MACH__) +#include +#include +#include +#elif defined(__linux__) || defined(__linux) || defined(linux) || defined(__gnu_linux__) +#include +#include +#include +#else +#error "Cannot define getPeakRSS( ) or getCurrentRSS( ) for an unknown OS." 
+#endif + +/** + * Returns the peak (maximum so far) resident set size (physical + * memory use) measured in bytes, or zero if the value cannot be + * determined on this OS. + */ +size_t +getPeakRSS() +{ + struct rusage rusage; + getrusage(RUSAGE_SELF, &rusage); +#if defined(__APPLE__) && defined(__MACH__) + return (size_t)rusage.ru_maxrss; +#else + return (size_t)(rusage.ru_maxrss * 1024L); +#endif +} + +/** + * Returns the current resident set size (physical memory use) measured + * in bytes, or zero if the value cannot be determined on this OS. + */ +size_t +getCurrentRSS() +{ +#if defined(__APPLE__) && defined(__MACH__) + /* OSX ------------------------------------------------------ */ + struct mach_task_basic_info info; + mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT; + if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) != KERN_SUCCESS) + return (size_t)0L; /* Can't access? */ + return (size_t)info.resident_size; +#else + /* Linux ---------------------------------------------------- */ + long rss = 0L; + FILE *fp = NULL; + if ((fp = fopen("/proc/self/statm", "r")) == NULL) + return (size_t)0L; /* Can't open? */ + if (fscanf(fp, "%*s%ld", &rss) != 1) { + fclose(fp); + return (size_t)0L; /* Can't read? */ + } + fclose(fp); + return (size_t)rss * (size_t)sysconf(_SC_PAGESIZE); +#endif +} diff --git a/examples/getrss.h b/examples/getrss.h new file mode 100644 index 000000000..653e78e76 --- /dev/null +++ b/examples/getrss.h @@ -0,0 +1,26 @@ +#ifndef GETRSS_H +#define GETRSS_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * Returns the peak (maximum so far) resident set size (physical + * memory use) measured in bytes, or zero if the value cannot be + * determined on this OS. + */ +size_t getPeakRSS(); + +/** + * Returns the current resident set size (physical memory use) measured + * in bytes, or zero if the value cannot be determined on this OS. 
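getPeakRSS and getCurrentRSS are plain C functions (wrapped in extern "C" guards for C++), so a caller only needs this header; a minimal sketch, with output formatting chosen purely for illustration:

    #include <cstddef>
    #include <cstdio>
    #include "getrss.h"

    int main()
    {
        // Both functions report 0 when the value cannot be determined on this OS.
        std::printf("current RSS: %zu bytes\n", getCurrentRSS());
        std::printf("peak RSS:    %zu bytes\n", getPeakRSS());
        return 0;
    }
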
+ */ +size_t getCurrentRSS(); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + + +#endif diff --git a/examples/lddmc.c b/examples/lddmc.c new file mode 100644 index 000000000..9e9c9c008 --- /dev/null +++ b/examples/lddmc.c @@ -0,0 +1,499 @@ +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_PROFILER +#include +#endif + +#include +#include +#include + +/* Configuration */ +static int report_levels = 0; // report states at start of every level +static int report_table = 0; // report table size at end of every level +static int strategy = 1; // set to 1 = use PAR strategy; set to 0 = use BFS strategy +static int check_deadlocks = 0; // set to 1 to check for deadlocks +static int print_transition_matrix = 1; // print transition relation matrix +static int workers = 0; // autodetect +static char* model_filename = NULL; // filename of model +#ifdef HAVE_PROFILER +static char* profile_filename = NULL; // filename for profiling +#endif + +/* argp configuration */ +static struct argp_option options[] = +{ + {"workers", 'w', "", 0, "Number of workers (default=0: autodetect)", 0}, + {"strategy", 's', "", 0, "Strategy for reachability (default=par)", 0}, +#ifdef HAVE_PROFILER + {"profiler", 'p', "", 0, "Filename for profiling", 0}, +#endif + {"deadlocks", 3, 0, 0, "Check for deadlocks", 1}, + {"count-states", 1, 0, 0, "Report #states at each level", 1}, + {"count-table", 2, 0, 0, "Report table usage at each level", 1}, + {0, 0, 0, 0, 0, 0} +}; +static error_t +parse_opt(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case 'w': + workers = atoi(arg); + break; + case 's': + if (strcmp(arg, "bfs")==0) strategy = 0; + else if (strcmp(arg, "par")==0) strategy = 1; + else if (strcmp(arg, "sat")==0) strategy = 2; + else argp_usage(state); + break; + case 3: + check_deadlocks = 1; + break; + case 1: + report_levels = 1; + break; + case 2: + report_table = 1; + break; +#ifdef HAVE_PROFILER + case 'p': + profile_filename = arg; + break; +#endif + case ARGP_KEY_ARG: + if (state->arg_num >= 1) argp_usage(state); + model_filename = arg; + break; + case ARGP_KEY_END: + if (state->arg_num < 1) argp_usage(state); + break; + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} +static struct argp argp = { options, parse_opt, "", 0, 0, 0, 0 }; + +/* Globals */ +typedef struct set +{ + MDD mdd; + MDD proj; + int size; +} *set_t; + +typedef struct relation +{ + MDD mdd; + MDD meta; + int size; +} *rel_t; + +static size_t vector_size; // size of vector +static int next_count; // number of partitions of the transition relation +static rel_t *next; // each partition of the transition relation + +#define Abort(...) 
{ fprintf(stderr, __VA_ARGS__); exit(-1); } + +/* Load a set from file */ +static set_t +set_load(FILE* f) +{ + lddmc_serialize_fromfile(f); + + size_t mdd; + size_t proj; + int size; + + if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); + if (fread(&proj, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); + if (fread(&size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n"); + + LACE_ME; + + set_t set = (set_t)malloc(sizeof(struct set)); + set->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd)); + set->proj = lddmc_ref(lddmc_serialize_get_reversed(proj)); + set->size = size; + + return set; +} + +static int +calculate_size(MDD meta) +{ + int result = 0; + uint32_t val = lddmc_getvalue(meta); + while (val != (uint32_t)-1) { + if (val != 0) result += 1; + meta = lddmc_follow(meta, val); + assert(meta != lddmc_true && meta != lddmc_false); + val = lddmc_getvalue(meta); + } + return result; +} + +/* Load a relation from file */ +static rel_t +rel_load(FILE* f) +{ + lddmc_serialize_fromfile(f); + + size_t mdd; + size_t meta; + + if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); + if (fread(&meta, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); + + LACE_ME; + + rel_t rel = (rel_t)malloc(sizeof(struct relation)); + rel->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd)); + rel->meta = lddmc_ref(lddmc_serialize_get_reversed(meta)); + rel->size = calculate_size(rel->meta); + + return rel; +} + +static void +print_example(MDD example) +{ + if (example != lddmc_false) { + LACE_ME; + uint32_t vec[vector_size]; + lddmc_sat_one(example, vec, vector_size); + + size_t i; + printf("["); + for (i=0; i0) printf(","); + printf("%" PRIu32, vec[i]); + } + printf("]"); + } +} + +static void +print_matrix(size_t size, MDD meta) +{ + if (size == 0) return; + uint32_t val = lddmc_getvalue(meta); + if (val == 1) { + printf("+"); + print_matrix(size-1, lddmc_follow(lddmc_follow(meta, 1), 2)); + } else { + if (val == (uint32_t)-1) printf("-"); + else if (val == 0) printf("-"); + else if (val == 3) printf("r"); + else if (val == 4) printf("w"); + print_matrix(size-1, lddmc_follow(meta, val)); + } +} + +static char* +to_h(double size, char *buf) +{ + const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}; + int i = 0; + for (;size>1024;size/=1024) i++; + sprintf(buf, "%.*f %s", i, size, units[i]); + return buf; +} + +static int +get_first(MDD meta) +{ + uint32_t val = lddmc_getvalue(meta); + if (val != 0) return 0; + return 1+get_first(lddmc_follow(meta, val)); +} + +/* Straight-forward implementation of parallel reduction */ +TASK_5(MDD, go_par, MDD, cur, MDD, visited, size_t, from, size_t, len, MDD*, deadlocks) +{ + if (len == 1) { + // Calculate NEW successors (not in visited) + MDD succ = lddmc_ref(lddmc_relprod(cur, next[from]->mdd, next[from]->meta)); + if (deadlocks) { + // check which MDDs in deadlocks do not have a successor in this relation + MDD anc = lddmc_ref(lddmc_relprev(succ, next[from]->mdd, next[from]->meta, cur)); + *deadlocks = lddmc_ref(lddmc_minus(*deadlocks, anc)); + lddmc_deref(anc); + } + MDD result = lddmc_ref(lddmc_minus(succ, visited)); + lddmc_deref(succ); + return result; + } else { + MDD deadlocks_left; + MDD deadlocks_right; + if (deadlocks) { + deadlocks_left = *deadlocks; + deadlocks_right = *deadlocks; + } + + // Recursively calculate left+right + SPAWN(go_par, cur, visited, from, (len+1)/2, deadlocks ? 
&deadlocks_left: NULL); + MDD right = CALL(go_par, cur, visited, from+(len+1)/2, len/2, deadlocks ? &deadlocks_right : NULL); + MDD left = SYNC(go_par); + + // Merge results of left+right + MDD result = lddmc_ref(lddmc_union(left, right)); + lddmc_deref(left); + lddmc_deref(right); + + if (deadlocks) { + *deadlocks = lddmc_ref(lddmc_intersect(deadlocks_left, deadlocks_right)); + lddmc_deref(deadlocks_left); + lddmc_deref(deadlocks_right); + } + + return result; + } +} + +/* PAR strategy, parallel strategy (operations called in parallel *and* parallelized by Sylvan) */ +VOID_TASK_1(par, set_t, set) +{ + MDD visited = set->mdd; + MDD new = lddmc_ref(visited); + size_t counter = 1; + do { + char buf[32]; + to_h(getCurrentRSS(), buf); + printf("Memory usage: %s\n", buf); + printf("Level %zu... ", counter++); + if (report_levels) { + printf("%zu states... ", (size_t)lddmc_satcount_cached(visited)); + } + fflush(stdout); + + // calculate successors in parallel + MDD cur = new; + MDD deadlocks = cur; + new = CALL(go_par, cur, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL); + lddmc_deref(cur); + + if (check_deadlocks) { + printf("found %zu deadlock states... ", (size_t)lddmc_satcount_cached(deadlocks)); + if (deadlocks != lddmc_false) { + printf("example: "); + print_example(deadlocks); + printf("... "); + check_deadlocks = 0; + } + } + + // visited = visited + new + MDD old_visited = visited; + visited = lddmc_ref(lddmc_union(visited, new)); + lddmc_deref(old_visited); + + if (report_table) { + size_t filled, total; + sylvan_table_usage(&filled, &total); + printf("done, table: %0.1f%% full (%zu nodes).\n", 100.0*(double)filled/total, filled); + } else { + printf("done.\n"); + } + } while (new != lddmc_false); + lddmc_deref(new); + set->mdd = visited; +} + +/* Sequential version of merge-reduction */ +TASK_5(MDD, go_bfs, MDD, cur, MDD, visited, size_t, from, size_t, len, MDD*, deadlocks) +{ + if (len == 1) { + // Calculate NEW successors (not in visited) + MDD succ = lddmc_ref(lddmc_relprod(cur, next[from]->mdd, next[from]->meta)); + if (deadlocks) { + // check which MDDs in deadlocks do not have a successor in this relation + MDD anc = lddmc_ref(lddmc_relprev(succ, next[from]->mdd, next[from]->meta, cur)); + *deadlocks = lddmc_ref(lddmc_minus(*deadlocks, anc)); + lddmc_deref(anc); + } + MDD result = lddmc_ref(lddmc_minus(succ, visited)); + lddmc_deref(succ); + return result; + } else { + MDD deadlocks_left; + MDD deadlocks_right; + if (deadlocks) { + deadlocks_left = *deadlocks; + deadlocks_right = *deadlocks; + } + + // Recursively calculate left+right + MDD left = CALL(go_bfs, cur, visited, from, (len+1)/2, deadlocks ? &deadlocks_left : NULL); + MDD right = CALL(go_bfs, cur, visited, from+(len+1)/2, len/2, deadlocks ? &deadlocks_right : NULL); + + // Merge results of left+right + MDD result = lddmc_ref(lddmc_union(left, right)); + lddmc_deref(left); + lddmc_deref(right); + + if (deadlocks) { + *deadlocks = lddmc_ref(lddmc_intersect(deadlocks_left, deadlocks_right)); + lddmc_deref(deadlocks_left); + lddmc_deref(deadlocks_right); + } + + return result; + } +} + +/* BFS strategy, sequential strategy (but operations are parallelized by Sylvan) */ +VOID_TASK_1(bfs, set_t, set) +{ + MDD visited = set->mdd; + MDD new = lddmc_ref(visited); + size_t counter = 1; + do { + char buf[32]; + to_h(getCurrentRSS(), buf); + printf("Memory usage: %s\n", buf); + printf("Level %zu... ", counter++); + if (report_levels) { + printf("%zu states... 
", (size_t)lddmc_satcount_cached(visited)); + } + fflush(stdout); + + MDD cur = new; + MDD deadlocks = cur; + new = CALL(go_bfs, cur, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL); + lddmc_deref(cur); + + if (check_deadlocks) { + printf("found %zu deadlock states... ", (size_t)lddmc_satcount_cached(deadlocks)); + if (deadlocks != lddmc_false) { + printf("example: "); + print_example(deadlocks); + printf("... "); + check_deadlocks = 0; + } + } + + // visited = visited + new + MDD old_visited = visited; + visited = lddmc_ref(lddmc_union(visited, new)); + lddmc_deref(old_visited); + + if (report_table) { + size_t filled, total; + sylvan_table_usage(&filled, &total); + printf("done, table: %0.1f%% full (%zu nodes).\n", 100.0*(double)filled/total, filled); + } else { + printf("done.\n"); + } + } while (new != lddmc_false); + lddmc_deref(new); + set->mdd = visited; +} + +/* Obtain current wallclock time */ +static double +wctime() +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return (tv.tv_sec + 1E-6 * tv.tv_usec); +} + +int +main(int argc, char **argv) +{ + argp_parse(&argp, argc, argv, 0, 0, 0); + + FILE *f = fopen(model_filename, "r"); + if (f == NULL) { + fprintf(stderr, "Cannot open file '%s'!\n", model_filename); + return -1; + } + + // Init Lace + lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue + lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup + + // Init Sylvan LDDmc + // Nodes table size: 24 bytes * 2**N_nodes + // Cache table size: 36 bytes * 2**N_cache + // With: N_nodes=25, N_cache=24: 1.3 GB memory + sylvan_init_package(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26); + sylvan_init_ldd(); + + // Read and report domain info (integers per vector and bits per integer) + if (fread(&vector_size, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n"); + + printf("Vector size: %zu\n", vector_size); + + // Read initial state + printf("Loading initial state... "); + fflush(stdout); + set_t states = set_load(f); + printf("done.\n"); + + // Read transitions + if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n"); + next = (rel_t*)malloc(sizeof(rel_t) * next_count); + + printf("Loading transition relations... 
"); + fflush(stdout); + int i; + for (i=0; imdd)); + for (i=0; imdd)); + } + + if (print_transition_matrix) { + for (i=0; imeta); + printf(" (%d)\n", get_first(next[i]->meta)); + } + } + + LACE_ME; + +#ifdef HAVE_PROFILER + if (profile_filename != NULL) ProfilerStart(profile_filename); +#endif + if (strategy == 1) { + double t1 = wctime(); + CALL(par, states); + double t2 = wctime(); + printf("PAR Time: %f\n", t2-t1); + } else { + double t1 = wctime(); + CALL(bfs, states); + double t2 = wctime(); + printf("BFS Time: %f\n", t2-t1); + } +#ifdef HAVE_PROFILER + if (profile_filename != NULL) ProfilerStop(); +#endif + + // Now we just have states + printf("Final states: %zu states\n", (size_t)lddmc_satcount_cached(states->mdd)); + printf("Final states: %zu MDD nodes\n", lddmc_nodecount(states->mdd)); + + sylvan_stats_report(stdout, 1); + + return 0; +} diff --git a/examples/mc.c b/examples/mc.c new file mode 100644 index 000000000..2e7938fd9 --- /dev/null +++ b/examples/mc.c @@ -0,0 +1,616 @@ +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_PROFILER +#include +#endif + +#include +#include + +/* Configuration */ +static int report_levels = 0; // report states at end of every level +static int report_table = 0; // report table size at end of every level +static int report_nodes = 0; // report number of nodes of BDDs +static int strategy = 1; // set to 1 = use PAR strategy; set to 0 = use BFS strategy +static int check_deadlocks = 0; // set to 1 to check for deadlocks +static int merge_relations = 0; // merge relations to 1 relation +static int print_transition_matrix = 0; // print transition relation matrix +static int workers = 0; // autodetect +static char* model_filename = NULL; // filename of model +#ifdef HAVE_PROFILER +static char* profile_filename = NULL; // filename for profiling +#endif + +/* argp configuration */ +static struct argp_option options[] = +{ + {"workers", 'w', "", 0, "Number of workers (default=0: autodetect)", 0}, + {"strategy", 's', "", 0, "Strategy for reachability (default=par)", 0}, +#ifdef HAVE_PROFILER + {"profiler", 'p', "", 0, "Filename for profiling", 0}, +#endif + {"deadlocks", 3, 0, 0, "Check for deadlocks", 1}, + {"count-nodes", 5, 0, 0, "Report #nodes for BDDs", 1}, + {"count-states", 1, 0, 0, "Report #states at each level", 1}, + {"count-table", 2, 0, 0, "Report table usage at each level", 1}, + {"merge-relations", 6, 0, 0, "Merge transition relations into one transition relation", 1}, + {"print-matrix", 4, 0, 0, "Print transition matrix", 1}, + {0, 0, 0, 0, 0, 0} +}; +static error_t +parse_opt(int key, char *arg, struct argp_state *state) +{ + switch (key) { + case 'w': + workers = atoi(arg); + break; + case 's': + if (strcmp(arg, "bfs")==0) strategy = 0; + else if (strcmp(arg, "par")==0) strategy = 1; + else if (strcmp(arg, "sat")==0) strategy = 2; + else argp_usage(state); + break; + case 4: + print_transition_matrix = 1; + break; + case 3: + check_deadlocks = 1; + break; + case 1: + report_levels = 1; + break; + case 2: + report_table = 1; + break; + case 6: + merge_relations = 1; + break; +#ifdef HAVE_PROFILER + case 'p': + profile_filename = arg; + break; +#endif + case ARGP_KEY_ARG: + if (state->arg_num >= 1) argp_usage(state); + model_filename = arg; + break; + case ARGP_KEY_END: + if (state->arg_num < 1) argp_usage(state); + break; + default: + return ARGP_ERR_UNKNOWN; + } + return 0; +} +static struct argp argp = { options, parse_opt, "", 0, 0, 0, 0 }; + +/* Globals */ +typedef struct set +{ + BDD bdd; + BDD 
variables; // all variables in the set (used by satcount) +} *set_t; + +typedef struct relation +{ + BDD bdd; + BDD variables; // all variables in the relation (used by relprod) +} *rel_t; + +static int vector_size; // size of vector +static int statebits, actionbits; // number of bits for state, number of bits for action +static int bits_per_integer; // number of bits per integer in the vector +static int next_count; // number of partitions of the transition relation +static rel_t *next; // each partition of the transition relation + +/* Obtain current wallclock time */ +static double +wctime() +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return (tv.tv_sec + 1E-6 * tv.tv_usec); +} + +static double t_start; +#define INFO(s, ...) fprintf(stdout, "[% 8.2f] " s, wctime()-t_start, ##__VA_ARGS__) +#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); } + +/* Load a set from file */ +#define set_load(f) CALL(set_load, f) +TASK_1(set_t, set_load, FILE*, f) +{ + sylvan_serialize_fromfile(f); + + size_t set_bdd, set_vector_size, set_state_vars; + if ((fread(&set_bdd, sizeof(size_t), 1, f) != 1) || + (fread(&set_vector_size, sizeof(size_t), 1, f) != 1) || + (fread(&set_state_vars, sizeof(size_t), 1, f) != 1)) { + Abort("Invalid input file!\n"); + } + + set_t set = (set_t)malloc(sizeof(struct set)); + set->bdd = sylvan_serialize_get_reversed(set_bdd); + set->variables = sylvan_support(sylvan_serialize_get_reversed(set_state_vars)); + + sylvan_protect(&set->bdd); + sylvan_protect(&set->variables); + + return set; +} + +/* Load a relation from file */ +#define rel_load(f) CALL(rel_load, f) +TASK_1(rel_t, rel_load, FILE*, f) +{ + sylvan_serialize_fromfile(f); + + size_t rel_bdd, rel_vars; + if ((fread(&rel_bdd, sizeof(size_t), 1, f) != 1) || + (fread(&rel_vars, sizeof(size_t), 1, f) != 1)) { + Abort("Invalid input file!\n"); + } + + rel_t rel = (rel_t)malloc(sizeof(struct relation)); + rel->bdd = sylvan_serialize_get_reversed(rel_bdd); + rel->variables = sylvan_support(sylvan_serialize_get_reversed(rel_vars)); + + sylvan_protect(&rel->bdd); + sylvan_protect(&rel->variables); + + return rel; +} + +#define print_example(example, variables) CALL(print_example, example, variables) +VOID_TASK_2(print_example, BDD, example, BDDSET, variables) +{ + uint8_t str[vector_size * bits_per_integer]; + + if (example != sylvan_false) { + sylvan_sat_one(example, variables, str); + printf("["); + for (int i=0; i0) printf(","); + printf("%" PRIu32, res); + } + printf("]"); + } +} + +/* Straight-forward implementation of parallel reduction */ +TASK_5(BDD, go_par, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, deadlocks) +{ + if (len == 1) { + // Calculate NEW successors (not in visited) + BDD succ = sylvan_relnext(cur, next[from]->bdd, next[from]->variables); + bdd_refs_push(succ); + if (deadlocks) { + // check which BDDs in deadlocks do not have a successor in this relation + BDD anc = sylvan_relprev(next[from]->bdd, succ, next[from]->variables); + bdd_refs_push(anc); + *deadlocks = sylvan_diff(*deadlocks, anc); + bdd_refs_pop(1); + } + BDD result = sylvan_diff(succ, visited); + bdd_refs_pop(1); + return result; + } else { + BDD deadlocks_left; + BDD deadlocks_right; + if (deadlocks) { + deadlocks_left = *deadlocks; + deadlocks_right = *deadlocks; + sylvan_protect(&deadlocks_left); + sylvan_protect(&deadlocks_right); + } + + // Recursively calculate left+right + bdd_refs_spawn(SPAWN(go_par, cur, visited, from, (len+1)/2, deadlocks ? 
&deadlocks_left: NULL)); + BDD right = bdd_refs_push(CALL(go_par, cur, visited, from+(len+1)/2, len/2, deadlocks ? &deadlocks_right : NULL)); + BDD left = bdd_refs_push(bdd_refs_sync(SYNC(go_par))); + + // Merge results of left+right + BDD result = sylvan_or(left, right); + bdd_refs_pop(2); + + if (deadlocks) { + bdd_refs_push(result); + *deadlocks = sylvan_and(deadlocks_left, deadlocks_right); + sylvan_unprotect(&deadlocks_left); + sylvan_unprotect(&deadlocks_right); + bdd_refs_pop(1); + } + + return result; + } +} + +/* PAR strategy, parallel strategy (operations called in parallel *and* parallelized by Sylvan) */ +VOID_TASK_1(par, set_t, set) +{ + BDD visited = set->bdd; + BDD next_level = visited; + BDD cur_level = sylvan_false; + BDD deadlocks = sylvan_false; + + sylvan_protect(&visited); + sylvan_protect(&next_level); + sylvan_protect(&cur_level); + sylvan_protect(&deadlocks); + + int iteration = 1; + do { + // calculate successors in parallel + cur_level = next_level; + deadlocks = cur_level; + + next_level = CALL(go_par, cur_level, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL); + + if (check_deadlocks && deadlocks != sylvan_false) { + INFO("Found %'0.0f deadlock states... ", sylvan_satcount(deadlocks, set->variables)); + if (deadlocks != sylvan_false) { + printf("example: "); + print_example(deadlocks, set->variables); + check_deadlocks = 0; + } + printf("\n"); + } + + // visited = visited + new + visited = sylvan_or(visited, next_level); + + if (report_table && report_levels) { + size_t filled, total; + sylvan_table_usage(&filled, &total); + INFO("Level %d done, %'0.0f states explored, table: %0.1f%% full (%'zu nodes)\n", + iteration, sylvan_satcount(visited, set->variables), + 100.0*(double)filled/total, filled); + } else if (report_table) { + size_t filled, total; + sylvan_table_usage(&filled, &total); + INFO("Level %d done, table: %0.1f%% full (%'zu nodes)\n", + iteration, + 100.0*(double)filled/total, filled); + } else if (report_levels) { + INFO("Level %d done, %'0.0f states explored\n", iteration, sylvan_satcount(visited, set->variables)); + } else { + INFO("Level %d done\n", iteration); + } + iteration++; + } while (next_level != sylvan_false); + + set->bdd = visited; + + sylvan_unprotect(&visited); + sylvan_unprotect(&next_level); + sylvan_unprotect(&cur_level); + sylvan_unprotect(&deadlocks); +} + +/* Sequential version of merge-reduction */ +TASK_5(BDD, go_bfs, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, deadlocks) +{ + if (len == 1) { + // Calculate NEW successors (not in visited) + BDD succ = sylvan_relnext(cur, next[from]->bdd, next[from]->variables); + bdd_refs_push(succ); + if (deadlocks) { + // check which BDDs in deadlocks do not have a successor in this relation + BDD anc = sylvan_relprev(next[from]->bdd, succ, next[from]->variables); + bdd_refs_push(anc); + *deadlocks = sylvan_diff(*deadlocks, anc); + bdd_refs_pop(1); + } + BDD result = sylvan_diff(succ, visited); + bdd_refs_pop(1); + return result; + } else { + BDD deadlocks_left; + BDD deadlocks_right; + if (deadlocks) { + deadlocks_left = *deadlocks; + deadlocks_right = *deadlocks; + sylvan_protect(&deadlocks_left); + sylvan_protect(&deadlocks_right); + } + + // Recursively calculate left+right + BDD left = CALL(go_bfs, cur, visited, from, (len+1)/2, deadlocks ? &deadlocks_left : NULL); + bdd_refs_push(left); + BDD right = CALL(go_bfs, cur, visited, from+(len+1)/2, len/2, deadlocks ? 
&deadlocks_right : NULL); + bdd_refs_push(right); + + // Merge results of left+right + BDD result = sylvan_or(left, right); + bdd_refs_pop(2); + + if (deadlocks) { + bdd_refs_push(result); + *deadlocks = sylvan_and(deadlocks_left, deadlocks_right); + sylvan_unprotect(&deadlocks_left); + sylvan_unprotect(&deadlocks_right); + bdd_refs_pop(1); + } + + return result; + } +} + +/* BFS strategy, sequential strategy (but operations are parallelized by Sylvan) */ +VOID_TASK_1(bfs, set_t, set) +{ + BDD visited = set->bdd; + BDD next_level = visited; + BDD cur_level = sylvan_false; + BDD deadlocks = sylvan_false; + + sylvan_protect(&visited); + sylvan_protect(&next_level); + sylvan_protect(&cur_level); + sylvan_protect(&deadlocks); + + int iteration = 1; + do { + // calculate successors in parallel + cur_level = next_level; + deadlocks = cur_level; + + next_level = CALL(go_bfs, cur_level, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL); + + if (check_deadlocks && deadlocks != sylvan_false) { + INFO("Found %'0.0f deadlock states... ", sylvan_satcount(deadlocks, set->variables)); + if (deadlocks != sylvan_false) { + printf("example: "); + print_example(deadlocks, set->variables); + check_deadlocks = 0; + } + printf("\n"); + } + + // visited = visited + new + visited = sylvan_or(visited, next_level); + + if (report_table && report_levels) { + size_t filled, total; + sylvan_table_usage(&filled, &total); + INFO("Level %d done, %'0.0f states explored, table: %0.1f%% full (%'zu nodes)\n", + iteration, sylvan_satcount(visited, set->variables), + 100.0*(double)filled/total, filled); + } else if (report_table) { + size_t filled, total; + sylvan_table_usage(&filled, &total); + INFO("Level %d done, table: %0.1f%% full (%'zu nodes)\n", + iteration, + 100.0*(double)filled/total, filled); + } else if (report_levels) { + INFO("Level %d done, %'0.0f states explored\n", iteration, sylvan_satcount(visited, set->variables)); + } else { + INFO("Level %d done\n", iteration); + } + iteration++; + } while (next_level != sylvan_false); + + set->bdd = visited; + + sylvan_unprotect(&visited); + sylvan_unprotect(&next_level); + sylvan_unprotect(&cur_level); + sylvan_unprotect(&deadlocks); +} + +/** + * Extend a transition relation to a larger domain (using s=s') + */ +#define extend_relation(rel, vars) CALL(extend_relation, rel, vars) +TASK_2(BDD, extend_relation, BDD, relation, BDDSET, variables) +{ + /* first determine which state BDD variables are in rel */ + int has[statebits]; + for (int i=0; i= (unsigned)statebits) break; // action labels + has[v/2] = 1; + s = sylvan_set_next(s); + } + + /* create "s=s'" for all variables not in rel */ + BDD eq = sylvan_true; + for (int i=statebits-1; i>=0; i--) { + if (has[i]) continue; + BDD low = sylvan_makenode(2*i+1, eq, sylvan_false); + bdd_refs_push(low); + BDD high = sylvan_makenode(2*i+1, sylvan_false, eq); + bdd_refs_pop(1); + eq = sylvan_makenode(2*i, low, high); + } + + bdd_refs_push(eq); + BDD result = sylvan_and(relation, eq); + bdd_refs_pop(1); + + return result; +} + +/** + * Compute \BigUnion ( sets[i] ) + */ +#define big_union(first, count) CALL(big_union, first, count) +TASK_2(BDD, big_union, int, first, int, count) +{ + if (count == 1) return next[first]->bdd; + + bdd_refs_spawn(SPAWN(big_union, first, count/2)); + BDD right = bdd_refs_push(CALL(big_union, first+count/2, count-count/2)); + BDD left = bdd_refs_push(bdd_refs_sync(SYNC(big_union))); + BDD result = sylvan_or(left, right); + bdd_refs_pop(2); + return result; +} + +static void +print_matrix(BDD 
vars) +{ + for (int i=0; i= next_s) break; + } + } else { + fprintf(stdout, "-"); + } + } + } +} + +VOID_TASK_0(gc_start) +{ + INFO("(GC) Starting garbage collection...\n"); +} + +VOID_TASK_0(gc_end) +{ + INFO("(GC) Garbage collection done.\n"); +} + +int +main(int argc, char **argv) +{ + argp_parse(&argp, argc, argv, 0, 0, 0); + setlocale(LC_NUMERIC, "en_US.utf-8"); + t_start = wctime(); + + FILE *f = fopen(model_filename, "r"); + if (f == NULL) { + fprintf(stderr, "Cannot open file '%s'!\n", model_filename); + return -1; + } + + // Init Lace + lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue + lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup + + LACE_ME; + + // Init Sylvan + // Nodes table size: 24 bytes * 2**N_nodes + // Cache table size: 36 bytes * 2**N_cache + // With: N_nodes=25, N_cache=24: 1.3 GB memory + sylvan_init_package(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26); + sylvan_init_bdd(6); // granularity 6 is decent default value - 1 means "use cache for every operation" + sylvan_gc_add_mark(0, TASK(gc_start)); + sylvan_gc_add_mark(40, TASK(gc_end)); + + /* Load domain information */ + if ((fread(&vector_size, sizeof(int), 1, f) != 1) || + (fread(&statebits, sizeof(int), 1, f) != 1) || + (fread(&actionbits, sizeof(int), 1, f) != 1)) { + Abort("Invalid input file!\n"); + } + + bits_per_integer = statebits; + statebits *= vector_size; + + // Read initial state + set_t states = set_load(f); + + // Read transitions + if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n"); + next = (rel_t*)malloc(sizeof(rel_t) * next_count); + + int i; + for (i=0; ivariables); + fprintf(stdout, "\n"); + } + } + + // Report statistics + INFO("Read file '%s'\n", model_filename); + INFO("%d integers per state, %d bits per integer, %d transition groups\n", vector_size, bits_per_integer, next_count); + + if (merge_relations) { + BDD prime_variables = sylvan_set_empty(); + for (int i=statebits-1; i>=0; i--) { + bdd_refs_push(prime_variables); + prime_variables = sylvan_set_add(prime_variables, i*2+1); + bdd_refs_pop(1); + } + + bdd_refs_push(prime_variables); + + INFO("Extending transition relations to full domain.\n"); + for (int i=0; ibdd = extend_relation(next[i]->bdd, next[i]->variables); + next[i]->variables = prime_variables; + } + + INFO("Taking union of all transition relations.\n"); + next[0]->bdd = big_union(0, next_count); + next_count = 1; + } + + if (report_nodes) { + INFO("BDD nodes:\n"); + INFO("Initial states: %zu BDD nodes\n", sylvan_nodecount(states->bdd)); + for (i=0; ibdd)); + } + } + +#ifdef HAVE_PROFILER + if (profile_filename != NULL) ProfilerStart(profile_filename); +#endif + if (strategy == 1) { + double t1 = wctime(); + CALL(par, states); + double t2 = wctime(); + INFO("PAR Time: %f\n", t2-t1); + } else { + double t1 = wctime(); + CALL(bfs, states); + double t2 = wctime(); + INFO("BFS Time: %f\n", t2-t1); + } +#ifdef HAVE_PROFILER + if (profile_filename != NULL) ProfilerStop(); +#endif + + // Now we just have states + INFO("Final states: %'0.0f states\n", sylvan_satcount(states->bdd, states->variables)); + if (report_nodes) { + INFO("Final states: %'zu BDD nodes\n", sylvan_nodecount(states->bdd)); + } + + sylvan_stats_report(stdout, 1); + + return 0; +} diff --git a/examples/simple.cpp b/examples/simple.cpp new file mode 100644 index 000000000..22bd9eb8b --- /dev/null +++ b/examples/simple.cpp @@ -0,0 +1,121 @@ +#include +#include +#include + +#include +#include + +using 
namespace sylvan; + +VOID_TASK_0(simple_cxx) +{ + Bdd one = Bdd::bddOne(); // the True terminal + Bdd zero = Bdd::bddZero(); // the False terminal + + // check if they really are the True/False terminal + assert(one.GetBDD() == sylvan_true); + assert(zero.GetBDD() == sylvan_false); + + Bdd a = Bdd::bddVar(0); // create a BDD variable x_0 + Bdd b = Bdd::bddVar(1); // create a BDD variable x_1 + + // check if a really is the Boolean formula "x_0" + assert(!a.isConstant()); + assert(a.TopVar() == 0); + assert(a.Then() == one); + assert(a.Else() == zero); + + // check if b really is the Boolean formula "x_1" + assert(!b.isConstant()); + assert(b.TopVar() == 1); + assert(b.Then() == one); + assert(b.Else() == zero); + + // compute !a + Bdd not_a = !a; + + // check if !!a is really a + assert((!not_a) == a); + + // compute a * b and !(!a + !b) and check if they are equivalent + Bdd a_and_b = a * b; + Bdd not_not_a_or_not_b = !(!a + !b); + assert(a_and_b == not_not_a_or_not_b); + + // perform some simple quantification and check the results + Bdd ex = a_and_b.ExistAbstract(a); // \exists a . a * b + assert(ex == b); + Bdd andabs = a.AndAbstract(b, a); // \exists a . a * b using AndAbstract + assert(ex == andabs); + Bdd univ = a_and_b.UnivAbstract(a); // \forall a . a * b + assert(univ == zero); + + // alternative method to get the cube "ab" using bddCube + BddSet variables = a * b; + std::vector vec = {1, 1}; + assert(a_and_b == Bdd::bddCube(variables, vec)); + + // test the bddCube method for all combinations + assert((!a * !b) == Bdd::bddCube(variables, std::vector({0, 0}))); + assert((!a * b) == Bdd::bddCube(variables, std::vector({0, 1}))); + assert((!a) == Bdd::bddCube(variables, std::vector({0, 2}))); + assert((a * !b) == Bdd::bddCube(variables, std::vector({1, 0}))); + assert((a * b) == Bdd::bddCube(variables, std::vector({1, 1}))); + assert((a) == Bdd::bddCube(variables, std::vector({1, 2}))); + assert((!b) == Bdd::bddCube(variables, std::vector({2, 0}))); + assert((b) == Bdd::bddCube(variables, std::vector({2, 1}))); + assert(one == Bdd::bddCube(variables, std::vector({2, 2}))); +} + +VOID_TASK_1(_main, void*, arg) +{ + // Initialize Sylvan + // With starting size of the nodes table 1 << 21, and maximum size 1 << 27. + // With starting size of the cache table 1 << 20, and maximum size 1 << 20. + // Memory usage: 24 bytes per node, and 36 bytes per cache bucket + // - 1<<24 nodes: 384 MB + // - 1<<25 nodes: 768 MB + // - 1<<26 nodes: 1536 MB + // - 1<<27 nodes: 3072 MB + // - 1<<24 cache: 576 MB + // - 1<<25 cache: 1152 MB + // - 1<<26 cache: 2304 MB + // - 1<<27 cache: 4608 MB + sylvan_init_package(1LL<<22, 1LL<<26, 1LL<<22, 1LL<<26); + + // Initialize the BDD module with granularity 1 (cache every operation) + // A higher granularity (e.g. 6) often results in better performance in practice + sylvan_init_bdd(1); + + // Now we can do some simple stuff using the C++ objects. + CALL(simple_cxx); + + // Report statistics (if SYLVAN_STATS is 1 in the configuration) + sylvan_stats_report(stdout, 1); + + // And quit, freeing memory + sylvan_quit(); + + // We didn't use arg + (void)arg; +} + +int +main (int argc, char *argv[]) +{ + int n_workers = 0; // automatically detect number of workers + size_t deque_size = 0; // default value for the size of task deques for the workers + size_t program_stack_size = 0; // default value for the program stack of each pthread + + // Initialize the Lace framework for workers. 
+ lace_init(n_workers, deque_size); + + // Spawn and start all worker pthreads; suspends current thread until done. + lace_startup(program_stack_size, TASK(_main), NULL); + + // The lace_startup command also exits Lace after _main is completed. + + return 0; + (void)argc; // unused variable + (void)argv; // unused variable +} diff --git a/m4/.gitignore b/m4/.gitignore new file mode 100644 index 000000000..5590b8bc5 --- /dev/null +++ b/m4/.gitignore @@ -0,0 +1,5 @@ +# Ignore everything in this directory +* +# Except: +!.gitignore +!m4_ax_check_compile_flag.m4 diff --git a/m4/m4_ax_check_compile_flag.m4 b/m4/m4_ax_check_compile_flag.m4 new file mode 100644 index 000000000..c3a8d695a --- /dev/null +++ b/m4/m4_ax_check_compile_flag.m4 @@ -0,0 +1,72 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. 
+ +#serial 2 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes], + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/models/at.5.8-rgs.bdd b/models/at.5.8-rgs.bdd new file mode 100644 index 0000000000000000000000000000000000000000..8a0c1950069e5977d02e225f5bfb06cd7902c26a GIT binary patch literal 50448 zcmZwQ1-Mm3+Xmo~?ot8CBcLJ{A}SzpK)SoTyFpOUO?P*L3J6F_NOuX6(js;%pWXUD z&)PG*4_yCT-(l80v-UY>=6%=9+KY`}2IpKVd}YGd4SY$7MaLi9YKh&&!}rZeczD&E zl!uqi$#{6toScX6nN#raoH->A&%}Qp9K}d?%DL3hf&b&Ab7|D?IG0v^%(-;xBhIB) zA95~(`has8)%)UeajR*MbD3`e2fLihqTb=$-RifUyGOm%xqH={oy)4;rfUucYio$z0kRQ>iN#) zSI>2>fO@ub52$B4_n>;Za}TMfI#*CV*|~?+6P+uh9`D>E>aor}svhm!W9pI4J+2<^ z+!N}Up)e0$Hb2S3A?6}He9`+gyx?JhfU4T^RThGJP#Y1EAX(s zxgrnink(_Jwz)D7YnrR@u)4V_52HWgZPi#sFN!-lHTofO*I*U>QgKJ$IvPaWwOA$I z_A5AtNvad?j{k{Cm0#I81fibxBWN;2=Mdxu+K;4=DRB--)JXfIq>DF1Qz^;Xgw2xN z(bN~R7dPW!L30$U2hA;bSil^GHlMi_4-;-n0q62K*Tzn9F4t`-<6I8s+G&6G+fpoA z>^f-wJ-4M~w3v3%{!E4_l{1*T@GzY@O8qqEZahq7?#{y$<{msuX70(uB<5cJ@GfhL z_PajntG8XU(SFxY`!C*hiAVe00PR0_+maCNcQ0uFsoRzi=T15|So`0(ZOL)&m~$^_ z|B>64DCZ72H&pu%+_t1Sx6ip(w0}?7czxq8=Z0zj4(Eoe-*#?5^{dWJRxfpKih8kgQ`HNdo2H)c z+;sI^=Vqv9J2z7e%u?gUIK>Kn*c?;z%i&vE!C>Z@qQT==u!6~2V2UOezp@nsW05He z9)@To=G+oflpIUdL!4Wte(|@| ztujUX9a6wbXXnW+PU?nXum^BT50awMpLxkA%(3pcJ3`x zw74VXt<-mJiz(VXaEVx{?c6p~wBO;9vQpi-?WSnI!zE~?vU594&Q*+VQLUxCbGuE> zm2(c4xRpfb_L`#oZlAiObNkgLoWl}grI>REP0@aLNL|#q!|Ec=9Z?r{?x_0lXi2k{ zN1Z!vif;a}1X?NR+zC^3b9qu-z`1wT`J%TJ){@}dX;ZY{ol)mT%l4yQXussxvH>DcmO{K>8QmZk(G-`}5ts3J?r^fiwt1-R|YK$+V8sp2P#`rR; zF}^J7{dh*;ot*W)Ffc3+G#21{eSg#=3v^btV*!uO3onxeJ3ISvesmrV@AK#9@ZT>R z>s{f6&gG=BVMOPJX4xQe-*J9;eO~|l^7z*$c;{j*8`Ax>Y*_hd+0gRSvf&k=WkY;` zmJRbkdNaO!d5D$`w;;VK^x|0ojSV}Rm(Yy)d4%nuV7!m2F+Y!~F+Y#1F}^3%7++yE z#`mNe<13=Z_?}W@d_~n5-_vT0?-@15_pBP@E9PB@^@cDo=Ze!)@U~RXpKSflv3-(1 zFEr!)P)e~M=SSz^u(UtFjQ@TmSx*dSV`-!(`13+DzJDm??l?cZzP$f_&->R`@Gi@G ze0VjMc6yw-5M3pgt=V46pBI{Oekg6(kMpDRaM;eD-`;<}HmpmAvz_ZeKj+U2&G`PI zbh_jG@cJ(P`*rrO@9N!=b&2q5=ep6w&E4r@<{tF3=AQI3=3eyE=H7Hsb07LCb6>iM zxgY%`-iELa1@kjNjqwgtV}4#xV}1syF}}fSjPFG?#`lsM;~S#J_=c)6zL(V)-z#d2 zFQ&%$hN&^W;okjO7Y+l(rAFt)C8+1;vHnqP&+X3(%{V`lG3=M~e`W{n561cL_aiO$ zL*v&~pDXk(T?PNwnPmOs3`jX9|t`gHWc@a{n`p#{EGk(`mW?nL*3_&rEv1 z-5<=N_u3?{@`_5?hlsJa(}RbmivR1wA>%O zL1X_6Wfd*=2dio9pP{UwW&eDWmi==rjs0*P>rk-XulEl7;Rf%ppM_I5vK{Mv^nF63 z?5}UJAK!1Ye|>a{?3Y{EF8gcrbESWq?|<9Bek*JIyzSnbSj+yogO>esCoTKuE?V}_ z-L&kVduZ7|_tLU|?xSV@+)vB?d4L{f`^!N(hA&?Z(PeN6u@0p)E-~-0UmW!g>wP%& z7~8QPM&Bni;`@d24*T)_PWabH?WIC5mI}7Z{uBLN=|AQBPy5%u%NjrLjQ4TYvcH_A zWq&zG%l>knmi^^DTK1Ore%M*Ld*Jlm6rAQ8l4x<2v~=b2hRw+ z$Fk0iX9V7(S?3BT;_ry*oOtHo`=cIwzfeA6JHFq?+K=xS$|r1>?-%`C>Hp04f9_xZ zDQo<^FT6iwog;iV=f0%xGk-;AH-AlMGk-&8HGfOrYyOVD$NW8gxA`WW#r!`yGcF<4 zpquFFd5i{X#)C?iU_b<9?x#8uts2sBypWs2cYRkEwCL z@VFZH3s0zVzff3>{ryQb_V*%c?B7qRvA-8pWB-0yjs5!>HTLgk)!4s_sj+_-S7ZM! 
zp~n9GoO&d-JT><3QtIK@GS$PdMXQnZiE8Y(W!0teNL5`5k6hIy@kmzv93I)Kv7c5{ zV?V8=#(r8^js3KW8vAKgHTKhLYV4=g)!0vKsIi~cRAWD_rN(|*TaEp%jvDKKT{YJK zdTOly_0?Ga8>q4VH&kQ&Z=}Zh-&l?Hzlj>_e^WKq|7L2e|IO7{|68cB{wjA{*8g^DtpDxRSpPeyvHo{dWBu=>#`@n`jrG5a8tZ>oHP-)bYOMd= z)mZ;~sBzp=jrG5m8jF4=@7}(y9c=`6Joj(q8&UP)c`eL+>1O7BbQ5!bx{-MR-M~DM zu4jIMu45iVKVcqBKaS@DtV4M$9N_r?+aEO#p&v01r3;x~rXM!HLKig0=!eY1=m*Wi z=?Bas=mO@Ebbj+FI-hwoeZP4Ooq)GCtV6+dIZlo1aJ(AV-2^qRvx#b4SCiDZjwY*d z-AqyAI`JhI)rkKz?Qeye33!yCM*eM7V_(>$Mn1l!#=fvw zjr`oA#=fvsjeOmv#=h{j8u`0jjeTK<8u`3ajeTL48u`6jjeTK{8u`9gjeTLN_dZ|u z@)v}!VmEnUjj{bauZQo&{qzC4oB1H!#e9hFWIjxHFdw1YnUB(K%*SZt<#E>G{vCPw zPTWzWeqi(j+htumNz1zUE{(iArLRX`o>t4cc!uq=E}o@jT|7t2x_F+Jb@4q~*2N2S zK79Fdk(PDw5}n{XQC+5ymsePaf;m;vftwEKB6t&?qk~G?LMI`-tJS{;_W`8E#B^P8u9*ubts5;C||N2@ebuHTK2)O zX^HnYw8Z;c8u9*)bts7U_iBmvO}0zC|3^!_f1oAaKhhHKpJ<8q&vZU~`SJ@b@&1)g z!1Dsup&;JBs}b)%)QI<=YQ+05HRAoZ8u9)|jd=g-eS`I#`yk_=H2gBCd*Us%8u^e! z-5sZ^ksry_UGbJ)jeJR=?uP$QqxsN3U_gt{FbS*Y9Mk%k)imOS{ zc^OJ+TGqufw5*GXw5*F|X;~M`(XuW+Pv^&%FXd_EWhfPBSr;qP385Fy6KLdRW!9m{ zK3IkAvJY0JWgo0Y%RX40mVK}Wjl7BCio>WNzO~pM3gTN^jri74BffRjh;KbL;#*%W z@om6%#J8dLOMDx#9r10f{j%>hVY|e)DJ}7BMkBt>_4N|p7HpUJwxlJ#t!RmFYg*#l zhR%;KU)s_V-*&Xbw>_PJTLadiAif>dh;Jt~;@eq`_;yhvzFpObZ#OmK+uge+>pS;7 zT;DzQ{KdHSRU;pIs~6%qfg1VIS3Tdkern`PfAw5^jzEq48K|C(&k?ARPlMDm@i_uD z^6N$QbUY_eBj1Lor{Z%2YUJO`YUJN5YUE!`jr<#?M*a;~BmYLIk$)rA$iGo)HMYyX^Exf-(sEkXr4_WSODkzvm)@Xd zU0Oxw$CodwX<3)n(6TPQNhjdTm$fwVB9wJB@?t&fP>>g)Y+yU`B9x6Z@*R(8!BWcG9*Ex?ME#!k1W7 zBmR5X9tz^WSB?1ZQzQQS)rkKAHR69zjrbo@BmRfgi2o5a;(t_)_#aav{>Rn%@w`Bd z_@7WC{wLK5_=u7k@jsUElmAUC;a#UDy0IUB~Cs%0JgiS4ou{!Gg{_zNxT;IFi-gTK+T4*pK($CodE(6SEx zNy|F;7oC7FU;d_X9sI*O6kGS=`$VhI;bQ;sJj;)mO9mr9eE!_(Pr~-_+zI_jz3*ae z`4dYY4aFf_er%kD#88&m*X(;W>hODn5^( zmOL!McI4r6+K)Uesg`xF6x)%9zQossC4nwhhW(gNUt)xMSjd%oCp%IVZsx;yeT#ZINf~(VrM{o@q z@d&O-BObxEXv8D9Hf`~U)u9m&Ut)D>i$|;;ZR(*GiAO`WBOZ<7{jM}? zSr;19vMxkrfoi58`|d6wWV!7 zT|3(5)3v8ufJt*4f^)th0S+S!esw1@PrdKU&t={+B%bq2vw+ zc%H!aoaPs4S!Z9OWt|;D%Q`!h#yT6#6Aq(-`Fe%zp-BG4*e>}O<*D=!_x&UM>xZ$H z`5Z~he2$`JK1b6spJV6(`0`~eE%P~!miZh{%Y06t6Yv~?btst6NovgJWHsh7d zRgL+arpA0uS7SbBc)!dV`_@eF@V;HPPX1Q$zo+dPenA(T#q)cZXVcxybLej7xpY_a zJi3c{KHb^8fbL{oNOv?ZqC1!u)9uYm=yv9%bX)T>x{di&x-~wJz#8L?{={|tdfX9Z zopj6Dj_Z1b_RDp>lI_UTH?$vlxJoT~x|;2hhihob!#8Qk!?m>J;X1kizI<6vOCD~Z zB@Z{!l82k<1biNWHA>#3@w!aB3V+7$vs(nZ*k+#J%)EteV%|zOGH;_BnBS)BnYYt* z%sc2>=ACp+d`^LNh*OQ|IQnBZ+pC-R(ACU)>8j>^bQSY{y0ZBIUCDfqu4q0)S1=!@ z%bSnT&zq0Z<;=(EvgYG-BAzd>4h2Ctp)Q5z3+m_ad_i3t&ll9s;`xI5X*^$0KZWND z>L>5`91rIIy!K=M-&14$FQ_s97uA^mOKQyjWi{siiW>8ORgL+-rpElgug3geSC_}X zUr=NIZ>TZXcczmWV>D=dP#ODij3H-YSHRAP^x|nlc zs}a9%)XzBgts3$CPF)nAM^GcaH`PV(-!D)j-an`dJkG{4`FiOf;^f?+d{uJ+z zMXkt-pY{95b@>b1<+}Wpmh1928hP=%z8-n;hgz=7KiMwV4#(d{dW4`mMG2aQ^ zsafN?$e1+zaMYOpe0o0SKffCDUqFrde?X1-e^8D2e@Kn_FQ~@+Kdi?57gA&XA5mAp za|AW!|1mY@|8X_u{|Pnbzpy$HQ=rED7g1v!d`gXY6jfs#d|Hk8Jfp@s_^cZ7DyGId zSX_vGYVrQ)B(4M1)v^xMU_0_K zl$td1FqB%ftOK=aSqJLSvJTXxE8xqQdbF$q^=VlL8ql&1G^Ax6XhbLC%a_J9@~{c( zP-NY0%63_Io6)lFHm7CXZ9&Vr+mc3}gwlZ5KjynN+e4B0Zo_uWcU$e3b+;YcWxm_f zGT$9&%y&n9z07wfw#$5Xre(gn&=v6IOIKRvyBjU@-JO>C?m^3Z_oNf?IR)0CV7_~+ zG2eaEnD4%7%y&OE=DWWd^F2U~`5x%qiZ!mop`7~C*pGoYRvywHP+p6 zYQ$r_8td)^HR3Z-jdgdD8u6N}#=1L2jrdJfW8IymMm(phvF^@LBfc}$Sa)Zs5%1Y* zth+CG&+#>;L`(cRF!Ewv9p>WgT2h%XPVg zmg{mUE!X8Tx&pp@d6kyy@-&e)tfgh$Tt_1>LRnACI=X>IUWBrdmUVR#jl2luEn3#u%{1~Nlr1#! 
zB9yH(^1_!`R3rXxvpp2Vf4ds--=RkQcd8NpU24RCw;J)^qelGqsuBNvYQ%rPx&l6@ zpho-;suBM~YQ+Dr8u34(PQ>RF)QJBvHR690~pI0OP@2L_03u?swq8jnv=6%W6b$#rMCksh{8hH`QWuAw;xT4<& zd2v-O`}H-pBQKu!->)g_h2gv5IRpC_;L`W~AJFry{{}tJ{2@Kp{1H9J{4qV-{0TkF z{3$)t{24vN{5d_{`~^MD{yy$YdaCt*Ma%qrO=EsS`G&^)e9Jl%nV;|2j`{iC_rK3t z*3Fx=t(&omoF~keFR>rkZ}S!Vk+%7Y{Y2Y*#eSx3zGA=7Heaz{X`8RuZ?w%Dq8lL)U@m~X=vGJ($b0e@+BRO zbtXOQP-I`pz;@Y3GSaedWTIuC$V|(=kcH+xz&aGn*F9>Cfi-v-{U) zWi9i0A1(8lgRY1#UvknipSft6&)l@kXC7MSGcPUknLsDveI)BpFrWFWKI|Rtr(_8Htzs#7oi^iPRAPmAep4^>KSDP#KT0DuO}X}eBi&(OB6#h#^co%#|h zM%#57D^4TtO0W*sJ@V?gxT8kRyOL~|yedUYUX`XR#$U!i%g~ZniL~TZSz3Zpj+VT7 zo|e2SPbcEbmkKoUsv_%9kXK0=SCl(HKUbOOA>Ju{e-+x|?W)oiZ&!`Bc)RMf#oN`O zE#9ssZSi)sXp6V2Oe3c(SC2-#>$48U;_Vu+-Qw*U(iU&mh(^2{>+2Ek zCTdwnnzCKinP#-ayE!fKZb4VXmoF`8iFYen;@z5-c(r}H`@`HKH87C^i@k-`mtT&(w~;N44^CG%a?()#N`EA;xdSqxD2Kx zE-%s&mzU^7JYQgq@?`RO9hRn>amJ!*3Z6Uo^CC~ea|iVVJaf9%PV7v2bibO{moP9e&%U(U-NXjk9h{&+dPwQgXhOOpW|LuEu`-jvD!X zLXAamq4!B&|G(?vUOacu*X_o02lY-ocTjJ~a|iV{Ja+}lq2lR6D4f=KShxBXa zkLXv;AJfaspU_M3`32UYATK{t%eweE+htw+f|hmhOIp^&uV`5pzounf{D!`BT}1r9 zV|yrj@O(jye7LFJh0iajksm*(ci{d+jePk@{WdsF8pFsF8pFs*!*HshNK%!(^dG{@tZU{v}Z(|B|Yaf63G< z@!wNWufW@JHS#Z|8u^z>jr>ckM*gKyBmdH>m*T&tpho_sS7TkwphiAsRAXJtq(**b zR%2bvqDH>nt;V`|j~e-VuNv!ORyFcDn;Pq4b~W<*J~h_G9BSlyPBj+6Z+$#-`S!3b z`l!bLe&fci7{_w+ybtjCh4}fgsOS2vewUZ+@0%0oYv%jutLA+46?1<2vbg|#$@~C) z(flBN!Tb>Yp1B}>-uy6q&RmE-Ykq`2gU>Ip4&^jHzo32>pI=a)z~>j#$MIJL>Z7FAYUtNv(*HE9rTYNS0p_ck29ucUKA9d94;1Pow`BG1P49^$TM-ejh5j^5h zBcB?n58?TO8u`^keE^SG)X2AH>V5bpYHH+P3pLiomTKfc@|)@tNu8#UI&wrb>S zJ2lqD_G;vB2Q}8kj%ws{CpFf^&T8a$7d6(!u4?3aH#HW)lHT2YofvXIUdPwPDR{mR z?~g^zlkB9PY@cB6MUON0rpK83(4)+K=@I6B^e}UO`W3uYWgW`Pcq^+O8anWnm+eE$ zgXovcgXtIXKYjgRoT46N=MQ1~3+AEpK=aG=0P`z!e{+oPXC6lPH4mrzm`BjP@%aV4 z|G@Y|8O45#Ka|n5jDHL*;~z`Q_{Y&0|9IA+VEhx*GX9Bdm+?=cW&D$A8UGYo#y^#o z@lT^={L^U}{|p-ApUE0!B zk=|n7M89Qzi{5D7Os~gFScii9_^s--p#wjP?eaeIZF-IMZ>Mn|ze8WY3PVuK`^a5v zUum!3O|LNTp_iNY((*oXAN`v3@2BN`#?t{-d;v z{}?UfKTc!(@30O9<3FL6@ttX3BQ@5AkJb2nKT%^{_*C5q&k@vE7d}^GJYT4>E_|uR_`XtOUHDp!@qVLj zjki4B-}<_epNrga$0Fjs`8(eq`MN&|)%R@2eRC)`>1#oF&Oqb7`3HUd6+B;1$lujo;@%HP)er)cE}hs<93|tj6zKNZlF# z4nmFJ|4}v8p~uu1&*N&WLr2 zjJK-39^-{p+i+miI+X=oj$iOH*3j7d4{?gkD^)w7f5BLHD!%mbAPtYDM?4{?;_! 
z7v1(beawGbwudqqONAQoXs@1#rA3YSbX1SWQlv(_I;+QG=~5$pUDczp)Tt5A?&^_P z8r6tzPxWvtrE0{xw>pL?Q6v6+)rfyTHR9i2jrb2xBmM)`i2n;}#D9<)@gJ;4{9jZf z{x7K!{~>C`f2bPqe_4(AzoJI`V`{{|jrTBL=MAxs*RdWcaezD+9`DDDp~yNkg6+tI zP)5?SE{&p*2ce9nWt|#Bj}5&zq-EV2MZ%Q~=#mUUn;E$hG%8hNmkHA?vVZfx1NmFV-?SRa=8^P*14gIC!uaes}L zIKNIyT$j@l#}%~1Z6%F3y}>#Z#3htfY)4#HYd_+$MlI{}n{2oBIaZGQ`l4I^-KN+& z_AkU&yu{Yi3$SnTJY>iQdVX{qc_Tf~yosJ`ev7vKKem~kZT(wlyB~;crDt0IHrnnF zVsF#at$#a>`-Niu_uD~Fwf>zn;(}>p*)}bI?iGDm$6XJ7B`z=21INK4QP~M>}KJEl<@o^_j_4W%b*W0hOTyMY8a=rad%k}mLjq&}-Iuse-Uu?(t{?>ks z?;o{{?_ajd`2M3gzSQ_a`H@CE@ACeXmVGM;+hyNMO3S{LjFx>XIW7BE3R?E9l(g(y zsc6}^Qq$PC(y&Ge_b;X6y!GeM6gWg2()#nF4v9lLwvW4Yx=T+>95T=nhm5quArp-_ zWM&{q?x$Pf-%GF#1IJ!>mKWd=*m5d_BT;nXgA_nXkuanXkubnXe~knXkgM%-55&%vTXw=Ibe1=Bp?z z^Yb)~`FVzQD43sTy&quRJal0Hr<H6mKbUkwgx~{n*UB_IBu8q$ruto`=OH7P($8$_vhgJM} zQMcq(RkkCqs%gLERdu!_uWD#N@~Wm<@~RfwC9i7Jl2>(T$*a1w9dDVcH zylO~GUNxd6uNu?6@a0Pr8qe`UX-fA9LJHA%ju%REx?2!Zh?X}6E$J@S--_;RZcTSG zx1sSIFO;@)hafy>pxc|<)9uV1XgtRYr6b)Y2tSH$eao@v2=VX2_D~T2u4=@;n;P-& zu15TOs1g63YQ(>n8u9O~M*RDz5&ynw#J`^!@$auj{0FEJ|AA`6{{=PTKS+)E4^|`o zFRBs$m(+;=5H;dIRE_w*tnPwaDmCICQzQPv)QJCZHR3-)jrfmLBmSe*i2rCc;y*^+ z8Y$>K*4HIN?0p^meePOp`TDvy@rXjb#$Ggm?W@fb=~d=Q^c&{M^h)y-dWCr^z1%#F ze%(Bse$70Ce$_maUS^&}FU4CI)}buHTO0MF(1Eu=Y+qoWN6$0Qr{|a#(6h`7=^1z* z!~3dG)`bInj)DD{-zBU=!Tg4@l+z{vC+-AzxPsvG8^D_rurm?<4ee@8kJ``kKAyDBG`^ zkI`4m$LY)Fcj!yz6ZA#%N&15MUHU!qDf+zmG=0u|hCXXPOP?{Hqfg^ouny(jaDX4h z_7mm{^l|e=`l$I5eb{`NK4`u|?>Aqik$0h7qmgItvkpbpmFsMmb>#zE)|DHytScYV z$cs>-xJkS|X1~Pi6B_aQlyxW)ug};n@%o&Wczr=jyuPF*USH7?udiu|*Eh7p>suP} z`i^xdh}ZXO#OtOS@%o?oI6kMKM!bGhBVIqL5wD-sh}SP_#Oqfz;`N&v@%mkjc>SS9 zy#7=pUVo_(ufM%NVvXzew!cTl&;M7?$2#?&8o!TA6DA8a)~UPH`2CWou}&pbaW1afPk1xG%FD=94Vf62$cH`ei z=<9acNg3I`-JFTuX3k7+F=wIQGT%*a#OD`Shq68#;5h@^Wxvi!W4{h18;$)syS^U# z^?mBq;cR@4f$gizIceFibJ4P2=cZ-9&O^(7otKvVI)Rq``hHsW>wL8A*ZJwC`0}Ly z-3recScd{X==}iemZ1ZmW1w4@qt64%d_By5nXf{$+}}JxW4<0`9m;BaenBns^*GyQ zzMi0Ez6#SaUr*99Uqxt{ucv64ucEZf*VDAj*E94|JYQfP3g)MncbK2z+K>4u;a!k* z^YB7^UV(0A4zCX_rPwd?TiUX}ONN&`a@rfi+6_TxUqtXq||6-L_tI^6k6gjm6mu+qa_~GX^F=STH-O2UW!|D)}dhjXM2bFpQHVl|GC~V z*0_%gWgfjC2+xyfiQfVmuMcIRfBhoXq2Tq4z2~#WePAd{=y^f-)oI)ZhO&&tePAfB z(zp){!NJ}{J(H0}e#^&8?F`d6`EuD8{+#CHuX@qLq) z_^zcTzUyd-?|NF|yMdPYZloo?o9Lxj+F6H!_-W7@wOZ^_lji%{NSyX3_QTJqu~EqU=SEqQT@mb^Gk zOJ1CzB`?m>k{9RbrTFsYJl!h@Z;k1m<_mNW^F_M5`4ZjDe3|ZQzCw2~U!^;nuhE^% z@6#R4*Xa)C59s#h8+1GKhjd%>M|2zW$8_skjzvd^|EFvZ1@Zq(jrf1AM*P1}BmQ5i zSL5>wYQ+C*HRAt`8u9;Djrf13M*P25BmOtli2wi8i2o02#Q#V2QaoQ!BmO_D5&vJ* zi2tu@#Q!%n;{Ut4E1oZ?5&u8ci2q+|#Q$$K;{T5t@&8wi`2VM7{L_Zup+@}gQX~FJ z)UEOFWBlur`u6eiava9;q{N@s@!TQaAB(!*kN%AQNY3_ac9r_Y-+(C5q<>9gic^cizz`ZWIg3#>zVHyq&KNwEEd`5yYX z`Cj^{IV*kGoQ*zcj^;_`=RWphesZu5Mdl|b+hu-o(K0`|X_=oqw9HRlTIMH#mif7# zmifs?V}A0p4h8d5K#lo%Kz#zw7u1-aht!y#f@;jq!)nY=A@A(0_lF;hezmDqA z3_s@2f0VV{Up-D^|BmW#K3v$J{{(BfUwe|qex5v4I2kqa{V9Kb)GGO2l}#PeCT#IqRNC7#7;iDwB~;`tmc@hnM8JWJ6M&(gHSvkWcyo=78}Wm$(J`Cg9g zCqf7QJp?WJUY?eGuRu$_SELcoXr6Ew70g# zS4~>xs}?QuRhyRiszb|s)ul0C^;n03`KqtRd^J#?z#|tm=Btq!^VL|5`D&uZd^Pp1 z#9H#d8I61or8zD6-hxJchtiTpez#&B3i7+PcM;a}LkDgN=y~S0^jvd0dXBk0J=@%Y zo@MSx&opnWVkjs21z-D$~>9<<~~Pg?S$7cKeGo0k0OLrZ@2 zr6oW5(UKqiX}O*T(8!O0tV5CO=>@i*2p#ym0xj3mU|O!H7iqbkUZQ1v96}>MhO!O? 
z`SG$E`SFSx`4LkiKZdE1AH&tij}dC*$4E8uW0V^CF%-I=W4N7kJwY?r*6N=sf%qb0AV(~?&+XvwRY zwB*$+TJmZ(EqOJEmb{uvBd_MM4h4A?%6zsXuNG+kiO`Enp6!xXi)hKK#kAzr5*m3G z%2Ha^on>?@yk%k?3jCV)tE^jw4!jkkTlhLO!z=vx%UL%M9eArpH@oFnGTQdp*eV+F zTg^Iro*MC66L-|8@q3f)62G;y#BUuf@mo(z{5H@Mzm2rSZxb!?dyAI%ZKh>i*g_+I zTUm!9>%um+p9mdzo79kj%6CoS>YMN9m4(}-6nduWN*UK;TUWgm_B>}MSc z;&Z_J4c1LV2Yxi&#C(WuY(7jkG9RHEnvc>A%*W{Z=Hqld^E-51^9j0+`6OK%mk4W= z?kVGSu6X65e<$ycnOCR$c~O(B3#ZvG>%tjY^6D%td3BDKygE-yUcE<4UR|IiuP)M( zSC?qXtIM?H)fHOSg{$-_Tq>+Xkvw~!?I%MAZf)rk<`3w1%r|Js!w>0W*8dSLdHFGY z#QHy>B~L%44_W_bwB+sQ^a1Ptf|fk~lHP~s2&_@U-?u*zr%R^m{Rh4g)z>`#I-W21 z*F`;H%DYW|n*V*ZcrYaeg7K$OWBh5=-SPKSYK%X< z8spEP#`rU;G5$p4Xko` literal 0 HcmV?d00001 diff --git a/models/at.6.8-rgs.bdd b/models/at.6.8-rgs.bdd new file mode 100644 index 0000000000000000000000000000000000000000..71ef84a77c48033d3a1dd2db70756bf85eea2a2f GIT binary patch literal 50128 zcmZwQ1-Mnk+s5G$5EL>0wx|f$-Gz8mz*f4uOA))>pooe=H;RRV-Gzaz2-u3<-Q9lA zv(6gdA6(yD*EzH9nY9ml=6%=9-iv);wUp9I`dUL@@9GPsQXIeYUrS{LAHEf?;lnq= zHGTMcxRwuJ4X^0Km&3Jv_+q$@51+4oK02zHX>m#`7YF6H=TfRmKbz7j^wTNTqn}J^ zRr>LiR-+$HX?6OM>bdx@>7kU?{0};qpVC_N{VDyAzBi?{>AO=}hrTnVb?MtvT93Xp zu)Ys(4sYPY8^as=a9(&LA6_4>@54FajeU4ccoQF972ec`vr}r|I+x2++AJTto|#fZ zdU{HY=&30+rYEPgIXy9@E$HznZAp(!X)F5Dl(wcXPH7wZqLj9!FGy)S`uvo(r_W1i z2YO^mJJOZFPCh(4yt5C_4DaH@)5E*^@YL{bK0Ghb_a0`0%Lkp*}n!e3%c5Z`EywyDA=3?c&tp52^N%u8JpB?czrl zgQ)h=uBw)K3MpxlT2*&ff7hhqDW{|$v|+!3h9Rb;Ah%<`k^)nbl9H$c`-`NjZYZV_ z$=b=yNbX|lw}}UL@!?kCB2-(1yZNwjxCm{-a1S3gSSAHh+9ai3aY{<{mr0qFHcY7x z``2G4#frtQAN$u{CMAo-bO8I;2o$NjT6mBT>xGNduNxlX!es==zB{G! z=sQyyMcq!3lrE&_rF0Q}eM)2KIVoLCUz5@$^i?TcO3zN|GWznA z#?muW8b?o0X*@kOr3v)plqS-`Bw7#FDN*!?O$ilGPCqh=1~V;G3|>zmiY9ADsF++m z3a?GaBOzBGc^krT$6v=uu`%hlxB}Q>ET+9CBmw8E2?046( zUl*l98f9oob3?^`rxb`XFr^zp#p0rriPATvn?l9nr4)D zrxcFTF{L{~#p14%kJ2`!yF$h0p-Uu6%ara375kkosVGOJbYH00?{o=9IV7bALMgQ< zUZO@zvy>hTrF39Qy2PVYrSx#9*zX>po20aW-X|q3AyM{9>9J6;-#t$6meLdSE-5`p z@08M0^bW<67A@PQ^h~ID@uwv)%2p{Y3>7afpQ9V6w1{q4yrqbi1}QBG75m-ubp4cG zpf}`O7Imzj(o5`Lmv3>@v35$YuzxMSB~r&4DZR%2)%X@l9raRL%Ko~1%cYK$QhJm9 zwfPoJ9ko(=oBcIXdWY8Wai#S~7v(CnSO0>qeGOh8|r8T})XpOHPt?{i&YkaHG z8sF-)#@Rw-K(Fg|E#Sp@`C(YF*LT0p zFV5592Icu1mOpPj*YooOQ`*SZhEbfCo3TM`yxjTu@tc-EZ^t>UmMv{;xLet`=U&|_u(e?q^OBo2Kij%J7mas2TJy6# zt@+u3*7$a$HNKr_jc;dKe`)mZ>-I~^^K!G!&t*UN>-^$89X2h`-@p8MOr;N(cwew^TUVPqr!*V=Y@~3&kY}Gj|?AW zk4UMQm)xxRY3cS{G~QOU=BG8S`DsIId~IosuN|%NwWl?{4z$MCk=FP+(HdW8TI1_N zYkXbl5h-;meYERJ9++MZ+xw@~qwFtwnnr(5x9?Y;mz#BdF1_8a^NaIz*rz0Z+TvB*3Zvnz;fs3#}6uh-oWzlgG={!y-$90N<-|u!$a-8!pGQqhL5%P z2oJM&44U268Gx&(RtCec62?HiZp&t8`aOUQxQfYwY*4ZS40~+Su=}vb7)Paq`S{z*^KW*o_5Y622e`)mcBhT~?JgVp+ub(yw|i{tZ}-~R-|n-qzuj+Re|x~j z{x;tp5$pehc10=XI+y+PfnKV*y{S@^{aUYbS>Sf8=f%&_;~(SsdVDUAyB&{zqI~?5 z<@t}g)_V6;>He;-i!Z>-l0E>-iEJ>-qCG*1H#M zt#>cF&SlelpnC;w-$eHa*uQb~zv6bRU&YVS<6q%0 zjqg)h^Ya<4@qJEfd|%KS-G*5r9XDP zMjoi%*W0Uyf3{Z(|6;Ei{?)D*{>@$`{JUK@{D-}A_)mMK@LzVF@ZWar@IUs7;eYK~ z;o|R4YKB*+p<{oQrG}6Fn6lJFpPDK~^Ai0LY+tOWKrhl$q@U9e=!F_0t^K_&t^Iu! 
zTKjuFTKoH|wD$MaXzlN-(~m2JwD$Kk=|`2~^a5Q1^dq`N=!bO)(GTeoqaV~INNayz zpVt1o0j>RcLt6XuMzr?l`n2}vjcM)Ao6y>yH>I^dH=wmYZ$@i>Zb)l?ZbWN;ZcJ-` z-JI6`x&^KMbxT_N^H#L>*R5&ouiMbtU$>>Tzivlsf8Cze{<;IL{dGtBJT39G_Sc>1 zk=i2Y5!ym%W&Lin_Q&1nrrM(D{j`PA+F$pgwLk7nYk%B_*8aFJt^KhHt^IL7TKi*D zTKnVvwD!j;TKnSxwD!jXY3+{((b^xI(c0gd(^~&q&|3cwrnUYbLTmj$l-Bxx7_IgH za9ZpC5wzC-BWbPwN6}jUkEXT$x1_cHx1zQFx2CoJx1qKEx23iIx1+WGx2LuKcc8WY zcciucccQiacc!)eccHcZccr!dccZobcc-=f_n@`@_oTJ{_o8*&o7Vc@ht{HBt90M8 zZdq&u%iTBcQ8udT=kvOS``cZ@1ME)Wfp&-RAiG_7u-zs+#BLQHYVR06#@<06$8nv@ z_W2;CVQ$|pe4M>)c(}by_;`Ek@Co)-;S=pG!zbBWgip3N51(Q;4xef_3ZG^-44-aq z7Cysnp!))@bJ2Bq7Om^>Y+BddIkc{`3a#sE1g-06B(3Y_Tw2#jSt>=X_>W?LcfIwY zl@Fuou6pZ4D?cuzJL|0%t$Z0nchp-yTKRJc-Cl1UY30*pbX$FPfL4Buqg(5<1GMsO z0UpTMyon<|&ydcV|@~sb|v2vHsJErW_<#)F| zBz%uOD15IyAbg+QFMPk*5nO*2O1ntcy?CSQnqRu`WJiH`G^Ip0%+qF0>oyuUK8@qP$#0 zD-Uy7>~`f{E=z3XSuW4p%Bx&nu$4!-yl5+Lia6)_jd-V*-5>Ezuh*e=B#s4Rsf2=+)Kr0`9p@-`4`Dx|HZ}eb&E`V0P{6P=Y=LKlx z&tG)^l>VlbPyf(;Q~H;7e$~vAsk*mr3DC;78gx&cPAmUv(aOISY2{ySTKQLpR{pI- zEB{uem49_<<=-l_@~3#v-P$8z3e{0bVv;@$~zqM)YgX_@B z$8~A#gX_`C&-H2TgB#Gw*9~dygB#Jx-}jdgJk8|&hp zb|Zb2WiMNKnakcb*2R772D*>nIv3?-6B_&Aes0G;*wn^8xWA2ku*$|hcz~_EDdMWb zqA0!xxjh%fw;8SYHm4Qe7PR7fFs=9=LL)H=|n4k2Gf)EQj1nT4W%dQr5UaKI+h-9q3i3|jejCawHCi*BU*1+?<-99sETp&RIa0j>NS zNo(IbmsUQWM{D03MJqqgr?u~mrj@T3(AxJdq?NxH(c1UM(8}kFY3+NL(8}*iY3+NL z(aQI+wD!GTrN@+1J^%8T*U{gt9wc`<=MM|qLUM7Jw1Cb3_6F`34?G{x=M zcc$7{m!{cRm!{iTmuA>lmuA{nmuA_G^i`J2ZLCXI*jSfl+YNLd!F4Xmi(Ia9yYk{{ z_A4)PxyJ3vi(Ia?l^40pv6UCOTxTmUa=G4CUgR>@R$k;X&sJXKa)YhB$mK>`d6COa zw(=sEn{DMqF1OgR4yIdeHerxpJPXvKd%t@uAkH`4tATJe9FR{S5K8|Wi4wBr9Lt@uAiEB=quivJU|;{PPA z_&-G}{!i12|1-4W|17QeFQgU!=V--$5v}+yrWOAswBr9ft@z(o`h~JSvW$Jd8aWA6 zcaP&2eO|ZlOLo`r%XXLWD|YAbt9GaGYj(%*>vo6mQoDWl4ZB_VO}lOQExS$lZM${& z9lKTdUAtxYJzLj7F7MlTefEKkb?`$Qug^ZRbsc={Iu~6BpU_wbKXp6S!Ov{0gP+@2 z2fwhf4t{B49sJ5}q_47kZDSq$#>P7Mt=&NP5nSh@>)?Aj*1hU|qDMY4Retn&kspS7k+8`BmF>zVC+os^fO#*Ge|> zYh_#cRk!-_f{OBM6&m|&J+~vjR<)5|tJ%n})otY08aDE4O}nwa%CeT-C`((8jr>~M zMt-ehH_%q*Iv4D->$zR|wSL+EgX5ufG?xwBj(u}O8~fx&HulB(Huk}dZLNEoxK_#c zzXw-y+4q5$>;6J{UeP^EPo18rq0uumbb7j`hMuOWqo-Ahda1kdAK9{m4`dgSm${;!&1L3p?UbIoOVMp>l|= zcpU0FuOo=ZVQyDE4zKP{yST=>aDY{Kju3fZO444mUhf%YGucKrq*`M zXKG`|e5ST`%x7w6$9$&tcFbq$V8?u>j&{sv>SSv^bLnhrKD)TiCFV1Ab-U)X8~Zh% z-D%8c54T5tr=GUvvlq|TeD*GVq-(6ReQd0=eQm6>{cNnW{q4s3D$4*H>+C=q>+B#K z>+E2=f$k%?&PD6&PnjdgaIjdk`o8|&i%+ z<~kS6=LlN!Ig-|Vo=a;!&!aV;qiD_N`LyP9bm`+=Yu~z{bbjBCt<%q{`tNCv%}+2@ zF7)}wgfFs(hR4`L!WY|v!+EissdBx~?-HJC zcM8w5JA`kr+l6nm+k|hjTZM17j}G5rAEnPJxXy7pvN$fj-RAZq!nfOphwrct3*TuU z8otXuBz(7haQGg(MfhI3dH6oNS@?eYpzs6sf#Lb~0pSPjD&1djor{9-FukAdFVOqy z{sO(X?k~`L>iz<~yY4U0yXyV|z4LOPJaX>2`OX(*1 zHx6jU=S_Ma{oVqtc)d;UrGMjqR{Y+j_ekkITJe0J-c6rJpcUT_>0MI#h*rEmrgzf) zrP802bzFi)QdWx3(Wvp$>i$a6s=WA&KL^+4=WfS!`Gt+^@=IHJ@f9Dhy!e{Nb@`3k zab13E!{V@|Uf+E%P}R1mPd|Yrb>&*T#Gozw?0kUSUP`W4>$Hn(vyfbJ2X) zqA}kqx*hXf+s1s?v76@~q|YnZ&B80&nD4qa=6e+z^Igxb(&rRh=c4&ujn;gxPHVo` zpf%rX(wgtJXwCQkXwCQ9rT_MP>bj^|GymbLHUI1Ke9ixQwB~<(TJygFt@+=O*8Fco zYyRugn*WVy&HpB}=6_SVxwddx^S>Fb`EN*T{u|Mn|HgEcUSiOi|1D^(gIm&y$5yn~ z!L4bW>aC=(u+kw_PxFfB2?nG-H+?iHDdD~CN#T9$3E_S1ap5Mm@^C-b`SlOhfu?R( z9`0Y=U!3H1w~EF(aDdyDhq)YRD-Uxy$i_O*%*Hy<+{QZ4!fvjwvK(w<9XQ0sI&i3s zb>J`?>%if5m9`GoxhM~hq_OTE<#w#QN84C;TiRH6TiIB5TieQ$Tn_d1ula84_FORE z?cA>UZqI(KyB*w)`R-_AzB}2P@6LQY=DUmAG2dNn%y&1txxUKM-Ntf&t-GUX z#rFbQ>+XfL;(ZaVb$3YVF=eeOVX3|kth~6my1!C%DlabK&rx3Fa;e*u7niYLc`=s8 zbve%MSO>@3xGpEyxGpE!xGpEz&Gl85$u_RbDK@UlsWz_5X*RCQ>2{U27T38bFLIgb zcICw^_G4YV-0jMXT&}ROPR_QK7r9(%W8J*UR$kjwPU_2pV={AmCx;%ugVv8 
z%va@0JLaqMl^yd{`Pz>8s(fQ>zRFVh*4BJ|=Q_W>#(aJ6cForh)&0dup06Ki%-2tD z*L>yjv#t61h39L&el7ixYwR<>+1O`(x3SOsVK>)TS^l(}W$E`GZ0s|C+t_FRv9Ztm zYgg&3w!U)FIjZd^LQp286PtSZIlUb^Hbm@1q2 z{LbM`?M~qacE|8$c8737yM4Hk-7ehNZX4d*ZWG?Z&WpRA-$I|s z9qU?UTU*y@St{Gvah+DSx0QE0xX#zT@@mIw7mdujo!pMR+Sx{4?P9m6ewe@SY9p_9 zvyoT3+X%`YHu7pu8+o;tU8PISbuP-QeQ4!X`T3``{O9MI_&mkC{Crc|&yIMfrgp?T z?QciCQ0&7%r z*}pecTzZw~72SwSZ?`Kheb^t@Ve0F4#ibwn6_@@r;xfSPh|545aT#Q{&{tUo+lb2$ z8*v$GBQD3-h|94y;xf#x()|V3Dx0rZUFW6g+iF&d>N4GTD9sLi8{$1N~ZqMaXy%eRD4-@E%^-`8reoUe-(o11l z`7(vRKrf|f<l}|J1^Yqq-R({Q*N9v<@wDRo=x{}gtTKRV+t^B)+R{mX0 zEB~&cm4DaL%D*|Z^6xrY`FB07{F_TF|K`!kzZ+=f-;K2L?r z0qXvP+iwX!W8bX%4m|&+=wIme8^h1pH-s12^TLbmx#1=D_2K93>%uSCbHXp$*M?uR zuL-|wUmbqMzAF5xeP#GHd$vBm;5rxO*Cus*2Q;htc&m3 z%dd-y|NCywF@3*YpU}#ePw9L0IR#qz^ErLD{{2N-`Sc}y zr#`1ZE5E*`Z`Z%4Kr7$ArEk^!1zP#{J+1uvfmZ(gNGtz-qLqI?)5^bJXyxCpwDRva zTKV@oJxBkZ0vH(8|Am>DdZ&og8e{%D)w8t&25itZch z`MDykb+IcGE7gwW|@2k^V1m7#?V~w&s zuZ!iZRR8_PyZ_BmWlf*=jy}IoeSW3rdHcWq^gp-16<*tZGrW%dMtEI&X?Q*R_3--k zYvB#-SHm0HuY@#N;UM}sy_KVtAI<4!^;VEpz8p+Ht+$f&Q@RB} zKdD={wDRe2`f=S~pp{=o(vRvE1+9ELntnvLIB4ZxD_ZMfYg+l(hSs{+mR5eYqqQ!! zrT<3DOKEFVpl{@q~2DhIX9&Dcx9%7%a z&oA)tr|I(x^r>w;; z6!&ZVxtwZa{HNI%|LHcye}=8`pXoXmjsGkf<3HQ&82>po#$T~9{t-6DKhnnd&$TiB z^K6WNl&$ff?^@+#*Tag#;@fEZnD7Pmknn}}pzuZZfbbZ*U-)9XPxunM*K+R@_2*ya z_FRtD`zTt^XB<6L_Yr74zX|kU-AADHd?(QZbsvEqp!*1Pf89r*HJ)j7U)@KbHNF{i zZ{0_rHQrftPyIU+r7th*<*)nV{on(6LUfF}YWueEHTErf ztK~Wuy$+v4-;_J_md)*WA9%feL-fzJ^*VeWA3s;8(0CtsquZ~G$KPbn3EymA8@|QH z`@mc6tE2xm8}9>ex37%;JM7uz``_HG@#k`v`!)Vt?zS=hdu)vVUK``T&(`?wcb$vI z{{W5g&v*NE`9$4kurdCJY>fY58{>b(#`qW582_U-#{ZZ-TlW!MtK8`t*ZmVVuKOo# zT=!4exbB~}2k5IT&)EI4bU(qyb-&Qob&y==qCbBTt?Pa?3Z)^-0pt>^awt?T|p zTF>_-TG#!{^Z>o2r*+-GN^3l?(Yo$mr!~H%w66O%XpQ$xx~F~*r1V>5eQ-G!YgW5b z@E!erMA=_huX}TO$L)IEo6EcQn^`HnXX|zE`+WRT-Cv;fx;K{(-TqpZ?lahW-J8qD z_A6Ps&tU6yZ!VwOFJ#Z|rBn z-`Y=wzq6kRe{Vk){=r_b+`reU_Z_+X(Ht;)}hsGtV65Y1N2pvHEgUyYuZ?c z*0Ob7w)CHyi~jtz-JXlqp>=3IpLJ=iL+jCce(Td(hc=+~d^e;A>hlY>(Iuu z#!s4j<$zKe@k2MdzSf}z2?8Q z+jF^8_ZMizqb+^0?k~`aPkZ_z-Cv*;ua5Kuy1zgxex2#_b$@|YJiF58>HY$(_;#m9 z>iz<)c=x0$TKZ_kzc;P;_n{U4zO>@sk5>Hq(~AE9TJax9EB=FM#eXoZ_z$5K|Dm+v ze+;eoA4@C#!)V3-I9l;>ayik)x^$AQJjmr_ z8|&05_650DmxPUV>oi+=kjv>d*0D2el?S<;>vrYAdF)pnjH0m)obPt!K`x_htOFO=SO+e&u?}2hV;vY{V;#8I#yW6` zjdkEs8|%Pjw(?-CYnA->)!K54vdrhIwLXk1&nr5S2jkt2xKFSV=ZQArI>|;HC)N*$2C6{S#S6rsEUvZg1V|||K_E?`Qdw5@;rPm3r752;BKeIS4e1$zj`%l?F z+nyf%SK8CUSJ_j;SKG1wSFW)qNB^~UydJ2`u_s3Vb#}Zys9bN4kN&x~UN3A{KEHYP zSiJ>zoikeTy0O|tBmbYro7|3g-E1RXx7di+tv2Fyn~iwgZsWST!$!RBw6UMwWh-8H zyUs=N+N~T<(WLm?%l?Q@y3g&3PcHY{5ufyc9q~!??TAl$(2n?|hwO+?df1Nmq(|(C zPg-C{eA1(K#3wywD?YhAZYw@dxXwlKd6L%k^c1c5JWVS;&(Mm`v$W!~kXC%2qZOY; zG~%<^?TXJ5_A5Tm(~8dvrSEaA>*>YPvs`1Hd&$;1SLB%vi=y$oQl4K}<9U_VcwVD5 zp4VxOXDN+*d&BJ*&zm;#?JZm5d7F>dcyf8i?Xft|8vD*S zHujxwZR|VW+1Pi!x3Ta1U}N9;(Z;^>lZ}1nXIuNuFRoRFmvz%>{`b9j{h{mP*RsFp zKpcK^JL2%WjX3;aBMyJsh{IpD;_$cYTo8wU+>Si@*H#?k7F1(}NHoTMFCcLxVI=qYB zD!i-RQv1AXmHfHDszS@%htzerdwE{bjl9~!?aHe?*^j*1%k9dmz1gq4+J{D7?dx{r zRTCR|wV#c=YHA~|_P3E&RW|bK02_IAppCpb$VOf@vxn)cEX{4*hs&jfeN2`<$6)I| zTrP*$L$Y+A#nyeeTn@7bW$AMa_Q3EF_JHt_c79W!!=vne(SNkvH{8=D5qWHI`75@&j;@^>0{5#Q#e`i|p??Nm7U1`O?8?E?vrxpJm zwBp~BR{VR>ihplf@$W+`{(Wi1zaOpm_oo&A0kq;jkXHN$(Te|IdXQ3tR{V$3ivKaR z;(siy_z$BM|Kn)Ie>ko9A5Sa(C(u2$w3I%vtefQ6mv!;?u{Y_`JrozA`+*o~`=}u5-CUFHz`O zxkL9E+&&{b%AOWJ-<}d4ZBGhcU{BEdh}zM{zvsQk{hHq~u5;1+=5n#yF~67CnBPn7 zxw@6>Iv33EShr(-$Jv z8MNkiCaw9MMQeU9r!~J`B(D%%QXaA&*y4-rlx?_^ShRwt|_GT ze6ORYD#d9%|GD&Jg^<>GZlEXXJ_4=r-9(SqM+In&_ZE7rLQwkFvOW~k;ltwZci+}W zGWfW+^!pL?oAIDK-2O)RPJ3ziF8lTH-S%tYd+b-k_u8+7@3UVH-*3MZe!zY)Jl}pH 
[GIT binary patch literal data (base85-encoded) for the deleted binary model files omitted]
zK6#yNee!zQ`s59=^~oFM#r!`#rsBRxm46=JTsVJ|9=T6a-6C_Jq`Fn+K1p?(%zcvT zcA5Jm)g3bTNvb<#?vqq^$=oNY?v}YvQZ13WPf{(FxldBvBXggmS|)R!q`Ft;K1p?- z%zbjx|J)DD^?WMshZSz_hX>r;4-dMzA6B}#A0BdZKdf?dKdg3hKdfk}W#)+auZtxtR^Tc7w$wm$K>Y<=Pj+4{tnvh|6tWa|@O%ho5p zk*!aBD_fuVPPRVry=;AAtIU1zgC0|DW*@=*Jo^alXW2(^Kg~XZ`$_f@+>f)5;C_^S z1oy-2Be*xFe-ZW*^!)npU-H`U-}374Kk`H2ZSsTRf8`b7tOCdPGq0Dn=lK1X(DSMI z{g-s}`!D6@_g~u0@4t+j-+x&*zyES>e*ZhT`Tdu7^ZVb?&F{a0o8SLVZhrq2-TeM5 zx%vI??B@5si<{qnW%mob@4(IVu$%ii-gn@BmiHaFpW%H6ZmyTT+)uGj;O6?N;(mhn z9k{uks=6QJeFtu?uWIf`_&kD}>#e$b6Q4(LbN$tHZ{YI?Zm!4L?sa?~!Oitq*S&`K z7r41z4{)#I^9XLP-+Jzqd>+Bg^<3Zm0G~&2bA2~(FK1uC&Gp{UeINU11ve@@9$&cj zr~S`;k#32``g4|xO=Qc(rn2Q?Gud*nxoo+3sBF2|LbhCNDO)bKk}VexlPwop%a)66 zWXr|2vgKks*>bVH%zcrngKW9fQMO#_BwH?ZmMxdM$d*f8Wy_^*vgJ~Dncq{Y9adZ8h;pX}q>gM_z=H~hv?k1PcbaOqPb(2fyxw(Evy2+*U-CWP3+~m>)Zm#dqZgS~DH`n_`ZgS~jH`o6dH`o6q zZm$2a?xpM_xViqvxw-x?b94O{xw-zwySe@+xVio(y1D)*xw-x)ySe_SxViqPy1D)@ zcQ0lit>9^e$Kwju{@!KMO~51fMXKrgbC%09WXt6%WXt85vgPuXvgPtD*>d?R*>d@6 znfoHuY}s;oj%>MnjcmC*SGHW9CtEIGD_btlmo1kU$jl?D7Rt;YsTRr18>z07nJ-dZ zFF&5d7J$tBkm^R6c_Gze*>dd=1*>d<6c`5%C>Q>ou_%_*c_;%TH_zu}}_)gh! z_%7LU_-@&9c!_K|yi~RvzDKqkUM5=(-zzUB7yjpZyI;?z;(A-|=6YM<=6ZX;&Gq)6 zo9k_*o9pc%H`m)LH`m*0H`m)5H`m)*H`m)bH`m*GH`m(+H`m)nH`m)HH}mnsZsy}h z++2^3x}Reo!OivgxSRR-2{+g4lWykYr`%k>PrILBAHmJ_{H&Y#_&GP%_w#P%;}_gq z@0;D5xaHhj|1Y_@{$F-;{lDT~$}R8a`hU&M_5Zq?>;DZm*Z-StuK%~(T>o#ox&GgA zbN#>T=K6on&GrAjo9q7r_hSBD(1N!V9uF;C`^Ekq3d`XS_2(>yKawqnKb9?rKankm zKb0+qKa(woKbI|szmP45zmzS9zmhG7zm_eBzmYA6zm+YAzmqM8zn3kCx5}2oKggED zKgyQFKgpKEKg(RVseX|yhkup1E-RHye}YGT-oNYlRQ$aEaP#y2)6LKOFE>B$zuo-2 z|8ev4-sa}#{jXa;@14?rIgk9jb8dd#CEWbHOS<`amvZy-F74*$UB=DNyR4fWD(5DL zc5pw>K7#u>_7U9tek!=hp`G0PzAC!Op-OIke>=O$ph>BMf=F{KH;LO za=cHtXkR(rCtOrb=6!R8D%ww;6|OE{8LlDE4A+#e2-lKlglo&w!*%3o;kxqW;r->Q z;REC;;REH#;d=6<@Ims#aD90~_+WW_xPe@h%MQ^aNBRHceR8Q9>Un#gawD1d$)##6 z+xwK8$h=Q3Ra4pCr`$~DeR8Rq%l1CyLuKA4m#T$4B-~OyJ={v>eR8P|lX;(9s@Af- zPq~fE`{Yu!m3g0BJ3Xf2eR8SV>v`T6m#Txz``}V_lzHDAfpX!*zRG4c=LW96;9A3=|)zE20dFG0_L7d}D$Hr!wSCVZm&b@(LttMJM4 zm*G?7FT$tFpN9v?pM?j?pN0p?pM(d?ABRtqKMJ2Ne;6JjZ~4zf`6KdgsGd*7`;>>d z$;07p-lu$~n|wUW&HI$kc9WOqxOt!Q2sin8uABEMpXVk|N4j~R^7(G^b(EX;DPQ0w zZ%4a%pYnxn^7kS)`FpXO{2k*ae=l*9zhm9x@1<_?cbuF2z06Ji7P-ma@ow^Wf}8xE z=q7(Bxyj$jZt{1EoBW;XCVww?lfTp412=iR&|QJ=HE@&9*SX8{y#{Xb z`UZD7zSqD_elK>H;d>3-B-Q@oxZu0+8H~IgVoBV&= zP5wXO-onUJ@RNnd`GxG?ynQq%$}{HT>?!@Zn2WQg<(P}JXXKcRvuEX)i?iqCn2WRL z<(P}J7i4mJvmR6N`H>ghd|u=wH=hrA+0ExcUUBpJk5}D%-s3elpYM3x&F4AZaFdIv z-qiCpm%b&Fi>cn0Z7zLBCKpq^E8AT9o=h&LdSAA=^aGh(OtnR}x%5MsTuk+mY;)9S@Q?Ct;h*GP!#~TF!@tP8gnyNH=KBuxn2NrT>UTX)A4v6wOzx-p zQzqwA{Uwv@ss5J9@l^lF+UP89HyrgV%c`4cE^3t-+ z%gf3(mzR@mF5f}6xxBn=bNP<4&E*wjo6C2SZ7#1Ulgk(Ce|IYG|DE-GDn389 zi<>;C?B?@hySmAT-Q0YBYP@~4%X{5i}`{!xc$!;4bbDV|ITNFu6!f=1$s<%eViYu=NE;~mluRb z$=8N2kmrU+%X7jP%2$Ukl4pf4mahztk!OZ4k*^4km1l%6m8XZt$dHSIG3K zR5NAzRH`dw`c$e}GJPu5RWf}l)zvb6D%EV6K9y>YOrJ`1jZB|PHCLujrJ5(xr&3)j z)2HU^G1Vb)eu18E5MC%B99|^X4__xA6uw@r7rsG0FnptYKzOmdfA}W3Zun-oPWTqN zcKBAgR`@o#X83lwM)(f7I^S!c$5fT#{9Sr}r|{kKj^QQp4&kM8+3-Db>F_eSWcXe= z7rsxnIr)Cs=H%tF&B-fdo0A`qZBBkrwmEsFY;*EMvdzh>WSf&$%Qh#kk!?<1E8CpB zPPRFDy=-&x2HEE1jWT_rP(}I2d-+}iJ)esFf5d$c-)rC|{~vQN;d>3-z)k)?O}}`<&HnY9Zu-YtZuYO=cGFMZakGE@uABbyo}2ya_ucfH58Uiu zZ*kA!dkx(5pO4%#`CbDz{pS<+48GUEP5=4KJ&k<Mn@y6HbZy3gi&5Z&~jpWSEjJ&11l&#&%b>SmtkMP zU7CFXcPaJ-+$Gr;aF<|Tz@1}Xz@6o?>Tdd94LAL-rknm(%T52Q?WX_Lant|my6J!W zyXk)ixaoffy6J!Q-1NVL-1NWtZu;NBZu(yXH~sGrH~nu@;eKdXcz(X!4mvzn=gGxX zjr8Zp#Z--DaxqmCnOscOR3;ZwHIvE3RLx~_G1Z|mxtOYjOfIHsDU*w-TFK;Ms>5V* zF;#1sTujwQCKpq+mB)wM$>d^tJ*FZTQ+3euMWCssk+GIVydn( 
zxtOY(OfIJCE|ZI?ddTErs-7~rn5vgdE~e@&lZ&bP$mC+GzVg6uKbc%ST#u>R#rY%j zeB1Dma+~l`a_jKX@?qg)&U_cd2%q-Aer1tHCQI+Qk^D~YpG6`$+1*JWO6Ij88SJQYN%{; z`7qh$^5L@0E!$jvp-e6nn@h=`i}ide@@I^j{JF$U{)}~#KbN}6pK)&T=Q20>Q{*Op#=FU% z32yRdqMQ7gn=V~|kGuuu6%yE-H*SN`_xo+}jo}2u+)=mD*cauL0+~m(fH~F*3P5xZxCV#GX zlRr1O$)6kDRmC42A=VWp* z`FWXKOnyNo7n3*3V!f(qLhu@Je48JR15PnZSKm5LY9{UA){D0nGyG75F!>K-$$>CHV$>eaV zk7aT=)h9AJoa$4V98UF_Ob(~|TqcK8eIb*>slJrS;Z$GARXu{ zPW7Ek4u7x5RQ=-oRz2T0{Da&l{G;4E{FB@({IlFM{EOTp{HxqO{F~e@{JY#W{D<5n z{HNSG{FmG*{I}dO{EyrryiIP;et{lSk;DJh&K!}usd6$oo2rCNuBIv}lcT9h$>e6L z(lR-js*Fr7rYbAj99T}aIdBKr=D_l@&4D}0HV0OaZ4TT?wmGn(Y;#~G+2+8VWt#(c zk!=pFEZZEot88=NZnDjRyUR8Q?jdjGpY2tYe<1Jn((|dvyS?4yT@^QZw~w2=tLi52 z_H~nY)!gLWes1!vx|_VK;U@2Dy2-m*Zt||So4l*z9?$y-+~nQ=I! z-_CCGw~Koq?;~)NzunyAZ+AEO+rv%%_H>iKz1-w)Z#Vhd$4&nBb(6pS+~n`!Zu0jC zH~D*{oBTb>P5vJ3CV!7{ci`_$bd$fwxyj$--4%1$32ySZzq>*%JJC%(pX4r|%T9Ka z*QdD4<+4-VX{Jx>m$ zDw4^eRO4lGDAfd+97;7&CWlf@lF6Y|lVx%!)fAZ=N;Op`hf-ZGlS8Sd$>dO~=`uN# zYKBY>rMf~Uhf>Xy$)PLtnCjFxKTFS_623}4IefKzQh2s}Vt9_+KYWdRLU^uxe0ZLG zT=-h~*zkP$nD7Gm=2@J7k*!@04v0yi2w@@NU`Wz$LQHflFnZ1MiV- z4qPVN9C)v6bKrfl&4KsJHU}=3Z4O)^+Z_0SY;)j)@>c%YUPbu_=Aei4d@AOkRc`WN zwVOF;jhlQ}>t+sG=O!=KyP1PFxXF)=Zswp(Zt~<|H*?S?xZt~}8H~I67oBVm!P5wORCV!rHlRq!G$)C+`^5;c2`SX&S{CU|;{=DKQ ze_nNyKd-sTpV!^w&l_&?=S?^H^Ol?ZdD~6?yyGT+-gT2d@43mJ_ub^r2X69bi<|uU z&`th)?_Zt~}IH*?SzZu00$H*?TeZu04CH*?T8Zu06|H*?T; zZu0AUH*?TdH+lAhn>px5H~IFHn>pxbH+lDqn>pxLH~IIQoBaFTP5%AiCjb6)lYf7? z$-lqd_YbrL_f|f4Rrv2KQ+R$; z;lFP4_SR68r}c%h`g7J7%E{Ijc95+vl$Wh9>?m7bs32Qk*h#j&P*JwNP)WAFu(ND^ zVHer@LS@NJc}}>AJUiS}zB=4Y zzKWNE^_c21UMhBvODA|qS&hmNTF7mnIu5!C@H@R)NyWA$+Lv9`JDIXT@CASLqmRpAV$SuNs zxwoftT#u*YlE|`#N6o zb1&j0LH9ylGITHCB}MmqUUGC_%Ui|V^N48oTq4|k4H562Lkn=vrbW1~riHk#qQ$r` zqXoIg(W2a!(!$(hX>smLXo2oAv`F{Gv{3g&d~c!qLOw#{9?eH=+!tioP3}>AM8|zT zAK`J2RE=Yh>^PuA3<`r<|9h(!}ti3yA>aCa<}9oQ0^9d zM9O_A-$&qX&WPe}#t7qX%827`!U*JU%!uS}#0ceX$cW`8{~vOb|Et{O|7th+zs61e zuXU6E>)ho3dN=vM!A<^ebd&#^+~ohmZu0*TH~IgloBV&wP5wXbCiI6C{6yjL{{Yd@ BL5ctX literal 0 HcmV?d00001 diff --git a/models/schedule_world.3.8-rgs.bdd b/models/schedule_world.3.8-rgs.bdd new file mode 100644 index 0000000000000000000000000000000000000000..a4e3e444d0d0d25ba351a6868a75f94e5c72dc1c GIT binary patch literal 97456 zcmZtP3Ak0``^NFLkA%!KnL`pXha`yxN#=Q~G>?QN$$BEDI_tgPXT59heJW*HwxxdUpkMoy=(niSe0SeCSnz?p z!}(st@Mb>0N4T85TX=JOmvDJ|r|=f`4q^YfvhBiK+1rFG*cHNC+gpaWvCD_IwabOK zvrEF;+gaFuC)vhAwxhSHHspT`xpwmJ>%u$Re}#9k*M@hs{|N79uL|#OuL$p9FAwi& z{~E4nFAMKw{}kTaUK-xV{yw~~y(GM!y||F=?`^8D^b4X?OEX?>>1%|_O$Sk_LM?)l((tgit|VN z*c;(v?AOA_+OLF%y1X9mAK|?ZcPbZNpdCt;1K^EyGvY&BIsQ&BE8%O~Os=tHVw0E5ps~ z%fr{&mxi0$7l&Ke7lvEf=Z9O_=Z0I`jl*s1M&Y)0!*DyhLAbqLFWkXCBizxh6YgZ! 
z4qs>23SV#640pC`gm17<4Bu#158q@T7w%#o6TaC#D%{ns7QV$kJlxH$8ot%867FtS z4&P=U6z*YH3g2$;A1<}`4c}q!9qwsY4EM742=}&k3-_^i3HP;k3g2n(5bkGh7w&Iw z6CPk!2oJQk4Bus!58rK<3lFkO!uQx&c(A$Azip~}z1dKNHbcC-E_|PN{|XQF?%MGE z-u)vy%)6_?4|sP)c(`|$hadFrui+8iT^4@GyFZ0TdUt8~Veft)9_8I7;YYl?I6T_B zUxy#{?w8>)-u*oMn0G%7kM-`y;m5uEVR)Q(-w!|G-FL&|y}Kwp!Mh8>6TLexJjuIr z!cTg4R`@CJ&InKT?zHgJ-klPD#=CEYpY`q=;pe>jTKIYIz7l@HyDx=b^lpA|)3N+i z{x8|&m%YjVqnn)ntMWhOCco-U{y(+J`F}e9OW5Ssy~+R5HaY*J=6}hX{H8bgH)WIa z|F!&^xXEvOGkVj$f1l#t@{7bfSvJ+bVq z)4qStzcoX`v%NbwoPR3^h39(ru5kXX9}u4J-G1TxQt2CB=-uAo{L<k;SQ^KSQWe(81#f8gD&;rvqX68^}$H-67;cvX#B)r(WSBJm#?v>#s-n~5hop&z{fA8Ik!#{ZU!the> zo*(|vyXS^~@^0hs&)#hmUgq6~;a|MlApEO$>xF;w?iu0b-mMe<-Mh8JE4*7PywbZh z!>hboBfQ$XCx-v)m6*e|q<*@L%4o7XI72hlkgBw`zF3cdLXqc(-!+ zAMYL%-ss&*;eWlme>k5C_YD^c+TS~zPn{LRoB8}6;e4vyExftU?-I_Z=AFV@`1}rK zrf|*waegbG-zJ<-{T0Gn`}~&Syj3V4-qz>Kh4a>;B)q-PtBRXV(0a#?JfQUsv@H30 z$Id*j^$xT+`Fh8$Jg@Z*v_$!O$L>6@^$xU9`Fh8mJg@Z*v|Ram$6h?I^$xUX`Fh7b zJg@Z*v~>D^EXnrc`Hi~&r#BR`1L$=Hwv73D$ALV*w!juMU+*}W=T{ZjlIH6jhw%LJ z0$bR8z2i`xUshnto3D2q#`8-HY?1Tzjw5(}Nr5eOzTR;p&ws7^fBMTpb~OEYfh~K! z-f=9?e_UXTpRacu&-3pWcuSD4cbvfUi*)}_FDPUu(enzt<;d4NPUiVp1>U0M>m8@? z{ImjZY4Y`sQ+fU^-T%{X6tdIk*9yF4%GWzi=lPckyv54bJL>ZMa|Pa#En>djaW>D7De#suU+*}F=SS)OpB`Dr&Z9>Zc*~lv zcU-{p!wS5`&DT3F;`t#3-V*2Q9hdO@paO59^YxC)cz!^Ex7_)9#}z!^x4>KUe7)l; zp6^-UEq%V;aShM+C~yfNU+-wj^W6$uGRW6EuI2eI1uik<>m4n4zH@;~68U;ZE1vII z;1Wi@-qD8V+ZMRwk*{~O>5>me2aSP8kEO5ywU+=h;=j#=?M3t|1 z+{W{D3YoseQ5x*G)3pj-0*k6y$nKzP6u4xTuXps~`RWBOvE}O>eR%$u0+;0S^^QAv zzFL7xc=>uqf1a;e;F4dy-Z7BpD;Ky#n6G!-&GVHCTvE(D+12Qsd$psO~_O9k}R7{FV3>3>96xCDX#x7v+P-(|GdnSeSWj{JkNhz zW{E#v?|6~t-!Jn>AivpsndcXkd4!O!cf88;^U6GO$k#hw=lNM>9#Q1$9dGjdG%ab+ zQ?l%B`mMYri2wgLbl=bOua$XZlCO76=lPe)JYvb$J7)6yb7daMzR< z$6TJDSmu#WzTPpP=f{mBd#{HVMoi~s+TS@s^!k0|rVDqrvT zfaiymdBl~kcYMV2L&`i7%hx+T;rT&%OBw(DyRz&vo*z)=kz2mr@deNKE%S&jU+?&e z=X+{Nk1ox!Z|EL*OC10G?pgLN&vz^H$S_~;_>SkhlzGIMuXp^w^PS5)lFZjTe&qR% zWgcPX>m5JyeA_aQJoELAUwFP{nMb7gddF`(->l3d)qK6nx%p=%*y<;WMUtZ>s zZNA>In&&Sr^N2TJ?^wh0=a+dToUeEM$@7iNJVMUbJO1YRhGiZ(=j$EodA^>OsA;Wt z{6p8tA8E(;w|18O%k#B#sw7T(y`wbDlq@%p>`Hy<-cW zua@7^#y_w1j;(mUYMDp=`Fh9JJYPA#WsZMd>mA$je5EokDbW0%W!v-ozWFVA{PTNf z*^WG4vCK;r^7W3Ld49M2mOuV^t#|Co^E;J!NkqQhu{+OimoF*AKd<$UJ$b%@9=Xt3 z@7RkjU*;tm`Fh7bJgHhwAHx3_Q zpB=7bHwqtUpA|mHZWun;J~Ld|ZV*1it{<*q*9#wN*9}*-&j=r8pB_Hkt`k1OJ}q3$ zu3cyQBfU-k|NF&X%27VPPxxqiukbPUp5bHd-NVP(yM~XqcMey#cMP9kZy!F^EGYtl&Ma!w+h#?@l5Shdy6<<+g8t+>NI=va2*@Z+)lSQi}PpL z>Pb`8wF}{TwtCe1`L_5~`9l4s{RTe1sCZnPGwpn#f77vs_JU3O{{1XFe^9XLSR;Gh zrhWf@ww*s{*mSJ1J!jLte?Q00A5?5QcCI~Z)4qQ{&(0roY&v$nJ!8|pf4{)aACzo5 zcA-6O)4qSd$etR$*q)MQmw21%nzW?^`o+M3f+U1eXE zq<^)2Y4{rZl5i7Sb6%>Z_C-m0ykK7#zSh1V+}zfjn5u<+UXmVP*ym_Ug4hhp5b2h?&03{uHioR&f&iHj#+l6x2ZJy^rN@Uvi|hedRY*? zRhA8;x6ra0y?K`1O>d@`9nld-rTHI0`9GNFwPtWHt@&UGy>}tIkJkJ!l-8QU{j}zb zVYD6yJV0yy7*1==;XzvS$p~5xBp#wQzl@}{Ch;(>`DPTY2H_D}^Ur8n^UtHS=ASXN z=AXxC%|Byl%|DORnt#U8ntz_4HUEsKHUCVYHUCVcHUCVaHUB(GYyNqP*8DS>*8KA{ zt@-B}TJz7dwC11ZXw5&*)0%%?pf&%zNNfIiiPrq{GOhXN6mKquTJz)gwC*W?pfz7ErFD<_Bdz)KCtCNMKhv5|m(jWh{e{;2`YWw_(%)#! 
zx6A3Hvg~(S^Y03}T9&P(H6O2{56`mIwC3kO=&D(^hSq$&madXzf6|)2|Dr2r+26G0 z^L4cDf!EWT-#5^ev_?Q{zTZgiuQdYN^S|cE|4R4F1zP<;39WnR&1m%pNmEdchD^tt^Q*NdONi!wEB^q z=xx-}(CSZip)05bqSdeLMsKN>iB|uz2VGt*7Oj4!B3(``8Lj=jX?<_^q1Er~OY0kb zPx1S|U-9^R#c%$Rn~s;}JV*avvG(`ZH4YzOpB=7bHwqtUpA|mHZWun;J~Ld|ZV*1i zt{<*q*9#wN*9}*-&j=r8pB_Hkt`k1OJ}q3$u3gBE^fndO1$>jE~{1#GSh*jyK|xh`OHUBKqLfX#IQo9hBL*9B~@3)oy2u(>W^ zb6vppxd zYhLK&Z7R(Rsq*9U^~Lkod8>IMRcBlCM5-HX%@e6^v^7tpy2;i&k*bTWc_P)#w&sac zU2V-1scx|~Po(N*Yo17TtF3t=Rd?IJigla!(F63bBM!6M@wGZrs$W_x(`yHKT=PLs zTJu3KTJu3~TJu34TJu3)TJyo3wC00;wC02UwB~~WwC00>wC01mXw3(A)0z(k(V7qL zp*0^2rZpejOKY8A2(9_yK3eMpLut(y_tRP@7)ERUc!1VA!Ejph$%C}k2}aPGUml{h zPB4+XT^Ve+pM6Cjwy>3pskXG0 zi>bD;R6E$pg;YD*%7s)r*~*1f zJKM^IRJ+*9g;cxR%7s+B*~*1fyW7fzRD0OUg;aam%7s)FZRJ9$y=>(|s=aOHLaKdi zVoLsNz5SG`_}pa7^)d{;yFU97`(? zj-!a_CU1X_7;BCR|)iB=xepp^$F)5?RIwDRB-T6s{5Rvw&6D-UYZ%7fGB zYqZotD-TYml?P|g%7ePJ@}M5AJg84A4;s+QgEML6K|@-3a2BmRXhbUy&Zd9<4k$pH?1RKr0U}q?HF3(aM91Y30EswDRCmT6u68tvtA#RvuhID-W)u zl?PYR%7d$E<-s+y@}LQ=JZMU5{5PXD{;#Dq{+rVp|1D^Z|CY4Ie=Az!zcsD#--g!s zZ%b?Zx1%-w+tb?bKx_PWq&2oX(Hj5P(Hi7O7kzzkJ0&0ZdbGbO7fN#;8Nt!nUso-B zgMCE!M*HyaP4;2oE_T)M&Gw<;u6C91E%qVdZg%DHt@gp;?)E|9+w23wJ?u*1+wB9w zrS|?>FYq=M)>C`>IM!2p*;r5QZDT#PkB#-zzBblV@3gU=+Rw&%YJVH+sRL}Rrw+8S zo_d#!_0+pM$GYsSns# zPaSS+qIxjjdS9hnPBp^EmCLCfvX#rJM%v2dR1e$AK6Ye3i4dkyH*^*RFjwDcd=x`B_Ms`Ub%KPApT@8c(jU$9RKzi6Kje#t&Q z{IY#)_!ax;@T>Nb;n(aV!mrzhh2O9b4Zmq05`N1*IQ+JKV0emsz<(~y_mqFrd_0x% zZ#u2~n?Wo8X41;PS+w$RHm&@dLo5I0(#pSiwDNC0t^8X+EB_YK%D+Xl^6wp5`S&iZ z{Ckg9{=H9Y{qO@?dH5l%^}~;7<>SY+)(=0Sm6xB=T0i`ZR(^g?YyI#GT6y{l zWuY}6>_XR6i$iOE*p05CmWbATu?KykS}0ocM@722S}t1i$=>vFYSC!TFZ&`5Jch@X7Y^;hOew;Zy8m!?o;V!l&9thiltM=`#qt zO@;e{IzEp3g41o>7o1_^zM!s+`+|Bl?hESMxG!j6Htq{9wsBu@iH-Y$OKr_d zm*rdUt8ia%xsT(%;0hb}1y|a*FSyFaeZkc>?hCH5abM8H#(hCk8}|jxY}^-IYvaD4 zxsCgR7B=n+TH3fTXl3KRptY@esg1X(G%vNKH4n9;HSe^iHP3XQHLrA}>uSA#*1T~Y zeVSfJKxY31M3wDRv6TKV@Z zt^9kAR{lLtEB{`gm47eN%DvaUQ@^>DszI;Bd zJYGPnFJDM2pBK^U%ip1u*YDEm%ip7w-|y4v%Riu%=O5DQ%Ri!(?;q3Z%Rix&_n*@0 z%Wo3;Vb5m-esWuk2sKU)#&V-`GEg7u!FDzqNl1 zFR_<~zq5Y`e{X*u{=xn(ywqM2{?Yz6{FA*{e6e z+(uvYyN$kRg^j*wrH#I5m5sh=wT-^$4;y{a8XJAlS{r@QpEmlUzij0~s=saYCF^YT zCF^bUB^zw?CI8syOE%i*Oa8Udm*mfAqc6z{<#m2P{VG<8y>F6QUVEQ#IeYK$=JsCU z@^;1W7WSUuE$uzRTiLsZE7-e*x3+f;Z)5Kg-qzkZyq&#Mczb)t@DBD4nm0<~Sf=r} zlaHs;_}iJ*_}hin_}i7%_}h)v_}iV<_}hcl_}i1#_^U`S)n6(7gZ|2CjlX?pjlX?q zjlcb9jlccr#ab_*HU28m8h;1U8h;1T8h;1V8h@2(jlV-^jlU|i#^0f|#$Q!h@plZZdFfbM?>`%g%+aHIous;f4 zX@3~L%KjjHwf%nh8vDI)6Z>7gzQEg5%7s+Td|bJZ>RMa5kgB<@Tu9ZzRxYG!X)6~} zwX&59sao60g;Z_qOOmv1U@I3=wX>BAsoLAhg;X7EN;Dwkm`C{ z{dlU*w)*c>H`wa8Q{8B*zfN_NT{lUeOJJ*iPIa@bemPZFTjMa*Ew;vCs&2N%VX9kg zjl)#kZH>cJx7iwpse0HNhpBG2H4ami+8T$c?yxluQ}whp4pa5AH4antwlxk@^|3V$ zQ}wkq4pZG}YaEuDhc({%`*C;!Y26=Bpml#dk=A&eMC<X+?-yy^AHPIvyuVD>)_MW0 z@&78V@&6jF@&7ul@&5*`@&6{R@&6XB@&7ih@jr#u_@7E^{7<7b{-@I#|1)Tf|CzML z|14VLe>Sc0->>L7#qB-Cv0vu7%{7IT2-#eJ-PPfF_Eq8e_LboU_7&lU_T}M4_GRIB z>`TM%+Lwgivo8+6Z(kJtz`ij2p?yL4Bm4aD$M$*QPwaE``T}oL;knIcK91)$pWAqD z^M#G)HecF!Zu6Cm=Qdy4cy9BJjpsItZ9KR6*2Z(2B{rVhd}rgi&G$B*+x%eTxy@1= z&uxCR@!aMo8_#Wiw(;C%nT_W*zu0(g^Q(>LHow_;ZnNCRbDQ67ty`|hx87GNms73u zapiKVRkm_D)oNS0oazr-xtwZ^tz1sE)>ba3`qNe}r~1oQE~on2RxYPnXDgRet+$oS zsW#ZkoWZRK*Re{JRRf3=17&<7Q4^g$&y`k>A1`ubI@ayI&)&298S!@xBeM@xCps@xC3c@xDE+@xBAC@xCLi@xBwS z@xC*y@xBYK@xCjq@xB|a@xD8)@xBMG=Ola58vhk(Jtx_VRvzq4>p96jwDMtJTF*)L zqm>u?(|S&F0ImF}MC&=pfwc1EAX?8!4yKham1#XEIfPc;RH2nOhtkTMsz#Cqm?&D(#o5oXywh(wDRT{T6uFUt-LvoR^A*>D{rdP%9|5t<;{t-^5!I3 zc~gT{-keM;Z)(!Yn^S1zO)Xk^b1JR8sZA?yPNS7Kb!g?y>9q3Z3|e_pmsZ}?qm?)H zY4w2(X!U_-(#oTTwEDoaXysEQT7BTzwDPJktv>J^y1pWiuBV8km1pPE>H{yJm2Vf) 
z>H{yLm3J4@wKc^ReMxcKBw7NeU}@eDz4PC}(o6ky3&WS$^TU_hbHi8Iv%^=~Gs9Qe z)5BNWQ^VKTox)A*4&kPDyKpnRP54^7Rk*p`BHY5hHr&!~8g6ArA6VMjj=rz7jU9bn zX+G|`*V|`>JKJZ5Z?Nl!Z?x-% zZ?aDhcd<_k-)x^6?rNVBzQsN{+|52Ie5-vzxVwFP_%{34a1ZJ3;okPa;Xd|(;lB0(;X7^3i~YP!eS+r2RQ-Ki^J1z2w&ulD18vQVsqV5h zFQ&TN*1VW%kga(!)jhW6#Z-fB&5Nn-wKXrM8e(f+Om&~Fc`?;cTk~S7`)$pOsfO8_ z7gIf8Zyz3RqYr=3-ZsvUu+fJ;9kp0R&R(z=2DOZYka=kW9PkKq^WAHpx%--TbYzYV`^e-nPi{wn;c{YCgS z`?K)t_9x*t?2p24+8>19vfm58ZNC$qVlNC&wdaSY*>l6w?b+cO_RR21dwO`5JvBVr zemgwJeltARemy+Tel>YGx1Y^zTyR%yO{N9zJUo@$ZacR(-H`wr*@dfx#(U++7h=jnY1 z^jy8~fS#lG9niB2*ApmksT53PP_Bdz=5e`)tm zTc(RoTiq8IX!TPiwC;;Hqt#!Pqjg`rIjw%HJgxiUE$DN!EjJd; zzdh($S{Igb|9cXx{VNCd>VHqC)&HJBtN*P_tN*P>tN*P}Z?B(mNN-oj z&ZM{1&p4#FDP(8STkB^W(iIBX+4NTW8He1OmFx}~SpUpJ>$X$gQMm>vdY~X!Ymq zX}wOX1Fe3&BmI+>Jm??w{sMZb-d{kgpYKfTby_#j>hEu)^*XJaX!ZMD=*9ZE1+@DA zuJqS>eJQQ`fo}AdS#~R}`-ATE=UH|et^0)@^r!my1+?xTO6iZY><(J@6Funwae#Jw>k9SQO|KWAb-y%IkVPOlfBb-y)^ zenzhspmqN>p4Phf1X}lF6KSoBPoi~y_9U%!@uz6ruT7@4F8(yF`?qIkt&2ZP>wfMz zTI=G^)4IQVf!4bCi?r_dUZS-w{zMdK>MXI-LT^FgQ*t#xKO|^Ahq?%^yx=1zM)^(9; zhOO%&)l6I0MXFi0u8Y~;rg~cMJD{J^(gQt7?>nF;XsLpJLhn1EAJ@_b{g~c&KtHOb z5c(0lbdG*lODFV0dg&egpq5(b2lUcC`hG3V(D&)3fAqb&F6evGUxZ#l=;L>XKeGph zKezjbzp(ENe`)s#e`WUye{J8Pr6_Mx-JWHO>D#n)Mc=Bo1kkr=sf)f@Zz-T}($W}x zgWi%rU!VTNT2k}zPT`;I4&k5ecHw1qoA57otMIROi|}vuwc+Jr4r8WNkq&5EjqBZ{hrZxW7(Hej2X^p=Pw8q~*w8q~? zTI26uTH}wO6_-uc%I#L^FGE|6#}fKUEd|pWpXKO@T1uuTXsMYVucc^O<9ADXoc=hp z#&ZRFtd_!Qjqh#fFrpDL{BA(E z)T38gdESt2u1B@B^1Tt=OpkVH<$Ytii5>+N^Y5JEas83-(&>#rZJzN9~`>u~_B zzGNV+>+>#JeaYSQ1g#g)-rr`t1r2i*7ZDuR$p=-t?PRzt-j=bTG#t9dbBQC zTG#(@T7Aibw8p~-T7Ahww8qCsT7AjGw8qOQT7AhQw8qb9T7Ai*w8qmIT7Ai5w8qz1 zT7Ajmw8q;wT7AhAw8r0fTJzEbTH|pdt$Aq@t?~IJt$FDwx{cNgXiWl7(;C0e(3+Q? zr8SR2%csG#m5MbQ|;13>)*(OdIpkEL*vd zYPPL$F~{3fm>1^yIOc_UwyyJ3^KHxv3vA2_3vJ8`i)_pb@7VX~SFzr;)i0-d&%QfJ z>jt*^3JEsM7L>I<9@o$sk85d-$3JO}$G>Qe$G>Tf$91&E<9b@-aRaUK_z$h|xRKU){FnB4 ztdRa^wbghm(3+=8XpPU!Xw6gQXpPs+Y0XpRX^r14Xw6ew(i+cO(VC|!&>G)c)0(HY zp*7yOr8Q4&M{E3VPivmqfmR;uNGlI^qLl|b)5?QgXyw7K^Z>0F(EYVuKr0XSpp^%E z(#nI1wDMptT6wTHtvuL=Rvzq2D-ZUgOSN7=Yy2NTYy4NDHU1BzHU1BxHU1B#HU2Bp z8vlpT8vj*jjsHVwjsL2&#{Xfo#{c28#{Utt#(yt#Oj-GF#&$)#bLvNvbPs zjgwSY+8QURuCg^wQeACpoTR$O);LMk#MU@D?SIBYGapZ-@o+7z@z9*scxXXuJhY@W z9$L{F53Om9hc>jvLt9$op&hO9(4N+K=s;^cbfh&NI?);r*U=ge*V7sgooS7S8)%J( z8)=P)n`n)PF7yC>OKFXVuC&I(Ewsi%H(KN2R$AksJFW3>8?EusgVuPsoz{3LrAswL zXkGt3XP3!s} zMCi6Q#yl~?#ys(mjd^0Ejd|i>8}q~{8}q~?Hs*=Zw#LPy-llq5>j?ByT1TKK zX&r%{pmhZL39Td0k82%)eoX5K^rKowpdU$p5UnTp_`~5z_Cw((?FYk8*$;##+xLf` zw(ko+W8bU){{MXcpY!ol`u;yp>-+x#t?&Pfw7&l@(fa{|2q^|C_YF|8LRy{=ZG@`#*)&_kSv_@BcJw1_$KdHAK(7HZm(G&Ih z0$SJ09D2N7UqI{nnMaS)>kDXIPYdX=dVK+{>uV7`Mz1fRb-leykJjr8XkCBr)1&nI z0$SJOhxACjzJS*C`7u30uP>l=y?#m$*IP?yUB92x!}L}ZTG#WJ^iaKZh1T``H9bV@ z0<^C8#q?lBLebwAx0e>L{W6~`h+JIauR|_=XCoKCw~>oK*vQ4DHgfSt8@c$Cja>ZM zMlLS1k&D0B$i-i6RZeF0QaOE>f+ukxQ#==(Z8>^(Z6r7(ZBy=qkrFM574h-{cEFt&wrL3`uD=tI**0ip_ZD?J8 z+tRxJwxf0ZZBOg^+kw{gw#riMT-uA)^|&{!T-t}$^|>#tT-uM;^}0W;TsnZ(^;?NnE*(hgdOnC&E*(tk`mRhX zmkyzIy;q@?ONY|B{;Sfu{tu&d{U1*2`agmmsC5Kd*Z+~UuK%NGUH?bZy8e%$b^RYp z>-s;A*7bipt?R!!t?T~;TG#)Hbg9k@4B8>t%E>Mv5AZL6P1)!0`5km?*;{X(j9ZRGHIHgfoU8##P|jU2wv9;jc% zy2wTjUu+|XFR_usm)gkT%WUNE=fZ6k-Tu}lBwbJdi?O?^C- zuD52iuD5GxU2n~4U2iRDU2iREU2m;uU2m;vU2kn@U2kn^U2pAZU2pAaU2h#|U2h#} zU2mOeU2oUX>W{Cd)gO1Jbv@odt3SSxo}_gIdZN}5XkD*2)9R1A(zzU?@#OcA3*E+A4u!^zl+xOe>bh`e-N$f{~o$jue&LFaB+J=@!Bu*xmFq%sqXdH zA%}<9$l?2J;juPy_;DLKJkHj2dBPUjrqbW{cpp!tzwZgO{=O&D z`um+kzXT7Tb9(fa$IOzZFaX+kzzS~>IztsHul*7xHzS~>JOJxP}!tsHuj*7xfzS~>JKt?%CyS~)b8*7tK7tsI(8 
z>-#%{Ru0Xi_5GejkJcqq^z7nxMf%=v+HU$>pSKI?Xz3h(-J9XL_Uqw!_N(Ff_RHY~ z_KV?#_VeLI_OtqVir(fkrG9@}KTmO!^Mlz_`gw}g-^dV+qQBK?GZo+ACYex4%z zn0}rj{iuGPBK?Ry*MNRlpKCxrq|Y^=AJpd>&=2Ty4e0yzxd!xo`dkD0UVW|seb0Y? zKiOg*zdQV`Jutk)?jQcnzBBy2-6#Bm-7CD*zC)jD;BBhg^|=Q0ZTegT`c{3e0ey=; z*MPoRpKCzhq|Y^=Z_wu&(AVp84d_ns=Ud_9`do!nEA0;9Rkl7?A=PTTUHA`MpR15+ zjol`^*4F1Lr25lt75>ZC=PIQ7+inqFXEzV8x33Lvu=TkLss6E>hBw+x^tlG!rqcVW z|Eq1Zr}t5(D%g78bgB|t?~_ionXUIlrz&UbebA{kxAnf~ROM~G&pFi=w%*sAYD-)1 zYfiP5t@kyjs$lDV&8fDw^}gm*+t_+vbE<7^y{|ddcDCNvoN9Yp?`uxAgRS>9r`plp zJiL=#F1)k7S$G#)?`uxAtF8Anr`pZV^tlH4ws`cNbXMyLKE5ctr@b&-(OwYV%bp+J z+nyKR$DSMB*PavJ&z>FL-<}mdz@8bdWX}j6XipCxWKRnpY)=hWwx|5((tJ<(U&Y5$ zy`|4Jpfw*F|COuJ~Yd~whszs02=NizO zziQLt^tlGK=CeBVSbeSmt@-T?dW=5TfYy9hj~=bhHJ~;BHK0f7a}8+ChYjhG`dkBA z^J61=gg)1R)_mER9m#a}8+CuNTro^tlGK=G%+u!TMYS zTJ!Iv^dSA*L|XIl<@8;J>O$Znw3U))G{E@U^+>NmR3*A=pxY4smnY4sns(CR&^rqE+^r6*%^rh8*+)1nd z=tt{wUHa4NM+VUPT$h2g`jfk8eXh&hwEC4nv_9A69$Nj&U|OH+axblZW(cj%b-9mL ze>0TU=epcatKS($>vLTmpw<5jr`7*FNUQ%DL973Hh*tkIl2-roFs=S)6s`W}5nBDv zXu7<9ZX&JzXAG_W=P_FS&sbXh&*QZEpK-MMpC@SbKjUfjKND#6KNIN%S{In z|9Og5|1+6Z|MN7h{^uE5{m-+s`k&`$^*_(k>VICK)&IOmtN(e4o}%X{MZa9!{=fId z`<0}QsPq+o-9F)0?Y+XU*?Wdxw|5V}VecA#)80A!mc3(1_O`d_zIgkRY|19*2eWNU zvZ?geCD}B3tCDOwy+uhjgWkL(n@Mk0lFgzECE0BH-$FKr{-==5rPmj-dGz0fY(D*G zAzMJNDP#-j)rD*ky;AD}^zYHPzw6__h2OJ(3BPau9R9%mG5n$ZL--^6yYR>Mw^|qQ zHq|#;7ofk=x&Zx!)&=O#v@Sq@qICiKBdrV2A81{GeoyNH^gHqATkPYD!r$5p!%OT1 z;qUDE;qUEv;UDa|;idMR@Q?QF@K5%v@Xz+l@G^Tw_!oP6_*Z*c_&0lMc)2}A>jK`U z(!7*vg^z0 zZOsd**4dgDQmwZ&FQnRFYhFn8kF9wj)ka(MLaKjl%?tn4w%XIYkg8y7UPx79YhFmT znXP#tRXJPpLaNPe%?qi@+nN`)$hXC#xG&z)$8lf0mA$a|b=p+07lgOAabLWRjr-zl zZQK`cXXCzjdmHz~JJ`4{-qFT=@lH1Gi+8qhU%ZPwO}~n@t35SIuQRZx{O8hqPx-%x zkEhzVB-@kLd{B|zyCmC-*8H$HU9lwFht_Dum-(B z>jJdq$C~sytqahaFKf|%X0h-jKx=+(L@(320Im7DG5wR)1!&FR=h92HEjJdq z_Y3JIS{I---(O5G*17<#`TtV-Ypn~=>IW{Tztp+_t^VLj`g5%d(CQbira#rX0ImL^ z3H`Cw1!(mX&FBxcESXx$gzPOE<@rFCC?2d#dlC$0P9UbOm~-n8zE`_Srl z`qH{DzLQq}(~nmF)1OxVGk{kAGmuvQa~G}t=Wbg4&mda;&povIpTV^HpL=QbKSOBs zKljnZPf9|K%{|uwm|2#me{~1oJ|9OyJsLwB;)&D$1tN$5EtN(eJR{t}KR{!$| zt^Q{;t^VgxTK&%$TK&&swECa1wECaNY4tzj=qYNMi+-ZGeLD3%o3@*t7vJ+=-&i`{ zUpFW`!M-~@(Y`A@$sQPf(jE|g%I+VYZ1)R4ZQmJw#_k(_*6tI2&h8z4-tHBC!R{G; z(Y_=6l3f~p*}g6OihXPNRr{9kYxd3I*X^6aZ`e14-?XnUWN&$!)&V;GcRqXD$2*3n z*d4-C?e^hmcDwL&yKQ)e-6lNKZXKRww+hd;TZZS@Ey8o{=HYqvwc+`8v+x4DX?UUC zMC$_Hrow&TJ3g-5P4%v=oK5wftz1p@zO5Wh^?|M2O!c9yoJ{qRtz1m?v8^0T^@*(< zO!cX)98C3@tsG4Cxvd;b^@Xh*O!cL$98C3}b0yxjgI{JZ^ec!m8@c%}Vec$NJ@c(wh0_z(NN@EZHw|6H2yDewOD@l?vYzi8#% z-?Z{>9j&}uPb=>>(8{}iXyx5TT6y;`?Y!GA{b96K-W6!&T?wtc+l*G;m7|q+o72j> z^7QRm7odCS=OEC^!>#D<`Z)-+@^Ndrn|=-gt-Rcp?y8@IKr27Dr@QFqAkfOw9qAkO za}a3d>&|p%{Tu{ZdAlopoqi4it^D1cR{rimD}VQ-mA@5fXj*;nF|_jiSXzDXakTROcv^jMbz1p<0w-SY6I_HW_K>|eu|+rNabu$P6ew0{m?W&aeu z+Ws+ojlDG7#Qq`N)c!u)%>FKXt-U1N-2OJ)!d@J1X@3=NWq%QFZGRSSV}BBEYkw4O zXMYfGZ@;JY0&i0v{LX*pvyMK#DBQ_j7{1P45We1?AMR|=3*TVR4c}SZJhP&D`!nfGd!`G_2AcO<>ec+)`Q=qm7j0XS`U7k zR-R6wwH`c`R=!T7wH`d3R^HB_wH`c^R{qYSmA|uTG3?*dx+ zyO37?E~1sc@6gKMcWLGCd$jWReOmeZ0j>P~kXHVFL@R$krqu_3LMx9yrPT+2Mk}8` zr_~34K`XDnq}2z1MJvC*rqu_3Lo3f0)9Qo2rIqhXX!XJ0(aQVpY4yQB(8~X%wDSK) zTKWGIt^EI)R{k%emH)rc%Ku+!<^ONA@_#w4{QsR+{;!~w|0`+b|0-JfznWJ5|3NGN z*U-xUwY2j8Pg?o^7p?sNn^yj>qm}>bY32V0TKWGEt^D6eEC2tco&VdXaMo7&U!ax$ zCA9K?Gg|pyj#mCZUi|lOUOfH`{`K13v>yD(fAeN(d4Ju*;VtZk!du!8hPSdG2v@N0 z4{vSX7v9FcH@vNVPk1|fP*F26``I1B``hiq2iWbxmF%|R z1MN29gY4GfgY8z~%67}}A$E&!6}x%(Q2W|&Rl8aEFuQ5^aJz}t3%pH*zPOr?qc1+v zMqhlCjlTG38-4LHHu~aYZS=**+31Upx0Q>js@uxNR43TV#Z)KS%EeSC*~-OKHEiW# zs*`QyVyc?9axv8@wsJ95EnB&m>Qq~~n5wp|TugPEtz1l1$5t+;I^9+-raHq`F4oPr 
z#iPHcv-&v+KE6C$-~KJ!!2UISru|E}p}j18mi=?Mk^NKnZ2QM>V|!`%9Q%jxx%T(r z^X%`!=i5ud7ues1FSHl`=hA#n`FF98ry8Z#5zxxROX-n%9RaO;yqq4P*AdXl%PZ;O zdL03+{JfeTrq>bB%F`zFP`!?TR=zf)hv;<#wDPt&Jy@?Jpq0NZY2|M#TKU_WR{pl3 zmA`FiTKU_ZR{q{bD}Q^?%HP{*pi{Jn!#{`RDmzrASXZ*N-p z+lN;E_NCPq-$^Tv`_bx)`_szj0krz!fwc1aE?Rx@-L&$15UsxW9$I-mm{wnWFRgqZ zLaQ&nk5=9frPUYTPb>e2(aQe^XyyNKTKWGVt^6NBEB_y&mH#7Y<^RL9@_!Vq{C|X2 z{*R`W|Buqj|1q@k|1nzmKbBVhKTiLy*AdXl|0ihW|9D#YKY>>MPo$OqlW67tleF^x zDO&kInO6QkO)LMOp_Tv7(#rqmXyyO&wDSK2da>4vi+-`V{eS!7fB%~|OJDNW{S$uK zULStN{yY4t{b%?!drkOtdv*8?du8}d`}go$_VV!C_HW@S_OIco_AlXS_OkGF`{(cs z`={_s`^WGsdue#K{X=+;{e5_@{atvTy(B!}{x-b8Uaa*3Z&P3V&VT2#MLxbL{Eoda z{I0zq{GL5O{JuRe{DD0;{GmN3{EjmDXLSOu~kE1XC#ztSf*hXLct&P5TiH*MaI~#rR_cr?CA8h4fs-?DaG1ZT@axv9U zwsJAm&$extQu#Te+C(H(R-wYPqdkO!d31TuimXRxYMmX)6~~t+JJi zsaD&{#Z-UT%EdMLws`dSbXMyIK90WlPaA#lUpD&Uzisrz>umJJ>uvPK8*KE&|Jdk@ zH`?fn|FzK0j<2;hW<;hBcPR^+tF+FIs#gGx&!@(UPnMHUw5Kc>2(CO@^%+`g(#qe%XyxzWwDR`|TKQXzR{kDID}Rro zmA^;R%HLyX6nY8l1A+7vB zi&p+OqLu$=)5`zGwDSKPTKRu2t^7ZaR{oz)EB`N`mH!vg%KwY#-?QvuTKRtot^B`~ zR{mc`EB`O2mH$`J%Ks~A<^NT*^8adD`F{_4(3-zm+1fBL_9bJO~<+-lR;m$vkF@_*;E*7j53HujU@w)UiOJ9}cdy*(k^ z!5$y(Xg?9|WRDA9XFndk-X0t7Y(Eyh!5$O7(S9_1lRY}z#l9zevpp!>)xJA?i+xwP zn>{dmt34px-R>X0&F&ZOVc!|P-R>JMwfltcuzQDl+P%WP?4IG?_8s9qcB$42yiL_9 z&fn?d9mDkF?RJKWw8= z_(l7#@Jsfe;g{{T;aBW6;aBZH!mrt@!>`+`!f)6s!*ALv{&Q)*r~G}}$5SbPr_jpZ zskHKU8m;`DPAh+B(8}MLwDNZrt^A!$D}U$E%HO%P@^>Ds{GCrLe;3fo--WdDcM+}p zeTP>5zDp~A-=mek@6*cP4`}7@hqUtdBU<_UF|GXlgjW83N-KXqqm{p()5_m3Xyxyh zwDR{WTKW4mt^EClR{k!gmA~K8%HJil^7lJh`TISs{QZGe{w}4JzdzE--=ApZ@6WXI zcNwkx{e@Qk{z@xu;EK`Z~aq?P|$(aQe{wDNyzTKT^Xt^D7XR{n2CEC08rmH#`?%Ksf{ z<^N8!@_%Pq`M(RT{NI&U{_jRB|97XA|9jBN|2=8te??mPzZb3i--%L4F4@5`t&Y^v*W=^c!sS={MTw z({Hj@>Q}M4*vjc*mF8RJZ&x2rrTo2xR{nOQmA|*r%HQs^^7l4c`P+k5{@zY2e@kiQ z?;W)Aw(;{Job}{tls)zxUC~-=Vbf_kLRWJB(KTK0qsfhtta62WjQ+2wM635UudNQp({b^eH{S2)>{aIRh{v547{drpX{sOH&{Y6@N{}Qb} z{bgGD{|c@Af0b7LzeX$nU#FG-Z_vvBH)-YnTeR~3ZCd$1g;xGgrIr8FXyyNOTKPYN zR{qbVmH)G7<^OD2`9FtN{?DbA|MO_&|9o2czkpW$FQk?Ki)iKlJGAouU0V779(6?&6fWAfR1@z5YFQ9MIdI5cd z)(hzCwO&AXia*~^KHf3>v)v)Q%x)k4#cmh=)ovU9&2AH3ZnqBqZnp}ruv>;#+AYGX z?B?Os_O;}KILcGK`$yNT8dyiIjvod3(mtA+ozj|i``4-c=m4-0RwtA_uv4-Id$ ztAzix4+&>G$(6$e``~bieNcEa`@nEHyHa>_`+#tHdw;DLc$*4+`IbJ8zI-bieR%~N zeficl`togT^yS;y=*zdW(U)&;E0mFSUr9RaO;I*1;j*AdXltIG6ny^erZepR7|>2(CO@~kR7RIekT zm2ZdBL-aZVT6tHE9<0|9(8|A~=pK3<0j)edhVHJ{5zxxV=Nf6t(m zzjbNlZ#`Q1Tc1|`HlUTiXVS{whP3kcEL!>7h*tidO)GyJ)5_m-Xyxy@wDR{nTKRiE zt^B=!R{mZ{D}OJdmA@C$%HK<9+>BOVel4whZceK&Z$T@sThi*wThYqz*0lQaHnj4*Ev>%19j$zCPpdEQ zKr8P%((21Q(aQhpXyyO)wDP|*t^B`%R{q~eEB|kzmH%C6<^RpJ^1mys{J(`({&%C5 z|F_c0|L(N%|2A6r--A~E?^pEg#qIz0rTOQW|Eu>MY&u_>&kX;>JaUJRuMhXM{|@)E z{|xuG*M$4ntHXWmm3rTSx2e9-dI9~F)(hw_v|d1eru72)6Rj7}A8EaS{y^&m^m|$_ zpx=o<-(VkK6u#G97#?CT2;XPV4-d8Hh3~iLhKJd6!VlQ9!^7=a;Ro%R;Su(X@I&_W z@JM@F_+fi$c$7Uw>jmDXQZA+%?c>VDRFB%q#Z+T#vpIbZL%^Pk0U?ZXhqI6P~P zkG+pfm(qMA)1@@u%5*8scQRc{^Sw-$()=LPr8GavbSceGGF?hDPNqv~#>;dm%>?!`D3(fKHf**reE{idwCy$o1QIj@8Nv}Zu+*!y^Hq|xar*z_YU4i;HH1e+-rCrftwz# zaIfNh1aA7c%Dsa35xD8)8uv2J1>E#=t$PXQ0&aS`&b^3p0XKbJ?_R*UfScZKaL?mh zz)gQQy6NvGH~ro0roUU<^mnV9{%&*A-|cSt`-hwU?r_uJoo@QO%T0fGyXo&9H~sz7 zO@H^g>F-}|`n%6ffA_oT?*TXcJ?N&thurk{u$%rKans+UZuR$k`};_Lk8{)C#vzMOjrr+nd z*-OuL)ARG(?4{?s>H7t4_R3>c){m}L@f^vV(3v^BMIOhfK?wl96yK-LO?#y|CyCdfX z?)ID)xZ84G;BL)%fxBh=`AX`3i*PBqdAPLPEL=uz8ZIk0373-_hs(>2!WHC(;Tz-z z;fiwoa3#53xUyU~e4|_^Tt%)OzDcgdd4aBJD#Yuz=zjU|t#Y~WZF1RgRk=*Knp`@3 zyId-Khg>pzr(7a@ms~ttUA{hiw_GfIk6bi-uUsU2pIkUxLoUR5fv#!jVwwkZpAM#Z zP^Non9+K%?nule&mZqjm$I?6^)2%d*%5*BtV=`SzQ%j~xX==-KDNP-jE~Tj})1@@^ 
zWV)25zD$?WG?3|1nuao6O4CTDOKBR*bSX^}nJ%SiD$}Jj&1AZirnyX)(zKB2Qhj~i zE)9SGTOGTfxzU630yjNqy6MkLZu-;DO@I2k>CXT+{Tb+{KZD%#XRw?83~|$+ zp>Fy!%uRoWyXnseH~ksuraz=idXdezNd_L`eMz3yf& zd&5ny-gL8BK58U+bLpOWbM{fG}v77#V;--I} zy6N9%ZuEBmw`uDY){(a-7f8V<4-*;~M_r06`{otm5Kf38(%Zz`@ zT>tN0b~L&;GWNUfqQQJ`R3{VobY^kc6fn2E4)yi8D1pM2rrhWhnL9H z!b|0;;broa@N#)_c!fMEyi%SRUL{Wmua?J$*T~~IFVHm&T}-o9_iZoxMW%~s*2%V) z{VLPNH0x#C%YKvTVww%I?Pb5qbTQ3F+4izcGF?owS+>1wi%b{OY?W;<+a}Y+G}~pm z_=m1(=wg~3x=#nw?3C$Vnq4xTOS4<1Yiah#bS%xEGTlnESEf^G{*rAk+b7#zwqLfr z?0{^0*+JR%vO}`%Wrt@S zH~lH#rauMU^yfM^{VC+8KZV`&r-+;W6m`>|Vs83#y_^0NchjE|Zu(Qw&0bc@O^-^u z*~`kf=~G!Zds#U*y(;f!FRS3DUpKhf%PP9*StU1nS!FkUyV1>FR>e*4ZgR7i-R!1+ zx47xwt#0~vo16Ysb<@9UZu)n-oBrM5rhj+3>EB&$`d8ge|L%6vzkA&D?_M|kyU$Jk zYPjj&$r<0Dxqh5UU%GTqAII|k0{?xZ#cOZMYy%xJlsZZ7H%sy4Y!k*)P7wDSi&Un42?%xsaCEp%?Lar8mQmz{AE#DUI zBi|Z+O1>rhw0v{;8TqF0vvQU2bMlSh=jF=bzH+7T3v$Kqi}DSe7wDRXE~n|I`*b)> zf0^#486eZyGy`S2nr4tpN7D?J>1LWCGM!8_RHln*hRJj>&2X76rWqmA#WW*jx|n8^ zOc&FPmg!=eF*03DGghXHXqAWA`&(HHPy7H$8mUJ&N-JH+_8HJ%aNBH@*DOJ&f}LH~swBJ%sZDH$DB-J&5xH zH+}uwJ%IB9H@*GR-H-DEH~szEO@F^})8B91^!GbA{r%oee}8b(-yhxd_a`^~9p|RM zF-oG{hj8fzti3HcZQq(&UDk?S#J6}+f9Gxxasd) zH~pRGroZ#u^ml=q{w{RW-$icvyVy;Cm$>QgQaAlw=BB^P-Sl^boBpnJ)8AEY`n%dq zf7iI_@6T@fyVgyAe{s{_b#D6mtDF9=chleB-1K*YoBsaproS8A?8Teh^mwzIy?Be8 zK5uoi7jJXZ>+Np#;y>K|Btxo|53O4e_<*#SNeaPoBki~rvE3n>Hmpt`hSv}{{P2K z|KG~|_fO8;|KGj%I(_cpDCY(K`wqu*%v#azl5)p*K%HQ$(iAX^P5pIZZK{ zE~mL(rpsxH%XB$S37IaZDJj$CG^J#^oTju)m(!Gy>2jK~GF?tnPNvIg%FA@Qg05+7 zFTO$dZ7;4U+g@Brw!OHrYm@weD1(aU+cMd@$d9*dfULggU=nf>2D)9{cY@~zfIiqx2c={HgnV8=5G4i!cBi$ zy6JB#H~nqxroV06^tY{>{+?WX_lxat49ZuHmjr`u~xe{(tPI|DU+&|EF&H|CyWqf9|IL zU%2W2mu~uhf5u;Bt}mqr#NXGye@;~Qf1VpXzt-1v4Syqd4u30m41XuL4}UMW4gVmw z=Da}H^mG1O{_}d4pLD-Pc%0lkJYH@Vo**|3Pn4U4C&`V&ljTO?DRRT`RJlQTnp{6T zU9K0NA=eGhlr^gMQbob!TX z_cM2Ua9-f12m9UKI4^M1hlB1eoENz1#bI|R&I{c1f4;1lKZ`E(pR8{BbCH|=TQArEdCjnVbGx?xsIixarT8 zZu)bToBm{T)1Rx|^e4NU{^W4epKILoC#ReK5-_Ml>JdUw5>J*c>w{*`dkzmjhHSISNQ zO1tS_88`ha>!yF@-1M)!oBmaB)4vyg=79 zbSTZEx^H{XV=^5|Q%klzsJ2Xp($tY{52`EEp)~bm+k@)MbSO;&+4i7@G960ONVYww zu}p{3G?8r&YAVyAG|gl>)LhpzbSO;=-KRTgTFP`LO)HtMq-ibFku+^&x{;=>OefN` zlj%a5_Ok6k9c0^sI?A>Ob&_ol>MYwH)J3*EsH<#yP&e83pzgBmK|N&KgC3V{59%q~ z9@I;=J?IJ9_Mj(aI*_Kf{J(q9&zu(=yPx^0HJlf?>A^GZRh$>N>BDpG6`U8i=|x}n zGR_O!^y5YM63z?U^rWAA5$6SN`ZB=1fb#-3y&2@5$9aL9{tR)`pP_F0Gt5nYhP&y{ z2siy1>83xU-1KL(oBoV()1R?!`t!1z{=DL*Kd-v!&ueb_^SYbCZ=Q`tz}y{(R!5KcBkk&u4D>^SPV;eBq`) zU%J_YzH-x}uiflH-?-`1w{G^J@7(n2dpCQ~4{rMPqnkbGCpSGC=VlKY@1}1P-0VRU z-Slpfn>}c1`Zvu@|E9a?-wZeXo9U*1v)uG=wwwOVanrxKZu&RRP5E8l3{afg!e~aAouTRE{GuLsM^!@Aew)R|DqOY^(!cy6u3(I7CE-aVrxv)aE=fX;jL-!2ac{~-?!?~n(Dcgh39yW|1k z-E#l%9=RX?l&x!;$N5po?jGp{{s~<7yNCD5-NO6juHgf6m+(QkbNGliP-mm)nF-kXwgOlw0vFrE8j&e9O68q!;+s)cxk+Q{-mhQ{|@N z)8r=M)8)qDGvr3$Gv$Wiv*ZThv*r5XbL4vAbLG0>^W-|=^X1y%3*=hi3*`#otaADA zMRK|D#d6v3C32bYrE=-;Wpb(T<#NgJ6>^F2m2&a$Rr2-WY;v*i)pF5rcDYD6hg>*( zja(?4Q@$>oOD-7BEf)yqk@JVImGg!3%6Y^2{~O)(zlxjw-{hwMH@oToEpGaMtDFAc=BEEu-Soej zoBrSK9?Dxo+(WWtxzkPm?{d@s>Tddfx10Xod|Hf|m-^5M-o4V-0 when left>right, 0 when left=right + * } + * + * You get: + * - some_type *some_name_put(avl_node_t **root_node, some_type *data, int *inserted); + * Either insert new or retrieve existing key, if non-NULL receives 0 or 1. + * - int some_name_insert(avl_node_t **root_node, some_type *data); + * Try to insert, return 1 if succesful, 0 if existed. + * - int some_name_delete(avl_node_t **root_node, some_type *data); + * Try to delete, return 1 if deleted, 0 if no match. 
+ * - some_type *some_name_search(avl_node_t *root_node, some_type *data);
+ *   Retrieve existing data, returns NULL if unsuccessful
+ * - void some_name_free(avl_node_t **root_node);
+ *   Free all memory used by the AVL tree
+ * - some_type *some_name_toarray(avl_node_t *root_node);
+ *   Malloc an array and put the sorted data in it...
+ * - size_t avl_count(avl_node_t *root_node);
+ *   Returns the number of items in the tree
+ *
+ * For example:
+ * struct my_struct { ... };
+ * AVL(some_name, struct my_struct)
+ * {
+ *     Compare struct my_struct *left with struct my_struct *right
+ *     Return <0 when left<right, >0 when left>right, 0 when left=right
+ * }
+ *
+ * avl_node_t *the_root = NULL;
+ * struct my_struct mystuff;
+ * if (!some_name_search(the_root, &mystuff)) some_name_insert(&the_root, &mystuff);
+ * some_name_free(&the_root);
+ *
+ * For questions, feedback, etc: t.vandijk@utwente.nl
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef __AVL_H__
+#define __AVL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef struct avl_node
+{
+    struct avl_node *left, *right;
+    unsigned int height;
+    char pad[8-sizeof(unsigned int)];
+    char data[0];
+} avl_node_t;
+
+/* Retrieve the height of a tree */
+static inline int
+avl_get_height(avl_node_t *node)
+{
+    return node == NULL ? 0 : node->height;
+}
+
+/* Helper for rotations to update the heights of trees */
+static inline void
+avl_update_height(avl_node_t *node)
+{
+    int h1 = avl_get_height(node->left);
+    int h2 = avl_get_height(node->right);
+    node->height = 1 + (h1 > h2 ? h1 : h2);
+}
+
+/* Helper for avl_balance_tree */
+static inline int
+avl_update_height_get_balance(avl_node_t *node)
+{
+    int h1 = avl_get_height(node->left);
+    int h2 = avl_get_height(node->right);
+    node->height = 1 + (h1 > h2 ? h1 : h2);
+    return h1 - h2;
+}
+
+/* Helper for avl_check_consistent */
+static inline int
+avl_verify_height(avl_node_t *node)
+{
+    int h1 = avl_get_height(node->left);
+    int h2 = avl_get_height(node->right);
+    int expected_height = 1 + (h1 > h2 ?
h1 : h2); + return expected_height == avl_get_height(node); +} + +/* Optional consistency check */ +static inline int __attribute__((unused)) +avl_check_consistent(avl_node_t *root) +{ + if (root == NULL) return 1; + if (!avl_check_consistent(root->left)) return 0; + if (!avl_check_consistent(root->right)) return 0; + if (!avl_verify_height(root)) return 0; + return 1; +} + +/* Perform LL rotation, returns the new root */ +static avl_node_t* +avl_rotate_LL(avl_node_t *parent) +{ + avl_node_t *child = parent->left; + parent->left = child->right; + child->right = parent; + avl_update_height(parent); + avl_update_height(child); + return child; +} + +/* Perform RR rotation, returns the new root */ +static avl_node_t* +avl_rotate_RR(avl_node_t *parent) +{ + avl_node_t *child = parent->right; + parent->right = child->left; + child->left = parent; + avl_update_height(parent); + avl_update_height(child); + return child; +} + +/* Perform RL rotation, returns the new root */ +static avl_node_t* +avl_rotate_RL(avl_node_t *parent) +{ + avl_node_t *child = parent->right; + parent->right = avl_rotate_LL(child); + return avl_rotate_RR(parent); +} + +/* Perform LR rotation, returns the new root */ +static avl_node_t* +avl_rotate_LR(avl_node_t *parent) +{ + avl_node_t *child = parent->left; + parent->left = avl_rotate_RR(child); + return avl_rotate_LL(parent); +} + +/* Calculate balance factor */ +static inline int +avl_get_balance(avl_node_t *node) +{ + if (node == NULL) return 0; + return avl_get_height(node->left) - avl_get_height(node->right); +} + +/* Balance the tree */ +static void +avl_balance_tree(avl_node_t **node) +{ + int factor = avl_update_height_get_balance(*node); + + if (factor > 1) { + if (avl_get_balance((*node)->left) > 0) *node = avl_rotate_LL(*node); + else *node = avl_rotate_LR(*node); + } else if (factor < -1) { + if (avl_get_balance((*node)->right) < 0) *node = avl_rotate_RR(*node); + else *node = avl_rotate_RL(*node); + } +} + +/* Get number of items in the AVL */ +static size_t +avl_count(avl_node_t *node) +{ + if (node == NULL) return 0; + return 1 + avl_count(node->left) + avl_count(node->right); +} + +/* Structure for iterator */ +typedef struct avl_iter +{ + size_t height; + avl_node_t *nodes[0]; +} avl_iter_t; + +/** + * nodes[0] = root node + * nodes[1] = some node + * nodes[2] = some node + * nodes[3] = leaf node (height = 4) + * nodes[4] = NULL (max = height + 1) + */ + +/* Create a new iterator */ +static inline avl_iter_t* +avl_iter(avl_node_t *node) +{ + size_t max = node ? node->height+1 : 1; + avl_iter_t *result = (avl_iter_t*)malloc(sizeof(avl_iter_t) + sizeof(avl_node_t*) * max); + result->height = 0; + result->nodes[0] = node; + return result; +} + +/* Get the next node during iteration */ +static inline avl_node_t* +avl_iter_next(avl_iter_t *iter) +{ + /* when first node is NULL, we're done */ + if (iter->nodes[0] == NULL) return NULL; + + /* if the head is not NULL, first entry... 
*/ + while (iter->nodes[iter->height] != NULL) { + iter->nodes[iter->height+1] = iter->nodes[iter->height]->left; + iter->height++; + } + + /* head is now NULL, take parent as result */ + avl_node_t *result = iter->nodes[iter->height-1]; + + if (result->right != NULL) { + /* if we can go right, do that */ + iter->nodes[iter->height] = result->right; + } else { + /* cannot go right, backtrack */ + do { + iter->height--; + } while (iter->height > 0 && iter->nodes[iter->height] == iter->nodes[iter->height-1]->right); + iter->nodes[iter->height] = NULL; /* set head to NULL: second entry */ + } + + return result; +} + +#define AVL(NAME, TYPE) \ +static inline int \ +NAME##_AVL_compare(TYPE *left, TYPE *right); \ +static __attribute__((unused)) TYPE* \ +NAME##_put(avl_node_t **root, TYPE *data, int *inserted) \ +{ \ + if (inserted && *inserted) *inserted = 0; /* reset inserted once */ \ + TYPE *result; \ + avl_node_t *it = *root; \ + if (it == NULL) { \ + *root = it = (avl_node_t*)malloc(sizeof(struct avl_node)+sizeof(TYPE)); \ + it->left = it->right = NULL; \ + it->height = 1; \ + memcpy(it->data, data, sizeof(TYPE)); \ + result = (TYPE *)it->data; \ + if (inserted) *inserted = 1; \ + } else { \ + int cmp = NAME##_AVL_compare(data, (TYPE*)(it->data)); \ + if (cmp == 0) return (TYPE *)it->data; \ + if (cmp < 0) result = NAME##_put(&it->left, data, inserted); \ + else result = NAME##_put(&it->right, data, inserted); \ + avl_balance_tree(root); \ + } \ + return result; \ +} \ +static __attribute__((unused)) int \ +NAME##_insert(avl_node_t **root, TYPE *data) \ +{ \ + int inserted; \ + NAME##_put(root, data, &inserted); \ + return inserted; \ +} \ +static void \ +NAME##_exchange_and_balance(avl_node_t *target, avl_node_t **node) \ +{ \ + avl_node_t *it = *node; \ + if (it->left == 0) { /* leftmost node contains lowest value */ \ + memcpy(target->data, it->data, sizeof(TYPE)); \ + *node = it->right; \ + free(it); \ + } else { \ + NAME##_exchange_and_balance(target, &it->left); \ + } \ + avl_balance_tree(node); \ +} \ +static __attribute__((unused)) int \ +NAME##_delete(avl_node_t **node, TYPE *data) \ +{ \ + avl_node_t *it = *node; \ + if (it == NULL) return 0; \ + int cmp = NAME##_AVL_compare(data, (TYPE *)((*node)->data)), res; \ + if (cmp < 0) res = NAME##_delete(&it->left, data); \ + else if (cmp > 0) res = NAME##_delete(&it->right, data); \ + else { \ + int h_left = avl_get_height(it->left); \ + int h_right = avl_get_height(it->right); \ + if (h_left == 0) { \ + if (h_right == 0) { /* Leaf */ \ + *node = NULL; \ + free(it); \ + return 1; \ + } else { /* Only right child */ \ + *node = it->right; \ + free(it); \ + return 1; \ + } \ + } else if (h_right == 0) { /* Only left child */ \ + *node = it->left; \ + free(it); \ + return 1; \ + } else { /* Exchange with successor */ \ + NAME##_exchange_and_balance(it, &it->right); \ + res = 1; \ + } \ + } \ + if (res) avl_balance_tree(node); \ + return res; \ +} \ +static __attribute__((unused)) TYPE* \ +NAME##_search(avl_node_t *node, TYPE *data) \ +{ \ + while (node != NULL) { \ + int result = NAME##_AVL_compare((TYPE *)node->data, data); \ + if (result == 0) return (TYPE *)node->data; \ + if (result > 0) node = node->left; \ + else node = node->right; \ + } \ + return NULL; \ +} \ +static __attribute__((unused)) void \ +NAME##_free(avl_node_t **node) \ +{ \ + avl_node_t *it = *node; \ + if (it) { \ + NAME##_free(&it->left); \ + NAME##_free(&it->right); \ + free(it); \ + *node = NULL; \ + } \ +} \ +static void \ +NAME##_toarray_rec(avl_node_t *node, 
TYPE **ptr) \ +{ \ + if (node->left != NULL) NAME##_toarray_rec(node->left, ptr); \ + memcpy(*ptr, node->data, sizeof(TYPE)); \ + (*ptr)++; \ + if (node->right != NULL) NAME##_toarray_rec(node->right, ptr); \ +} \ +static __attribute__((unused)) TYPE* \ +NAME##_toarray(avl_node_t *node) \ +{ \ + size_t count = avl_count(node); \ + TYPE *arr = (TYPE *)malloc(sizeof(TYPE) * count); \ + TYPE *ptr = arr; \ + NAME##_toarray_rec(node, &ptr); \ + return arr; \ +} \ +static __attribute__((unused)) avl_iter_t* \ +NAME##_iter(avl_node_t *node) \ +{ \ + return avl_iter(node); \ +} \ +static __attribute__((unused)) TYPE* \ +NAME##_iter_next(avl_iter_t *iter) \ +{ \ + avl_node_t *result = avl_iter_next(iter); \ + if (result == NULL) return NULL; \ + return (TYPE*)(result->data); \ +} \ +static __attribute__((unused)) void \ +NAME##_iter_free(avl_iter_t *iter) \ +{ \ + free(iter); \ +} \ +static inline int \ +NAME##_AVL_compare(TYPE *left, TYPE *right) + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/lace.c b/src/lace.c new file mode 100644 index 000000000..9a0b3cb82 --- /dev/null +++ b/src/lace.c @@ -0,0 +1,1045 @@ +/* + * Copyright 2013-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include // for errno +#include // for sched_getaffinity +#include // for fprintf +#include // for memalign, malloc +#include // for memset +#include // for mprotect +#include // for gettimeofday +#include +#include +#include + +#include + +#ifndef USE_HWLOC +#define USE_HWLOC 0 +#endif + +#if USE_HWLOC +#include +#endif + +// public Worker data +static Worker **workers; +static size_t default_stacksize = 0; // set by lace_init +static size_t default_dqsize = 100000; + +#if USE_HWLOC +static hwloc_topology_t topo; +static unsigned int n_nodes, n_cores, n_pus; +#endif + +static int verbosity = 0; + +static int n_workers = 0; +static int enabled_workers = 0; + +// private Worker data (just for stats at end ) +static WorkerP **workers_p; + +// set to 0 when quitting +static int lace_quits = 0; + +// for storing private Worker data +#ifdef __linux__ // use gcc thread-local storage (i.e. 
__thread variables) +static __thread WorkerP *current_worker; +#else +static pthread_key_t worker_key; +#endif +static pthread_attr_t worker_attr; + +static pthread_cond_t wait_until_done = PTHREAD_COND_INITIALIZER; +static pthread_mutex_t wait_until_done_mutex = PTHREAD_MUTEX_INITIALIZER; + +struct lace_worker_init +{ + void* stack; + size_t stacksize; +}; + +static struct lace_worker_init *workers_init; + +lace_newframe_t lace_newframe; + +WorkerP* +lace_get_worker() +{ +#ifdef __linux__ + return current_worker; +#else + return (WorkerP*)pthread_getspecific(worker_key); +#endif +} + +Task* +lace_get_head(WorkerP *self) +{ + Task *dq = self->dq; + if (dq[0].thief == 0) return dq; + if (dq[1].thief == 0) return dq+1; + if (dq[2].thief == 0) return dq+2; + + size_t low = 2; + size_t high = self->end - self->dq; + + for (;;) { + if (low*2 >= high) { + break; + } else if (dq[low*2].thief == 0) { + high=low*2; + break; + } else { + low*=2; + } + } + + while (low < high) { + size_t mid = low + (high-low)/2; + if (dq[mid].thief == 0) high = mid; + else low = mid + 1; + } + + return dq+low; +} + +size_t +lace_workers() +{ + return n_workers; +} + +size_t +lace_default_stacksize() +{ + return default_stacksize; +} + +#ifndef cas +#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) +#endif + +#if LACE_PIE_TIMES +static uint64_t count_at_start, count_at_end; +static long long unsigned us_elapsed_timer; + +static void +us_elapsed_start(void) +{ + struct timeval now; + gettimeofday(&now, NULL); + us_elapsed_timer = now.tv_sec * 1000000LL + now.tv_usec; +} + +static long long unsigned +us_elapsed(void) +{ + struct timeval now; + long long unsigned t; + + gettimeofday( &now, NULL ); + + t = now.tv_sec * 1000000LL + now.tv_usec; + + return t - us_elapsed_timer; +} +#endif + +#if USE_HWLOC +// Lock used only during parallel lace_init_worker... +static volatile int __attribute__((aligned(64))) lock = 0; +static inline void +lock_acquire() +{ + while (1) { + while (lock) {} + if (cas(&lock, 0, 1)) return; + } +} +static inline void +lock_release() +{ + lock=0; +} +#endif + +/* Barrier */ +#define BARRIER_MAX_THREADS 128 + +typedef union __attribute__((__packed__)) +{ + volatile size_t val; + char pad[LINE_SIZE]; +} asize_t; + +typedef struct { + volatile int __attribute__((aligned(LINE_SIZE))) count; + volatile int __attribute__((aligned(LINE_SIZE))) wait; + /* the following is needed only for destroy: */ + asize_t entered[BARRIER_MAX_THREADS]; +} barrier_t; + +barrier_t lace_bar; + +void +lace_barrier() +{ + int id = lace_get_worker()->worker; + + lace_bar.entered[id].val = 1; // signal entry + + int wait = lace_bar.wait; + if (enabled_workers == __sync_add_and_fetch(&lace_bar.count, 1)) { + lace_bar.count = 0; // reset counter + lace_bar.wait = 1 - wait; // flip wait + lace_bar.entered[id].val = 0; // signal exit + } else { + while (wait == lace_bar.wait) {} // wait + lace_bar.entered[id].val = 0; // signal exit + } +} + +static void +lace_barrier_init() +{ + assert(n_workers <= BARRIER_MAX_THREADS); + memset(&lace_bar, 0, sizeof(barrier_t)); +} + +static void +lace_barrier_destroy() +{ + // wait for all to exit + for (int i=0; icpuset, HWLOC_CPUBIND_THREAD); + + // Allocate memory on our node... 
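+    // (The Worker, WorkerP and task deque below are membind-allocated on the same
+    //  NUMA node as this worker's PU; lock_acquire() is the global spinlock above,
+    //  serializing the hwloc allocation calls during parallel worker initialization.)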
+ lock_acquire(); + wt = (Worker *)hwloc_alloc_membind(topo, sizeof(Worker), pu->cpuset, HWLOC_MEMBIND_BIND, 0); + w = (WorkerP *)hwloc_alloc_membind(topo, sizeof(WorkerP), pu->cpuset, HWLOC_MEMBIND_BIND, 0); + if (wt == NULL || w == NULL || (w->dq = (Task*)hwloc_alloc_membind(topo, dq_size * sizeof(Task), pu->cpuset, HWLOC_MEMBIND_BIND, 0)) == NULL) { + fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n"); + exit(1); + } + lock_release(); +#else + // Allocate memory... + if (posix_memalign((void**)&wt, LINE_SIZE, sizeof(Worker)) || + posix_memalign((void**)&w, LINE_SIZE, sizeof(WorkerP)) || + posix_memalign((void**)&w->dq, LINE_SIZE, dq_size * sizeof(Task))) { + fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n"); + exit(1); + } +#endif + + // Initialize public worker data + wt->dq = w->dq; + wt->ts.v = 0; + wt->allstolen = 0; + wt->movesplit = 0; + + // Initialize private worker data + w->_public = wt; + w->end = w->dq + dq_size; + w->split = w->dq; + w->allstolen = 0; + w->worker = worker; +#if USE_HWLOC + w->pu = worker % n_pus; +#else + w->pu = -1; +#endif + w->enabled = 1; + if (workers_init[worker].stack != 0) { + w->stack_trigger = ((size_t)workers_init[worker].stack) + workers_init[worker].stacksize/20; + } else { + w->stack_trigger = 0; + } + +#if LACE_COUNT_EVENTS + // Reset counters + { int k; for (k=0; kctr[k] = 0; } +#endif + + // Set pointers +#ifdef __linux__ + current_worker = w; +#else + pthread_setspecific(worker_key, w); +#endif + workers[worker] = wt; + workers_p[worker] = w; + + // Synchronize with others + lace_barrier(); + +#if LACE_PIE_TIMES + w->time = gethrtime(); + w->level = 0; +#endif +} + +#if defined(__APPLE__) && !defined(pthread_barrier_t) + +typedef int pthread_barrierattr_t; +typedef struct +{ + pthread_mutex_t mutex; + pthread_cond_t cond; + int count; + int tripCount; +} pthread_barrier_t; + +static int +pthread_barrier_init(pthread_barrier_t *barrier, const pthread_barrierattr_t *attr, unsigned int count) +{ + if(count == 0) + { + errno = EINVAL; + return -1; + } + if(pthread_mutex_init(&barrier->mutex, 0) < 0) + { + return -1; + } + if(pthread_cond_init(&barrier->cond, 0) < 0) + { + pthread_mutex_destroy(&barrier->mutex); + return -1; + } + barrier->tripCount = count; + barrier->count = 0; + + return 0; + (void)attr; +} + +static int +pthread_barrier_destroy(pthread_barrier_t *barrier) +{ + pthread_cond_destroy(&barrier->cond); + pthread_mutex_destroy(&barrier->mutex); + return 0; +} + +static int +pthread_barrier_wait(pthread_barrier_t *barrier) +{ + pthread_mutex_lock(&barrier->mutex); + ++(barrier->count); + if(barrier->count >= barrier->tripCount) + { + barrier->count = 0; + pthread_cond_broadcast(&barrier->cond); + pthread_mutex_unlock(&barrier->mutex); + return 1; + } + else + { + pthread_cond_wait(&barrier->cond, &(barrier->mutex)); + pthread_mutex_unlock(&barrier->mutex); + return 0; + } +} + +#endif // defined(__APPLE__) && !defined(pthread_barrier_t) + +static pthread_barrier_t suspend_barrier; +static volatile int must_suspend = 0, suspended = 0; + +void +lace_suspend() +{ + if (suspended == 0) { + suspended = 1; + must_suspend = 1; + lace_barrier(); + must_suspend = 0; + } +} + +void +lace_resume() +{ + if (suspended == 1) { + suspended = 0; + pthread_barrier_wait(&suspend_barrier); + } +} + +/** + * With set_workers, all workers 0..(N-1) are enabled and N..max are disabled. + * You can never disable the current worker or reduce the number of workers below 1. 
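+ * For example, lace_set_workers(2) keeps worker 0 and worker 1 enabled, or
+ * worker 0 plus the calling worker when the caller's id is 2 or higher.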
+ */ +void +lace_disable_worker(int worker) +{ + int self = lace_get_worker()->worker; + if (worker == self) return; + if (workers_p[worker]->enabled == 1) { + workers_p[worker]->enabled = 0; + enabled_workers--; + } +} + +void +lace_enable_worker(int worker) +{ + int self = lace_get_worker()->worker; + if (worker == self) return; + if (workers_p[worker]->enabled == 0) { + workers_p[worker]->enabled = 1; + enabled_workers++; + } +} + +void +lace_set_workers(int workercount) +{ + if (workercount < 1) workercount = 1; + if (workercount > n_workers) workercount = n_workers; + enabled_workers = workercount; + int self = lace_get_worker()->worker; + if (self >= workercount) workercount--; + for (int i=0; ienabled = (i < workercount || i == self) ? 1 : 0; + } +} + +int +lace_enabled_workers() +{ + return enabled_workers; +} + +static inline uint32_t +rng(uint32_t *seed, int max) +{ + uint32_t next = *seed; + + next *= 1103515245; + next += 12345; + + *seed = next; + + return next % max; +} + +VOID_TASK_IMPL_0(lace_steal_random) +{ + Worker *victim = workers[(__lace_worker->worker + 1 + rng(&__lace_worker->seed, n_workers-1)) % n_workers]; + + YIELD_NEWFRAME(); + + PR_COUNTSTEALS(__lace_worker, CTR_steal_tries); + Worker *res = lace_steal(__lace_worker, __lace_dq_head, victim); + if (res == LACE_STOLEN) { + PR_COUNTSTEALS(__lace_worker, CTR_steals); + } else if (res == LACE_BUSY) { + PR_COUNTSTEALS(__lace_worker, CTR_steal_busy); + } +} + +VOID_TASK_IMPL_1(lace_steal_random_loop, int*, quit) +{ + while(!(*(volatile int*)quit)) { + lace_steal_random(); + + if (must_suspend) { + lace_barrier(); + do { + pthread_barrier_wait(&suspend_barrier); + } while (__lace_worker->enabled == 0); + } + } +} + +static lace_startup_cb main_cb; + +static void* +lace_main_wrapper(void *arg) +{ + lace_init_worker(0, 0); + WorkerP *self = lace_get_worker(); + +#if LACE_PIE_TIMES + self->time = gethrtime(); +#endif + + lace_time_event(self, 1); + main_cb(self, self->dq, arg); + lace_exit(); + pthread_cond_broadcast(&wait_until_done); + + return NULL; +} + +VOID_TASK_IMPL_1(lace_steal_loop, int*, quit) +{ + // Determine who I am + const int worker_id = __lace_worker->worker; + + // Prepare self, victim + Worker ** const self = &workers[worker_id]; + Worker **victim = self; + +#if LACE_PIE_TIMES + __lace_worker->time = gethrtime(); +#endif + + uint32_t seed = worker_id; + unsigned int n = n_workers; + int i=0; + + while(*(volatile int*)quit == 0) { + // Select victim + if( i>0 ) { + i--; + victim++; + if (victim == self) victim++; + if (victim >= workers + n) victim = workers; + if (victim == self) victim++; + } else { + i = rng(&seed, 40); // compute random i 0..40 + victim = workers + (rng(&seed, n-1) + worker_id + 1) % n; + } + + PR_COUNTSTEALS(__lace_worker, CTR_steal_tries); + Worker *res = lace_steal(__lace_worker, __lace_dq_head, *victim); + if (res == LACE_STOLEN) { + PR_COUNTSTEALS(__lace_worker, CTR_steals); + } else if (res == LACE_BUSY) { + PR_COUNTSTEALS(__lace_worker, CTR_steal_busy); + } + + YIELD_NEWFRAME(); + + if (must_suspend) { + lace_barrier(); + do { + pthread_barrier_wait(&suspend_barrier); + } while (__lace_worker->enabled == 0); + } + } +} + +static void* +lace_default_worker(void* arg) +{ + lace_init_worker((size_t)arg, 0); + WorkerP *__lace_worker = lace_get_worker(); + Task *__lace_dq_head = __lace_worker->dq; + lace_steal_loop(&lace_quits); + lace_time_event(__lace_worker, 9); + lace_barrier(); + return NULL; +} + +pthread_t +lace_spawn_worker(int worker, size_t stacksize, void* 
(*fun)(void*), void* arg) +{ + // Determine stack size + if (stacksize == 0) stacksize = default_stacksize; + + size_t pagesize = sysconf(_SC_PAGESIZE); + stacksize = (stacksize + pagesize - 1) & ~(pagesize - 1); // ceil(stacksize, pagesize) + +#if USE_HWLOC + // Get our logical processor + hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, worker % n_pus); + + // Allocate memory for the program stack + lock_acquire(); + void *stack_location = hwloc_alloc_membind(topo, stacksize + pagesize, pu->cpuset, HWLOC_MEMBIND_BIND, 0); + lock_release(); + if (stack_location == 0) { + fprintf(stderr, "Lace error: Unable to allocate memory for the pthread stack!\n"); + exit(1); + } +#else + void *stack_location = mmap(NULL, stacksize + pagesize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); + if (stack_location == MAP_FAILED) { + fprintf(stderr, "Lace error: Cannot allocate program stack: %s!\n", strerror(errno)); + exit(1); + } +#endif + + if (0 != mprotect(stack_location, pagesize, PROT_NONE)) { + fprintf(stderr, "Lace error: Unable to protect the allocated program stack with a guard page!\n"); + exit(1); + } + stack_location = (uint8_t *)stack_location + pagesize; // skip protected page. + if (0 != pthread_attr_setstack(&worker_attr, stack_location, stacksize)) { + fprintf(stderr, "Lace error: Unable to set the pthread stack in Lace!\n"); + exit(1); + } + + workers_init[worker].stack = stack_location; + workers_init[worker].stacksize = stacksize; + + if (fun == 0) { + fun = lace_default_worker; + arg = (void*)(size_t)worker; + } + + pthread_t res; + pthread_create(&res, &worker_attr, fun, arg); + return res; +} + +static int +get_cpu_count() +{ +#if USE_HWLOC + int count = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU); +#elif defined(sched_getaffinity) + /* Best solution: find actual available cpus */ + cpu_set_t cs; + CPU_ZERO(&cs); + sched_getaffinity(0, sizeof(cs), &cs); + int count = CPU_COUNT(&cs); +#elif defined(_SC_NPROCESSORS_ONLN) + /* Fallback */ + int count = sysconf(_SC_NPROCESSORS_ONLN); +#else + /* Okay... */ + int count = 1; +#endif + return count < 1 ? 
1 : count; +} + +void +lace_set_verbosity(int level) +{ + verbosity = level; +} + +void +lace_init(int n, size_t dqsize) +{ +#if USE_HWLOC + hwloc_topology_init(&topo); + hwloc_topology_load(topo); + + n_nodes = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_NODE); + n_cores = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_CORE); + n_pus = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU); +#endif + + // Initialize globals + n_workers = n; + if (n_workers == 0) n_workers = get_cpu_count(); + enabled_workers = n_workers; + if (dqsize != 0) default_dqsize = dqsize; + lace_quits = 0; + + // Create barrier for all workers + lace_barrier_init(); + + // Create suspend barrier + pthread_barrier_init(&suspend_barrier, NULL, n_workers); + + // Allocate array with all workers + if (posix_memalign((void**)&workers, LINE_SIZE, n_workers*sizeof(Worker*)) != 0 || + posix_memalign((void**)&workers_p, LINE_SIZE, n_workers*sizeof(WorkerP*)) != 0) { + fprintf(stderr, "Lace error: unable to allocate memory!\n"); + exit(1); + } + + // Create pthread key +#ifndef __linux__ + pthread_key_create(&worker_key, NULL); +#endif + + // Prepare structures for thread creation + pthread_attr_init(&worker_attr); + + // Set contention scope to system (instead of process) + pthread_attr_setscope(&worker_attr, PTHREAD_SCOPE_SYSTEM); + + // Get default stack size + if (pthread_attr_getstacksize(&worker_attr, &default_stacksize) != 0) { + fprintf(stderr, "Lace warning: pthread_attr_getstacksize returned error!\n"); + default_stacksize = 1048576; // 1 megabyte default + } + + if (verbosity) { +#if USE_HWLOC + fprintf(stderr, "Initializing Lace, %u nodes, %u cores, %u logical processors, %d workers.\n", n_nodes, n_cores, n_pus, n_workers); +#else + fprintf(stderr, "Initializing Lace, %d workers.\n", n_workers); +#endif + } + + // Prepare lace_init structure + workers_init = (struct lace_worker_init*)calloc(1, sizeof(struct lace_worker_init) * n_workers); + + lace_newframe.t = NULL; + +#if LACE_PIE_TIMES + // Initialize counters for pie times + us_elapsed_start(); + count_at_start = gethrtime(); +#endif +} + +void +lace_startup(size_t stacksize, lace_startup_cb cb, void *arg) +{ + if (stacksize == 0) stacksize = default_stacksize; + + if (verbosity) { + if (cb != 0) { + fprintf(stderr, "Lace startup, creating %d worker threads with program stack %zu bytes.\n", n_workers, stacksize); + } else if (n_workers == 1) { + fprintf(stderr, "Lace startup, creating 0 worker threads.\n"); + } else { + fprintf(stderr, "Lace startup, creating %d worker threads with program stack %zu bytes.\n", n_workers-1, stacksize); + } + } + + /* Spawn workers */ + int i; + for (i=1; ictr[j] = 0; + } + } + +#if LACE_PIE_TIMES + for (i=0;itime = gethrtime(); + if (i != 0) workers_p[i]->level = 0; + } + + us_elapsed_start(); + count_at_start = gethrtime(); +#endif +#endif +} + +void +lace_count_report_file(FILE *file) +{ +#if LACE_COUNT_EVENTS + int i; + size_t j; + + for (j=0;jctr; + for (j=0;jctr[CTR_tasks]); + } + fprintf(file, "Tasks (sum): %zu\n", ctr_all[CTR_tasks]); + fprintf(file, "\n"); +#endif + +#if LACE_COUNT_STEALS + for (i=0;ictr[CTR_steals], workers_p[i]->ctr[CTR_steal_busy], + workers_p[i]->ctr[CTR_steal_tries], workers_p[i]->ctr[CTR_leaps], + workers_p[i]->ctr[CTR_leap_busy], workers_p[i]->ctr[CTR_leap_tries]); + } + fprintf(file, "Steals (sum): %zu good/%zu busy of %zu tries; leaps: %zu good/%zu busy of %zu tries\n", + ctr_all[CTR_steals], ctr_all[CTR_steal_busy], + ctr_all[CTR_steal_tries], ctr_all[CTR_leaps], + ctr_all[CTR_leap_busy], 
ctr_all[CTR_leap_tries]); + fprintf(file, "\n"); +#endif + +#if LACE_COUNT_STEALS && LACE_COUNT_TASKS + for (i=0;ictr[CTR_tasks]/(workers_p[i]->ctr[CTR_steals]+workers_p[i]->ctr[CTR_leaps])); + } + fprintf(file, "Tasks per steal (sum): %zu\n", ctr_all[CTR_tasks]/(ctr_all[CTR_steals]+ctr_all[CTR_leaps])); + fprintf(file, "\n"); +#endif + +#if LACE_COUNT_SPLITS + for (i=0;ictr[CTR_split_shrink], workers_p[i]->ctr[CTR_split_grow], workers_p[i]->ctr[CTR_split_req]); + } + fprintf(file, "Splits (sum): %zu shrinks, %zu grows, %zu outgoing requests\n", + ctr_all[CTR_split_shrink], ctr_all[CTR_split_grow], ctr_all[CTR_split_req]); + fprintf(file, "\n"); +#endif + +#if LACE_PIE_TIMES + count_at_end = gethrtime(); + + uint64_t count_per_ms = (count_at_end - count_at_start) / (us_elapsed() / 1000); + double dcpm = (double)count_per_ms; + + uint64_t sum_count; + sum_count = ctr_all[CTR_init] + ctr_all[CTR_wapp] + ctr_all[CTR_lapp] + ctr_all[CTR_wsteal] + ctr_all[CTR_lsteal] + + ctr_all[CTR_close] + ctr_all[CTR_wstealsucc] + ctr_all[CTR_lstealsucc] + ctr_all[CTR_wsignal] + + ctr_all[CTR_lsignal]; + + fprintf(file, "Measured clock (tick) frequency: %.2f GHz\n", count_per_ms / 1000000.0); + fprintf(file, "Aggregated time per pie slice, total time: %.2f CPU seconds\n\n", sum_count / (1000*dcpm)); + + for (i=0;ictr[CTR_init] / dcpm); + fprintf(file, "Steal work (%d): %10.2f ms\n", i, workers_p[i]->ctr[CTR_wapp] / dcpm); + fprintf(file, "Leap work (%d): %10.2f ms\n", i, workers_p[i]->ctr[CTR_lapp] / dcpm); + fprintf(file, "Steal overhead (%d): %10.2f ms\n", i, (workers_p[i]->ctr[CTR_wstealsucc]+workers_p[i]->ctr[CTR_wsignal]) / dcpm); + fprintf(file, "Leap overhead (%d): %10.2f ms\n", i, (workers_p[i]->ctr[CTR_lstealsucc]+workers_p[i]->ctr[CTR_lsignal]) / dcpm); + fprintf(file, "Steal search (%d): %10.2f ms\n", i, (workers_p[i]->ctr[CTR_wsteal]-workers_p[i]->ctr[CTR_wstealsucc]-workers_p[i]->ctr[CTR_wsignal]) / dcpm); + fprintf(file, "Leap search (%d): %10.2f ms\n", i, (workers_p[i]->ctr[CTR_lsteal]-workers_p[i]->ctr[CTR_lstealsucc]-workers_p[i]->ctr[CTR_lsignal]) / dcpm); + fprintf(file, "Exit time (%d): %10.2f ms\n", i, workers_p[i]->ctr[CTR_close] / dcpm); + fprintf(file, "\n"); + } + + fprintf(file, "Startup time (sum): %10.2f ms\n", ctr_all[CTR_init] / dcpm); + fprintf(file, "Steal work (sum): %10.2f ms\n", ctr_all[CTR_wapp] / dcpm); + fprintf(file, "Leap work (sum): %10.2f ms\n", ctr_all[CTR_lapp] / dcpm); + fprintf(file, "Steal overhead (sum): %10.2f ms\n", (ctr_all[CTR_wstealsucc]+ctr_all[CTR_wsignal]) / dcpm); + fprintf(file, "Leap overhead (sum): %10.2f ms\n", (ctr_all[CTR_lstealsucc]+ctr_all[CTR_lsignal]) / dcpm); + fprintf(file, "Steal search (sum): %10.2f ms\n", (ctr_all[CTR_wsteal]-ctr_all[CTR_wstealsucc]-ctr_all[CTR_wsignal]) / dcpm); + fprintf(file, "Leap search (sum): %10.2f ms\n", (ctr_all[CTR_lsteal]-ctr_all[CTR_lstealsucc]-ctr_all[CTR_lsignal]) / dcpm); + fprintf(file, "Exit time (sum): %10.2f ms\n", ctr_all[CTR_close] / dcpm); + fprintf(file, "\n" ); +#endif +#endif + return; + (void)file; +} + +void lace_exit() +{ + lace_time_event(lace_get_worker(), 2); + + // first suspend all other threads + lace_suspend(); + + // now enable all threads and tell them to quit + lace_set_workers(n_workers); + lace_quits = 1; + + // now resume all threads and wait until they all pass the barrier + lace_resume(); + lace_barrier(); + + // finally, destroy the barriers + lace_barrier_destroy(); + pthread_barrier_destroy(&suspend_barrier); + +#if LACE_COUNT_EVENTS + lace_count_report_file(stderr); +#endif 
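+
+    /* A minimal usage sketch (illustrative only; the fib task, the argument
+     * values, and the reliance on defaults are assumptions, not part of the
+     * Lace sources): the typical lifecycle pairs lace_init/lace_startup with
+     * lace_exit, and tasks are written with the macros declared in lace.h below.
+     *
+     *   TASK_1(uint64_t, fib, uint64_t, n)
+     *   {
+     *       if (n < 2) return n;
+     *       SPAWN(fib, n - 1);              // push a child task on this worker's deque
+     *       uint64_t b = CALL(fib, n - 2);  // execute the other child directly
+     *       uint64_t a = SYNC(fib);         // wait for (or run) the spawned child
+     *       return a + b;
+     *   }
+     *
+     *   int main(void)
+     *   {
+     *       lace_init(0, 0);        // 0 workers: autodetect; 0: default deque size
+     *       lace_startup(0, 0, 0);  // default stack; this thread becomes a worker
+     *       LACE_ME;                // bind __lace_worker / __lace_dq_head in this scope
+     *       uint64_t r = CALL(fib, 40);
+     *       (void)r;
+     *       lace_exit();
+     *       return 0;
+     *   }
+     */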
+} + +void +lace_exec_in_new_frame(WorkerP *__lace_worker, Task *__lace_dq_head, Task *root) +{ + TailSplit old; + uint8_t old_as; + + // save old tail, split, allstolen and initiate new frame + { + Worker *wt = __lace_worker->_public; + + old_as = wt->allstolen; + wt->allstolen = 1; + old.ts.split = wt->ts.ts.split; + wt->ts.ts.split = 0; + mfence(); + old.ts.tail = wt->ts.ts.tail; + + TailSplit ts_new; + ts_new.ts.tail = __lace_dq_head - __lace_worker->dq; + ts_new.ts.split = __lace_dq_head - __lace_worker->dq; + wt->ts.v = ts_new.v; + + __lace_worker->split = __lace_dq_head; + __lace_worker->allstolen = 1; + } + + // wait until all workers are ready + lace_barrier(); + + // execute task + root->f(__lace_worker, __lace_dq_head, root); + compiler_barrier(); + + // wait until all workers are back (else they may steal from previous frame) + lace_barrier(); + + // restore tail, split, allstolen + { + Worker *wt = __lace_worker->_public; + wt->allstolen = old_as; + wt->ts.v = old.v; + __lace_worker->split = __lace_worker->dq + old.ts.split; + __lace_worker->allstolen = old_as; + } +} + +VOID_TASK_IMPL_2(lace_steal_loop_root, Task*, t, int*, done) +{ + t->f(__lace_worker, __lace_dq_head, t); + *done = 1; +} + +VOID_TASK_2(lace_together_helper, Task*, t, volatile int*, finished) +{ + t->f(__lace_worker, __lace_dq_head, t); + + for (;;) { + int f = *finished; + if (cas(finished, f, f-1)) break; + } + + while (*finished != 0) STEAL_RANDOM(); +} + +static void +lace_sync_and_exec(WorkerP *__lace_worker, Task *__lace_dq_head, Task *root) +{ + // wait until other workers have made a local copy + lace_barrier(); + + // one worker sets t to 0 again + if (LACE_WORKER_ID == 0) lace_newframe.t = 0; + // else while (*(volatile Task**)&lace_newframe.t != 0) {} + + // the above line is commented out since lace_exec_in_new_frame includes + // a lace_barrier before the task is executed + + lace_exec_in_new_frame(__lace_worker, __lace_dq_head, root); +} + +void +lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head) +{ + // make a local copy of the task + Task _t; + memcpy(&_t, lace_newframe.t, sizeof(Task)); + + // wait until all workers have made a local copy + lace_barrier(); + + // one worker sets t to 0 again + if (LACE_WORKER_ID == 0) lace_newframe.t = 0; + // else while (*(volatile Task**)&lace_newframe.t != 0) {} + + // the above line is commented out since lace_exec_in_new_frame includes + // a lace_barrier before the task is executed + + lace_exec_in_new_frame(__lace_worker, __lace_dq_head, &_t); +} + +void +lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t) +{ + /* synchronization integer */ + int done = n_workers; + + /* wrap task in lace_together_helper */ + Task _t2; + TD_lace_together_helper *t2 = (TD_lace_together_helper *)&_t2; + t2->f = lace_together_helper_WRAP; + t2->thief = THIEF_TASK; + t2->d.args.arg_1 = t; + t2->d.args.arg_2 = &done; + + while (!cas(&lace_newframe.t, 0, &_t2)) lace_yield(__lace_worker, __lace_dq_head); + lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2); +} + +void +lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t) +{ + /* synchronization integer */ + int done = 0; + + /* wrap task in lace_steal_loop_root */ + Task _t2; + TD_lace_steal_loop_root *t2 = (TD_lace_steal_loop_root *)&_t2; + t2->f = lace_steal_loop_root_WRAP; + t2->thief = THIEF_TASK; + t2->d.args.arg_1 = t; + t2->d.args.arg_2 = &done; + + /* and create the lace_steal_loop task for other workers */ + Task _s; + TD_lace_steal_loop *s = (TD_lace_steal_loop 
*)&_s; + s->f = &lace_steal_loop_WRAP; + s->thief = THIEF_TASK; + s->d.args.arg_1 = &done; + + compiler_barrier(); + + while (!cas(&lace_newframe.t, 0, &_s)) lace_yield(__lace_worker, __lace_dq_head); + lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2); +} diff --git a/src/lace.h b/src/lace.h new file mode 100644 index 000000000..8d6d1b645 --- /dev/null +++ b/src/lace.h @@ -0,0 +1,2743 @@ +/* + * Copyright 2013-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include /* for pthread_t */ + +#ifndef __LACE_H__ +#define __LACE_H__ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Some flags */ + +#ifndef LACE_DEBUG_PROGRAMSTACK /* Write to stderr when 95% program stack reached */ +#define LACE_DEBUG_PROGRAMSTACK 0 +#endif + +#ifndef LACE_LEAP_RANDOM /* Use random leaping when leapfrogging fails */ +#define LACE_LEAP_RANDOM 1 +#endif + +#ifndef LACE_PIE_TIMES /* Record time spent stealing and leapfrogging */ +#define LACE_PIE_TIMES 0 +#endif + +#ifndef LACE_COUNT_TASKS /* Count number of tasks executed */ +#define LACE_COUNT_TASKS 0 +#endif + +#ifndef LACE_COUNT_STEALS /* Count number of steals performed */ +#define LACE_COUNT_STEALS 0 +#endif + +#ifndef LACE_COUNT_SPLITS /* Count number of times the split point is moved */ +#define LACE_COUNT_SPLITS 0 +#endif + +#ifndef LACE_COUNT_EVENTS +#define LACE_COUNT_EVENTS (LACE_PIE_TIMES || LACE_COUNT_TASKS || LACE_COUNT_STEALS || LACE_COUNT_SPLITS) +#endif + +/* Typical cacheline size of system architectures */ +#ifndef LINE_SIZE +#define LINE_SIZE 64 +#endif + +/* The size of a pointer, 8 bytes on a 64-bit architecture */ +#define P_SZ (sizeof(void *)) + +#define PAD(x,b) ( ( (b) - ((x)%(b)) ) & ((b)-1) ) /* b must be power of 2 */ +#define ROUND(x,b) ( (x) + PAD( (x), (b) ) ) + +/* The size is in bytes. Note that this is without the extra overhead from Lace. + The value must be greater than or equal to the maximum size of your tasks. + The task size is the maximum of the size of the result or of the sum of the parameter sizes. 
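+ For example (illustration, assuming a 64-bit machine): P_SZ is 8, so the default
+ of (6)*P_SZ below reserves 48 bytes, and a task declared with
+ TASK_2(uint64_t, f, uint64_t, a, uint64_t, b) needs max(8, 8+8) = 16 bytes,
+ which fits comfortably.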
*/ +#ifndef LACE_TASKSIZE +#define LACE_TASKSIZE (6)*P_SZ +#endif + +/* Some fences */ +#ifndef compiler_barrier +#define compiler_barrier() { asm volatile("" ::: "memory"); } +#endif + +#ifndef mfence +#define mfence() { asm volatile("mfence" ::: "memory"); } +#endif + +/* Compiler specific branch prediction optimization */ +#ifndef likely +#define likely(x) __builtin_expect((x),1) +#endif + +#ifndef unlikely +#define unlikely(x) __builtin_expect((x),0) +#endif + +#if LACE_PIE_TIMES +/* High resolution timer */ +static inline uint64_t gethrtime() +{ + uint32_t hi, lo; + asm volatile ("rdtsc" : "=a"(lo), "=d"(hi) :: "memory"); + return (uint64_t)hi<<32 | lo; +} +#endif + +#if LACE_COUNT_EVENTS +void lace_count_reset(); +void lace_count_report_file(FILE *file); +#endif + +#if LACE_COUNT_TASKS +#define PR_COUNTTASK(s) PR_INC(s,CTR_tasks) +#else +#define PR_COUNTTASK(s) /* Empty */ +#endif + +#if LACE_COUNT_STEALS +#define PR_COUNTSTEALS(s,i) PR_INC(s,i) +#else +#define PR_COUNTSTEALS(s,i) /* Empty */ +#endif + +#if LACE_COUNT_SPLITS +#define PR_COUNTSPLITS(s,i) PR_INC(s,i) +#else +#define PR_COUNTSPLITS(s,i) /* Empty */ +#endif + +#if LACE_COUNT_EVENTS +#define PR_ADD(s,i,k) ( ((s)->ctr[i])+=k ) +#else +#define PR_ADD(s,i,k) /* Empty */ +#endif +#define PR_INC(s,i) PR_ADD(s,i,1) + +typedef enum { +#ifdef LACE_COUNT_TASKS + CTR_tasks, /* Number of tasks spawned */ +#endif +#ifdef LACE_COUNT_STEALS + CTR_steal_tries, /* Number of steal attempts */ + CTR_leap_tries, /* Number of leap attempts */ + CTR_steals, /* Number of succesful steals */ + CTR_leaps, /* Number of succesful leaps */ + CTR_steal_busy, /* Number of steal busies */ + CTR_leap_busy, /* Number of leap busies */ +#endif +#ifdef LACE_COUNT_SPLITS + CTR_split_grow, /* Number of split right */ + CTR_split_shrink,/* Number of split left */ + CTR_split_req, /* Number of split requests */ +#endif + CTR_fast_sync, /* Number of fast syncs */ + CTR_slow_sync, /* Number of slow syncs */ +#ifdef LACE_PIE_TIMES + CTR_init, /* Timer for initialization */ + CTR_close, /* Timer for shutdown */ + CTR_wapp, /* Timer for application code (steal) */ + CTR_lapp, /* Timer for application code (leap) */ + CTR_wsteal, /* Timer for steal code (steal) */ + CTR_lsteal, /* Timer for steal code (leap) */ + CTR_wstealsucc, /* Timer for succesful steal code (steal) */ + CTR_lstealsucc, /* Timer for succesful steal code (leap) */ + CTR_wsignal, /* Timer for signal after work (steal) */ + CTR_lsignal, /* Timer for signal after work (leap) */ +#endif + CTR_MAX +} CTR_index; + +struct _WorkerP; +struct _Worker; +struct _Task; + +#define THIEF_EMPTY ((struct _Worker*)0x0) +#define THIEF_TASK ((struct _Worker*)0x1) +#define THIEF_COMPLETED ((struct _Worker*)0x2) + +#define TASK_COMMON_FIELDS(type) \ + void (*f)(struct _WorkerP *, struct _Task *, struct type *); \ + struct _Worker * volatile thief; + +struct __lace_common_fields_only { TASK_COMMON_FIELDS(_Task) }; +#define LACE_COMMON_FIELD_SIZE sizeof(struct __lace_common_fields_only) + +typedef struct _Task { + TASK_COMMON_FIELDS(_Task); + char p1[PAD(LACE_COMMON_FIELD_SIZE, P_SZ)]; + char d[LACE_TASKSIZE]; + char p2[PAD(ROUND(LACE_COMMON_FIELD_SIZE, P_SZ) + LACE_TASKSIZE, LINE_SIZE)]; +} Task; + +typedef union __attribute__((packed)) { + struct { + uint32_t tail; + uint32_t split; + } ts; + uint64_t v; +} TailSplit; + +typedef struct _Worker { + Task *dq; + TailSplit ts; + uint8_t allstolen; + + char pad1[PAD(P_SZ+sizeof(TailSplit)+1, LINE_SIZE)]; + + uint8_t movesplit; +} Worker; + +typedef struct _WorkerP { + 
Task *dq; // same as dq + Task *split; // same as dq+ts.ts.split + Task *end; // dq+dq_size + Worker *_public; // pointer to public Worker struct + size_t stack_trigger; // for stack overflow detection + int16_t worker; // what is my worker id? + int16_t pu; // my pu (for HWLOC) + uint8_t allstolen; // my allstolen + volatile int8_t enabled; // if this worker is enabled + +#if LACE_COUNT_EVENTS + uint64_t ctr[CTR_MAX]; // counters + volatile uint64_t time; + volatile int level; +#endif + + uint32_t seed; // my random seed (for lace_steal_random) +} WorkerP; + +#define LACE_TYPEDEF_CB(t, f, ...) typedef t (*f)(WorkerP *, Task *, ##__VA_ARGS__); +LACE_TYPEDEF_CB(void, lace_startup_cb, void*); + +/** + * Set verbosity level (0 = no startup messages, 1 = startup messages) + * Default level: 0 + */ +void lace_set_verbosity(int level); + +/** + * Initialize master structures for Lace with <n_workers> workers + * and default deque size of <dqsize>. + * Does not create new threads. + * Tries to detect number of cpus, if n_workers equals 0. + */ +void lace_init(int n_workers, size_t dqsize); + +/** + * After lace_init, start all worker threads. + * If cb,arg are set, suspend this thread, call cb(arg) in a new thread + * and exit Lace upon return. + * Otherwise, the current thread is initialized as a Lace thread. + */ +void lace_startup(size_t stacksize, lace_startup_cb, void* arg); + +/** + * Initialize current thread as worker <idx> and allocate a deque with size <dqsize>. + * Use this when manually creating worker threads. + */ +void lace_init_worker(int idx, size_t dqsize); + +/** + * Manually spawn worker <idx> with (optional) program stack size <stacksize>. + * If fun,arg are set, overrides default startup method. + * Typically: for workers 1...(n_workers-1): lace_spawn_worker(i, stack_size, 0, 0); + */ +pthread_t lace_spawn_worker(int idx, size_t stacksize, void *(*fun)(void*), void* arg); + +/** + * Steal a random task. + */ +#define lace_steal_random() CALL(lace_steal_random) +void lace_steal_random_CALL(WorkerP*, Task*); + +/** + * Steal random tasks until parameter *quit is set + * Note: task declarations at end; quit is of type int* + */ +#define lace_steal_random_loop(quit) CALL(lace_steal_random_loop, quit) +#define lace_steal_loop(quit) CALL(lace_steal_loop, quit) + +/** + * Barrier (all workers must enter it before progressing) + */ +void lace_barrier(); + +/** + * Suspend and resume all other workers. + * May only be used when all other workers are idle. + */ +void lace_suspend(); +void lace_resume(); + +/** + * When all tasks are suspended, workers can be temporarily disabled. + * With set_workers, all workers 0..(N-1) are enabled and N..max are disabled. + * You can never disable the current worker or reduce the number of workers below 1. + * You cannot add workers. + */ +void lace_disable_worker(int worker); +void lace_enable_worker(int worker); +void lace_set_workers(int workercount); +int lace_enabled_workers(); + +/** + * Retrieve number of Lace workers + */ +size_t lace_workers(); + +/** + * Retrieve default program stack size + */ +size_t lace_default_stacksize(); + +/** + * Retrieve current worker. + */ +WorkerP *lace_get_worker(); + +/** + * Retrieve the current head of the deque + */ +Task *lace_get_head(WorkerP *); + +/** + * Exit Lace. Automatically called when started with cb,arg. + */ +void lace_exit(); + +#define LACE_STOLEN ((Worker*)0) +#define LACE_BUSY ((Worker*)1) +#define LACE_NOWORK ((Worker*)2) + +#define TASK(f) ( f##_CALL ) +#define WRAP(f, ...)
( f((WorkerP *)__lace_worker, (Task *)__lace_dq_head, ##__VA_ARGS__) ) +#define SYNC(f) ( __lace_dq_head--, WRAP(f##_SYNC) ) +#define DROP() ( __lace_dq_head--, WRAP(lace_drop) ) +#define SPAWN(f, ...) ( WRAP(f##_SPAWN, ##__VA_ARGS__), __lace_dq_head++ ) +#define CALL(f, ...) ( WRAP(f##_CALL, ##__VA_ARGS__) ) +#define TOGETHER(f, ...) ( WRAP(f##_TOGETHER, ##__VA_ARGS__) ) +#define NEWFRAME(f, ...) ( WRAP(f##_NEWFRAME, ##__VA_ARGS__) ) +#define STEAL_RANDOM() ( CALL(lace_steal_random) ) +#define LACE_WORKER_ID ( __lace_worker->worker ) +#define LACE_WORKER_PU ( __lace_worker->pu ) + +/* Use LACE_ME to initialize Lace variables, in case you want to call multiple Lace tasks */ +#define LACE_ME WorkerP * __attribute__((unused)) __lace_worker = lace_get_worker(); Task * __attribute__((unused)) __lace_dq_head = lace_get_head(__lace_worker); + +#define TASK_IS_STOLEN(t) ((size_t)t->thief > 1) +#define TASK_IS_COMPLETED(t) ((size_t)t->thief == 2) +#define TASK_RESULT(t) (&t->d[0]) + +#if LACE_DEBUG_PROGRAMSTACK +static inline void CHECKSTACK(WorkerP *w) +{ + if (w->stack_trigger != 0) { + register size_t rsp; + asm volatile("movq %%rsp, %0" : "+r"(rsp) : : "cc"); + if (rsp < w->stack_trigger) { + fputs("Warning: program stack 95% used!\n", stderr); + w->stack_trigger = 0; + } + } +} +#else +#define CHECKSTACK(w) {} +#endif + +typedef struct +{ + Task *t; + uint8_t all; + char pad[64-sizeof(Task *)-sizeof(uint8_t)]; +} lace_newframe_t; + +extern lace_newframe_t lace_newframe; + +/** + * Internal function to start participating on a task in a new frame + * Usually, is set to NULL and the task is copied from lace_newframe.t + * It is possible to override the start task by setting . + */ +void lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task); +void lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task); + +void lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head); +#define YIELD_NEWFRAME() { if (unlikely((*(volatile Task**)&lace_newframe.t) != NULL)) lace_yield(__lace_worker, __lace_dq_head); } + +#if LACE_PIE_TIMES +static void lace_time_event( WorkerP *w, int event ) +{ + uint64_t now = gethrtime(), + prev = w->time; + + switch( event ) { + + // Enter application code + case 1 : + if( w->level /* level */ == 0 ) { + PR_ADD( w, CTR_init, now - prev ); + w->level = 1; + } else if( w->level /* level */ == 1 ) { + PR_ADD( w, CTR_wsteal, now - prev ); + PR_ADD( w, CTR_wstealsucc, now - prev ); + } else { + PR_ADD( w, CTR_lsteal, now - prev ); + PR_ADD( w, CTR_lstealsucc, now - prev ); + } + break; + + // Exit application code + case 2 : + if( w->level /* level */ == 1 ) { + PR_ADD( w, CTR_wapp, now - prev ); + } else { + PR_ADD( w, CTR_lapp, now - prev ); + } + break; + + // Enter sync on stolen + case 3 : + if( w->level /* level */ == 1 ) { + PR_ADD( w, CTR_wapp, now - prev ); + } else { + PR_ADD( w, CTR_lapp, now - prev ); + } + w->level++; + break; + + // Exit sync on stolen + case 4 : + if( w->level /* level */ == 1 ) { + fprintf( stderr, "This should not happen, level = %d\n", w->level ); + } else { + PR_ADD( w, CTR_lsteal, now - prev ); + } + w->level--; + break; + + // Return from failed steal + case 7 : + if( w->level /* level */ == 0 ) { + PR_ADD( w, CTR_init, now - prev ); + } else if( w->level /* level */ == 1 ) { + PR_ADD( w, CTR_wsteal, now - prev ); + } else { + PR_ADD( w, CTR_lsteal, now - prev ); + } + break; + + // Signalling time + case 8 : + if( w->level /* level */ == 1 ) { + PR_ADD( w, CTR_wsignal, now - prev ); + PR_ADD( w, 
CTR_wsteal, now - prev ); + } else { + PR_ADD( w, CTR_lsignal, now - prev ); + PR_ADD( w, CTR_lsteal, now - prev ); + } + break; + + // Done + case 9 : + if( w->level /* level */ == 0 ) { + PR_ADD( w, CTR_init, now - prev ); + } else { + PR_ADD( w, CTR_close, now - prev ); + } + break; + + default: return; + } + + w->time = now; +} +#else +#define lace_time_event( w, e ) /* Empty */ +#endif + +static Worker* __attribute__((noinline)) +lace_steal(WorkerP *self, Task *__dq_head, Worker *victim) +{ + if (!victim->allstolen) { + /* Must be a volatile. In GCC 4.8, if it is not declared volatile, the + compiler will optimize extra memory accesses to victim->ts instead + of comparing the local values ts.ts.tail and ts.ts.split, causing + thieves to steal non existent tasks! */ + register TailSplit ts; + ts.v = *(volatile uint64_t *)&victim->ts.v; + if (ts.ts.tail < ts.ts.split) { + register TailSplit ts_new; + ts_new.v = ts.v; + ts_new.ts.tail++; + if (__sync_bool_compare_and_swap(&victim->ts.v, ts.v, ts_new.v)) { + // Stolen + Task *t = &victim->dq[ts.ts.tail]; + t->thief = self->_public; + lace_time_event(self, 1); + t->f(self, __dq_head, t); + lace_time_event(self, 2); + t->thief = THIEF_COMPLETED; + lace_time_event(self, 8); + return LACE_STOLEN; + } + + lace_time_event(self, 7); + return LACE_BUSY; + } + + if (victim->movesplit == 0) { + victim->movesplit = 1; + PR_COUNTSPLITS(self, CTR_split_req); + } + } + + lace_time_event(self, 7); + return LACE_NOWORK; +} + +static int +lace_shrink_shared(WorkerP *w) +{ + Worker *wt = w->_public; + TailSplit ts; + ts.v = wt->ts.v; /* Force in 1 memory read */ + uint32_t tail = ts.ts.tail; + uint32_t split = ts.ts.split; + + if (tail != split) { + uint32_t newsplit = (tail + split)/2; + wt->ts.ts.split = newsplit; + mfence(); + tail = *(volatile uint32_t *)&(wt->ts.ts.tail); + if (tail != split) { + if (unlikely(tail > newsplit)) { + newsplit = (tail + split) / 2; + wt->ts.ts.split = newsplit; + } + w->split = w->dq + newsplit; + PR_COUNTSPLITS(w, CTR_split_shrink); + return 0; + } + } + + wt->allstolen = 1; + w->allstolen = 1; + return 1; +} + +static inline void +lace_leapfrog(WorkerP *__lace_worker, Task *__lace_dq_head) +{ + lace_time_event(__lace_worker, 3); + Task *t = __lace_dq_head; + Worker *thief = t->thief; + if (thief != THIEF_COMPLETED) { + while ((size_t)thief <= 1) thief = t->thief; + + /* PRE-LEAP: increase head again */ + __lace_dq_head += 1; + + /* Now leapfrog */ + int attempts = 32; + while (thief != THIEF_COMPLETED) { + PR_COUNTSTEALS(__lace_worker, CTR_leap_tries); + Worker *res = lace_steal(__lace_worker, __lace_dq_head, thief); + if (res == LACE_NOWORK) { + YIELD_NEWFRAME(); + if ((LACE_LEAP_RANDOM) && (--attempts == 0)) { lace_steal_random(); attempts = 32; } + } else if (res == LACE_STOLEN) { + PR_COUNTSTEALS(__lace_worker, CTR_leaps); + } else if (res == LACE_BUSY) { + PR_COUNTSTEALS(__lace_worker, CTR_leap_busy); + } + compiler_barrier(); + thief = t->thief; + } + + /* POST-LEAP: really pop the finished task */ + /* no need to decrease __lace_dq_head, since it is a local variable */ + compiler_barrier(); + if (__lace_worker->allstolen == 0) { + /* Assume: tail = split = head (pre-pop) */ + /* Now we do a real pop ergo either decrease tail,split,head or declare allstolen */ + Worker *wt = __lace_worker->_public; + wt->allstolen = 1; + __lace_worker->allstolen = 1; + } + } + + compiler_barrier(); + t->thief = THIEF_EMPTY; + lace_time_event(__lace_worker, 4); +} + +static __attribute__((noinline)) +void lace_drop_slow(WorkerP *w, 
Task *__dq_head) +{ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) lace_leapfrog(w, __dq_head); +} + +static inline __attribute__((unused)) +void lace_drop(WorkerP *w, Task *__dq_head) +{ + if (likely(0 == w->_public->movesplit)) { + if (likely(w->split <= __dq_head)) { + return; + } + } + lace_drop_slow(w, __dq_head); +} + + + +// Task macros for tasks of arity 0 + +#define TASK_DECL_0(RTYPE, NAME) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { RTYPE res; } d; \ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +RTYPE NAME##_CALL(WorkerP *, Task * ); \ +static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ +static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head ) \ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head ) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + \ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ((TD_##NAME *)t)->d.res; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head ) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + \ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ((TD_##NAME *)t)->d.res; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head ); \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = 
(TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head ); \ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define TASK_IMPL_0(RTYPE, NAME) \ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + t->d.res = NAME##_CALL(w, __dq_head ); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head ); \ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head ) \ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head ); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) )\ + +#define TASK_0(RTYPE, NAME) TASK_DECL_0(RTYPE, NAME) TASK_IMPL_0(RTYPE, NAME) + +#define VOID_TASK_DECL_0(NAME) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + \ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +void NAME##_CALL(WorkerP *, Task * ); \ +static inline void NAME##_SYNC(WorkerP *, Task *); \ +static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head ) \ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head ) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + \ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head ) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + \ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + 
PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head ); \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head ); \ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define VOID_TASK_IMPL_0(NAME) \ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + NAME##_CALL(w, __dq_head ); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head ); \ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +void NAME##_CALL(WorkerP *w, Task *__dq_head ) \ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head ); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) )\ + +#define VOID_TASK_0(NAME) VOID_TASK_DECL_0(NAME) VOID_TASK_IMPL_0(NAME) + + +// Task macros for tasks of arity 1 + +#define TASK_DECL_1(RTYPE, NAME, ATYPE_1) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; } args; RTYPE res; } d; \ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1); \ +static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ +static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; \ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ((TD_##NAME *)t)->d.res; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; \ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ((TD_##NAME *)t)->d.res; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define TASK_IMPL_1(RTYPE, NAME, ATYPE_1, ARG_1) \ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1); \ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task 
will disappear */\ +RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1)\ + +#define TASK_1(RTYPE, NAME, ATYPE_1, ARG_1) TASK_DECL_1(RTYPE, NAME, ATYPE_1) TASK_IMPL_1(RTYPE, NAME, ATYPE_1, ARG_1) + +#define VOID_TASK_DECL_1(NAME, ATYPE_1) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; } args; } d; \ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1); \ +static inline void NAME##_SYNC(WorkerP *, Task *); \ +static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; \ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; \ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_SYNC(WorkerP *w, Task 
*__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define VOID_TASK_IMPL_1(NAME, ATYPE_1, ARG_1) \ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + NAME##_CALL(w, __dq_head , t->d.args.arg_1); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1); \ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1) \ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1)\ + +#define VOID_TASK_1(NAME, ATYPE_1, ARG_1) VOID_TASK_DECL_1(NAME, ATYPE_1) VOID_TASK_IMPL_1(NAME, ATYPE_1, ARG_1) + + +// Task macros for tasks of arity 2 + +#define TASK_DECL_2(RTYPE, NAME, ATYPE_1, ATYPE_2) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; } args; RTYPE res; } d; \ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2); \ +static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ +static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ((TD_##NAME *)t)->d.res; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ +{ \ + Task _t; \ + 
TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ((TD_##NAME *)t)->d.res; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define TASK_IMPL_2(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) \ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2); \ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2)\ + +#define TASK_2(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) TASK_DECL_2(RTYPE, NAME, ATYPE_1, ATYPE_2) TASK_IMPL_2(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) + +#define VOID_TASK_DECL_2(NAME, ATYPE_1, ATYPE_2) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; } args; } d; \ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2); \ +static inline void NAME##_SYNC(WorkerP *, Task *); \ +static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; \ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define VOID_TASK_IMPL_2(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) \ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void 
NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2); \ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2) \ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2)\ + +#define VOID_TASK_2(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) VOID_TASK_DECL_2(NAME, ATYPE_1, ATYPE_2) VOID_TASK_IMPL_2(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2) + + +// Task macros for tasks of arity 3 + +#define TASK_DECL_3(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; } args; RTYPE res; } d;\ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3); \ +static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ +static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ((TD_##NAME *)t)->d.res; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + 
lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ((TD_##NAME *)t)->d.res; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define TASK_IMPL_3(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) \ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3);\ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3)\ + +#define TASK_3(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) TASK_DECL_3(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3) TASK_IMPL_3(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) + +#define VOID_TASK_DECL_3(NAME, ATYPE_1, ATYPE_2, ATYPE_3) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; } args; } d; \ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3); \ +static inline void NAME##_SYNC(WorkerP *, Task *); \ +static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; \ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3);\ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define VOID_TASK_IMPL_3(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) \ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME 
*t __attribute__((unused))) \ +{ \ + NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3);\ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3)\ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3)\ + +#define VOID_TASK_3(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) VOID_TASK_DECL_3(NAME, ATYPE_1, ATYPE_2, ATYPE_3) VOID_TASK_IMPL_3(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3) + + +// Task macros for tasks of arity 4 + +#define TASK_DECL_4(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; } args; RTYPE res; } d;\ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4);\ +static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ +static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ((TD_##NAME *)t)->d.res; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ 
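+ /* note: the task descriptor is built on the caller's C stack; lace_do_together then runs this task on every worker, rather than queueing it on the deque for stealing */ \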
+ t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ((TD_##NAME *)t)->d.res; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define TASK_IMPL_4(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4)\ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4);\ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4)\ + +#define TASK_4(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4) TASK_DECL_4(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4) TASK_IMPL_4(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4) + +#define VOID_TASK_DECL_4(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; } args; } d;\ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4);\ +static inline void NAME##_SYNC(WorkerP *, Task *); \ +static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4;\ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, 
__dq_head); \ +} \ + \ + \ + +#define VOID_TASK_IMPL_4(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4)\ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4);\ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4);\ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4)\ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4)\ + +#define VOID_TASK_4(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4) VOID_TASK_DECL_4(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4) VOID_TASK_IMPL_4(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4) + + +// Task macros for tasks of arity 5 + +#define TASK_DECL_5(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; ATYPE_5 arg_5; } args; RTYPE res; } d;\ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5);\ +static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ +static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ((TD_##NAME *)t)->d.res; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ((TD_##NAME *)t)->d.res; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = 
(TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define TASK_IMPL_5(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5)\ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5);\ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4, arg_5); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4, ATYPE_5 ARG_5)\ + +#define TASK_5(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5) TASK_DECL_5(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5) TASK_IMPL_5(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5) + +#define VOID_TASK_DECL_5(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; ATYPE_5 arg_5; } args; } d;\ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5);\ +static inline void NAME##_SYNC(WorkerP *, Task *); \ +static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5;\ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + 
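+ /* fast path: no split move was requested and the shared part of the deque ends below the head, so this task was not stolen and is executed inline */ \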
return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define VOID_TASK_IMPL_5(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5)\ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5);\ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5);\ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5)\ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4, arg_5); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4, ATYPE_5 ARG_5)\ + +#define VOID_TASK_5(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5) VOID_TASK_DECL_5(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5) VOID_TASK_IMPL_5(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5) + + +// Task macros for tasks of arity 6 + +#define TASK_DECL_6(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6)\ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; ATYPE_5 arg_5; ATYPE_6 arg_6; } args; RTYPE res; } d;\ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +RTYPE NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6);\ +static inline RTYPE NAME##_SYNC(WorkerP *, Task *); \ +static RTYPE NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ((TD_##NAME *)t)->d.res; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +RTYPE NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ((TD_##NAME *)t)->d.res; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ +} \ + \ +static inline __attribute__((unused)) \ +RTYPE NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* 
Commented out because we assume contract */ \ + \ + if (likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define TASK_IMPL_6(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6)\ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + t->d.res = NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6);\ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +RTYPE NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4, arg_5, arg_6); \ +} \ + \ +static inline __attribute__((always_inline)) \ +RTYPE NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4, ATYPE_5 ARG_5, ATYPE_6 ARG_6)\ + +#define TASK_6(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) TASK_DECL_6(RTYPE, NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6) TASK_IMPL_6(RTYPE, NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) + +#define VOID_TASK_DECL_6(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6) \ + \ +typedef struct _TD_##NAME { \ + TASK_COMMON_FIELDS(_TD_##NAME) \ + union { struct { ATYPE_1 arg_1; ATYPE_2 arg_2; ATYPE_3 arg_3; ATYPE_4 arg_4; ATYPE_5 arg_5; ATYPE_6 arg_6; } args; } d;\ +} TD_##NAME; \ + \ +/* If this line generates an error, please manually set the define LACE_TASKSIZE to a higher value */\ +typedef char assertion_failed_task_descriptor_out_of_bounds_##NAME[(sizeof(TD_##NAME)<=sizeof(Task)) ? 
0 : -1];\ + \ +void NAME##_WRAP(WorkerP *, Task *, TD_##NAME *); \ +void NAME##_CALL(WorkerP *, Task * , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6);\ +static inline void NAME##_SYNC(WorkerP *, Task *); \ +static void NAME##_SYNC_SLOW(WorkerP *, Task *); \ + \ +static inline __attribute__((unused)) \ +void NAME##_SPAWN(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ +{ \ + PR_COUNTTASK(w); \ + \ + TD_##NAME *t; \ + TailSplit ts; \ + uint32_t head, split, newsplit; \ + \ + /* assert(__dq_head < w->end); */ /* Assuming to be true */ \ + \ + t = (TD_##NAME *)__dq_head; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (unlikely(w->allstolen)) { \ + if (wt->movesplit) wt->movesplit = 0; \ + head = __dq_head - w->dq; \ + ts = (TailSplit){{head,head+1}}; \ + wt->ts.v = ts.v; \ + compiler_barrier(); \ + wt->allstolen = 0; \ + w->split = __dq_head+1; \ + w->allstolen = 0; \ + } else if (unlikely(wt->movesplit)) { \ + head = __dq_head - w->dq; \ + split = w->split - w->dq; \ + newsplit = (split + head + 2)/2; \ + wt->ts.ts.split = newsplit; \ + w->split = w->dq + newsplit; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_NEWFRAME(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ + \ + lace_do_newframe(w, __dq_head, &_t); \ + return ; \ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_TOGETHER(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ +{ \ + Task _t; \ + TD_##NAME *t = (TD_##NAME *)&_t; \ + t->f = &NAME##_WRAP; \ + t->thief = THIEF_TASK; \ + t->d.args.arg_1 = arg_1; t->d.args.arg_2 = arg_2; t->d.args.arg_3 = arg_3; t->d.args.arg_4 = arg_4; t->d.args.arg_5 = arg_5; t->d.args.arg_6 = arg_6;\ + \ + lace_do_together(w, __dq_head, &_t); \ +} \ + \ +static __attribute__((noinline)) \ +void NAME##_SYNC_SLOW(WorkerP *w, Task *__dq_head) \ +{ \ + TD_##NAME *t; \ + \ + if ((w->allstolen) || (w->split > __dq_head && lace_shrink_shared(w))) { \ + lace_leapfrog(w, __dq_head); \ + t = (TD_##NAME *)__dq_head; \ + return ; \ + } \ + \ + compiler_barrier(); \ + \ + Worker *wt = w->_public; \ + if (wt->movesplit) { \ + Task *t = w->split; \ + size_t diff = __dq_head - t; \ + diff = (diff + 1) / 2; \ + w->split = t + diff; \ + wt->ts.ts.split += diff; \ + compiler_barrier(); \ + wt->movesplit = 0; \ + PR_COUNTSPLITS(w, CTR_split_grow); \ + } \ + \ + compiler_barrier(); \ + \ + t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ +} \ + \ +static inline __attribute__((unused)) \ +void NAME##_SYNC(WorkerP *w, Task *__dq_head) \ +{ \ + /* assert (__dq_head > 0); */ /* Commented out because we assume contract */ \ + \ + if 
(likely(0 == w->_public->movesplit)) { \ + if (likely(w->split <= __dq_head)) { \ + TD_##NAME *t = (TD_##NAME *)__dq_head; \ + t->thief = THIEF_EMPTY; \ + return NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ + } \ + } \ + \ + return NAME##_SYNC_SLOW(w, __dq_head); \ +} \ + \ + \ + +#define VOID_TASK_IMPL_6(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6)\ +void NAME##_WRAP(WorkerP *w, Task *__dq_head, TD_##NAME *t __attribute__((unused))) \ +{ \ + NAME##_CALL(w, __dq_head , t->d.args.arg_1, t->d.args.arg_2, t->d.args.arg_3, t->d.args.arg_4, t->d.args.arg_5, t->d.args.arg_6);\ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker, Task *__lace_dq_head , ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6);\ + \ +/* NAME##_WORK is inlined in NAME##_CALL and the parameter __lace_in_task will disappear */\ +void NAME##_CALL(WorkerP *w, Task *__dq_head , ATYPE_1 arg_1, ATYPE_2 arg_2, ATYPE_3 arg_3, ATYPE_4 arg_4, ATYPE_5 arg_5, ATYPE_6 arg_6)\ +{ \ + CHECKSTACK(w); \ + return NAME##_WORK(w, __dq_head , arg_1, arg_2, arg_3, arg_4, arg_5, arg_6); \ +} \ + \ +static inline __attribute__((always_inline)) \ +void NAME##_WORK(WorkerP *__lace_worker __attribute__((unused)), Task *__lace_dq_head __attribute__((unused)) , ATYPE_1 ARG_1, ATYPE_2 ARG_2, ATYPE_3 ARG_3, ATYPE_4 ARG_4, ATYPE_5 ARG_5, ATYPE_6 ARG_6)\ + +#define VOID_TASK_6(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) VOID_TASK_DECL_6(NAME, ATYPE_1, ATYPE_2, ATYPE_3, ATYPE_4, ATYPE_5, ATYPE_6) VOID_TASK_IMPL_6(NAME, ATYPE_1, ARG_1, ATYPE_2, ARG_2, ATYPE_3, ARG_3, ATYPE_4, ARG_4, ATYPE_5, ARG_5, ATYPE_6, ARG_6) + + +VOID_TASK_DECL_0(lace_steal_random); +VOID_TASK_DECL_1(lace_steal_random_loop, int*); +VOID_TASK_DECL_1(lace_steal_loop, int*); +VOID_TASK_DECL_2(lace_steal_loop_root, Task *, int*); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/llmsset.c b/src/llmsset.c new file mode 100644 index 000000000..9c21f2e94 --- /dev/null +++ b/src/llmsset.c @@ -0,0 +1,564 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include <sylvan_config.h>
+
+#include <errno.h>  // for errno
+#include <stdint.h> // for uint64_t etc
+#include <stdio.h>  // for printf
+#include <stdlib.h>
+#include <string.h> // memset
+#include <sys/mman.h> // for mmap
+
+#include <llmsset.h>
+#include <stats.h>
+#include <tls.h>
+
+#ifndef USE_HWLOC
+#define USE_HWLOC 0
+#endif
+
+#if USE_HWLOC
+#include <hwloc.h>
+
+static hwloc_topology_t topo;
+#endif
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifndef cas
+#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new)))
+#endif
+
+DECLARE_THREAD_LOCAL(my_region, uint64_t);
+
+VOID_TASK_0(llmsset_reset_region)
+{
+ LOCALIZE_THREAD_LOCAL(my_region, uint64_t);
+ my_region = (uint64_t)-1; // no region
+ SET_THREAD_LOCAL(my_region, my_region);
+}
+
+static uint64_t
+claim_data_bucket(const llmsset_t dbs)
+{
+ LOCALIZE_THREAD_LOCAL(my_region, uint64_t);
+
+ for (;;) {
+ if (my_region != (uint64_t)-1) {
+ // find empty bucket in region
+ uint64_t *ptr = dbs->bitmap2 + (my_region*8);
+ int i=0;
+ for (;i<8;) {
+ uint64_t v = *ptr;
+ if (v != 0xffffffffffffffffLL) {
+ int j = __builtin_clzll(~v);
+ *ptr |= (0x8000000000000000LL>>j);
+ return (8 * my_region + i) * 64 + j;
+ }
+ i++;
+ ptr++;
+ }
+ } else {
+ // special case on startup or after garbage collection
+ my_region += (lace_get_worker()->worker*(dbs->table_size/(64*8)))/lace_workers();
+ }
+ uint64_t count = dbs->table_size/(64*8);
+ for (;;) {
+ // check if table maybe full
+ if (count-- == 0) return (uint64_t)-1;
+
+ my_region += 1;
+ if (my_region >= (dbs->table_size/(64*8))) my_region = 0;
+
+ // try to claim it
+ uint64_t *ptr = dbs->bitmap1 + (my_region/64);
+ uint64_t mask = 0x8000000000000000LL >> (my_region&63);
+ uint64_t v;
+restart:
+ v = *ptr;
+ if (v & mask) continue; // taken
+ if (cas(ptr, v, v|mask)) break;
+ else goto restart;
+ }
+ SET_THREAD_LOCAL(my_region, my_region);
+ }
+}
+
+static void
+release_data_bucket(const llmsset_t dbs, uint64_t index)
+{
+ uint64_t *ptr = dbs->bitmap2 + (index/64);
+ uint64_t mask = 0x8000000000000000LL >> (index&63);
+ *ptr &= ~mask;
+}
+
+static void
+set_custom_bucket(const llmsset_t dbs, uint64_t index, int on)
+{
+ uint64_t *ptr = dbs->bitmapc + (index/64);
+ uint64_t mask = 0x8000000000000000LL >> (index&63);
+ if (on) *ptr |= mask;
+ else *ptr &= ~mask;
+}
+
+static int
+get_custom_bucket(const llmsset_t dbs, uint64_t index)
+{
+ uint64_t *ptr = dbs->bitmapc + (index/64);
+ uint64_t mask = 0x8000000000000000LL >> (index&63);
+ return (*ptr & mask) ? 1 : 0;
+}
+
+#ifndef rotl64
+static inline uint64_t
+rotl64(uint64_t x, int8_t r)
+{
+ return ((x<<r) | (x>>(64-r)));
+}
+#endif
+
+uint64_t
+llmsset_hash(const uint64_t a, const uint64_t b, const uint64_t seed)
+{
+ const uint64_t prime = 1099511628211;
+
+ uint64_t hash = seed;
+ hash = hash ^ a;
+ hash = rotl64(hash, 47);
+ hash = hash * prime;
+ hash = hash ^ b;
+ hash = rotl64(hash, 31);
+ hash = hash * prime;
+
+ return hash ^ (hash >> 32);
+}
+
+/*
+ * CL_MASK and CL_MASK_R are for the probe sequence calculation.
+ * With 64 bytes per cacheline, there are 8 64-bit values per cacheline.
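+ * The probe sequence below therefore first walks the eight buckets within one cacheline (via CL_MASK/CL_MASK_R) and only computes a new hash, jumping to a different cacheline, after a full line has been scanned without success.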
+ */ +// The LINE_SIZE is defined in lace.h +static const uint64_t CL_MASK = ~(((LINE_SIZE) / 8) - 1); +static const uint64_t CL_MASK_R = ((LINE_SIZE) / 8) - 1; + +/* 40 bits for the index, 24 bits for the hash */ +#define MASK_INDEX ((uint64_t)0x000000ffffffffff) +#define MASK_HASH ((uint64_t)0xffffff0000000000) + +static inline uint64_t +llmsset_lookup2(const llmsset_t dbs, uint64_t a, uint64_t b, int* created, const int custom) +{ + uint64_t hash_rehash = 14695981039346656037LLU; + if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash); + else hash_rehash = llmsset_hash(a, b, hash_rehash); + + const uint64_t hash = hash_rehash & MASK_HASH; + uint64_t idx, last, cidx = 0; + int i=0; + +#if LLMSSET_MASK + last = idx = hash_rehash & dbs->mask; +#else + last = idx = hash_rehash % dbs->table_size; +#endif + + for (;;) { + volatile uint64_t *bucket = dbs->table + idx; + uint64_t v = *bucket; + + if (v == 0) { + if (cidx == 0) { + cidx = claim_data_bucket(dbs); + if (cidx == (uint64_t)-1) return 0; // failed to claim a data bucket + if (custom) dbs->create_cb(&a, &b); + uint64_t *d_ptr = ((uint64_t*)dbs->data) + 2*cidx; + d_ptr[0] = a; + d_ptr[1] = b; + } + if (cas(bucket, 0, hash | cidx)) { + if (custom) set_custom_bucket(dbs, cidx, custom); + *created = 1; + return cidx; + } else { + v = *bucket; + } + } + + if (hash == (v & MASK_HASH)) { + uint64_t d_idx = v & MASK_INDEX; + uint64_t *d_ptr = ((uint64_t*)dbs->data) + 2*d_idx; + if (custom) { + if (dbs->equals_cb(a, b, d_ptr[0], d_ptr[1])) { + if (cidx != 0) { + dbs->destroy_cb(a, b); + release_data_bucket(dbs, cidx); + } + *created = 0; + return d_idx; + } + } else { + if (d_ptr[0] == a && d_ptr[1] == b) { + if (cidx != 0) release_data_bucket(dbs, cidx); + *created = 0; + return d_idx; + } + } + } + + sylvan_stats_count(LLMSSET_LOOKUP); + + // find next idx on probe sequence + idx = (idx & CL_MASK) | ((idx+1) & CL_MASK_R); + if (idx == last) { + if (++i == dbs->threshold) return 0; // failed to find empty spot in probe sequence + + // go to next cache line in probe sequence + if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash); + else hash_rehash = llmsset_hash(a, b, hash_rehash); + +#if LLMSSET_MASK + last = idx = hash_rehash & dbs->mask; +#else + last = idx = hash_rehash % dbs->table_size; +#endif + } + } +} + +uint64_t +llmsset_lookup(const llmsset_t dbs, const uint64_t a, const uint64_t b, int* created) +{ + return llmsset_lookup2(dbs, a, b, created, 0); +} + +uint64_t +llmsset_lookupc(const llmsset_t dbs, const uint64_t a, const uint64_t b, int* created) +{ + return llmsset_lookup2(dbs, a, b, created, 1); +} + +static inline int +llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx) +{ + const uint64_t * const d_ptr = ((uint64_t*)dbs->data) + 2*d_idx; + const uint64_t a = d_ptr[0]; + const uint64_t b = d_ptr[1]; + + uint64_t hash_rehash = 14695981039346656037LLU; + const int custom = get_custom_bucket(dbs, d_idx) ? 
1 : 0; + if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash); + else hash_rehash = llmsset_hash(a, b, hash_rehash); + const uint64_t new_v = (hash_rehash & MASK_HASH) | d_idx; + int i=0; + + uint64_t idx, last; +#if LLMSSET_MASK + last = idx = hash_rehash & dbs->mask; +#else + last = idx = hash_rehash % dbs->table_size; +#endif + + for (;;) { + volatile uint64_t *bucket = &dbs->table[idx]; + if (*bucket == 0 && cas(bucket, 0, new_v)) return 1; + + // find next idx on probe sequence + idx = (idx & CL_MASK) | ((idx+1) & CL_MASK_R); + if (idx == last) { + if (++i == dbs->threshold) return 0; // failed to find empty spot in probe sequence + + // go to next cache line in probe sequence + if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash); + else hash_rehash = llmsset_hash(a, b, hash_rehash); + +#if LLMSSET_MASK + last = idx = hash_rehash & dbs->mask; +#else + last = idx = hash_rehash % dbs->table_size; +#endif + } + } +} + +llmsset_t +llmsset_create(size_t initial_size, size_t max_size) +{ +#if USE_HWLOC + hwloc_topology_init(&topo); + hwloc_topology_load(topo); +#endif + + llmsset_t dbs = NULL; + if (posix_memalign((void**)&dbs, LINE_SIZE, sizeof(struct llmsset)) != 0) { + fprintf(stderr, "llmsset_create: Unable to allocate memory!\n"); + exit(1); + } + +#if LLMSSET_MASK + /* Check if initial_size and max_size are powers of 2 */ + if (__builtin_popcountll(initial_size) != 1) { + fprintf(stderr, "llmsset_create: initial_size is not a power of 2!\n"); + exit(1); + } + + if (__builtin_popcountll(max_size) != 1) { + fprintf(stderr, "llmsset_create: max_size is not a power of 2!\n"); + exit(1); + } +#endif + + if (initial_size > max_size) { + fprintf(stderr, "llmsset_create: initial_size > max_size!\n"); + exit(1); + } + + // minimum size is now 512 buckets (region size, but of course, n_workers * 512 is suggested as minimum) + + if (initial_size < 512) { + fprintf(stderr, "llmsset_create: initial_size too small!\n"); + exit(1); + } + + dbs->max_size = max_size; + llmsset_set_size(dbs, initial_size); + + /* This implementation of "resizable hash table" allocates the max_size table in virtual memory, + but only uses the "actual size" part in real memory */ + + dbs->table = (uint64_t*)mmap(0, dbs->max_size * 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + dbs->data = (uint8_t*)mmap(0, dbs->max_size * 16, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + + /* Also allocate bitmaps. Each region is 64*8 = 512 buckets. + Overhead of bitmap1: 1 bit per 4096 bucket. + Overhead of bitmap2: 1 bit per bucket. + Overhead of bitmapc: 1 bit per bucket. 
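+ In bytes that is max_size/(512*8) for bitmap1 and max_size/8 each for bitmap2 and bitmapc, matching the mmap sizes below.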
*/ + + dbs->bitmap1 = (uint64_t*)mmap(0, dbs->max_size / (512*8), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + dbs->bitmap2 = (uint64_t*)mmap(0, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + dbs->bitmapc = (uint64_t*)mmap(0, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + + if (dbs->table == (uint64_t*)-1 || dbs->data == (uint8_t*)-1 || dbs->bitmap1 == (uint64_t*)-1 || dbs->bitmap2 == (uint64_t*)-1 || dbs->bitmapc == (uint64_t*)-1) { + fprintf(stderr, "llmsset_create: Unable to allocate memory: %s!\n", strerror(errno)); + exit(1); + } + +#if defined(madvise) && defined(MADV_RANDOM) + madvise(dbs->table, dbs->max_size * 8, MADV_RANDOM); +#endif + +#if USE_HWLOC + hwloc_set_area_membind(topo, dbs->table, dbs->max_size * 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0); + hwloc_set_area_membind(topo, dbs->data, dbs->max_size * 16, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0); + hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0); + hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0); + hwloc_set_area_membind(topo, dbs->bitmapc, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0); +#endif + + // forbid first two positions (index 0 and 1) + dbs->bitmap2[0] = 0xc000000000000000LL; + + dbs->hash_cb = NULL; + dbs->equals_cb = NULL; + dbs->create_cb = NULL; + dbs->destroy_cb = NULL; + + // yes, ugly. for now, we use a global thread-local value. + // that is a problem with multiple tables. + // so, for now, do NOT use multiple tables!! + + LACE_ME; + INIT_THREAD_LOCAL(my_region); + TOGETHER(llmsset_reset_region); + + return dbs; +} + +void +llmsset_free(llmsset_t dbs) +{ + munmap(dbs->table, dbs->max_size * 8); + munmap(dbs->data, dbs->max_size * 16); + munmap(dbs->bitmap1, dbs->max_size / (512*8)); + munmap(dbs->bitmap2, dbs->max_size / 8); + munmap(dbs->bitmapc, dbs->max_size / 8); + free(dbs); +} + +VOID_TASK_IMPL_1(llmsset_clear, llmsset_t, dbs) +{ + // just reallocate... + if (mmap(dbs->table, dbs->max_size * 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) { +#if defined(madvise) && defined(MADV_RANDOM) + madvise(dbs->table, sizeof(uint64_t[dbs->max_size]), MADV_RANDOM); +#endif +#if USE_HWLOC + hwloc_set_area_membind(topo, dbs->table, sizeof(uint64_t[dbs->max_size]), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0); +#endif + } else { + // reallocate failed... 
expensive fallback
+ memset(dbs->table, 0, dbs->max_size * 8);
+ }
+
+ if (mmap(dbs->bitmap1, dbs->max_size / (512*8), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
+#if USE_HWLOC
+ hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
+#endif
+ } else {
+ memset(dbs->bitmap1, 0, dbs->max_size / (512*8));
+ }
+
+ if (mmap(dbs->bitmap2, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
+#if USE_HWLOC
+ hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
+#endif
+ } else {
+ memset(dbs->bitmap2, 0, dbs->max_size / 8);
+ }
+
+ // forbid first two positions (index 0 and 1)
+ dbs->bitmap2[0] = 0xc000000000000000LL;
+
+ TOGETHER(llmsset_reset_region);
+}
+
+int
+llmsset_is_marked(const llmsset_t dbs, uint64_t index)
+{
+ volatile uint64_t *ptr = dbs->bitmap2 + (index/64);
+ uint64_t mask = 0x8000000000000000LL >> (index&63);
+ return (*ptr & mask) ? 1 : 0;
+}
+
+int
+llmsset_mark(const llmsset_t dbs, uint64_t index)
+{
+ volatile uint64_t *ptr = dbs->bitmap2 + (index/64);
+ uint64_t mask = 0x8000000000000000LL >> (index&63);
+ for (;;) {
+ uint64_t v = *ptr;
+ if (v & mask) return 0;
+ if (cas(ptr, v, v|mask)) return 1;
+ }
+}
+
+VOID_TASK_3(llmsset_rehash_par, llmsset_t, dbs, size_t, first, size_t, count)
+{
+ if (count > 512) {
+ size_t split = count/2;
+ SPAWN(llmsset_rehash_par, dbs, first, split);
+ CALL(llmsset_rehash_par, dbs, first + split, count - split);
+ SYNC(llmsset_rehash_par);
+ } else {
+ uint64_t *ptr = dbs->bitmap2 + (first / 64);
+ uint64_t mask = 0x8000000000000000LL >> (first & 63);
+ for (size_t k=0; k<count; k++) {
+ if (*ptr & mask) llmsset_rehash_bucket(dbs, first+k);
+ mask >>= 1;
+ if (mask == 0) {
+ ptr++;
+ mask = 0x8000000000000000LL;
+ }
+ }
+ }
+}
+
+VOID_TASK_IMPL_1(llmsset_rehash, llmsset_t, dbs)
+{
+ CALL(llmsset_rehash_par, dbs, 0, dbs->table_size);
+}
+
+TASK_3(size_t, llmsset_count_marked_par, llmsset_t, dbs, size_t, first, size_t, count)
+{
+ if (count > 512) {
+ size_t split = count/2;
+ SPAWN(llmsset_count_marked_par, dbs, first, split);
+ size_t right = CALL(llmsset_count_marked_par, dbs, first + split, count - split);
+ size_t left = SYNC(llmsset_count_marked_par);
+ return left + right;
+ } else {
+ size_t result = 0;
+ uint64_t *ptr = dbs->bitmap2 + (first / 64);
+ if (count == 512) {
+ result += __builtin_popcountll(ptr[0]);
+ result += __builtin_popcountll(ptr[1]);
+ result += __builtin_popcountll(ptr[2]);
+ result += __builtin_popcountll(ptr[3]);
+ result += __builtin_popcountll(ptr[4]);
+ result += __builtin_popcountll(ptr[5]);
+ result += __builtin_popcountll(ptr[6]);
+ result += __builtin_popcountll(ptr[7]);
+ } else {
+ uint64_t mask = 0x8000000000000000LL >> (first & 63);
+ for (size_t k=0; k<count; k++) {
+ if (*ptr & mask) result += 1;
+ mask >>= 1;
+ if (mask == 0) {
+ ptr++;
+ mask = 0x8000000000000000LL;
+ }
+ }
+ }
+ return result;
+ }
+}
+
+TASK_IMPL_1(size_t, llmsset_count_marked, llmsset_t, dbs)
+{
+ return CALL(llmsset_count_marked_par, dbs, 0, dbs->table_size);
+}
+
+VOID_TASK_3(llmsset_destroy_par, llmsset_t, dbs, size_t, first, size_t, count)
+{
+ if (count > 1024) {
+ size_t split = count/2;
+ SPAWN(llmsset_destroy_par, dbs, first, split);
+ CALL(llmsset_destroy_par, dbs, first + split, count - split);
+ SYNC(llmsset_destroy_par);
+ } else {
+ for (size_t k=first; k<first+count; k++) {
+ volatile uint64_t *ptr2 = dbs->bitmap2 + (k/64);
+ volatile uint64_t *ptrc = dbs->bitmapc + (k/64);
+ uint64_t mask = 0x8000000000000000LL >> (k&63);
+
+ // if not
marked but is custom + if ((*ptr2 & mask) == 0 && (*ptrc & mask)) { + uint64_t *d_ptr = ((uint64_t*)dbs->data) + 2*k; + dbs->destroy_cb(d_ptr[0], d_ptr[1]); + *ptrc &= ~mask; + } + } + } +} + +VOID_TASK_IMPL_1(llmsset_destroy_unmarked, llmsset_t, dbs) +{ + if (dbs->destroy_cb == NULL) return; // no custom function + CALL(llmsset_destroy_par, dbs, 0, dbs->table_size); +} + +/** + * Set custom functions + */ +void llmsset_set_custom(const llmsset_t dbs, llmsset_hash_cb hash_cb, llmsset_equals_cb equals_cb, llmsset_create_cb create_cb, llmsset_destroy_cb destroy_cb) +{ + dbs->hash_cb = hash_cb; + dbs->equals_cb = equals_cb; + dbs->create_cb = create_cb; + dbs->destroy_cb = destroy_cb; +} diff --git a/src/llmsset.h b/src/llmsset.h new file mode 100644 index 000000000..84c55e23b --- /dev/null +++ b/src/llmsset.h @@ -0,0 +1,202 @@ +/* + * Copyright 2011-2014 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include + +#ifndef LLMSSET_H +#define LLMSSET_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifndef LLMSSET_MASK +#define LLMSSET_MASK 0 // set to 1 to use bit mask instead of modulo +#endif + +/** + * Lockless hash table (set) to store 16-byte keys. + * Each unique key is associated with a 42-bit number. + * + * The set has support for stop-the-world garbage collection. + * Methods llmsset_clear, llmsset_mark and llmsset_rehash implement garbage collection. + * During their execution, llmsset_lookup is not allowed. + */ + +/** + * hash(a, b, seed) + * equals(lhs_a, lhs_b, rhs_a, rhs_b) + * create(a, b) -- with a,b pointers, allows changing pointers on create of node, + * but must keep hash/equals same! + * destroy(a, b) + */ +typedef uint64_t (*llmsset_hash_cb)(uint64_t, uint64_t, uint64_t); +typedef int (*llmsset_equals_cb)(uint64_t, uint64_t, uint64_t, uint64_t); +typedef void (*llmsset_create_cb)(uint64_t *, uint64_t *); +typedef void (*llmsset_destroy_cb)(uint64_t, uint64_t); + +typedef struct llmsset +{ + uint64_t *table; // table with hashes + uint8_t *data; // table with values + uint64_t *bitmap1; // ownership bitmap (per 512 buckets) + uint64_t *bitmap2; // bitmap for "contains data" + uint64_t *bitmapc; // bitmap for "use custom functions" + size_t max_size; // maximum size of the hash table (for resizing) + size_t table_size; // size of the hash table (number of slots) --> power of 2! +#if LLMSSET_MASK + size_t mask; // size-1 +#endif + size_t f_size; + llmsset_hash_cb hash_cb; // custom hash function + llmsset_equals_cb equals_cb; // custom equals function + llmsset_create_cb create_cb; // custom create function + llmsset_destroy_cb destroy_cb; // custom destroy function + int16_t threshold; // number of iterations for insertion until returning error +} *llmsset_t; + +/** + * Retrieve a pointer to the data associated with the 42-bit value. 
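+ * Each bucket stores 16 bytes of data, so this simply returns dbs->data + 16*index.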
+ */ +static inline void* +llmsset_index_to_ptr(const llmsset_t dbs, size_t index) +{ + return dbs->data + index * 16; +} + +/** + * Create the set. + * This will allocate a set of buckets in virtual memory. + * The actual space used is buckets. + */ +llmsset_t llmsset_create(size_t initial_size, size_t max_size); + +/** + * Free the set. + */ +void llmsset_free(llmsset_t dbs); + +/** + * Retrieve the maximum size of the set. + */ +static inline size_t +llmsset_get_max_size(const llmsset_t dbs) +{ + return dbs->max_size; +} + +/** + * Retrieve the current size of the lockless MS set. + */ +static inline size_t +llmsset_get_size(const llmsset_t dbs) +{ + return dbs->table_size; +} + +/** + * Set the table size of the set. + * Typically called during garbage collection, after clear and before rehash. + * Returns 0 if dbs->table_size > dbs->max_size! + */ +static inline void +llmsset_set_size(llmsset_t dbs, size_t size) +{ + /* check bounds (don't be rediculous) */ + if (size > 128 && size <= dbs->max_size) { + dbs->table_size = size; +#if LLMSSET_MASK + /* Warning: if size is not a power of two, you will get interesting behavior */ + dbs->mask = dbs->table_size - 1; +#endif + dbs->threshold = (64 - __builtin_clzll(dbs->table_size)) + 4; // doubling table_size increases threshold by 1 + } +} + +/** + * Core function: find existing data or add new. + * Returns the unique 42-bit value associated with the data, or 0 when table is full. + * Also, this value will never equal 0 or 1. + * Note: garbage collection during lookup strictly forbidden + */ +uint64_t llmsset_lookup(const llmsset_t dbs, const uint64_t a, const uint64_t b, int *created); + +/** + * Same as lookup, but use the custom functions + */ +uint64_t llmsset_lookupc(const llmsset_t dbs, const uint64_t a, const uint64_t b, int *created); + +/** + * To perform garbage collection, the user is responsible that no lookups are performed during the process. + * + * 1) call llmsset_clear + * 2) call llmsset_mark for every bucket to rehash + * 3) call llmsset_rehash + */ +VOID_TASK_DECL_1(llmsset_clear, llmsset_t); +#define llmsset_clear(dbs) CALL(llmsset_clear, dbs) + +/** + * Check if a certain data bucket is marked (in use). + */ +int llmsset_is_marked(const llmsset_t dbs, uint64_t index); + +/** + * During garbage collection, buckets are marked (for rehashing) with this function. + * Returns 0 if the node was already marked, or non-zero if it was not marked. + * May also return non-zero if multiple workers marked at the same time. + */ +int llmsset_mark(const llmsset_t dbs, uint64_t index); + +/** + * Rehash all marked buckets. + */ +VOID_TASK_DECL_1(llmsset_rehash, llmsset_t); +#define llmsset_rehash(dbs) CALL(llmsset_rehash, dbs) + +/** + * Retrieve number of marked buckets. + */ +TASK_DECL_1(size_t, llmsset_count_marked, llmsset_t); +#define llmsset_count_marked(dbs) CALL(llmsset_count_marked, dbs) + +/** + * During garbage collection, this method calls the destroy callback + * for all 'custom' data that is not kept. 
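+ * That is, buckets whose custom bit is set in bitmapc but whose mark bit in bitmap2 is clear (see llmsset_destroy_par).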
+ */ +VOID_TASK_DECL_1(llmsset_destroy_unmarked, llmsset_t); +#define llmsset_destroy_unmarked(dbs) CALL(llmsset_destroy_unmarked, dbs) + +/** + * Set custom functions + */ +void llmsset_set_custom(const llmsset_t dbs, llmsset_hash_cb hash_cb, llmsset_equals_cb equals_cb, llmsset_create_cb create_cb, llmsset_destroy_cb destroy_cb); + +/** + * Default hashing function + */ +uint64_t llmsset_hash(const uint64_t a, const uint64_t b, const uint64_t seed); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + + +#endif diff --git a/src/refs.c b/src/refs.c new file mode 100644 index 000000000..0d6963bfe --- /dev/null +++ b/src/refs.c @@ -0,0 +1,598 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include // for assert +#include // for errno +#include // for fprintf +#include // for uint32_t etc +#include // for exit +#include // for strerror +#include // for mmap + +#include + +#ifndef compiler_barrier +#define compiler_barrier() { asm volatile("" ::: "memory"); } +#endif + +#ifndef cas +#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) +#endif + +/** + * Implementation of external references + * Based on a hash table for 40-bit non-null values, linear probing + * Use tombstones for deleting, higher bits for reference count + */ +static const uint64_t refs_ts = 0x7fffffffffffffff; // tombstone + +/* FNV-1a 64-bit hash */ +static inline uint64_t +fnv_hash(uint64_t a) +{ + const uint64_t prime = 1099511628211; + uint64_t hash = 14695981039346656037LLU; + hash = (hash ^ a) * prime; + hash = (hash ^ ((a << 25) | (a >> 39))) * prime; + return hash ^ (hash >> 32); +} + +// Count number of unique entries (not number of references) +size_t +refs_count(refs_table_t *tbl) +{ + size_t count = 0; + uint64_t *bucket = tbl->refs_table; + uint64_t * const end = bucket + tbl->refs_size; + while (bucket != end) { + if (*bucket != 0 && *bucket != refs_ts) count++; + bucket++; + } + return count; +} + +static inline void +refs_rehash(refs_table_t *tbl, uint64_t v) +{ + if (v == 0) return; // do not rehash empty value + if (v == refs_ts) return; // do not rehash tombstone + + volatile uint64_t *bucket = tbl->refs_table + (fnv_hash(v & 0x000000ffffffffff) % tbl->refs_size); + uint64_t * const end = tbl->refs_table + tbl->refs_size; + + int i = 128; // try 128 times linear probing + while (i--) { + if (*bucket == 0) { if (cas(bucket, 0, v)) return; } + if (++bucket == end) bucket = tbl->refs_table; + } + + // assert(0); // impossible! 
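+ // If all 128 probes hit occupied buckets the value is silently not rehashed;
+ // the commented-out assert above documents that this is not expected to occur during a resize.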
+} + +/** + * Called internally to assist resize operations + * Returns 1 for retry, 0 for done + */ +static int +refs_resize_help(refs_table_t *tbl) +{ + if (0 == (tbl->refs_control & 0xf0000000)) return 0; // no resize in progress (anymore) + if (tbl->refs_control & 0x80000000) return 1; // still waiting for preparation + + if (tbl->refs_resize_part >= tbl->refs_resize_size / 128) return 1; // all parts claimed + size_t part = __sync_fetch_and_add(&tbl->refs_resize_part, 1); + if (part >= tbl->refs_resize_size/128) return 1; // all parts claimed + + // rehash all + int i; + volatile uint64_t *bucket = tbl->refs_resize_table + part * 128; + for (i=0; i<128; i++) refs_rehash(tbl, *bucket++); + + __sync_fetch_and_add(&tbl->refs_resize_done, 1); + return 1; +} + +static void +refs_resize(refs_table_t *tbl) +{ + while (1) { + uint32_t v = tbl->refs_control; + if (v & 0xf0000000) { + // someone else started resize + // just rehash blocks until done + while (refs_resize_help(tbl)) continue; + return; + } + if (cas(&tbl->refs_control, v, 0x80000000 | v)) { + // wait until all users gone + while (tbl->refs_control != 0x80000000) continue; + break; + } + } + + tbl->refs_resize_table = tbl->refs_table; + tbl->refs_resize_size = tbl->refs_size; + tbl->refs_resize_part = 0; + tbl->refs_resize_done = 0; + + // calculate new size + size_t new_size = tbl->refs_size; + size_t count = refs_count(tbl); + if (count*4 > tbl->refs_size) new_size *= 2; + + // allocate new table + uint64_t *new_table = (uint64_t*)mmap(0, new_size * sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (new_table == (uint64_t*)-1) { + fprintf(stderr, "refs: Unable to allocate memory: %s!\n", strerror(errno)); + exit(1); + } + + // set new data and go + tbl->refs_table = new_table; + tbl->refs_size = new_size; + compiler_barrier(); + tbl->refs_control = 0x40000000; + + // until all parts are done, rehash blocks + while (tbl->refs_resize_done != tbl->refs_resize_size/128) refs_resize_help(tbl); + + // done! 
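+ // Clearing refs_control below re-opens the table, releasing any refs_modify callers
+ // that are still spinning in refs_enter.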
+ compiler_barrier(); + tbl->refs_control = 0; + + // unmap old table + munmap(tbl->refs_resize_table, tbl->refs_resize_size * sizeof(uint64_t)); +} + +/* Enter refs_modify */ +static inline void +refs_enter(refs_table_t *tbl) +{ + for (;;) { + uint32_t v = tbl->refs_control; + if (v & 0xf0000000) { + while (refs_resize_help(tbl)) continue; + } else { + if (cas(&tbl->refs_control, v, v+1)) return; + } + } +} + +/* Leave refs_modify */ +static inline void +refs_leave(refs_table_t *tbl) +{ + for (;;) { + uint32_t v = tbl->refs_control; + if (cas(&tbl->refs_control, v, v-1)) return; + } +} + +static inline int +refs_modify(refs_table_t *tbl, const uint64_t a, const int dir) +{ + volatile uint64_t *bucket; + volatile uint64_t *ts_bucket; + uint64_t v, new_v; + int res, i; + + refs_enter(tbl); + +ref_retry: + bucket = tbl->refs_table + (fnv_hash(a) & (tbl->refs_size - 1)); + ts_bucket = NULL; // tombstone + i = 128; // try 128 times linear probing + + while (i--) { +ref_restart: + v = *bucket; + if (v == refs_ts) { + if (ts_bucket == NULL) ts_bucket = bucket; + } else if (v == 0) { + // not found + res = 0; + if (dir < 0) goto ref_exit; + if (ts_bucket != NULL) { + bucket = ts_bucket; + ts_bucket = NULL; + v = refs_ts; + } + new_v = a | (1ULL << 40); + goto ref_mod; + } else if ((v & 0x000000ffffffffff) == a) { + // found + res = 1; + uint64_t count = v >> 40; + if (count == 0x7fffff) goto ref_exit; + count += dir; + if (count == 0) new_v = refs_ts; + else new_v = a | (count << 40); + goto ref_mod; + } + + if (++bucket == tbl->refs_table + tbl->refs_size) bucket = tbl->refs_table; + } + + // not found after linear probing + if (dir < 0) { + res = 0; + goto ref_exit; + } else if (ts_bucket != NULL) { + bucket = ts_bucket; + ts_bucket = NULL; + v = refs_ts; + new_v = a | (1ULL << 40); + if (!cas(bucket, v, new_v)) goto ref_retry; + res = 1; + goto ref_exit; + } else { + // hash table full + refs_leave(tbl); + refs_resize(tbl); + return refs_modify(tbl, a, dir); + } + +ref_mod: + if (!cas(bucket, v, new_v)) goto ref_restart; + +ref_exit: + refs_leave(tbl); + return res; +} + +void +refs_up(refs_table_t *tbl, uint64_t a) +{ + refs_modify(tbl, a, 1); +} + +void +refs_down(refs_table_t *tbl, uint64_t a) +{ +#ifdef NDEBUG + refs_modify(tbl, a, -1); +#else + int res = refs_modify(tbl, a, -1); + assert(res != 0); +#endif +} + +uint64_t* +refs_iter(refs_table_t *tbl, size_t first, size_t end) +{ + // assert(first < tbl->refs_size); + // assert(end <= tbl->refs_size); + + uint64_t *bucket = tbl->refs_table + first; + while (bucket != tbl->refs_table + end) { + if (*bucket != 0 && *bucket != refs_ts) return bucket; + bucket++; + } + return NULL; +} + +uint64_t +refs_next(refs_table_t *tbl, uint64_t **_bucket, size_t end) +{ + uint64_t *bucket = *_bucket; + // assert(bucket != NULL); + // assert(end <= tbl->refs_size); + uint64_t result = *bucket & 0x000000ffffffffff; + bucket++; + while (bucket != tbl->refs_table + end) { + if (*bucket != 0 && *bucket != refs_ts) { + *_bucket = bucket; + return result; + } + bucket++; + } + *_bucket = NULL; + return result; +} + +void +refs_create(refs_table_t *tbl, size_t _refs_size) +{ + if (__builtin_popcountll(_refs_size) != 1) { + fprintf(stderr, "refs: Table size must be a power of 2!\n"); + exit(1); + } + + tbl->refs_size = _refs_size; + tbl->refs_table = (uint64_t*)mmap(0, tbl->refs_size * sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (tbl->refs_table == (uint64_t*)-1) { + fprintf(stderr, "refs: Unable to allocate memory: %s!\n", 
strerror(errno)); + exit(1); + } +} + +void +refs_free(refs_table_t *tbl) +{ + munmap(tbl->refs_table, tbl->refs_size * sizeof(uint64_t)); +} + +/** + * Simple implementation of a 64-bit resizable hash-table + * No idea if this is scalable... :( but it seems thread-safe + */ + +// Count number of unique entries (not number of references) +size_t +protect_count(refs_table_t *tbl) +{ + size_t count = 0; + uint64_t *bucket = tbl->refs_table; + uint64_t * const end = bucket + tbl->refs_size; + while (bucket != end) { + if (*bucket != 0 && *bucket != refs_ts) count++; + bucket++; + } + return count; +} + +static inline void +protect_rehash(refs_table_t *tbl, uint64_t v) +{ + if (v == 0) return; // do not rehash empty value + if (v == refs_ts) return; // do not rehash tombstone + + volatile uint64_t *bucket = tbl->refs_table + (fnv_hash(v) % tbl->refs_size); + uint64_t * const end = tbl->refs_table + tbl->refs_size; + + int i = 128; // try 128 times linear probing + while (i--) { + if (*bucket == 0 && cas(bucket, 0, v)) return; + if (++bucket == end) bucket = tbl->refs_table; + } + + assert(0); // whoops! +} + +/** + * Called internally to assist resize operations + * Returns 1 for retry, 0 for done + */ +static int +protect_resize_help(refs_table_t *tbl) +{ + if (0 == (tbl->refs_control & 0xf0000000)) return 0; // no resize in progress (anymore) + if (tbl->refs_control & 0x80000000) return 1; // still waiting for preparation + if (tbl->refs_resize_part >= tbl->refs_resize_size / 128) return 1; // all parts claimed + size_t part = __sync_fetch_and_add(&tbl->refs_resize_part, 1); + if (part >= tbl->refs_resize_size/128) return 1; // all parts claimed + + // rehash all + int i; + volatile uint64_t *bucket = tbl->refs_resize_table + part * 128; + for (i=0; i<128; i++) protect_rehash(tbl, *bucket++); + + __sync_fetch_and_add(&tbl->refs_resize_done, 1); + return 1; +} + +static void +protect_resize(refs_table_t *tbl) +{ + while (1) { + uint32_t v = tbl->refs_control; + if (v & 0xf0000000) { + // someone else started resize + // just rehash blocks until done + while (protect_resize_help(tbl)) continue; + return; + } + if (cas(&tbl->refs_control, v, 0x80000000 | v)) { + // wait until all users gone + while (tbl->refs_control != 0x80000000) continue; + break; + } + } + + tbl->refs_resize_table = tbl->refs_table; + tbl->refs_resize_size = tbl->refs_size; + tbl->refs_resize_part = 0; + tbl->refs_resize_done = 0; + + // calculate new size + size_t new_size = tbl->refs_size; + size_t count = refs_count(tbl); + if (count*4 > tbl->refs_size) new_size *= 2; + + // allocate new table + uint64_t *new_table = (uint64_t*)mmap(0, new_size * sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (new_table == (uint64_t*)-1) { + fprintf(stderr, "refs: Unable to allocate memory: %s!\n", strerror(errno)); + exit(1); + } + + // set new data and go + tbl->refs_table = new_table; + tbl->refs_size = new_size; + compiler_barrier(); + tbl->refs_control = 0x40000000; + + // until all parts are done, rehash blocks + while (tbl->refs_resize_done < tbl->refs_resize_size/128) protect_resize_help(tbl); + + // done! 
+ compiler_barrier(); + tbl->refs_control = 0; + + // unmap old table + munmap(tbl->refs_resize_table, tbl->refs_resize_size * sizeof(uint64_t)); +} + +static inline void +protect_enter(refs_table_t *tbl) +{ + for (;;) { + uint32_t v = tbl->refs_control; + if (v & 0xf0000000) { + while (protect_resize_help(tbl)) continue; + } else { + if (cas(&tbl->refs_control, v, v+1)) return; + } + } +} + +static inline void +protect_leave(refs_table_t *tbl) +{ + for (;;) { + uint32_t v = tbl->refs_control; + if (cas(&tbl->refs_control, v, v-1)) return; + } +} + +void +protect_up(refs_table_t *tbl, uint64_t a) +{ + volatile uint64_t *bucket; + volatile uint64_t *ts_bucket; + uint64_t v; + int i; + + protect_enter(tbl); + +ref_retry: + bucket = tbl->refs_table + (fnv_hash(a) & (tbl->refs_size - 1)); + ts_bucket = NULL; // tombstone + i = 128; // try 128 times linear probing + + while (i--) { +ref_restart: + v = *bucket; + if (v == refs_ts) { + if (ts_bucket == NULL) ts_bucket = bucket; + } else if (v == 0) { + // go go go + if (ts_bucket != NULL) { + if (cas(ts_bucket, refs_ts, a)) { + protect_leave(tbl); + return; + } else { + goto ref_retry; + } + } else { + if (cas(bucket, 0, a)) { + protect_leave(tbl); + return; + } else { + goto ref_restart; + } + } + } + + if (++bucket == tbl->refs_table + tbl->refs_size) bucket = tbl->refs_table; + } + + // not found after linear probing + if (ts_bucket != NULL) { + if (cas(ts_bucket, refs_ts, a)) { + protect_leave(tbl); + return; + } else { + goto ref_retry; + } + } else { + // hash table full + protect_leave(tbl); + protect_resize(tbl); + protect_enter(tbl); + goto ref_retry; + } +} + +void +protect_down(refs_table_t *tbl, uint64_t a) +{ + volatile uint64_t *bucket; + protect_enter(tbl); + + bucket = tbl->refs_table + (fnv_hash(a) & (tbl->refs_size - 1)); + int i = 128; // try 128 times linear probing + + while (i--) { + if (*bucket == a) { + *bucket = refs_ts; + protect_leave(tbl); + return; + } + if (++bucket == tbl->refs_table + tbl->refs_size) bucket = tbl->refs_table; + } + + // not found after linear probing + assert(0); +} + +uint64_t* +protect_iter(refs_table_t *tbl, size_t first, size_t end) +{ + // assert(first < tbl->refs_size); + // assert(end <= tbl->refs_size); + + uint64_t *bucket = tbl->refs_table + first; + while (bucket != tbl->refs_table + end) { + if (*bucket != 0 && *bucket != refs_ts) return bucket; + bucket++; + } + return NULL; +} + +uint64_t +protect_next(refs_table_t *tbl, uint64_t **_bucket, size_t end) +{ + uint64_t *bucket = *_bucket; + // assert(bucket != NULL); + // assert(end <= tbl->refs_size); + uint64_t result = *bucket; + bucket++; + while (bucket != tbl->refs_table + end) { + if (*bucket != 0 && *bucket != refs_ts) { + *_bucket = bucket; + return result; + } + bucket++; + } + *_bucket = NULL; + return result; +} + +void +protect_create(refs_table_t *tbl, size_t _refs_size) +{ + if (__builtin_popcountll(_refs_size) != 1) { + fprintf(stderr, "refs: Table size must be a power of 2!\n"); + exit(1); + } + + tbl->refs_size = _refs_size; + tbl->refs_table = (uint64_t*)mmap(0, tbl->refs_size * sizeof(uint64_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); + if (tbl->refs_table == (uint64_t*)-1) { + fprintf(stderr, "refs: Unable to allocate memory: %s!\n", strerror(errno)); + exit(1); + } +} + +void +protect_free(refs_table_t *tbl) +{ + munmap(tbl->refs_table, tbl->refs_size * sizeof(uint64_t)); + tbl->refs_table = 0; +} diff --git a/src/refs.h b/src/refs.h new file mode 100644 index 000000000..928948c90 --- /dev/null +++ 
b/src/refs.h @@ -0,0 +1,77 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include // for uint32_t etc + +#ifndef REFS_INLINE_H +#define REFS_INLINE_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * Implementation of external references + * Based on a hash table for 40-bit non-null values, linear probing + * Use tombstones for deleting, higher bits for reference count + */ +typedef struct +{ + uint64_t *refs_table; // table itself + size_t refs_size; // number of buckets + + /* helpers during resize operation */ + volatile uint32_t refs_control; // control field + uint64_t *refs_resize_table; // previous table + size_t refs_resize_size; // size of previous table + size_t refs_resize_part; // which part is next + size_t refs_resize_done; // how many parts are done +} refs_table_t; + +// Count number of unique entries (not number of references) +size_t refs_count(refs_table_t *tbl); + +// Increase or decrease reference to 40-bit value a +// Will fail (assertion) if more down than up are called for a +void refs_up(refs_table_t *tbl, uint64_t a); +void refs_down(refs_table_t *tbl, uint64_t a); + +// Return a bucket or NULL to start iterating +uint64_t *refs_iter(refs_table_t *tbl, size_t first, size_t end); + +// Continue iterating, set bucket to next bucket or NULL +uint64_t refs_next(refs_table_t *tbl, uint64_t **bucket, size_t end); + +// User must supply a pointer, refs_create and refs_free handle initialization/destruction +void refs_create(refs_table_t *tbl, size_t _refs_size); +void refs_free(refs_table_t *tbl); + +// The same, but now for 64-bit values ("protect pointers") +size_t protect_count(refs_table_t *tbl); +void protect_up(refs_table_t *tbl, uint64_t a); +void protect_down(refs_table_t *tbl, uint64_t a); +uint64_t *protect_iter(refs_table_t *tbl, size_t first, size_t end); +uint64_t protect_next(refs_table_t *tbl, uint64_t **bucket, size_t end); +void protect_create(refs_table_t *tbl, size_t _refs_size); +void protect_free(refs_table_t *tbl); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + + +#endif diff --git a/src/sha2.c b/src/sha2.c new file mode 100644 index 000000000..db17dbb2d --- /dev/null +++ b/src/sha2.c @@ -0,0 +1,1067 @@ +/* + * FILE: sha2.c + * AUTHOR: Aaron D. Gifford - http://www.aarongifford.com/ + * + * Copyright (c) 2000-2001, Aaron D. Gifford + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the copyright holder nor the names of contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include /* memcpy()/memset() or bcopy()/bzero() */ +#include /* assert() */ +#include "sha2.h" + +/* + * ASSERT NOTE: + * Some sanity checking code is included using assert(). On my FreeBSD + * system, this additional code can be removed by compiling with NDEBUG + * defined. Check your own systems manpage on assert() to see how to + * compile WITHOUT the sanity checking code on your system. + * + * UNROLLED TRANSFORM LOOP NOTE: + * You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform + * loop version for the hash transform rounds (defined using macros + * later in this file). Either define on the command line, for example: + * + * cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c + * + * or define below: + * + * #define SHA2_UNROLL_TRANSFORM + * + */ + + +/*** SHA-256/384/512 Machine Architecture Definitions *****************/ +/* + * BYTE_ORDER NOTE: + * + * Please make sure that your system defines BYTE_ORDER. If your + * architecture is little-endian, make sure it also defines + * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are + * equivilent. + * + * If your system does not define the above, then you can do so by + * hand like this: + * + * #define LITTLE_ENDIAN 1234 + * #define BIG_ENDIAN 4321 + * + * And for little-endian machines, add: + * + * #define BYTE_ORDER LITTLE_ENDIAN + * + * Or for big-endian machines: + * + * #define BYTE_ORDER BIG_ENDIAN + * + * The FreeBSD machine this was written on defines BYTE_ORDER + * appropriately by including (which in turn includes + * where the appropriate definitions are actually + * made). + */ +#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN) +#error Define BYTE_ORDER to be equal to either LITTLE_ENDIAN or BIG_ENDIAN +#endif + +/* + * Define the followingsha2_* types to types of the correct length on + * the native archtecture. Most BSD systems and Linux define u_intXX_t + * types. Machines with very recent ANSI C headers, can use the + * uintXX_t definintions from inttypes.h by defining SHA2_USE_INTTYPES_H + * during compile or in the sha.h header file. + * + * Machines that support neither u_intXX_t nor inttypes.h's uintXX_t + * will need to define these three typedefs below (and the appropriate + * ones in sha.h too) by hand according to their system architecture. + * + * Thank you, Jun-ichiro itojun Hagino, for suggesting using u_intXX_t + * types and pointing out recent ANSI C support for uintXX_t in inttypes.h. 
+ */ +#ifdef SHA2_USE_INTTYPES_H + +typedef uint8_t sha2_byte; /* Exactly 1 byte */ +typedef uint32_t sha2_word32; /* Exactly 4 bytes */ +typedef uint64_t sha2_word64; /* Exactly 8 bytes */ + +#else /* SHA2_USE_INTTYPES_H */ + +typedef u_int8_t sha2_byte; /* Exactly 1 byte */ +typedef u_int32_t sha2_word32; /* Exactly 4 bytes */ +typedef u_int64_t sha2_word64; /* Exactly 8 bytes */ + +#endif /* SHA2_USE_INTTYPES_H */ + + +/*** SHA-256/384/512 Various Length Definitions ***********************/ +/* NOTE: Most of these are in sha2.h */ +#define SHA256_SHORT_BLOCK_LENGTH (SHA256_BLOCK_LENGTH - 8) +#define SHA384_SHORT_BLOCK_LENGTH (SHA384_BLOCK_LENGTH - 16) +#define SHA512_SHORT_BLOCK_LENGTH (SHA512_BLOCK_LENGTH - 16) + + +/*** ENDIAN REVERSAL MACROS *******************************************/ +#if BYTE_ORDER == LITTLE_ENDIAN +#define REVERSE32(w,x) { \ + sha2_word32 tmp = (w); \ + tmp = (tmp >> 16) | (tmp << 16); \ + (x) = ((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8); \ +} +#define REVERSE64(w,x) { \ + sha2_word64 tmp = (w); \ + tmp = (tmp >> 32) | (tmp << 32); \ + tmp = ((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \ + ((tmp & 0x00ff00ff00ff00ffULL) << 8); \ + (x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \ + ((tmp & 0x0000ffff0000ffffULL) << 16); \ +} +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + +/* + * Macro for incrementally adding the unsigned 64-bit integer n to the + * unsigned 128-bit integer (represented using a two-element array of + * 64-bit words): + */ +#define ADDINC128(w,n) { \ + (w)[0] += (sha2_word64)(n); \ + if ((w)[0] < (n)) { \ + (w)[1]++; \ + } \ +} + +/* + * Macros for copying blocks of memory and for zeroing out ranges + * of memory. Using these macros makes it easy to switch from + * using memset()/memcpy() and using bzero()/bcopy(). + * + * Please define either SHA2_USE_MEMSET_MEMCPY or define + * SHA2_USE_BZERO_BCOPY depending on which function set you + * choose to use: + */ +#if !defined(SHA2_USE_MEMSET_MEMCPY) && !defined(SHA2_USE_BZERO_BCOPY) +/* Default to memset()/memcpy() if no option is specified */ +#define SHA2_USE_MEMSET_MEMCPY 1 +#endif +#if defined(SHA2_USE_MEMSET_MEMCPY) && defined(SHA2_USE_BZERO_BCOPY) +/* Abort with an error if BOTH options are defined */ +#error Define either SHA2_USE_MEMSET_MEMCPY or SHA2_USE_BZERO_BCOPY, not both! +#endif + +#ifdef SHA2_USE_MEMSET_MEMCPY +#define MEMSET_BZERO(p,l) memset((p), 0, (l)) +#define MEMCPY_BCOPY(d,s,l) memcpy((d), (s), (l)) +#endif +#ifdef SHA2_USE_BZERO_BCOPY +#define MEMSET_BZERO(p,l) bzero((p), (l)) +#define MEMCPY_BCOPY(d,s,l) bcopy((s), (d), (l)) +#endif + + +/*** THE SIX LOGICAL FUNCTIONS ****************************************/ +/* + * Bit shifting and rotation (used by the six SHA-XYZ logical functions: + * + * NOTE: The naming of R and S appears backwards here (R is a SHIFT and + * S is a ROTATION) because the SHA-256/384/512 description document + * (see http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf) uses this + * same "backwards" definition. 
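+ * For example, S32(7,x) defined below rotates x right by 7 bits, whereas R(7,x) is a
+ * plain right shift by 7.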
+ */ +/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */ +#define R(b,x) ((x) >> (b)) +/* 32-bit Rotate-right (used in SHA-256): */ +#define S32(b,x) (((x) >> (b)) | ((x) << (32 - (b)))) +/* 64-bit Rotate-right (used in SHA-384 and SHA-512): */ +#define S64(b,x) (((x) >> (b)) | ((x) << (64 - (b)))) + +/* Two of six logical functions used in SHA-256, SHA-384, and SHA-512: */ +#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z))) +#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z))) + +/* Four of six logical functions used in SHA-256: */ +#define Sigma0_256(x) (S32(2, (x)) ^ S32(13, (x)) ^ S32(22, (x))) +#define Sigma1_256(x) (S32(6, (x)) ^ S32(11, (x)) ^ S32(25, (x))) +#define sigma0_256(x) (S32(7, (x)) ^ S32(18, (x)) ^ R(3 , (x))) +#define sigma1_256(x) (S32(17, (x)) ^ S32(19, (x)) ^ R(10, (x))) + +/* Four of six logical functions used in SHA-384 and SHA-512: */ +#define Sigma0_512(x) (S64(28, (x)) ^ S64(34, (x)) ^ S64(39, (x))) +#define Sigma1_512(x) (S64(14, (x)) ^ S64(18, (x)) ^ S64(41, (x))) +#define sigma0_512(x) (S64( 1, (x)) ^ S64( 8, (x)) ^ R( 7, (x))) +#define sigma1_512(x) (S64(19, (x)) ^ S64(61, (x)) ^ R( 6, (x))) + +/*** INTERNAL FUNCTION PROTOTYPES *************************************/ +/* NOTE: These should not be accessed directly from outside this + * library -- they are intended for private internal visibility/use + * only. + */ +void SHA512_Last(SHA512_CTX*); +void SHA256_Transform(SHA256_CTX*, const sha2_word32*); +void SHA512_Transform(SHA512_CTX*, const sha2_word64*); + + +/*** SHA-XYZ INITIAL HASH VALUES AND CONSTANTS ************************/ +/* Hash constant words K for SHA-256: */ +static const sha2_word32 K256[64] = { + 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, + 0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, + 0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL, + 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL, + 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL, + 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, + 0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, + 0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL, + 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL, + 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL, + 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, + 0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, + 0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL, + 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL, + 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL, + 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL +}; + +/* Initial hash value H for SHA-256: */ +static const sha2_word32 sha256_initial_hash_value[8] = { + 0x6a09e667UL, + 0xbb67ae85UL, + 0x3c6ef372UL, + 0xa54ff53aUL, + 0x510e527fUL, + 0x9b05688cUL, + 0x1f83d9abUL, + 0x5be0cd19UL +}; + +/* Hash constant words K for SHA-384 and SHA-512: */ +static const sha2_word64 K512[80] = { + 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL, + 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL, + 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL, + 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL, + 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL, + 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL, + 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL, + 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL, + 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL, + 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL, + 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL, + 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL, + 0x983e5152ee66dfabULL, 
0xa831c66d2db43210ULL, + 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL, + 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL, + 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL, + 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL, + 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL, + 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL, + 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL, + 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL, + 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL, + 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL, + 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL, + 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL, + 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL, + 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL, + 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL, + 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL, + 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL, + 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL, + 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL, + 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL, + 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL, + 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL, + 0x113f9804bef90daeULL, 0x1b710b35131c471bULL, + 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL, + 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL, + 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL, + 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL +}; + +/* Initial hash value H for SHA-384 */ +static const sha2_word64 sha384_initial_hash_value[8] = { + 0xcbbb9d5dc1059ed8ULL, + 0x629a292a367cd507ULL, + 0x9159015a3070dd17ULL, + 0x152fecd8f70e5939ULL, + 0x67332667ffc00b31ULL, + 0x8eb44a8768581511ULL, + 0xdb0c2e0d64f98fa7ULL, + 0x47b5481dbefa4fa4ULL +}; + +/* Initial hash value H for SHA-512 */ +static const sha2_word64 sha512_initial_hash_value[8] = { + 0x6a09e667f3bcc908ULL, + 0xbb67ae8584caa73bULL, + 0x3c6ef372fe94f82bULL, + 0xa54ff53a5f1d36f1ULL, + 0x510e527fade682d1ULL, + 0x9b05688c2b3e6c1fULL, + 0x1f83d9abfb41bd6bULL, + 0x5be0cd19137e2179ULL +}; + +/* + * Constant used by SHA256/384/512_End() functions for converting the + * digest to a readable hexadecimal character string: + */ +static const char *sha2_hex_digits = "0123456789abcdef"; + + +/*** SHA-256: *********************************************************/ +void SHA256_Init(SHA256_CTX* context) { + if (context == (SHA256_CTX*)0) { + return; + } + MEMCPY_BCOPY(context->state, sha256_initial_hash_value, SHA256_DIGEST_LENGTH); + MEMSET_BZERO(context->buffer, SHA256_BLOCK_LENGTH); + context->bitcount = 0; +} + +#ifdef SHA2_UNROLL_TRANSFORM + +/* Unrolled SHA-256 round macros: */ + +#if BYTE_ORDER == LITTLE_ENDIAN + +#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \ + REVERSE32(*data++, W256[j]); \ + T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \ + K256[j] + W256[j]; \ + (d) += T1; \ + (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \ + j++ + + +#else /* BYTE_ORDER == LITTLE_ENDIAN */ + +#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \ + T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \ + K256[j] + (W256[j] = *data++); \ + (d) += T1; \ + (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \ + j++ + +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + +#define ROUND256(a,b,c,d,e,f,g,h) \ + s0 = W256[(j+1)&0x0f]; \ + s0 = sigma0_256(s0); \ + s1 = W256[(j+14)&0x0f]; \ + s1 = sigma1_256(s1); \ + T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \ + (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \ + (d) += T1; \ + (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \ + j++ + +void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) { + sha2_word32 a, b, c, d, e, f, g, h, s0, s1; + sha2_word32 T1, *W256; 
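+ // W256 aliases context->buffer: the 16-word message schedule is kept in place and
+ // updated by the ROUND256 macro.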
+ int j; + + W256 = (sha2_word32*)context->buffer; + + /* Initialize registers with the prev. intermediate value */ + a = context->state[0]; + b = context->state[1]; + c = context->state[2]; + d = context->state[3]; + e = context->state[4]; + f = context->state[5]; + g = context->state[6]; + h = context->state[7]; + + j = 0; + do { + /* Rounds 0 to 15 (unrolled): */ + ROUND256_0_TO_15(a,b,c,d,e,f,g,h); + ROUND256_0_TO_15(h,a,b,c,d,e,f,g); + ROUND256_0_TO_15(g,h,a,b,c,d,e,f); + ROUND256_0_TO_15(f,g,h,a,b,c,d,e); + ROUND256_0_TO_15(e,f,g,h,a,b,c,d); + ROUND256_0_TO_15(d,e,f,g,h,a,b,c); + ROUND256_0_TO_15(c,d,e,f,g,h,a,b); + ROUND256_0_TO_15(b,c,d,e,f,g,h,a); + } while (j < 16); + + /* Now for the remaining rounds to 64: */ + do { + ROUND256(a,b,c,d,e,f,g,h); + ROUND256(h,a,b,c,d,e,f,g); + ROUND256(g,h,a,b,c,d,e,f); + ROUND256(f,g,h,a,b,c,d,e); + ROUND256(e,f,g,h,a,b,c,d); + ROUND256(d,e,f,g,h,a,b,c); + ROUND256(c,d,e,f,g,h,a,b); + ROUND256(b,c,d,e,f,g,h,a); + } while (j < 64); + + /* Compute the current intermediate hash value */ + context->state[0] += a; + context->state[1] += b; + context->state[2] += c; + context->state[3] += d; + context->state[4] += e; + context->state[5] += f; + context->state[6] += g; + context->state[7] += h; + + /* Clean up */ + a = b = c = d = e = f = g = h = T1 = 0; +} + +#else /* SHA2_UNROLL_TRANSFORM */ + +void SHA256_Transform(SHA256_CTX* context, const sha2_word32* data) { + sha2_word32 a, b, c, d, e, f, g, h, s0, s1; + sha2_word32 T1, T2, *W256; + int j; + + W256 = (sha2_word32*)context->buffer; + + /* Initialize registers with the prev. intermediate value */ + a = context->state[0]; + b = context->state[1]; + c = context->state[2]; + d = context->state[3]; + e = context->state[4]; + f = context->state[5]; + g = context->state[6]; + h = context->state[7]; + + j = 0; + do { +#if BYTE_ORDER == LITTLE_ENDIAN + /* Copy data while converting to host byte order */ + REVERSE32(*data++,W256[j]); + /* Apply the SHA-256 compression function to update a..h */ + T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j]; +#else /* BYTE_ORDER == LITTLE_ENDIAN */ + /* Apply the SHA-256 compression function to update a..h with copy */ + T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++); +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + T2 = Sigma0_256(a) + Maj(a, b, c); + h = g; + g = f; + f = e; + e = d + T1; + d = c; + c = b; + b = a; + a = T1 + T2; + + j++; + } while (j < 16); + + do { + /* Part of the message block expansion: */ + s0 = W256[(j+1)&0x0f]; + s0 = sigma0_256(s0); + s1 = W256[(j+14)&0x0f]; + s1 = sigma1_256(s1); + + /* Apply the SHA-256 compression function to update a..h */ + T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + + (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); + T2 = Sigma0_256(a) + Maj(a, b, c); + h = g; + g = f; + f = e; + e = d + T1; + d = c; + c = b; + b = a; + a = T1 + T2; + + j++; + } while (j < 64); + + /* Compute the current intermediate hash value */ + context->state[0] += a; + context->state[1] += b; + context->state[2] += c; + context->state[3] += d; + context->state[4] += e; + context->state[5] += f; + context->state[6] += g; + context->state[7] += h; + + /* Clean up */ + a = b = c = d = e = f = g = h = T1 = T2 = 0; +} + +#endif /* SHA2_UNROLL_TRANSFORM */ + +void SHA256_Update(SHA256_CTX* context, const sha2_byte *data, size_t len) { + unsigned int freespace, usedspace; + + if (len == 0) { + /* Calling with no data is valid - we do nothing */ + return; + } + + /* Sanity check: */ + assert(context != (SHA256_CTX*)0 && data 
!= (sha2_byte*)0); + + usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH; + if (usedspace > 0) { + /* Calculate how much free space is available in the buffer */ + freespace = SHA256_BLOCK_LENGTH - usedspace; + + if (len >= freespace) { + /* Fill the buffer completely and process it */ + MEMCPY_BCOPY(&context->buffer[usedspace], data, freespace); + context->bitcount += freespace << 3; + len -= freespace; + data += freespace; + SHA256_Transform(context, (sha2_word32*)context->buffer); + } else { + /* The buffer is not yet full */ + MEMCPY_BCOPY(&context->buffer[usedspace], data, len); + context->bitcount += len << 3; + /* Clean up: */ + usedspace = freespace = 0; + return; + } + } + while (len >= SHA256_BLOCK_LENGTH) { + /* Process as many complete blocks as we can */ + SHA256_Transform(context, (sha2_word32*)data); + context->bitcount += SHA256_BLOCK_LENGTH << 3; + len -= SHA256_BLOCK_LENGTH; + data += SHA256_BLOCK_LENGTH; + } + if (len > 0) { + /* There's left-overs, so save 'em */ + MEMCPY_BCOPY(context->buffer, data, len); + context->bitcount += len << 3; + } + /* Clean up: */ + usedspace = freespace = 0; +} + +void SHA256_Final(sha2_byte digest[], SHA256_CTX* context) { + sha2_word32 *d = (sha2_word32*)digest; + unsigned int usedspace; + + /* Sanity check: */ + assert(context != (SHA256_CTX*)0); + + /* If no digest buffer is passed, we don't bother doing this: */ + if (digest != (sha2_byte*)0) { + usedspace = (context->bitcount >> 3) % SHA256_BLOCK_LENGTH; +#if BYTE_ORDER == LITTLE_ENDIAN + /* Convert FROM host byte order */ + REVERSE64(context->bitcount,context->bitcount); +#endif + if (usedspace > 0) { + /* Begin padding with a 1 bit: */ + context->buffer[usedspace++] = 0x80; + + if (usedspace <= SHA256_SHORT_BLOCK_LENGTH) { + /* Set-up for the last transform: */ + MEMSET_BZERO(&context->buffer[usedspace], SHA256_SHORT_BLOCK_LENGTH - usedspace); + } else { + if (usedspace < SHA256_BLOCK_LENGTH) { + MEMSET_BZERO(&context->buffer[usedspace], SHA256_BLOCK_LENGTH - usedspace); + } + /* Do second-to-last transform: */ + SHA256_Transform(context, (sha2_word32*)context->buffer); + + /* And set-up for the last transform: */ + MEMSET_BZERO(context->buffer, SHA256_SHORT_BLOCK_LENGTH); + } + } else { + /* Set-up for the last transform: */ + MEMSET_BZERO(context->buffer, SHA256_SHORT_BLOCK_LENGTH); + + /* Begin padding with a 1 bit: */ + *context->buffer = 0x80; + } + /* Set the bit count: */ + sha2_word64* ptr = (sha2_word64*)(&context->buffer[SHA256_SHORT_BLOCK_LENGTH]); + *ptr = context->bitcount; + + /* Final transform: */ + SHA256_Transform(context, (sha2_word32*)context->buffer); + +#if BYTE_ORDER == LITTLE_ENDIAN + { + /* Convert TO host byte order */ + int j; + for (j = 0; j < 8; j++) { + REVERSE32(context->state[j],context->state[j]); + *d++ = context->state[j]; + } + } +#else + MEMCPY_BCOPY(d, context->state, SHA256_DIGEST_LENGTH); +#endif + } + + /* Clean up state data: */ + MEMSET_BZERO(context, sizeof(SHA256_CTX)); + usedspace = 0; +} + +char *SHA256_End(SHA256_CTX* context, char buffer[]) { + sha2_byte digest[SHA256_DIGEST_LENGTH], *d = digest; + int i; + + /* Sanity check: */ + assert(context != (SHA256_CTX*)0); + + if (buffer != (char*)0) { + SHA256_Final(digest, context); + + for (i = 0; i < SHA256_DIGEST_LENGTH; i++) { + *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4]; + *buffer++ = sha2_hex_digits[*d & 0x0f]; + d++; + } + *buffer = (char)0; + } else { + MEMSET_BZERO(context, sizeof(SHA256_CTX)); + } + MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH); + return buffer; +} + 
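The incremental interface used above (SHA256_Init, SHA256_Update, SHA256_End) composes as in the following minimal sketch; it assumes the code is compiled with -DSHA2_USE_INTTYPES_H (so sha2_byte is uint8_t) and that sha2.h is on the include path. SHA256_Data, defined next, wraps exactly this one-shot sequence.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "sha2.h"

/* Hash two buffers fed in separately and print the hex digest. */
static void print_sha256_of_two_parts(const char *part1, const char *part2)
{
    SHA256_CTX ctx;
    char hex[SHA256_DIGEST_STRING_LENGTH];   /* 64 hex digits plus '\0' */

    SHA256_Init(&ctx);
    SHA256_Update(&ctx, (const uint8_t *)part1, strlen(part1));
    SHA256_Update(&ctx, (const uint8_t *)part2, strlen(part2));
    SHA256_End(&ctx, hex);                   /* finalizes and wipes the context */
    printf("%s\n", hex);
}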
+char* SHA256_Data(const sha2_byte* data, size_t len, char digest[SHA256_DIGEST_STRING_LENGTH]) { + SHA256_CTX context; + + SHA256_Init(&context); + SHA256_Update(&context, data, len); + return SHA256_End(&context, digest); +} + + +/*** SHA-512: *********************************************************/ +void SHA512_Init(SHA512_CTX* context) { + if (context == (SHA512_CTX*)0) { + return; + } + MEMCPY_BCOPY(context->state, sha512_initial_hash_value, SHA512_DIGEST_LENGTH); + MEMSET_BZERO(context->buffer, SHA512_BLOCK_LENGTH); + context->bitcount[0] = context->bitcount[1] = 0; +} + +#ifdef SHA2_UNROLL_TRANSFORM + +/* Unrolled SHA-512 round macros: */ +#if BYTE_ORDER == LITTLE_ENDIAN + +#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \ + REVERSE64(*data++, W512[j]); \ + T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \ + K512[j] + W512[j]; \ + (d) += T1, \ + (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \ + j++ + + +#else /* BYTE_ORDER == LITTLE_ENDIAN */ + +#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \ + T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \ + K512[j] + (W512[j] = *data++); \ + (d) += T1; \ + (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \ + j++ + +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + +#define ROUND512(a,b,c,d,e,f,g,h) \ + s0 = W512[(j+1)&0x0f]; \ + s0 = sigma0_512(s0); \ + s1 = W512[(j+14)&0x0f]; \ + s1 = sigma1_512(s1); \ + T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \ + (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \ + (d) += T1; \ + (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \ + j++ + +void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) { + sha2_word64 a, b, c, d, e, f, g, h, s0, s1; + sha2_word64 T1, *W512 = (sha2_word64*)context->buffer; + int j; + + /* Initialize registers with the prev. intermediate value */ + a = context->state[0]; + b = context->state[1]; + c = context->state[2]; + d = context->state[3]; + e = context->state[4]; + f = context->state[5]; + g = context->state[6]; + h = context->state[7]; + + j = 0; + do { + ROUND512_0_TO_15(a,b,c,d,e,f,g,h); + ROUND512_0_TO_15(h,a,b,c,d,e,f,g); + ROUND512_0_TO_15(g,h,a,b,c,d,e,f); + ROUND512_0_TO_15(f,g,h,a,b,c,d,e); + ROUND512_0_TO_15(e,f,g,h,a,b,c,d); + ROUND512_0_TO_15(d,e,f,g,h,a,b,c); + ROUND512_0_TO_15(c,d,e,f,g,h,a,b); + ROUND512_0_TO_15(b,c,d,e,f,g,h,a); + } while (j < 16); + + /* Now for the remaining rounds up to 79: */ + do { + ROUND512(a,b,c,d,e,f,g,h); + ROUND512(h,a,b,c,d,e,f,g); + ROUND512(g,h,a,b,c,d,e,f); + ROUND512(f,g,h,a,b,c,d,e); + ROUND512(e,f,g,h,a,b,c,d); + ROUND512(d,e,f,g,h,a,b,c); + ROUND512(c,d,e,f,g,h,a,b); + ROUND512(b,c,d,e,f,g,h,a); + } while (j < 80); + + /* Compute the current intermediate hash value */ + context->state[0] += a; + context->state[1] += b; + context->state[2] += c; + context->state[3] += d; + context->state[4] += e; + context->state[5] += f; + context->state[6] += g; + context->state[7] += h; + + /* Clean up */ + a = b = c = d = e = f = g = h = T1 = 0; +} + +#else /* SHA2_UNROLL_TRANSFORM */ + +void SHA512_Transform(SHA512_CTX* context, const sha2_word64* data) { + sha2_word64 a, b, c, d, e, f, g, h, s0, s1; + sha2_word64 T1, T2, *W512 = (sha2_word64*)context->buffer; + int j; + + /* Initialize registers with the prev. 
intermediate value */ + a = context->state[0]; + b = context->state[1]; + c = context->state[2]; + d = context->state[3]; + e = context->state[4]; + f = context->state[5]; + g = context->state[6]; + h = context->state[7]; + + j = 0; + do { +#if BYTE_ORDER == LITTLE_ENDIAN + /* Convert TO host byte order */ + REVERSE64(*data++, W512[j]); + /* Apply the SHA-512 compression function to update a..h */ + T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j]; +#else /* BYTE_ORDER == LITTLE_ENDIAN */ + /* Apply the SHA-512 compression function to update a..h with copy */ + T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++); +#endif /* BYTE_ORDER == LITTLE_ENDIAN */ + T2 = Sigma0_512(a) + Maj(a, b, c); + h = g; + g = f; + f = e; + e = d + T1; + d = c; + c = b; + b = a; + a = T1 + T2; + + j++; + } while (j < 16); + + do { + /* Part of the message block expansion: */ + s0 = W512[(j+1)&0x0f]; + s0 = sigma0_512(s0); + s1 = W512[(j+14)&0x0f]; + s1 = sigma1_512(s1); + + /* Apply the SHA-512 compression function to update a..h */ + T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + + (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); + T2 = Sigma0_512(a) + Maj(a, b, c); + h = g; + g = f; + f = e; + e = d + T1; + d = c; + c = b; + b = a; + a = T1 + T2; + + j++; + } while (j < 80); + + /* Compute the current intermediate hash value */ + context->state[0] += a; + context->state[1] += b; + context->state[2] += c; + context->state[3] += d; + context->state[4] += e; + context->state[5] += f; + context->state[6] += g; + context->state[7] += h; + + /* Clean up */ + a = b = c = d = e = f = g = h = T1 = T2 = 0; +} + +#endif /* SHA2_UNROLL_TRANSFORM */ + +void SHA512_Update(SHA512_CTX* context, const sha2_byte *data, size_t len) { + unsigned int freespace, usedspace; + + if (len == 0) { + /* Calling with no data is valid - we do nothing */ + return; + } + + /* Sanity check: */ + assert(context != (SHA512_CTX*)0 && data != (sha2_byte*)0); + + usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH; + if (usedspace > 0) { + /* Calculate how much free space is available in the buffer */ + freespace = SHA512_BLOCK_LENGTH - usedspace; + + if (len >= freespace) { + /* Fill the buffer completely and process it */ + MEMCPY_BCOPY(&context->buffer[usedspace], data, freespace); + ADDINC128(context->bitcount, freespace << 3); + len -= freespace; + data += freespace; + SHA512_Transform(context, (sha2_word64*)context->buffer); + } else { + /* The buffer is not yet full */ + MEMCPY_BCOPY(&context->buffer[usedspace], data, len); + ADDINC128(context->bitcount, len << 3); + /* Clean up: */ + usedspace = freespace = 0; + return; + } + } + while (len >= SHA512_BLOCK_LENGTH) { + /* Process as many complete blocks as we can */ + SHA512_Transform(context, (sha2_word64*)data); + ADDINC128(context->bitcount, SHA512_BLOCK_LENGTH << 3); + len -= SHA512_BLOCK_LENGTH; + data += SHA512_BLOCK_LENGTH; + } + if (len > 0) { + /* There's left-overs, so save 'em */ + MEMCPY_BCOPY(context->buffer, data, len); + ADDINC128(context->bitcount, len << 3); + } + /* Clean up: */ + usedspace = freespace = 0; +} + +void SHA512_Last(SHA512_CTX* context) { + unsigned int usedspace; + + usedspace = (context->bitcount[0] >> 3) % SHA512_BLOCK_LENGTH; +#if BYTE_ORDER == LITTLE_ENDIAN + /* Convert FROM host byte order */ + REVERSE64(context->bitcount[0],context->bitcount[0]); + REVERSE64(context->bitcount[1],context->bitcount[1]); +#endif + if (usedspace > 0) { + /* Begin padding with a 1 bit: */ + context->buffer[usedspace++] = 0x80; + + if 
(usedspace <= SHA512_SHORT_BLOCK_LENGTH) { + /* Set-up for the last transform: */ + MEMSET_BZERO(&context->buffer[usedspace], SHA512_SHORT_BLOCK_LENGTH - usedspace); + } else { + if (usedspace < SHA512_BLOCK_LENGTH) { + MEMSET_BZERO(&context->buffer[usedspace], SHA512_BLOCK_LENGTH - usedspace); + } + /* Do second-to-last transform: */ + SHA512_Transform(context, (sha2_word64*)context->buffer); + + /* And set-up for the last transform: */ + MEMSET_BZERO(context->buffer, SHA512_BLOCK_LENGTH - 2); + } + } else { + /* Prepare for final transform: */ + MEMSET_BZERO(context->buffer, SHA512_SHORT_BLOCK_LENGTH); + + /* Begin padding with a 1 bit: */ + *context->buffer = 0x80; + } + /* Store the length of input data (in bits): */ + sha2_word64 *ptr = (sha2_word64*)(&context->buffer[SHA512_SHORT_BLOCK_LENGTH]); + *ptr = context->bitcount[1]; + ptr = (sha2_word64*)(&context->buffer[SHA512_SHORT_BLOCK_LENGTH+8]); + *ptr = context->bitcount[0]; + + /* Final transform: */ + SHA512_Transform(context, (sha2_word64*)context->buffer); +} + +void SHA512_Final(sha2_byte digest[], SHA512_CTX* context) { + sha2_word64 *d = (sha2_word64*)digest; + + /* Sanity check: */ + assert(context != (SHA512_CTX*)0); + + /* If no digest buffer is passed, we don't bother doing this: */ + if (digest != (sha2_byte*)0) { + SHA512_Last(context); + + /* Save the hash data for output: */ +#if BYTE_ORDER == LITTLE_ENDIAN + { + /* Convert TO host byte order */ + int j; + for (j = 0; j < 8; j++) { + REVERSE64(context->state[j],context->state[j]); + *d++ = context->state[j]; + } + } +#else + MEMCPY_BCOPY(d, context->state, SHA512_DIGEST_LENGTH); +#endif + } + + /* Zero out state data */ + MEMSET_BZERO(context, sizeof(SHA512_CTX)); +} + +char *SHA512_End(SHA512_CTX* context, char buffer[]) { + sha2_byte digest[SHA512_DIGEST_LENGTH], *d = digest; + int i; + + /* Sanity check: */ + assert(context != (SHA512_CTX*)0); + + if (buffer != (char*)0) { + SHA512_Final(digest, context); + + for (i = 0; i < SHA512_DIGEST_LENGTH; i++) { + *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4]; + *buffer++ = sha2_hex_digits[*d & 0x0f]; + d++; + } + *buffer = (char)0; + } else { + MEMSET_BZERO(context, sizeof(SHA512_CTX)); + } + MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH); + return buffer; +} + +char* SHA512_Data(const sha2_byte* data, size_t len, char digest[SHA512_DIGEST_STRING_LENGTH]) { + SHA512_CTX context; + + SHA512_Init(&context); + SHA512_Update(&context, data, len); + return SHA512_End(&context, digest); +} + + +/*** SHA-384: *********************************************************/ +void SHA384_Init(SHA384_CTX* context) { + if (context == (SHA384_CTX*)0) { + return; + } + MEMCPY_BCOPY(context->state, sha384_initial_hash_value, SHA512_DIGEST_LENGTH); + MEMSET_BZERO(context->buffer, SHA384_BLOCK_LENGTH); + context->bitcount[0] = context->bitcount[1] = 0; +} + +void SHA384_Update(SHA384_CTX* context, const sha2_byte* data, size_t len) { + SHA512_Update((SHA512_CTX*)context, data, len); +} + +void SHA384_Final(sha2_byte digest[], SHA384_CTX* context) { + sha2_word64 *d = (sha2_word64*)digest; + + /* Sanity check: */ + assert(context != (SHA384_CTX*)0); + + /* If no digest buffer is passed, we don't bother doing this: */ + if (digest != (sha2_byte*)0) { + SHA512_Last((SHA512_CTX*)context); + + /* Save the hash data for output: */ +#if BYTE_ORDER == LITTLE_ENDIAN + { + /* Convert TO host byte order */ + int j; + for (j = 0; j < 6; j++) { + REVERSE64(context->state[j],context->state[j]); + *d++ = context->state[j]; + } + } +#else + MEMCPY_BCOPY(d, 
context->state, SHA384_DIGEST_LENGTH); +#endif + } + + /* Zero out state data */ + MEMSET_BZERO(context, sizeof(SHA384_CTX)); +} + +char *SHA384_End(SHA384_CTX* context, char buffer[]) { + sha2_byte digest[SHA384_DIGEST_LENGTH], *d = digest; + int i; + + /* Sanity check: */ + assert(context != (SHA384_CTX*)0); + + if (buffer != (char*)0) { + SHA384_Final(digest, context); + + for (i = 0; i < SHA384_DIGEST_LENGTH; i++) { + *buffer++ = sha2_hex_digits[(*d & 0xf0) >> 4]; + *buffer++ = sha2_hex_digits[*d & 0x0f]; + d++; + } + *buffer = (char)0; + } else { + MEMSET_BZERO(context, sizeof(SHA384_CTX)); + } + MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH); + return buffer; +} + +char* SHA384_Data(const sha2_byte* data, size_t len, char digest[SHA384_DIGEST_STRING_LENGTH]) { + SHA384_CTX context; + + SHA384_Init(&context); + SHA384_Update(&context, data, len); + return SHA384_End(&context, digest); +} + diff --git a/src/sha2.h b/src/sha2.h new file mode 100644 index 000000000..bf759ad45 --- /dev/null +++ b/src/sha2.h @@ -0,0 +1,197 @@ +/* + * FILE: sha2.h + * AUTHOR: Aaron D. Gifford - http://www.aarongifford.com/ + * + * Copyright (c) 2000-2001, Aaron D. Gifford + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $Id: sha2.h,v 1.1 2001/11/08 00:02:01 adg Exp adg $ + */ + +#ifndef __SHA2_H__ +#define __SHA2_H__ + +#ifdef __cplusplus +extern "C" { +#endif + + +/* + * Import u_intXX_t size_t type definitions from system headers. You + * may need to change this, or define these things yourself in this + * file. 
+ */ +#include + +#ifdef SHA2_USE_INTTYPES_H + +#include + +#endif /* SHA2_USE_INTTYPES_H */ + + +/*** SHA-256/384/512 Various Length Definitions ***********************/ +#define SHA256_BLOCK_LENGTH 64 +#define SHA256_DIGEST_LENGTH 32 +#define SHA256_DIGEST_STRING_LENGTH (SHA256_DIGEST_LENGTH * 2 + 1) +#define SHA384_BLOCK_LENGTH 128 +#define SHA384_DIGEST_LENGTH 48 +#define SHA384_DIGEST_STRING_LENGTH (SHA384_DIGEST_LENGTH * 2 + 1) +#define SHA512_BLOCK_LENGTH 128 +#define SHA512_DIGEST_LENGTH 64 +#define SHA512_DIGEST_STRING_LENGTH (SHA512_DIGEST_LENGTH * 2 + 1) + + +/*** SHA-256/384/512 Context Structures *******************************/ +/* NOTE: If your architecture does not define either u_intXX_t types or + * uintXX_t (from inttypes.h), you may need to define things by hand + * for your system: + */ +#if 0 +typedef unsigned char u_int8_t; /* 1-byte (8-bits) */ +typedef unsigned int u_int32_t; /* 4-bytes (32-bits) */ +typedef unsigned long long u_int64_t; /* 8-bytes (64-bits) */ +#endif +/* + * Most BSD systems already define u_intXX_t types, as does Linux. + * Some systems, however, like Compaq's Tru64 Unix instead can use + * uintXX_t types defined by very recent ANSI C standards and included + * in the file: + * + * #include + * + * If you choose to use then please define: + * + * #define SHA2_USE_INTTYPES_H + * + * Or on the command line during compile: + * + * cc -DSHA2_USE_INTTYPES_H ... + */ +#ifdef SHA2_USE_INTTYPES_H + +typedef struct _SHA256_CTX { + uint32_t state[8]; + uint64_t bitcount; + uint8_t buffer[SHA256_BLOCK_LENGTH]; +} SHA256_CTX; +typedef struct _SHA512_CTX { + uint64_t state[8]; + uint64_t bitcount[2]; + uint8_t buffer[SHA512_BLOCK_LENGTH]; +} SHA512_CTX; + +#else /* SHA2_USE_INTTYPES_H */ + +typedef struct _SHA256_CTX { + u_int32_t state[8]; + u_int64_t bitcount; + u_int8_t buffer[SHA256_BLOCK_LENGTH]; +} SHA256_CTX; +typedef struct _SHA512_CTX { + u_int64_t state[8]; + u_int64_t bitcount[2]; + u_int8_t buffer[SHA512_BLOCK_LENGTH]; +} SHA512_CTX; + +#endif /* SHA2_USE_INTTYPES_H */ + +typedef SHA512_CTX SHA384_CTX; + + +/*** SHA-256/384/512 Function Prototypes ******************************/ +#ifndef NOPROTO +#ifdef SHA2_USE_INTTYPES_H + +void SHA256_Init(SHA256_CTX *); +void SHA256_Update(SHA256_CTX*, const uint8_t*, size_t); +void SHA256_Final(uint8_t[SHA256_DIGEST_LENGTH], SHA256_CTX*); +char* SHA256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]); +char* SHA256_Data(const uint8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]); + +void SHA384_Init(SHA384_CTX*); +void SHA384_Update(SHA384_CTX*, const uint8_t*, size_t); +void SHA384_Final(uint8_t[SHA384_DIGEST_LENGTH], SHA384_CTX*); +char* SHA384_End(SHA384_CTX*, char[SHA384_DIGEST_STRING_LENGTH]); +char* SHA384_Data(const uint8_t*, size_t, char[SHA384_DIGEST_STRING_LENGTH]); + +void SHA512_Init(SHA512_CTX*); +void SHA512_Update(SHA512_CTX*, const uint8_t*, size_t); +void SHA512_Final(uint8_t[SHA512_DIGEST_LENGTH], SHA512_CTX*); +char* SHA512_End(SHA512_CTX*, char[SHA512_DIGEST_STRING_LENGTH]); +char* SHA512_Data(const uint8_t*, size_t, char[SHA512_DIGEST_STRING_LENGTH]); + +#else /* SHA2_USE_INTTYPES_H */ + +void SHA256_Init(SHA256_CTX *); +void SHA256_Update(SHA256_CTX*, const u_int8_t*, size_t); +void SHA256_Final(u_int8_t[SHA256_DIGEST_LENGTH], SHA256_CTX*); +char* SHA256_End(SHA256_CTX*, char[SHA256_DIGEST_STRING_LENGTH]); +char* SHA256_Data(const u_int8_t*, size_t, char[SHA256_DIGEST_STRING_LENGTH]); + +void SHA384_Init(SHA384_CTX*); +void SHA384_Update(SHA384_CTX*, const u_int8_t*, size_t); +void 
SHA384_Final(u_int8_t[SHA384_DIGEST_LENGTH], SHA384_CTX*); +char* SHA384_End(SHA384_CTX*, char[SHA384_DIGEST_STRING_LENGTH]); +char* SHA384_Data(const u_int8_t*, size_t, char[SHA384_DIGEST_STRING_LENGTH]); + +void SHA512_Init(SHA512_CTX*); +void SHA512_Update(SHA512_CTX*, const u_int8_t*, size_t); +void SHA512_Final(u_int8_t[SHA512_DIGEST_LENGTH], SHA512_CTX*); +char* SHA512_End(SHA512_CTX*, char[SHA512_DIGEST_STRING_LENGTH]); +char* SHA512_Data(const u_int8_t*, size_t, char[SHA512_DIGEST_STRING_LENGTH]); + +#endif /* SHA2_USE_INTTYPES_H */ + +#else /* NOPROTO */ + +void SHA256_Init(); +void SHA256_Update(); +void SHA256_Final(); +char* SHA256_End(); +char* SHA256_Data(); + +void SHA384_Init(); +void SHA384_Update(); +void SHA384_Final(); +char* SHA384_End(); +char* SHA384_Data(); + +void SHA512_Init(); +void SHA512_Update(); +void SHA512_Final(); +char* SHA512_End(); +char* SHA512_Data(); + +#endif /* NOPROTO */ + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __SHA2_H__ */ + diff --git a/src/stats.c b/src/stats.c new file mode 100644 index 000000000..18ad5c088 --- /dev/null +++ b/src/stats.c @@ -0,0 +1,245 @@ +/* + * Copyright 2011-2014 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include // for errno +#include // memset +#include +#include +#include +#include // for nodes table + +#if SYLVAN_STATS + +#ifdef __ELF__ +__thread sylvan_stats_t sylvan_stats; +#else +pthread_key_t sylvan_stats_key; +#endif + +#ifndef USE_HWLOC +#define USE_HWLOC 0 +#endif + +#if USE_HWLOC +#include +static hwloc_topology_t topo; +#endif + +VOID_TASK_0(sylvan_stats_reset_perthread) +{ +#ifdef __ELF__ + for (int i=0; icpuset, HWLOC_MEMBIND_BIND, 0); +#endif + pthread_setspecific(sylvan_stats_key, sylvan_stats); + } + for (int i=0; icounters[i] = 0; + } + for (int i=0; itimers[i] = 0; + } +#endif +} + +VOID_TASK_IMPL_0(sylvan_stats_init) +{ +#ifndef __ELF__ + pthread_key_create(&sylvan_stats_key, NULL); +#endif +#if USE_HWLOC + hwloc_topology_init(&topo); + hwloc_topology_load(topo); +#endif + TOGETHER(sylvan_stats_reset_perthread); +} + +/** + * Reset all counters (for statistics) + */ +VOID_TASK_IMPL_0(sylvan_stats_reset) +{ + TOGETHER(sylvan_stats_reset_perthread); +} + +#define BLACK "\33[22;30m" +#define GRAY "\33[01;30m" +#define RED "\33[22;31m" +#define LRED "\33[01;31m" +#define GREEN "\33[22;32m" +#define LGREEN "\33[01;32m" +#define BLUE "\33[22;34m" +#define LBLUE "\33[01;34m" +#define BROWN "\33[22;33m" +#define YELLOW "\33[01;33m" +#define CYAN "\33[22;36m" +#define LCYAN "\33[22;36m" +#define MAGENTA "\33[22;35m" +#define LMAGENTA "\33[01;35m" +#define NC "\33[0m" +#define BOLD "\33[1m" +#define ULINE "\33[4m" //underline +#define BLINK "\33[5m" +#define INVERT "\33[7m" + +VOID_TASK_1(sylvan_stats_sum, sylvan_stats_t*, target) +{ +#ifdef __ELF__ + for (int i=0; icounters[i], sylvan_stats.counters[i]); + } + for (int i=0; itimers[i], sylvan_stats.timers[i]); + } +#else + sylvan_stats_t *sylvan_stats = pthread_getspecific(sylvan_stats_key); + if (sylvan_stats != NULL) { + for (int i=0; icounters[i], sylvan_stats->counters[i]); + } + for (int i=0; itimers[i], sylvan_stats->timers[i]); + } + } +#endif +} + +void +sylvan_stats_report(FILE *target, int color) +{ +#if !SYLVAN_STATS + (void)target; + (void)color; + return; +#else + (void)color; + + sylvan_stats_t totals; + memset(&totals, 0, sizeof(sylvan_stats_t)); + + LACE_ME; + TOGETHER(sylvan_stats_sum, &totals); + + // fix timers for MACH +#ifdef __MACH__ + mach_timebase_info_data_t timebase; + mach_timebase_info(&timebase); + uint64_t c = timebase.numer/timebase.denom; + for (int i=0;i +#include + +#ifndef SYLVAN_STATS_H +#define SYLVAN_STATS_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +typedef enum { + BDD_ITE, + BDD_AND, + BDD_XOR, + BDD_EXISTS, + BDD_AND_EXISTS, + BDD_RELNEXT, + BDD_RELPREV, + BDD_SATCOUNT, + BDD_COMPOSE, + BDD_RESTRICT, + BDD_CONSTRAIN, + BDD_CLOSURE, + BDD_ISBDD, + BDD_SUPPORT, + BDD_PATHCOUNT, + BDD_ITE_CACHEDPUT, + BDD_AND_CACHEDPUT, + BDD_XOR_CACHEDPUT, + BDD_EXISTS_CACHEDPUT, + BDD_AND_EXISTS_CACHEDPUT, + BDD_RELNEXT_CACHEDPUT, + BDD_RELPREV_CACHEDPUT, + BDD_SATCOUNT_CACHEDPUT, + BDD_COMPOSE_CACHEDPUT, + BDD_RESTRICT_CACHEDPUT, + BDD_CONSTRAIN_CACHEDPUT, + BDD_CLOSURE_CACHEDPUT, + BDD_ISBDD_CACHEDPUT, + BDD_SUPPORT_CACHEDPUT, + BDD_PATHCOUNT_CACHEDPUT, + BDD_ITE_CACHED, + BDD_AND_CACHED, + BDD_XOR_CACHED, + BDD_EXISTS_CACHED, + BDD_AND_EXISTS_CACHED, + BDD_RELNEXT_CACHED, + BDD_RELPREV_CACHED, + BDD_SATCOUNT_CACHED, + BDD_COMPOSE_CACHED, + BDD_RESTRICT_CACHED, + BDD_CONSTRAIN_CACHED, + BDD_CLOSURE_CACHED, + BDD_ISBDD_CACHED, + BDD_SUPPORT_CACHED, + BDD_PATHCOUNT_CACHED, + BDD_NODES_CREATED, + BDD_NODES_REUSED, + + LDD_UNION, + LDD_MINUS, + LDD_INTERSECT, + LDD_RELPROD, + 
LDD_RELPREV, + LDD_PROJECT, + LDD_JOIN, + LDD_MATCH, + LDD_SATCOUNT, + LDD_SATCOUNTL, + LDD_ZIP, + LDD_RELPROD_UNION, + LDD_PROJECT_MINUS, + LDD_UNION_CACHEDPUT, + LDD_MINUS_CACHEDPUT, + LDD_INTERSECT_CACHEDPUT, + LDD_RELPROD_CACHEDPUT, + LDD_RELPREV_CACHEDPUT, + LDD_PROJECT_CACHEDPUT, + LDD_JOIN_CACHEDPUT, + LDD_MATCH_CACHEDPUT, + LDD_SATCOUNT_CACHEDPUT, + LDD_SATCOUNTL_CACHEDPUT, + LDD_ZIP_CACHEDPUT, + LDD_RELPROD_UNION_CACHEDPUT, + LDD_PROJECT_MINUS_CACHEDPUT, + LDD_UNION_CACHED, + LDD_MINUS_CACHED, + LDD_INTERSECT_CACHED, + LDD_RELPROD_CACHED, + LDD_RELPREV_CACHED, + LDD_PROJECT_CACHED, + LDD_JOIN_CACHED, + LDD_MATCH_CACHED, + LDD_SATCOUNT_CACHED, + LDD_SATCOUNTL_CACHED, + LDD_ZIP_CACHED, + LDD_RELPROD_UNION_CACHED, + LDD_PROJECT_MINUS_CACHED, + LDD_NODES_CREATED, + LDD_NODES_REUSED, + + LLMSSET_LOOKUP, + + SYLVAN_GC_COUNT, + SYLVAN_COUNTER_COUNTER +} Sylvan_Counters; + +typedef enum +{ + SYLVAN_GC, + SYLVAN_TIMER_COUNTER +} Sylvan_Timers; + +/** + * Initialize stats system (done by sylvan_init_package) + */ +#define sylvan_stats_init() CALL(sylvan_stats_init) +VOID_TASK_DECL_0(sylvan_stats_init) + +/** + * Reset all counters (for statistics) + */ +#define sylvan_stats_reset() CALL(sylvan_stats_reset) +VOID_TASK_DECL_0(sylvan_stats_reset) + +/** + * Write statistic report to file (stdout, stderr, etc) + */ +void sylvan_stats_report(FILE* target, int color); + +#if SYLVAN_STATS + +/* Infrastructure for internal markings */ +typedef struct +{ + uint64_t counters[SYLVAN_COUNTER_COUNTER]; + uint64_t timers[SYLVAN_TIMER_COUNTER]; + uint64_t timers_startstop[SYLVAN_TIMER_COUNTER]; +} sylvan_stats_t; + +#ifdef __MACH__ +#include +#define getabstime() mach_absolute_time() +#else +#include +static uint64_t +getabstime() +{ + struct timespec ts; + clock_gettime(CLOCK_MONOTONIC, &ts); + uint64_t t = ts.tv_sec; + t *= 1000000000UL; + t += ts.tv_nsec; + return t; +} +#endif + +#ifdef __ELF__ +extern __thread sylvan_stats_t sylvan_stats; +#else +#include +extern pthread_key_t sylvan_stats_key; +#endif + +static inline void +sylvan_stats_count(size_t counter) +{ +#ifdef __ELF__ + sylvan_stats.counters[counter]++; +#else + sylvan_stats_t *sylvan_stats = (sylvan_stats_t*)pthread_getspecific(sylvan_stats_key); + sylvan_stats->counters[counter]++; +#endif +} + +static inline void +sylvan_stats_add(size_t counter, size_t amount) +{ +#ifdef __ELF__ + sylvan_stats.counters[counter]+=amount; +#else + sylvan_stats_t *sylvan_stats = (sylvan_stats_t*)pthread_getspecific(sylvan_stats_key); + sylvan_stats->counters[counter]+=amount; +#endif +} + +static inline void +sylvan_timer_start(size_t timer) +{ + uint64_t t = getabstime(); + +#ifdef __ELF__ + sylvan_stats.timers_startstop[timer] = t; +#else + sylvan_stats_t *sylvan_stats = (sylvan_stats_t*)pthread_getspecific(sylvan_stats_key); + sylvan_stats->timers_startstop[timer] = t; +#endif +} + +static inline void +sylvan_timer_stop(size_t timer) +{ + uint64_t t = getabstime(); + +#ifdef __ELF__ + sylvan_stats.timers[timer] += (t - sylvan_stats.timers_startstop[timer]); +#else + sylvan_stats_t *sylvan_stats = (sylvan_stats_t*)pthread_getspecific(sylvan_stats_key); + sylvan_stats->timers[timer] += (t - sylvan_stats->timers_startstop[timer]); +#endif +} + +#else + +static inline void +sylvan_stats_count(size_t counter) +{ + (void)counter; +} + +static inline void +sylvan_stats_add(size_t counter, size_t amount) +{ + (void)counter; + (void)amount; +} + +static inline void +sylvan_timer_start(size_t timer) +{ + (void)timer; +} + +static inline void 
+sylvan_timer_stop(size_t timer) +{ + (void)timer; +} + +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/sylvan.h b/src/sylvan.h new file mode 100644 index 000000000..6fe09f9d6 --- /dev/null +++ b/src/sylvan.h @@ -0,0 +1,182 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Sylvan: parallel BDD/ListDD package. + * + * This is a multi-core implementation of BDDs with complement edges. + * + * This package requires parallel the work-stealing framework Lace. + * Lace must be initialized before initializing Sylvan + * + * This package uses explicit referencing. + * Use sylvan_ref and sylvan_deref to manage external references. + * + * Garbage collection requires all workers to cooperate. Garbage collection is either initiated + * by the user (calling sylvan_gc) or when the nodes table is full. All Sylvan operations + * check whether they need to cooperate on garbage collection. Garbage collection cannot occur + * otherwise. This means that it is perfectly fine to do this: + * BDD a = sylvan_ref(sylvan_and(b, c)); + * since it is not possible that garbage collection occurs between the two calls. + * + * To temporarily disable garbage collection, use sylvan_gc_disable() and sylvan_gc_enable(). + */ + +#include + +#include +#include // for FILE +#include +#include // for definitions + +#include +#include +#include + +#ifndef SYLVAN_H +#define SYLVAN_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifndef SYLVAN_SIZE_FIBONACCI +#define SYLVAN_SIZE_FIBONACCI 0 +#endif + +// For now, only support 64-bit systems +typedef char __sylvan_check_size_t_is_8_bytes[(sizeof(uint64_t) == sizeof(size_t))?1:-1]; + +/** + * Initialize the Sylvan parallel decision diagrams package. + * + * After initialization, call sylvan_init_bdd and/or sylvan_init_ldd if you want to use + * the BDD and/or LDD functionality. + * + * BDDs and LDDs share a common node table and operations cache. + * + * The node table is resizable. + * The table is resized automatically when >50% of the table is filled during garbage collection. + * This behavior can be customized by overriding the gc hook. + * + * Memory usage: + * Every node requires 24 bytes memory. (16 bytes data + 8 bytes overhead) + * Every operation cache entry requires 36 bytes memory. (32 bytes data + 4 bytes overhead) + * + * Reasonable defaults: datasize of 1L<<26 (2048 MB), cachesize of 1L<<25 (1152 MB) + */ +void sylvan_init_package(size_t initial_tablesize, size_t max_tablesize, size_t initial_cachesize, size_t max_cachesize); + +/** + * Frees all Sylvan data (also calls the quit() functions of BDD/MDD parts) + */ +void sylvan_quit(); + +/** + * Return number of occupied buckets in nodes table and total number of buckets. + */ +VOID_TASK_DECL_2(sylvan_table_usage, size_t*, size_t*); +#define sylvan_table_usage(filled, total) (CALL(sylvan_table_usage, filled, total)) + +/** + * Perform garbage collection. 
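To make the initialization order described above concrete, here is a minimal start-up sketch (illustrative, not from the Sylvan sources). It assumes the bundled Lace framework exposes lace_init/lace_startup/lace_exit, and the initial table/cache sizes are placeholders chosen in the spirit of the defaults suggested above.

    #include <sylvan.h>

    int main(void)
    {
        lace_init(0, 1000000);        /* 0 = autodetect worker count (assumed Lace API) */
        lace_startup(0, NULL, NULL);  /* default stack size, run main thread as worker  */

        /* nodes table and operation cache start small and may grow to the maximum */
        sylvan_init_package(1LL<<22, 1LL<<26, 1LL<<22, 1LL<<25);
        sylvan_init_bdd(1);           /* granularity 1: consult the cache at every level */

        /* ... build BDDs, run the actual computation ... */

        sylvan_quit();
        lace_exit();
        return 0;
    }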
+ * + * Garbage collection is performed in a new Lace frame, interrupting all ongoing work + * until garbage collection is completed. + * + * Garbage collection procedure: + * 1) The operation cache is cleared and the hash table is reset. + * 2) All live nodes are marked (to be rehashed). This is done by the "mark" callbacks. + * 3) The "hook" callback is called. + * By default, this doubles the hash table size when it is >50% full. + * 4) All live nodes are rehashed into the hash table. + * + * The behavior of garbage collection can be customized by adding "mark" callbacks and + * replacing the "hook" callback. + */ +VOID_TASK_DECL_0(sylvan_gc); +#define sylvan_gc() (CALL(sylvan_gc)) + +/** + * Enable or disable garbage collection. + * + * This affects both automatic and manual garbage collection, i.e., + * calling sylvan_gc() while garbage collection is disabled does not have any effect. + */ +void sylvan_gc_enable(); +void sylvan_gc_disable(); + +/** + * Add a "mark" callback to the list of callbacks. + * + * These are called during garbage collection to recursively mark nodes. + * + * Default "mark" functions that mark external references (via sylvan_ref) and internal + * references (inside operations) are added by sylvan_init_bdd/sylvan_init_bdd. + * + * Functions are called in order. + * level 10: marking functions of Sylvan (external/internal references) + * level 20: call the hook function (for resizing) + * level 30: rehashing + */ +LACE_TYPEDEF_CB(void, gc_mark_cb); +void sylvan_gc_add_mark(int order, gc_mark_cb callback); + +/** + * Set "hook" callback. There can be only one. + * + * The hook is called after the "mark" phase and before the "rehash" phase. + * This allows users to perform certain actions, such as resizing the nodes table + * and the operation cache. Also, dynamic resizing could be performed then. + */ +LACE_TYPEDEF_CB(void, gc_hook_cb); +void sylvan_gc_set_hook(gc_hook_cb new_hook); + +/** + * One of the hooks for resizing behavior. + * Default if SYLVAN_AGGRESSIVE_RESIZE is set. + * Always double size on gc() until maximum reached. + */ +VOID_TASK_DECL_0(sylvan_gc_aggressive_resize); + +/** + * One of the hooks for resizing behavior. + * Default if SYLVAN_AGGRESSIVE_RESIZE is not set. + * Double size on gc() whenever >50% is used. + */ +VOID_TASK_DECL_0(sylvan_gc_default_hook); + +/** + * Set "notify on dead" callback for the nodes table. + * See also documentation in llmsset.h + */ +#define sylvan_set_ondead(cb, ctx) llmsset_set_ondead(nodes, cb, ctx) + +/** + * Global variables (number of workers, nodes table) + */ + +extern llmsset_t nodes; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#include +#include +#include + +#endif diff --git a/src/sylvan_bdd.c b/src/sylvan_bdd.c new file mode 100644 index 000000000..74936a62d --- /dev/null +++ b/src/sylvan_bdd.c @@ -0,0 +1,2820 @@ +/* + * Copyright 2011-2014 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
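The mark/hook callback mechanism documented above can also be extended from user code. A hedged sketch (not part of the patch), following the same TASK(...) registration pattern the BDD module itself uses further below; my_gc_hook is a hypothetical name.

    /* A custom resize hook that simply defers to the default doubling policy.
     * Defined as a Lace task because gc_hook_cb is a Lace callback type. */
    VOID_TASK_0(my_gc_hook)
    {
        /* room for logging table usage, adjusting sizes, ... */
        CALL(sylvan_gc_default_hook);
    }

    void install_my_hook(void)
    {
        sylvan_gc_set_hook(TASK(my_gc_hook));
        /* additional mark callbacks could be registered alongside Sylvan's own
         * level-10 ones, e.g. sylvan_gc_add_mark(11, TASK(my_mark_task)); where
         * my_mark_task is a user-defined task (hypothetical). */
    }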
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/** + * Complement handling macros + */ +#define BDD_HASMARK(s) (s&sylvan_complement?1:0) +#define BDD_TOGGLEMARK(s) (s^sylvan_complement) +#define BDD_STRIPMARK(s) (s&~sylvan_complement) +#define BDD_TRANSFERMARK(from, to) (to ^ (from & sylvan_complement)) +// Equal under mark +#define BDD_EQUALM(a, b) ((((a)^(b))&(~sylvan_complement))==0) + +/** + * BDD node structure + */ +typedef struct __attribute__((packed)) bddnode { + uint64_t a, b; +} * bddnode_t; // 16 bytes + +#define GETNODE(bdd) ((bddnode_t)llmsset_index_to_ptr(nodes, bdd&0x000000ffffffffff)) + +static inline int __attribute__((unused)) +bddnode_getcomp(bddnode_t n) +{ + return n->a & 0x8000000000000000 ? 1 : 0; +} + +static inline uint64_t +bddnode_getlow(bddnode_t n) +{ + return n->b & 0x000000ffffffffff; // 40 bits +} + +static inline uint64_t +bddnode_gethigh(bddnode_t n) +{ + return n->a & 0x800000ffffffffff; // 40 bits plus high bit of first +} + +static inline uint32_t +bddnode_getvariable(bddnode_t n) +{ + return (uint32_t)(n->b >> 40); +} + +static inline int +bddnode_getmark(bddnode_t n) +{ + return n->a & 0x2000000000000000 ? 1 : 0; +} + +static inline void +bddnode_setmark(bddnode_t n, int mark) +{ + if (mark) n->a |= 0x2000000000000000; + else n->a &= 0xdfffffffffffffff; +} + +static inline void +bddnode_makenode(bddnode_t n, uint32_t var, uint64_t low, uint64_t high) +{ + n->a = high; + n->b = ((uint64_t)var)<<40 | low; +} + +/** + * Implementation of garbage collection. + */ + +/* Recursively mark BDD nodes as 'in use' */ +VOID_TASK_IMPL_1(sylvan_gc_mark_rec, BDD, bdd) +{ + if (bdd == sylvan_false || bdd == sylvan_true) return; + + if (llmsset_mark(nodes, bdd&0x000000ffffffffff)) { + bddnode_t n = GETNODE(bdd); + SPAWN(sylvan_gc_mark_rec, bddnode_getlow(n)); + CALL(sylvan_gc_mark_rec, bddnode_gethigh(n)); + SYNC(sylvan_gc_mark_rec); + } +} + +/** + * External references + */ + +refs_table_t bdd_refs; +refs_table_t bdd_protected; +static int bdd_protected_created = 0; + +BDD +sylvan_ref(BDD a) +{ + if (a == sylvan_false || a == sylvan_true) return a; + refs_up(&bdd_refs, BDD_STRIPMARK(a)); + return a; +} + +void +sylvan_deref(BDD a) +{ + if (a == sylvan_false || a == sylvan_true) return; + refs_down(&bdd_refs, BDD_STRIPMARK(a)); +} + +void +sylvan_protect(BDD *a) +{ + if (!bdd_protected_created) { + // In C++, sometimes sylvan_protect is called before Sylvan is initialized. Just create a table. 
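External references come in two flavours in the code above: sylvan_ref/sylvan_deref pin a particular node, while sylvan_protect/sylvan_unprotect register the address of a variable so that whatever it currently holds survives garbage collection. A small illustrative sketch (assumes Sylvan is already initialized):

    BDD frontier = sylvan_false;
    sylvan_protect(&frontier);     /* whatever 'frontier' holds is marked at every gc */

    frontier = sylvan_ithvar(0);   /* safe to overwrite; the slot itself is protected */
    /* ... grow 'frontier' across many operations that may trigger gc ... */

    sylvan_unprotect(&frontier);   /* stop tracking before the variable goes out of scope */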
+ protect_create(&bdd_protected, 4096); + bdd_protected_created = 1; + } + protect_up(&bdd_protected, (size_t)a); +} + +void +sylvan_unprotect(BDD *a) +{ + if (bdd_protected.refs_table != NULL) protect_down(&bdd_protected, (size_t)a); +} + +size_t +sylvan_count_refs() +{ + return refs_count(&bdd_refs); +} + +size_t +sylvan_count_protected() +{ + return protect_count(&bdd_protected); +} + +/* Called during garbage collection */ +VOID_TASK_0(sylvan_gc_mark_external_refs) +{ + // iterate through refs hash table, mark all found + size_t count=0; + uint64_t *it = refs_iter(&bdd_refs, 0, bdd_refs.refs_size); + while (it != NULL) { + BDD to_mark = refs_next(&bdd_refs, &it, bdd_refs.refs_size); + SPAWN(sylvan_gc_mark_rec, to_mark); + count++; + } + while (count--) { + SYNC(sylvan_gc_mark_rec); + } +} + +VOID_TASK_0(sylvan_gc_mark_protected) +{ + // iterate through refs hash table, mark all found + size_t count=0; + uint64_t *it = protect_iter(&bdd_protected, 0, bdd_protected.refs_size); + while (it != NULL) { + BDD *to_mark = (BDD*)protect_next(&bdd_protected, &it, bdd_protected.refs_size); + SPAWN(sylvan_gc_mark_rec, *to_mark); + count++; + } + while (count--) { + SYNC(sylvan_gc_mark_rec); + } +} + +/* Infrastructure for internal markings */ +DECLARE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); + +VOID_TASK_0(bdd_refs_mark_task) +{ + LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); + size_t i, j=0; + for (i=0; ir_count; i++) { + if (j >= 40) { + while (j--) SYNC(sylvan_gc_mark_rec); + j=0; + } + SPAWN(sylvan_gc_mark_rec, bdd_refs_key->results[i]); + j++; + } + for (i=0; is_count; i++) { + Task *t = bdd_refs_key->spawns[i]; + if (!TASK_IS_STOLEN(t)) break; + if (TASK_IS_COMPLETED(t)) { + if (j >= 40) { + while (j--) SYNC(sylvan_gc_mark_rec); + j=0; + } + SPAWN(sylvan_gc_mark_rec, *(BDD*)TASK_RESULT(t)); + j++; + } + } + while (j--) SYNC(sylvan_gc_mark_rec); +} + +VOID_TASK_0(bdd_refs_mark) +{ + TOGETHER(bdd_refs_mark_task); +} + +VOID_TASK_0(bdd_refs_init_task) +{ + bdd_refs_internal_t s = (bdd_refs_internal_t)malloc(sizeof(struct bdd_refs_internal)); + s->r_size = 128; + s->r_count = 0; + s->s_size = 128; + s->s_count = 0; + s->results = (BDD*)malloc(sizeof(BDD) * 128); + s->spawns = (Task**)malloc(sizeof(Task*) * 128); + SET_THREAD_LOCAL(bdd_refs_key, s); +} + +VOID_TASK_0(bdd_refs_init) +{ + INIT_THREAD_LOCAL(bdd_refs_key); + TOGETHER(bdd_refs_init_task); + sylvan_gc_add_mark(10, TASK(bdd_refs_mark)); +} + +/** + * Initialize and quit functions + */ + +static int granularity = 1; // default + +static void +sylvan_quit_bdd() +{ + refs_free(&bdd_refs); + if (bdd_protected_created) { + protect_free(&bdd_protected); + bdd_protected_created = 0; + } +} + +void +sylvan_init_bdd(int _granularity) +{ + sylvan_register_quit(sylvan_quit_bdd); + sylvan_gc_add_mark(10, TASK(sylvan_gc_mark_external_refs)); + sylvan_gc_add_mark(10, TASK(sylvan_gc_mark_protected)); + + granularity = _granularity; + + // Sanity check + if (sizeof(struct bddnode) != 16) { + fprintf(stderr, "Invalid size of bdd nodes: %ld\n", sizeof(struct bddnode)); + exit(1); + } + + refs_create(&bdd_refs, 1024); + if (!bdd_protected_created) { + protect_create(&bdd_protected, 4096); + bdd_protected_created = 1; + } + + LACE_ME; + CALL(bdd_refs_init); +} + +/** + * Core BDD operations + */ + +BDD +sylvan_makenode(BDDVAR level, BDD low, BDD high) +{ + if (low == high) return low; + + // Normalization to keep canonicity + // low will have no mark + + struct bddnode n; + int mark; + + if (BDD_HASMARK(low)) { + mark = 1; + low = 
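/*
 * Note on the normalization performed here: complement marks are pushed off
 * the low edge so that every Boolean function has exactly one table entry.
 * Concretely,
 *
 *     sylvan_makenode(v, sylvan_not(L), H)
 *         == sylvan_not(sylvan_makenode(v, L, sylvan_not(H)))
 *
 * which is why negation (toggling sylvan_complement) costs O(1) and needs
 * no table lookup.
 */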
BDD_TOGGLEMARK(low); + high = BDD_TOGGLEMARK(high); + } else { + mark = 0; + } + + bddnode_makenode(&n, level, low, high); + + BDD result; + int created; + uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + LACE_ME; + + bdd_refs_push(low); + bdd_refs_push(high); + sylvan_gc(); + bdd_refs_pop(2); + + index = llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + fprintf(stderr, "BDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); + exit(1); + } + } + + if (created) sylvan_stats_count(BDD_NODES_CREATED); + else sylvan_stats_count(BDD_NODES_REUSED); + + result = index; + return mark ? result | sylvan_complement : result; +} + +BDD +sylvan_ithvar(BDDVAR level) +{ + return sylvan_makenode(level, sylvan_false, sylvan_true); +} + +BDDVAR +sylvan_var(BDD bdd) +{ + return bddnode_getvariable(GETNODE(bdd)); +} + +static BDD +node_low(BDD bdd, bddnode_t node) +{ + return BDD_TRANSFERMARK(bdd, bddnode_getlow(node)); +} + +static BDD +node_high(BDD bdd, bddnode_t node) +{ + return BDD_TRANSFERMARK(bdd, bddnode_gethigh(node)); +} + +BDD +sylvan_low(BDD bdd) +{ + if (sylvan_isconst(bdd)) return bdd; + return node_low(bdd, GETNODE(bdd)); +} + +BDD +sylvan_high(BDD bdd) +{ + if (sylvan_isconst(bdd)) return bdd; + return node_high(bdd, GETNODE(bdd)); +} + +/** + * Implementation of unary, binary and if-then-else operators. + */ +TASK_IMPL_3(BDD, sylvan_and, BDD, a, BDD, b, BDDVAR, prev_level) +{ + /* Terminal cases */ + if (a == sylvan_true) return b; + if (b == sylvan_true) return a; + if (a == sylvan_false) return sylvan_false; + if (b == sylvan_false) return sylvan_false; + if (a == b) return a; + if (a == BDD_TOGGLEMARK(b)) return sylvan_false; + + sylvan_gc_test(); + + sylvan_stats_count(BDD_AND); + + /* Improve for caching */ + if (BDD_STRIPMARK(a) > BDD_STRIPMARK(b)) { + BDD t = b; + b = a; + a = t; + } + + bddnode_t na = GETNODE(a); + bddnode_t nb = GETNODE(b); + + BDDVAR va = bddnode_getvariable(na); + BDDVAR vb = bddnode_getvariable(nb); + BDDVAR level = va < vb ? va : vb; + + int cachenow = granularity < 2 || prev_level == 0 ? 
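/*
 * Note on the recurring "cachenow" test: it groups variable levels into
 * blocks of 'granularity' levels and consults the operation cache only when
 * the recursion crosses from one block into the next (or at the top level,
 * where prev_level == 0). For example, with granularity 4 a step from level
 * 2 to level 5 is cached (2/4 != 5/4), while a step from level 4 to level 5
 * is not (4/4 == 5/4). Granularity 1 therefore caches at every level.
 */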
1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_AND, a, b, sylvan_false, &result)) { + sylvan_stats_count(BDD_AND_CACHED); + return result; + } + } + + // Get cofactors + BDD aLow = a, aHigh = a; + BDD bLow = b, bHigh = b; + if (level == va) { + aLow = node_low(a, na); + aHigh = node_high(a, na); + } + if (level == vb) { + bLow = node_low(b, nb); + bHigh = node_high(b, nb); + } + + // Recursive computation + BDD low=sylvan_invalid, high=sylvan_invalid, result; + + int n=0; + + if (aHigh == sylvan_true) { + high = bHigh; + } else if (aHigh == sylvan_false || bHigh == sylvan_false) { + high = sylvan_false; + } else if (bHigh == sylvan_true) { + high = aHigh; + } else { + bdd_refs_spawn(SPAWN(sylvan_and, aHigh, bHigh, level)); + n=1; + } + + if (aLow == sylvan_true) { + low = bLow; + } else if (aLow == sylvan_false || bLow == sylvan_false) { + low = sylvan_false; + } else if (bLow == sylvan_true) { + low = aLow; + } else { + low = CALL(sylvan_and, aLow, bLow, level); + } + + if (n) { + bdd_refs_push(low); + high = bdd_refs_sync(SYNC(sylvan_and)); + bdd_refs_pop(1); + } + + result = sylvan_makenode(level, low, high); + + if (cachenow) { + if (cache_put3(CACHE_BDD_AND, a, b, sylvan_false, result)) sylvan_stats_count(BDD_AND_CACHEDPUT); + } + + return result; +} + +TASK_IMPL_3(BDD, sylvan_xor, BDD, a, BDD, b, BDDVAR, prev_level) +{ + /* Terminal cases */ + if (a == sylvan_false) return b; + if (b == sylvan_false) return a; + if (a == sylvan_true) return sylvan_not(b); + if (b == sylvan_true) return sylvan_not(a); + if (a == b) return sylvan_false; + if (a == sylvan_not(b)) return sylvan_true; + + sylvan_gc_test(); + + sylvan_stats_count(BDD_XOR); + + /* Improve for caching */ + if (BDD_STRIPMARK(a) > BDD_STRIPMARK(b)) { + BDD t = b; + b = a; + a = t; + } + + // XOR(~A,B) => XOR(A,~B) + if (BDD_HASMARK(a)) { + a = BDD_STRIPMARK(a); + b = sylvan_not(b); + } + + bddnode_t na = GETNODE(a); + bddnode_t nb = GETNODE(b); + + BDDVAR va = bddnode_getvariable(na); + BDDVAR vb = bddnode_getvariable(nb); + BDDVAR level = va < vb ? va : vb; + + int cachenow = granularity < 2 || prev_level == 0 ? 
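/*
 * Note on the SPAWN/SYNC pattern used in sylvan_and above: the spawns and
 * syncs are wrapped in bdd_refs_spawn / bdd_refs_sync, and intermediate
 * results are pinned with bdd_refs_push / bdd_refs_pop. This keeps partial
 * results reachable for the bdd_refs_mark task if another worker triggers a
 * garbage collection while this worker is still computing the sibling
 * cofactor.
 */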
1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_XOR, a, b, sylvan_false, &result)) { + sylvan_stats_count(BDD_XOR_CACHED); + return result; + } + } + + // Get cofactors + BDD aLow = a, aHigh = a; + BDD bLow = b, bHigh = b; + if (level == va) { + aLow = node_low(a, na); + aHigh = node_high(a, na); + } + if (level == vb) { + bLow = node_low(b, nb); + bHigh = node_high(b, nb); + } + + // Recursive computation + BDD low, high, result; + + bdd_refs_spawn(SPAWN(sylvan_xor, aHigh, bHigh, level)); + low = CALL(sylvan_xor, aLow, bLow, level); + bdd_refs_push(low); + high = bdd_refs_sync(SYNC(sylvan_xor)); + bdd_refs_pop(1); + + result = sylvan_makenode(level, low, high); + + if (cachenow) { + if (cache_put3(CACHE_BDD_XOR, a, b, sylvan_false, result)) sylvan_stats_count(BDD_XOR_CACHEDPUT); + } + + return result; +} + + +TASK_IMPL_4(BDD, sylvan_ite, BDD, a, BDD, b, BDD, c, BDDVAR, prev_level) +{ + /* Terminal cases */ + if (a == sylvan_true) return b; + if (a == sylvan_false) return c; + if (a == b) b = sylvan_true; + if (a == sylvan_not(b)) b = sylvan_false; + if (a == c) c = sylvan_false; + if (a == sylvan_not(c)) c = sylvan_true; + if (b == c) return b; + if (b == sylvan_true && c == sylvan_false) return a; + if (b == sylvan_false && c == sylvan_true) return sylvan_not(a); + + /* Cases that reduce to AND and XOR */ + + // ITE(A,B,0) => AND(A,B) + if (c == sylvan_false) return CALL(sylvan_and, a, b, prev_level); + + // ITE(A,1,C) => ~AND(~A,~C) + if (b == sylvan_true) return sylvan_not(CALL(sylvan_and, sylvan_not(a), sylvan_not(c), prev_level)); + + // ITE(A,0,C) => AND(~A,C) + if (b == sylvan_false) return CALL(sylvan_and, sylvan_not(a), c, prev_level); + + // ITE(A,B,1) => ~AND(A,~B) + if (c == sylvan_true) return sylvan_not(CALL(sylvan_and, a, sylvan_not(b), prev_level)); + + // ITE(A,B,~B) => XOR(A,~B) + if (b == sylvan_not(c)) return CALL(sylvan_xor, a, c, 0); + + /* At this point, there are no more terminals */ + + /* Canonical for optimal cache use */ + + // ITE(~A,B,C) => ITE(A,C,B) + if (BDD_HASMARK(a)) { + a = BDD_STRIPMARK(a); + BDD t = c; + c = b; + b = t; + } + + // ITE(A,~B,C) => ~ITE(A,B,~C) + int mark = 0; + if (BDD_HASMARK(b)) { + b = sylvan_not(b); + c = sylvan_not(c); + mark = 1; + } + + bddnode_t na = GETNODE(a); + bddnode_t nb = GETNODE(b); + bddnode_t nc = GETNODE(c); + + BDDVAR va = bddnode_getvariable(na); + BDDVAR vb = bddnode_getvariable(nb); + BDDVAR vc = bddnode_getvariable(nc); + + // Get lowest level + BDDVAR level = vb < vc ? vb : vc; + + // Fast case + if (va < level && node_low(a, na) == sylvan_false && node_high(a, na) == sylvan_true) { + BDD result = sylvan_makenode(va, c, b); + return mark ? sylvan_not(result) : result; + } + + if (va < level) level = va; + + sylvan_gc_test(); + + sylvan_stats_count(BDD_ITE); + + int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_ITE, a, b, c, &result)) { + sylvan_stats_count(BDD_ITE_CACHED); + return mark ? 
sylvan_not(result) : result; + } + } + + // Get cofactors + BDD aLow = a, aHigh = a; + BDD bLow = b, bHigh = b; + BDD cLow = c, cHigh = c; + if (level == va) { + aLow = node_low(a, na); + aHigh = node_high(a, na); + } + if (level == vb) { + bLow = node_low(b, nb); + bHigh = node_high(b, nb); + } + if (level == vc) { + cLow = node_low(c, nc); + cHigh = node_high(c, nc); + } + + // Recursive computation + BDD low=sylvan_invalid, high=sylvan_invalid, result; + + int n=0; + + if (aHigh == sylvan_true) { + high = bHigh; + } else if (aHigh == sylvan_false) { + high = cHigh; + } else { + bdd_refs_spawn(SPAWN(sylvan_ite, aHigh, bHigh, cHigh, level)); + n=1; + } + + if (aLow == sylvan_true) { + low = bLow; + } else if (aLow == sylvan_false) { + low = cLow; + } else { + low = CALL(sylvan_ite, aLow, bLow, cLow, level); + } + + if (n) { + bdd_refs_push(low); + high = bdd_refs_sync(SYNC(sylvan_ite)); + bdd_refs_pop(1); + } + + result = sylvan_makenode(level, low, high); + + if (cachenow) { + if (cache_put3(CACHE_BDD_ITE, a, b, c, result)) sylvan_stats_count(BDD_ITE_CACHEDPUT); + } + + return mark ? sylvan_not(result) : result; +} + +/** + * Calculate constrain a @ c + */ +TASK_IMPL_3(BDD, sylvan_constrain, BDD, a, BDD, b, BDDVAR, prev_level) +{ + /* Trivial cases */ + if (b == sylvan_true) return a; + if (b == sylvan_false) return sylvan_false; + if (sylvan_isconst(a)) return a; + if (a == b) return sylvan_true; + if (a == sylvan_not(b)) return sylvan_false; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + /* Count operation */ + sylvan_stats_count(BDD_CONSTRAIN); + + // a != constant and b != constant + bddnode_t na = GETNODE(a); + bddnode_t nb = GETNODE(b); + + BDDVAR va = bddnode_getvariable(na); + BDDVAR vb = bddnode_getvariable(nb); + BDDVAR level = va < vb ? va : vb; + + // CONSULT CACHE + + int cachenow = granularity < 2 || prev_level == 0 ? 
1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_CONSTRAIN, a, b, 0, &result)) { + sylvan_stats_count(BDD_CONSTRAIN_CACHED); + return result; + } + } + + // DETERMINE TOP BDDVAR AND COFACTORS + + BDD aLow, aHigh, bLow, bHigh; + + if (level == va) { + aLow = node_low(a, na); + aHigh = node_high(a, na); + } else { + aLow = aHigh = a; + } + + if (level == vb) { + bLow = node_low(b, nb); + bHigh = node_high(b, nb); + } else { + bLow = bHigh = b; + } + + BDD result; + + BDD low=sylvan_invalid, high=sylvan_invalid; + if (bLow == sylvan_false) return CALL(sylvan_constrain, aHigh, bHigh, level); + if (bLow == sylvan_true) { + if (bHigh == sylvan_false) return aLow; + if (bHigh == sylvan_true) { + result = sylvan_makenode(level, aLow, bHigh); + } else { + high = CALL(sylvan_constrain, aHigh, bHigh, level); + result = sylvan_makenode(level, aLow, high); + } + } else { + if (bHigh == sylvan_false) return CALL(sylvan_constrain, aLow, bLow, level); + if (bHigh == sylvan_true) { + low = CALL(sylvan_constrain, aLow, bLow, level); + result = sylvan_makenode(level, low, bHigh); + } else { + bdd_refs_spawn(SPAWN(sylvan_constrain, aLow, bLow, level)); + high = CALL(sylvan_constrain, aHigh, bHigh, level); + bdd_refs_push(high); + low = bdd_refs_sync(SYNC(sylvan_constrain)); + bdd_refs_pop(1); + result = sylvan_makenode(level, low, high); + } + } + + if (cachenow) { + if (cache_put3(CACHE_BDD_CONSTRAIN, a, b, 0, result)) sylvan_stats_count(BDD_CONSTRAIN_CACHEDPUT); + } + + return result; +} + +/** + * Calculate restrict a @ b + */ +TASK_IMPL_3(BDD, sylvan_restrict, BDD, a, BDD, b, BDDVAR, prev_level) +{ + /* Trivial cases */ + if (b == sylvan_true) return a; + if (b == sylvan_false) return sylvan_false; + if (sylvan_isconst(a)) return a; + if (a == b) return sylvan_true; + if (a == sylvan_not(b)) return sylvan_false; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + /* Count operation */ + sylvan_stats_count(BDD_RESTRICT); + + // a != constant and b != constant + bddnode_t na = GETNODE(a); + bddnode_t nb = GETNODE(b); + + BDDVAR va = bddnode_getvariable(na); + BDDVAR vb = bddnode_getvariable(nb); + BDDVAR level = va < vb ? va : vb; + + /* Consult cache */ + int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_RESTRICT, a, b, 0, &result)) { + sylvan_stats_count(BDD_RESTRICT_CACHED); + return result; + } + } + + BDD result; + + if (vb < va) { + BDD c = CALL(sylvan_ite, node_low(b,nb), sylvan_true, node_high(b,nb), 0); + bdd_refs_push(c); + result = CALL(sylvan_restrict, a, c, level); + bdd_refs_pop(1); + } else { + BDD aLow=node_low(a,na),aHigh=node_high(a,na),bLow=b,bHigh=b; + if (va == vb) { + bLow = node_low(b,nb); + bHigh = node_high(b,nb); + } + if (bLow == sylvan_false) { + result = CALL(sylvan_restrict, aHigh, bHigh, level); + } else if (bHigh == sylvan_false) { + result = CALL(sylvan_restrict, aLow, bLow, level); + } else { + bdd_refs_spawn(SPAWN(sylvan_restrict, aLow, bLow, level)); + BDD high = CALL(sylvan_restrict, aHigh, bHigh, level); + bdd_refs_push(high); + BDD low = bdd_refs_sync(SYNC(sylvan_restrict)); + bdd_refs_pop(1); + result = sylvan_makenode(level, low, high); + } + } + + if (cachenow) { + if (cache_put3(CACHE_BDD_RESTRICT, a, b, 0, result)) sylvan_stats_count(BDD_RESTRICT_CACHEDPUT); + } + + return result; +} + +/** + * Calculates \exists variables . 
a + */ +TASK_IMPL_3(BDD, sylvan_exists, BDD, a, BDD, variables, BDDVAR, prev_level) +{ + /* Terminal cases */ + if (a == sylvan_true) return sylvan_true; + if (a == sylvan_false) return sylvan_false; + if (sylvan_set_isempty(variables)) return a; + + // a != constant + bddnode_t na = GETNODE(a); + BDDVAR level = bddnode_getvariable(na); + + bddnode_t nv = GETNODE(variables); + BDDVAR vv = bddnode_getvariable(nv); + while (vv < level) { + variables = node_high(variables, nv); + if (sylvan_set_isempty(variables)) return a; + nv = GETNODE(variables); + vv = bddnode_getvariable(nv); + } + + sylvan_gc_test(); + + sylvan_stats_count(BDD_EXISTS); + + int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_EXISTS, a, variables, 0, &result)) { + sylvan_stats_count(BDD_EXISTS_CACHED); + return result; + } + } + + // Get cofactors + BDD aLow = node_low(a, na); + BDD aHigh = node_high(a, na); + + BDD result; + + if (vv == level) { + // level is in variable set, perform abstraction + if (aLow == sylvan_true || aHigh == sylvan_true || aLow == sylvan_not(aHigh)) { + result = sylvan_true; + } else { + BDD _v = sylvan_set_next(variables); + BDD low = CALL(sylvan_exists, aLow, _v, level); + if (low == sylvan_true) { + result = sylvan_true; + } else { + bdd_refs_push(low); + BDD high = CALL(sylvan_exists, aHigh, _v, level); + if (high == sylvan_true) { + result = sylvan_true; + bdd_refs_pop(1); + } else if (low == sylvan_false && high == sylvan_false) { + result = sylvan_false; + bdd_refs_pop(1); + } else { + bdd_refs_push(high); + result = sylvan_or(low, high); + bdd_refs_pop(2); + } + } + } + } else { + // level is not in variable set + BDD low, high; + bdd_refs_spawn(SPAWN(sylvan_exists, aHigh, variables, level)); + low = CALL(sylvan_exists, aLow, variables, level); + bdd_refs_push(low); + high = bdd_refs_sync(SYNC(sylvan_exists)); + bdd_refs_pop(1); + result = sylvan_makenode(level, low, high); + } + + if (cachenow) { + if (cache_put3(CACHE_BDD_EXISTS, a, variables, 0, result)) sylvan_stats_count(BDD_EXISTS_CACHEDPUT); + } + + return result; +} + +/** + * Calculate exists(a AND b, v) + */ +TASK_IMPL_4(BDD, sylvan_and_exists, BDD, a, BDD, b, BDDSET, v, BDDVAR, prev_level) +{ + /* Terminal cases */ + if (a == sylvan_false) return sylvan_false; + if (b == sylvan_false) return sylvan_false; + if (a == sylvan_not(b)) return sylvan_false; + if (a == sylvan_true && b == sylvan_true) return sylvan_true; + + /* Cases that reduce to "exists" and "and" */ + if (a == sylvan_true) return CALL(sylvan_exists, b, v, 0); + if (b == sylvan_true) return CALL(sylvan_exists, a, v, 0); + if (a == b) return CALL(sylvan_exists, a, v, 0); + if (sylvan_set_isempty(v)) return sylvan_and(a, b); + + /* At this point, a and b are proper nodes, and v is non-empty */ + + /* Improve for caching */ + if (BDD_STRIPMARK(a) > BDD_STRIPMARK(b)) { + BDD t = b; + b = a; + a = t; + } + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + sylvan_stats_count(BDD_AND_EXISTS); + + // a != constant + bddnode_t na = GETNODE(a); + bddnode_t nb = GETNODE(b); + bddnode_t nv = GETNODE(v); + + BDDVAR va = bddnode_getvariable(na); + BDDVAR vb = bddnode_getvariable(nb); + BDDVAR vv = bddnode_getvariable(nv); + BDDVAR level = va < vb ? 
va : vb; + + /* Skip levels in v that are not in a and b */ + while (vv < level) { + v = node_high(v, nv); // get next variable in conjunction + if (sylvan_set_isempty(v)) return sylvan_and(a, b); + nv = GETNODE(v); + vv = bddnode_getvariable(nv); + } + + BDD result; + + int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; + if (cachenow) { + if (cache_get3(CACHE_BDD_AND_EXISTS, a, b, v, &result)) { + sylvan_stats_count(BDD_AND_EXISTS_CACHED); + return result; + } + } + + // Get cofactors + BDD aLow, aHigh, bLow, bHigh; + if (level == va) { + aLow = node_low(a, na); + aHigh = node_high(a, na); + } else { + aLow = a; + aHigh = a; + } + if (level == vb) { + bLow = node_low(b, nb); + bHigh = node_high(b, nb); + } else { + bLow = b; + bHigh = b; + } + + if (level == vv) { + // level is in variable set, perform abstraction + BDD _v = node_high(v, nv); + BDD low = CALL(sylvan_and_exists, aLow, bLow, _v, level); + if (low == sylvan_true || low == aHigh || low == bHigh) { + result = low; + } else { + bdd_refs_push(low); + BDD high; + if (low == sylvan_not(aHigh)) { + high = CALL(sylvan_exists, bHigh, _v, 0); + } else if (low == sylvan_not(bHigh)) { + high = CALL(sylvan_exists, aHigh, _v, 0); + } else { + high = CALL(sylvan_and_exists, aHigh, bHigh, _v, level); + } + if (high == sylvan_true) { + result = sylvan_true; + bdd_refs_pop(1); + } else if (high == sylvan_false) { + result = low; + bdd_refs_pop(1); + } else if (low == sylvan_false) { + result = high; + bdd_refs_pop(1); + } else { + bdd_refs_push(high); + result = sylvan_or(low, high); + bdd_refs_pop(2); + } + } + } else { + // level is not in variable set + bdd_refs_spawn(SPAWN(sylvan_and_exists, aHigh, bHigh, v, level)); + BDD low = CALL(sylvan_and_exists, aLow, bLow, v, level); + bdd_refs_push(low); + BDD high = bdd_refs_sync(SYNC(sylvan_and_exists)); + bdd_refs_pop(1); + result = sylvan_makenode(level, low, high); + } + + if (cachenow) { + if (cache_put3(CACHE_BDD_AND_EXISTS, a, b, v, result)) sylvan_stats_count(BDD_AND_EXISTS_CACHEDPUT); + } + + return result; +} + + +TASK_IMPL_4(BDD, sylvan_relnext, BDD, a, BDD, b, BDDSET, vars, BDDVAR, prev_level) +{ + /* Compute R(s) = \exists x: A(x) \and B(x,s) with support(result) = s, support(A) = s, support(B) = s+t + * if vars == sylvan_false, then every level is in s or t + * any other levels (outside s,t) in B are ignored / existentially quantified + */ + + /* Terminals */ + if (a == sylvan_true && b == sylvan_true) return sylvan_true; + if (a == sylvan_false) return sylvan_false; + if (b == sylvan_false) return sylvan_false; + if (sylvan_set_isempty(vars)) return a; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + /* Count operation */ + sylvan_stats_count(BDD_RELNEXT); + + /* Determine top level */ + bddnode_t na = sylvan_isconst(a) ? 0 : GETNODE(a); + bddnode_t nb = sylvan_isconst(b) ? 0 : GETNODE(b); + + BDDVAR va = na ? bddnode_getvariable(na) : 0xffffffff; + BDDVAR vb = nb ? bddnode_getvariable(nb) : 0xffffffff; + BDDVAR level = va < vb ? 
va : vb; + + /* Skip vars */ + int is_s_or_t = 0; + bddnode_t nv = 0; + if (vars == sylvan_false) { + is_s_or_t = 1; + } else { + nv = GETNODE(vars); + for (;;) { + /* check if level is s/t */ + BDDVAR vv = bddnode_getvariable(nv); + if (level == vv || (level^1) == vv) { + is_s_or_t = 1; + break; + } + /* check if level < s/t */ + if (level < vv) break; + vars = node_high(vars, nv); // get next in vars + if (sylvan_set_isempty(vars)) return a; + nv = GETNODE(vars); + } + } + + /* Consult cache */ + int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_RELNEXT, a, b, vars, &result)) { + sylvan_stats_count(BDD_RELNEXT_CACHED); + return result; + } + } + + BDD result; + + if (is_s_or_t) { + /* Get s and t */ + BDDVAR s = level & (~1); + BDDVAR t = s+1; + + BDD a0, a1, b0, b1; + if (na && va == s) { + a0 = node_low(a, na); + a1 = node_high(a, na); + } else { + a0 = a1 = a; + } + if (nb && vb == s) { + b0 = node_low(b, nb); + b1 = node_high(b, nb); + } else { + b0 = b1 = b; + } + + BDD b00, b01, b10, b11; + if (!sylvan_isconst(b0)) { + bddnode_t nb0 = GETNODE(b0); + if (bddnode_getvariable(nb0) == t) { + b00 = node_low(b0, nb0); + b01 = node_high(b0, nb0); + } else { + b00 = b01 = b0; + } + } else { + b00 = b01 = b0; + } + if (!sylvan_isconst(b1)) { + bddnode_t nb1 = GETNODE(b1); + if (bddnode_getvariable(nb1) == t) { + b10 = node_low(b1, nb1); + b11 = node_high(b1, nb1); + } else { + b10 = b11 = b1; + } + } else { + b10 = b11 = b1; + } + + BDD _vars = vars == sylvan_false ? sylvan_false : node_high(vars, nv); + + bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b00, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b10, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b01, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b11, _vars, level)); + + BDD f = bdd_refs_sync(SYNC(sylvan_relnext)); bdd_refs_push(f); + BDD e = bdd_refs_sync(SYNC(sylvan_relnext)); bdd_refs_push(e); + BDD d = bdd_refs_sync(SYNC(sylvan_relnext)); bdd_refs_push(d); + BDD c = bdd_refs_sync(SYNC(sylvan_relnext)); bdd_refs_push(c); + + bdd_refs_spawn(SPAWN(sylvan_ite, c, sylvan_true, d, 0)); /* a0 b00 \or a1 b01 */ + bdd_refs_spawn(SPAWN(sylvan_ite, e, sylvan_true, f, 0)); /* a0 b01 \or a1 b11 */ + + /* R1 */ d = bdd_refs_sync(SYNC(sylvan_ite)); bdd_refs_push(d); + /* R0 */ c = bdd_refs_sync(SYNC(sylvan_ite)); // not necessary: bdd_refs_push(c); + + bdd_refs_pop(5); + result = sylvan_makenode(s, c, d); + } else { + /* Variable not in vars! 
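A small usage sketch for sylvan_relnext (illustrative only), using the interleaved convention assumed by the s = level & ~1, t = s + 1 pairing above: even levels are current-state bits, odd levels their primed copies. Passing sylvan_false as the variable set means every level is treated as a state variable, as documented above. Convenience wrappers without the prev_level argument are presumably provided by sylvan_bdd.h (not part of this hunk), so the sketch uses the raw CALL form; reference management is omitted for brevity.

    LACE_ME;                                 /* obtain a Lace worker context */

    BDD x   = sylvan_ithvar(0);              /* current-state bit            */
    BDD xp  = sylvan_ithvar(1);              /* its next-state copy          */
    BDD rel = CALL(sylvan_xor, x, xp, 0);    /* relation: x' = !x            */
    BDD set = x;                             /* states with x = 1            */

    /* successor states: here all states with x = 0, i.e. sylvan_not(x) */
    BDD next = CALL(sylvan_relnext, set, rel, sylvan_false, 0);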
Take a, quantify b */ + BDD a0, a1, b0, b1; + if (na && va == level) { + a0 = node_low(a, na); + a1 = node_high(a, na); + } else { + a0 = a1 = a; + } + if (nb && vb == level) { + b0 = node_low(b, nb); + b1 = node_high(b, nb); + } else { + b0 = b1 = b; + } + + if (b0 != b1) { + if (a0 == a1) { + /* Quantify "b" variables */ + bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b0, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b1, vars, level)); + + BDD r1 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r1); + BDD r0 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r0); + result = sylvan_or(r0, r1); + bdd_refs_pop(2); + } else { + /* Quantify "b" variables, but keep "a" variables */ + bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b0, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b1, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b0, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b1, vars, level)); + + BDD r11 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r11); + BDD r10 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r10); + BDD r01 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r01); + BDD r00 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r00); + + bdd_refs_spawn(SPAWN(sylvan_ite, r00, sylvan_true, r01, 0)); + bdd_refs_spawn(SPAWN(sylvan_ite, r10, sylvan_true, r11, 0)); + + BDD r1 = bdd_refs_sync(SYNC(sylvan_ite)); + bdd_refs_push(r1); + BDD r0 = bdd_refs_sync(SYNC(sylvan_ite)); + bdd_refs_pop(5); + + result = sylvan_makenode(level, r0, r1); + } + } else { + /* Keep "a" variables */ + bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b0, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b1, vars, level)); + + BDD r1 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r1); + BDD r0 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_pop(1); + result = sylvan_makenode(level, r0, r1); + } + } + + if (cachenow) { + if (cache_put3(CACHE_BDD_RELNEXT, a, b, vars, result)) sylvan_stats_count(BDD_RELNEXT_CACHEDPUT); + } + + return result; +} + +TASK_IMPL_4(BDD, sylvan_relprev, BDD, a, BDD, b, BDDSET, vars, BDDVAR, prev_level) +{ + /* Compute \exists x: A(s,x) \and B(x,t) + * if vars == sylvan_false, then every level is in s or t + * any other levels (outside s,t) in A are ignored / existentially quantified + */ + + /* Terminals */ + if (a == sylvan_true && b == sylvan_true) return sylvan_true; + if (a == sylvan_false) return sylvan_false; + if (b == sylvan_false) return sylvan_false; + if (sylvan_set_isempty(vars)) return b; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + /* Count operation */ + sylvan_stats_count(BDD_RELPREV); + + /* Determine top level */ + bddnode_t na = sylvan_isconst(a) ? 0 : GETNODE(a); + bddnode_t nb = sylvan_isconst(b) ? 0 : GETNODE(b); + + BDDVAR va = na ? bddnode_getvariable(na) : 0xffffffff; + BDDVAR vb = nb ? bddnode_getvariable(nb) : 0xffffffff; + BDDVAR level = va < vb ? va : vb; + + /* Skip vars */ + int is_s_or_t = 0; + bddnode_t nv = 0; + if (vars == sylvan_false) { + is_s_or_t = 1; + } else { + nv = GETNODE(vars); + for (;;) { + /* check if level is s/t */ + BDDVAR vv = bddnode_getvariable(nv); + if (level == vv || (level^1) == vv) { + is_s_or_t = 1; + break; + } + /* check if level < s/t */ + if (level < vv) break; + vars = node_high(vars, nv); // get next in vars + if (sylvan_set_isempty(vars)) return b; + nv = GETNODE(vars); + } + } + + /* Consult cache */ + int cachenow = granularity < 2 || prev_level == 0 ? 
1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_RELPREV, a, b, vars, &result)) { + sylvan_stats_count(BDD_RELPREV_CACHED); + return result; + } + } + + BDD result; + + if (is_s_or_t) { + /* Get s and t */ + BDDVAR s = level & (~1); + BDDVAR t = s+1; + + BDD a0, a1, b0, b1; + if (na && va == s) { + a0 = node_low(a, na); + a1 = node_high(a, na); + } else { + a0 = a1 = a; + } + if (nb && vb == s) { + b0 = node_low(b, nb); + b1 = node_high(b, nb); + } else { + b0 = b1 = b; + } + + BDD a00, a01, a10, a11; + if (!sylvan_isconst(a0)) { + bddnode_t na0 = GETNODE(a0); + if (bddnode_getvariable(na0) == t) { + a00 = node_low(a0, na0); + a01 = node_high(a0, na0); + } else { + a00 = a01 = a0; + } + } else { + a00 = a01 = a0; + } + if (!sylvan_isconst(a1)) { + bddnode_t na1 = GETNODE(a1); + if (bddnode_getvariable(na1) == t) { + a10 = node_low(a1, na1); + a11 = node_high(a1, na1); + } else { + a10 = a11 = a1; + } + } else { + a10 = a11 = a1; + } + + BDD b00, b01, b10, b11; + if (!sylvan_isconst(b0)) { + bddnode_t nb0 = GETNODE(b0); + if (bddnode_getvariable(nb0) == t) { + b00 = node_low(b0, nb0); + b01 = node_high(b0, nb0); + } else { + b00 = b01 = b0; + } + } else { + b00 = b01 = b0; + } + if (!sylvan_isconst(b1)) { + bddnode_t nb1 = GETNODE(b1); + if (bddnode_getvariable(nb1) == t) { + b10 = node_low(b1, nb1); + b11 = node_high(b1, nb1); + } else { + b10 = b11 = b1; + } + } else { + b10 = b11 = b1; + } + + BDD _vars; + if (vars != sylvan_false) { + _vars = node_high(vars, nv); + if (sylvan_set_var(_vars) == t) _vars = sylvan_set_next(_vars); + } else { + _vars = sylvan_false; + } + + if (b00 == b01) { + bdd_refs_spawn(SPAWN(sylvan_relprev, a00, b0, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a10, b0, _vars, level)); + } else { + bdd_refs_spawn(SPAWN(sylvan_relprev, a00, b00, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a00, b01, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a10, b00, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a10, b01, _vars, level)); + } + + if (b10 == b11) { + bdd_refs_spawn(SPAWN(sylvan_relprev, a01, b1, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a11, b1, _vars, level)); + } else { + bdd_refs_spawn(SPAWN(sylvan_relprev, a01, b10, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a01, b11, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a11, b10, _vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a11, b11, _vars, level)); + } + + BDD r00, r01, r10, r11; + + if (b10 == b11) { + r11 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + r01 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + } else { + BDD r111 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + BDD r110 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + r11 = sylvan_makenode(t, r110, r111); + bdd_refs_pop(2); + bdd_refs_push(r11); + BDD r011 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + BDD r010 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + r01 = sylvan_makenode(t, r010, r011); + bdd_refs_pop(2); + bdd_refs_push(r01); + } + + if (b00 == b01) { + r10 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + r00 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + } else { + BDD r101 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + BDD r100 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + r10 = sylvan_makenode(t, r100, r101); + bdd_refs_pop(2); + bdd_refs_push(r10); + BDD r001 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + BDD 
r000 = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_relprev))); + r00 = sylvan_makenode(t, r000, r001); + bdd_refs_pop(2); + bdd_refs_push(r00); + } + + bdd_refs_spawn(SPAWN(sylvan_and, sylvan_not(r00), sylvan_not(r01), 0)); + bdd_refs_spawn(SPAWN(sylvan_and, sylvan_not(r10), sylvan_not(r11), 0)); + + BDD r1 = sylvan_not(bdd_refs_push(bdd_refs_sync(SYNC(sylvan_and)))); + BDD r0 = sylvan_not(bdd_refs_sync(SYNC(sylvan_and))); + bdd_refs_pop(5); + result = sylvan_makenode(s, r0, r1); + } else { + BDD a0, a1, b0, b1; + if (na && va == level) { + a0 = node_low(a, na); + a1 = node_high(a, na); + } else { + a0 = a1 = a; + } + if (nb && vb == level) { + b0 = node_low(b, nb); + b1 = node_high(b, nb); + } else { + b0 = b1 = b; + } + + if (a0 != a1) { + if (b0 == b1) { + /* Quantify "a" variables */ + bdd_refs_spawn(SPAWN(sylvan_relprev, a0, b0, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a1, b1, vars, level)); + + BDD r1 = bdd_refs_sync(SYNC(sylvan_relprev)); + bdd_refs_push(r1); + BDD r0 = bdd_refs_sync(SYNC(sylvan_relprev)); + bdd_refs_push(r0); + result = CALL(sylvan_ite, r0, sylvan_true, r1, 0); + bdd_refs_pop(2); + + } else { + /* Quantify "a" variables, but keep "b" variables */ + bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b0, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b0, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a0, b1, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relnext, a1, b1, vars, level)); + + BDD r11 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r11); + BDD r01 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r01); + BDD r10 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r10); + BDD r00 = bdd_refs_sync(SYNC(sylvan_relnext)); + bdd_refs_push(r00); + + bdd_refs_spawn(SPAWN(sylvan_ite, r00, sylvan_true, r10, 0)); + bdd_refs_spawn(SPAWN(sylvan_ite, r01, sylvan_true, r11, 0)); + + BDD r1 = bdd_refs_sync(SYNC(sylvan_ite)); + bdd_refs_push(r1); + BDD r0 = bdd_refs_sync(SYNC(sylvan_ite)); + bdd_refs_pop(5); + + result = sylvan_makenode(level, r0, r1); + } + } else { + bdd_refs_spawn(SPAWN(sylvan_relprev, a0, b0, vars, level)); + bdd_refs_spawn(SPAWN(sylvan_relprev, a1, b1, vars, level)); + + BDD r1 = bdd_refs_sync(SYNC(sylvan_relprev)); + bdd_refs_push(r1); + BDD r0 = bdd_refs_sync(SYNC(sylvan_relprev)); + bdd_refs_pop(1); + result = sylvan_makenode(level, r0, r1); + } + } + + if (cachenow) { + if (cache_put3(CACHE_BDD_RELPREV, a, b, vars, result)) sylvan_stats_count(BDD_RELPREV_CACHEDPUT); + } + + return result; +} + +/** + * Computes the transitive closure by traversing the BDD recursively. + * See Y. Matsunaga, P. C. McGeer, R. K. Brayton + * On Computing the Transitive Closre of a State Transition Relation + * 30th ACM Design Automation Conference, 1993. + */ +TASK_IMPL_2(BDD, sylvan_closure, BDD, a, BDDVAR, prev_level) +{ + /* Terminals */ + if (a == sylvan_true) return a; + if (a == sylvan_false) return a; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + /* Count operation */ + sylvan_stats_count(BDD_CLOSURE); + + /* Determine top level */ + bddnode_t n = GETNODE(a); + BDDVAR level = bddnode_getvariable(n); + + /* Consult cache */ + int cachenow = granularity < 2 || prev_level == 0 ? 
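Continuing the conventions of the relnext sketch above (illustrative only, interleaved current/next variables), the transitive closure of a one-step relation is obtained directly from sylvan_closure:

    LACE_ME;

    /* 'rel' encodes one-step transitions over interleaved current/next variables */
    BDD rel_star = CALL(sylvan_closure, rel, 0);   /* pairs reachable in one or more steps */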
1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_CLOSURE, a, 0, 0, &result)) { + sylvan_stats_count(BDD_CLOSURE_CACHED); + return result; + } + } + + BDDVAR s = level & (~1); + BDDVAR t = s+1; + + BDD a0, a1; + if (level == s) { + a0 = node_low(a, n); + a1 = node_high(a, n); + } else { + a0 = a1 = a; + } + + BDD a00, a01, a10, a11; + if (!sylvan_isconst(a0)) { + bddnode_t na0 = GETNODE(a0); + if (bddnode_getvariable(na0) == t) { + a00 = node_low(a0, na0); + a01 = node_high(a0, na0); + } else { + a00 = a01 = a0; + } + } else { + a00 = a01 = a0; + } + if (!sylvan_isconst(a1)) { + bddnode_t na1 = GETNODE(a1); + if (bddnode_getvariable(na1) == t) { + a10 = node_low(a1, na1); + a11 = node_high(a1, na1); + } else { + a10 = a11 = a1; + } + } else { + a10 = a11 = a1; + } + + BDD u1 = CALL(sylvan_closure, a11, level); + bdd_refs_push(u1); + /* u3 = */ bdd_refs_spawn(SPAWN(sylvan_relprev, a01, u1, sylvan_false, level)); + BDD u2 = CALL(sylvan_relprev, u1, a10, sylvan_false, level); + bdd_refs_push(u2); + BDD e = CALL(sylvan_relprev, a01, u2, sylvan_false, level); + bdd_refs_push(e); + e = CALL(sylvan_ite, a00, sylvan_true, e, level); + bdd_refs_pop(1); + bdd_refs_push(e); + e = CALL(sylvan_closure, e, level); + bdd_refs_pop(1); + bdd_refs_push(e); + BDD g = CALL(sylvan_relprev, u2, e, sylvan_false, level); + bdd_refs_push(g); + BDD u3 = bdd_refs_sync(SYNC(sylvan_relprev)); + bdd_refs_push(u3); + BDD f = CALL(sylvan_relprev, e, u3, sylvan_false, level); + bdd_refs_push(f); + BDD h = CALL(sylvan_relprev, u2, f, sylvan_false, level); + bdd_refs_push(h); + h = CALL(sylvan_ite, u1, sylvan_true, h, level); + bdd_refs_pop(1); + bdd_refs_push(h); + + BDD r0, r1; + /* R0 */ r0 = sylvan_makenode(t, e, f); + bdd_refs_pop(7); + bdd_refs_push(r0); + /* R1 */ r1 = sylvan_makenode(t, g, h); + bdd_refs_pop(1); + BDD result = sylvan_makenode(s, r0, r1); + + if (cachenow) { + if (cache_put3(CACHE_BDD_CLOSURE, a, 0, 0, result)) sylvan_stats_count(BDD_CLOSURE_CACHEDPUT); + } + + return result; +} + + +/** + * Function composition + */ +TASK_IMPL_3(BDD, sylvan_compose, BDD, a, BDDMAP, map, BDDVAR, prev_level) +{ + /* Trivial cases */ + if (a == sylvan_false || a == sylvan_true) return a; + if (sylvan_map_isempty(map)) return a; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + /* Count operation */ + sylvan_stats_count(BDD_COMPOSE); + + /* Determine top level */ + bddnode_t n = GETNODE(a); + BDDVAR level = bddnode_getvariable(n); + + /* Skip map */ + bddnode_t map_node = GETNODE(map); + BDDVAR map_var = bddnode_getvariable(map_node); + while (map_var < level) { + map = node_low(map, map_node); + if (sylvan_map_isempty(map)) return a; + map_node = GETNODE(map); + map_var = bddnode_getvariable(map_node); + } + + /* Consult cache */ + int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; + if (cachenow) { + BDD result; + if (cache_get3(CACHE_BDD_COMPOSE, a, map, 0, &result)) { + sylvan_stats_count(BDD_COMPOSE_CACHED); + return result; + } + } + + /* Recursively calculate low and high */ + bdd_refs_spawn(SPAWN(sylvan_compose, node_low(a, n), map, level)); + BDD high = CALL(sylvan_compose, node_high(a, n), map, level); + bdd_refs_push(high); + BDD low = bdd_refs_sync(SYNC(sylvan_compose)); + bdd_refs_push(low); + + /* Calculate result */ + BDD root = map_var == level ? 
node_high(map, map_node) : sylvan_ithvar(level); + bdd_refs_push(root); + BDD result = CALL(sylvan_ite, root, high, low, 0); + bdd_refs_pop(3); + + if (cachenow) { + if (cache_put3(CACHE_BDD_COMPOSE, a, map, 0, result)) sylvan_stats_count(BDD_COMPOSE_CACHEDPUT); + } + + return result; +} + +/** + * Count number of nodes in BDD + */ +uint64_t sylvan_nodecount_do_1(BDD a) +{ + if (sylvan_isconst(a)) return 0; + bddnode_t na = GETNODE(a); + if (bddnode_getmark(na)) return 0; + bddnode_setmark(na, 1); + uint64_t result = 1; + result += sylvan_nodecount_do_1(bddnode_getlow(na)); + result += sylvan_nodecount_do_1(bddnode_gethigh(na)); + return result; +} + +void sylvan_nodecount_do_2(BDD a) +{ + if (sylvan_isconst(a)) return; + bddnode_t na = GETNODE(a); + if (!bddnode_getmark(na)) return; + bddnode_setmark(na, 0); + sylvan_nodecount_do_2(bddnode_getlow(na)); + sylvan_nodecount_do_2(bddnode_gethigh(na)); +} + +size_t sylvan_nodecount(BDD a) +{ + uint32_t result = sylvan_nodecount_do_1(a); + sylvan_nodecount_do_2(a); + return result; +} + +/** + * Calculate the number of distinct paths to True. + */ +TASK_IMPL_2(double, sylvan_pathcount, BDD, bdd, BDDVAR, prev_level) +{ + /* Trivial cases */ + if (bdd == sylvan_false) return 0.0; + if (bdd == sylvan_true) return 1.0; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + sylvan_stats_count(BDD_PATHCOUNT); + + BDD level = sylvan_var(bdd); + + /* Consult cache */ + int cachenow = granularity < 2 || prev_level == 0 ? 1 : prev_level / granularity != level / granularity; + if (cachenow) { + double result; + if (cache_get3(CACHE_BDD_PATHCOUNT, bdd, 0, 0, (uint64_t*)&result)) { + sylvan_stats_count(BDD_PATHCOUNT_CACHED); + return result; + } + } + + SPAWN(sylvan_pathcount, sylvan_low(bdd), level); + SPAWN(sylvan_pathcount, sylvan_high(bdd), level); + double res1 = SYNC(sylvan_pathcount); + res1 += SYNC(sylvan_pathcount); + + if (cachenow) { + if (cache_put3(CACHE_BDD_PATHCOUNT, bdd, 0, 0, *(uint64_t*)&res1)) sylvan_stats_count(BDD_PATHCOUNT_CACHEDPUT); + } + + return res1; +} + +/** + * Calculate the number of satisfying variable assignments according to . + */ +TASK_IMPL_3(double, sylvan_satcount, BDD, bdd, BDDSET, variables, BDDVAR, prev_level) +{ + /* Trivial cases */ + if (bdd == sylvan_false) return 0.0; + if (bdd == sylvan_true) return powl(2.0L, sylvan_set_count(variables)); + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + sylvan_stats_count(BDD_SATCOUNT); + + /* Count variables before var(bdd) */ + size_t skipped = 0; + BDDVAR var = sylvan_var(bdd); + bddnode_t set_node = GETNODE(variables); + BDDVAR set_var = bddnode_getvariable(set_node); + while (var != set_var) { + skipped++; + variables = node_high(variables, set_node); + // if this assertion fails, then variables is not the support of + assert(!sylvan_set_isempty(variables)); + set_node = GETNODE(variables); + set_var = bddnode_getvariable(set_node); + } + + union { + double d; + uint64_t s; + } hack; + + /* Consult cache */ + int cachenow = granularity < 2 || prev_level == 0 ? 
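The two counting operations differ in what they count: sylvan_pathcount counts distinct paths to the true terminal, while sylvan_satcount weights the result by the variables of a given support set that each path skips. A brief illustrative sketch (raw CALL forms again; constructing the BDDSET needed by sylvan_satcount requires the set helpers from sylvan_bdd.h, which are outside this hunk):

    LACE_ME;

    BDD x0 = sylvan_ithvar(0);
    BDD x1 = sylvan_ithvar(1);
    BDD f  = CALL(sylvan_ite, x0, sylvan_true, x1, 0);   /* f = x0 OR x1 */

    double paths = CALL(sylvan_pathcount, f, 0);  /* 2.0: x0=1, and x0=0 with x1=1 */
    /* over the support {x0, x1}, sylvan_satcount would report 3.0 assignments */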
1 : prev_level / granularity != var / granularity; + if (cachenow) { + if (cache_get3(CACHE_BDD_SATCOUNT, bdd, variables, 0, &hack.s)) { + sylvan_stats_count(BDD_SATCOUNT_CACHED); + return hack.d * powl(2.0L, skipped); + } + } + + SPAWN(sylvan_satcount, sylvan_high(bdd), node_high(variables, set_node), var); + double low = CALL(sylvan_satcount, sylvan_low(bdd), node_high(variables, set_node), var); + double result = low + SYNC(sylvan_satcount); + + if (cachenow) { + hack.d = result; + if (cache_put3(CACHE_BDD_SATCOUNT, bdd, variables, 0, hack.s)) sylvan_stats_count(BDD_SATCOUNT_CACHEDPUT); + } + + return result * powl(2.0L, skipped); +} + +int +sylvan_sat_one(BDD bdd, BDDSET vars, uint8_t *str) +{ + if (bdd == sylvan_false) return 0; + if (str == NULL) return 0; + if (sylvan_set_isempty(vars)) return 1; + + for (;;) { + bddnode_t n_vars = GETNODE(vars); + if (bdd == sylvan_true) { + *str = 0; + } else { + bddnode_t n_bdd = GETNODE(bdd); + if (bddnode_getvariable(n_bdd) != bddnode_getvariable(n_vars)) { + *str = 0; + } else { + if (node_low(bdd, n_bdd) == sylvan_false) { + // take high edge + *str = 1; + bdd = node_high(bdd, n_bdd); + } else { + // take low edge + *str = 0; + bdd = node_low(bdd, n_bdd); + } + } + } + vars = node_high(vars, n_vars); + if (sylvan_set_isempty(vars)) break; + str++; + } + + return 1; +} + +BDD +sylvan_sat_one_bdd(BDD bdd) +{ + if (bdd == sylvan_false) return sylvan_false; + if (bdd == sylvan_true) return sylvan_true; + + bddnode_t node = GETNODE(bdd); + BDD low = node_low(bdd, node); + BDD high = node_high(bdd, node); + + BDD m; + + BDD result; + if (low == sylvan_false) { + m = sylvan_sat_one_bdd(high); + result = sylvan_makenode(bddnode_getvariable(node), sylvan_false, m); + } else if (high == sylvan_false) { + m = sylvan_sat_one_bdd(low); + result = sylvan_makenode(bddnode_getvariable(node), m, sylvan_false); + } else { + if (rand() & 0x2000) { + m = sylvan_sat_one_bdd(low); + result = sylvan_makenode(bddnode_getvariable(node), m, sylvan_false); + } else { + m = sylvan_sat_one_bdd(high); + result = sylvan_makenode(bddnode_getvariable(node), sylvan_false, m); + } + } + + return result; +} + +BDD +sylvan_cube(BDDSET vars, uint8_t *cube) +{ + if (sylvan_set_isempty(vars)) return sylvan_true; + + bddnode_t n = GETNODE(vars); + BDDVAR v = bddnode_getvariable(n); + vars = node_high(vars, n); + + BDD result = sylvan_cube(vars, cube+1); + if (*cube == 0) { + result = sylvan_makenode(v, result, sylvan_false); + } else if (*cube == 1) { + result = sylvan_makenode(v, sylvan_false, result); + } + + return result; +} + +TASK_IMPL_3(BDD, sylvan_union_cube, BDD, bdd, BDDSET, vars, uint8_t *, cube) +{ + /* Terminal cases */ + if (bdd == sylvan_true) return sylvan_true; + if (bdd == sylvan_false) return sylvan_cube(vars, cube); + if (sylvan_set_isempty(vars)) return sylvan_true; + + bddnode_t nv = GETNODE(vars); + + for (;;) { + if (*cube == 0 || *cube == 1) break; + // *cube should be 2 + cube++; + vars = node_high(vars, nv); + if (sylvan_set_isempty(vars)) return sylvan_true; + nv = GETNODE(vars); + } + + sylvan_gc_test(); + + // missing: SV_CNT_OP + + bddnode_t n = GETNODE(bdd); + BDD result = bdd; + BDDVAR v = bddnode_getvariable(nv); + BDDVAR n_level = bddnode_getvariable(n); + + if (v < n_level) { + vars = node_high(vars, nv); + if (*cube == 0) { + result = sylvan_union_cube(bdd, vars, cube+1); + result = sylvan_makenode(v, result, bdd); + } else /* *cube == 1 */ { + result = sylvan_union_cube(bdd, vars, cube+1); + result = sylvan_makenode(v, bdd, result); + } + } else 
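A small usage sketch for the model-counting and witness functions above, assuming variables 0, 2 and 4 cover the support of f. The names are illustrative; stdio.h and stdint.h are assumed.

void satcount_example(BDD f)
{
    LACE_ME;
    BDDVAR arr[3] = {0, 2, 4};                        // input array must be ordered
    BDDSET vars = sylvan_set_fromarray(arr, 3);       // must cover the support of f
    sylvan_ref(vars);

    double count = sylvan_satcount(f, vars);          // assignments over exactly these variables
    uint8_t pick[3];
    if (sylvan_sat_one(f, vars, pick)) {
        BDD witness = sylvan_cube(vars, pick);        // cube agreeing with the picked assignment
        (void)witness;                                // if all values are 0/1, sylvan_and(witness, f) == witness
    }
    sylvan_deref(vars);
    printf("satcount = %f\n", count);
}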
if (v > n_level) { + BDD high = node_high(bdd, n); + BDD low = node_low(bdd, n); + SPAWN(sylvan_union_cube, high, vars, cube); + BDD new_low = sylvan_union_cube(low, vars, cube); + bdd_refs_push(new_low); + BDD new_high = SYNC(sylvan_union_cube); + bdd_refs_pop(1); + if (new_low != low || new_high != high) { + result = sylvan_makenode(n_level, new_low, new_high); + } + } else /* v == n_level */ { + vars = node_high(vars, nv); + BDD high = node_high(bdd, n); + BDD low = node_low(bdd, n); + if (*cube == 0) { + BDD new_low = sylvan_union_cube(low, vars, cube+1); + if (new_low != low) { + result = sylvan_makenode(n_level, new_low, high); + } + } else /* *cube == 1 */ { + BDD new_high = sylvan_union_cube(high, vars, cube+1); + if (new_high != high) { + result = sylvan_makenode(n_level, low, new_high); + } + } + } + + return result; +} + +struct bdd_path +{ + struct bdd_path *prev; + BDDVAR var; + int8_t val; // 0=false, 1=true, 2=both +}; + +VOID_TASK_5(sylvan_enum_do, BDD, bdd, BDDSET, vars, enum_cb, cb, void*, context, struct bdd_path*, path) +{ + if (bdd == sylvan_false) return; + + if (sylvan_set_isempty(vars)) { + /* bdd should now be true */ + assert(bdd == sylvan_true); + /* compute length of path */ + int i=0; + struct bdd_path *pp; + for (pp = path; pp != NULL; pp = pp->prev) i++; + /* if length is 0 (enum called with empty vars??), return */ + if (i == 0) return; + /* fill cube and vars with trace */ + uint8_t cube[i]; + BDDVAR vars[i]; + int j=0; + for (pp = path; pp != NULL; pp = pp->prev) { + cube[i-j-1] = pp->val; + vars[i-j-1] = pp->var; + j++; + } + /* call callback */ + WRAP(cb, context, vars, cube, i); + return; + } + + BDDVAR var = sylvan_var(vars); + vars = sylvan_set_next(vars); + BDDVAR bdd_var = sylvan_var(bdd); + + /* assert var <= bdd_var */ + if (bdd == sylvan_true || var < bdd_var) { + struct bdd_path pp0 = (struct bdd_path){path, var, 0}; + CALL(sylvan_enum_do, bdd, vars, cb, context, &pp0); + struct bdd_path pp1 = (struct bdd_path){path, var, 1}; + CALL(sylvan_enum_do, bdd, vars, cb, context, &pp1); + } else if (var == bdd_var) { + struct bdd_path pp0 = (struct bdd_path){path, var, 0}; + CALL(sylvan_enum_do, sylvan_low(bdd), vars, cb, context, &pp0); + struct bdd_path pp1 = (struct bdd_path){path, var, 1}; + CALL(sylvan_enum_do, sylvan_high(bdd), vars, cb, context, &pp1); + } else { + printf("var %u not expected (expecting %u)!\n", bdd_var, var); + assert(var <= bdd_var); + } +} + +VOID_TASK_5(sylvan_enum_par_do, BDD, bdd, BDDSET, vars, enum_cb, cb, void*, context, struct bdd_path*, path) +{ + if (bdd == sylvan_false) return; + + if (sylvan_set_isempty(vars)) { + /* bdd should now be true */ + assert(bdd == sylvan_true); + /* compute length of path */ + int i=0; + struct bdd_path *pp; + for (pp = path; pp != NULL; pp = pp->prev) i++; + /* if length is 0 (enum called with empty vars??), return */ + if (i == 0) return; + /* fill cube and vars with trace */ + uint8_t cube[i]; + BDDVAR vars[i]; + int j=0; + for (pp = path; pp != NULL; pp = pp->prev) { + cube[i-j-1] = pp->val; + vars[i-j-1] = pp->var; + j++; + } + /* call callback */ + WRAP(cb, context, vars, cube, i); + return; + } + + BDD var = sylvan_var(vars); + vars = sylvan_set_next(vars); + BDD bdd_var = sylvan_var(bdd); + + /* assert var <= bdd_var */ + if (var < bdd_var) { + struct bdd_path pp1 = (struct bdd_path){path, var, 1}; + SPAWN(sylvan_enum_par_do, bdd, vars, cb, context, &pp1); + struct bdd_path pp0 = (struct bdd_path){path, var, 0}; + CALL(sylvan_enum_par_do, bdd, vars, cb, context, &pp0); + 
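To illustrate the enumeration entry points defined here: callbacks are Lace tasks matching the enum_cb signature and are passed via TASK(), the same pattern the library itself uses for its GC hooks further down. print_assignment and enum_example are hypothetical names; stdio.h is assumed.

/* receives (context, array of variables, array of 0/1 values, length) */
VOID_TASK_4(print_assignment, void*, ctx, BDDVAR*, vars, uint8_t*, values, int, count)
{
    for (int i = 0; i < count; i++) printf("x%u=%u ", vars[i], values[i]);
    printf("\n");
    (void)ctx;
}

void enum_example(BDD f, BDDSET vars)
{
    LACE_ME;
    sylvan_enum(f, vars, TASK(print_assignment), NULL);      // sequential variant
    /* sylvan_enum_par(f, vars, TASK(print_assignment), NULL) enumerates in parallel,
       but then the callback must be thread-safe. */
}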
SYNC(sylvan_enum_par_do); + } else if (var == bdd_var) { + struct bdd_path pp1 = (struct bdd_path){path, var, 1}; + SPAWN(sylvan_enum_par_do, sylvan_high(bdd), vars, cb, context, &pp1); + struct bdd_path pp0 = (struct bdd_path){path, var, 0}; + CALL(sylvan_enum_par_do, sylvan_low(bdd), vars, cb, context, &pp0); + SYNC(sylvan_enum_par_do); + } else { + assert(var <= bdd_var); + } +} + +VOID_TASK_IMPL_4(sylvan_enum, BDD, bdd, BDDSET, vars, enum_cb, cb, void*, context) +{ + CALL(sylvan_enum_do, bdd, vars, cb, context, 0); +} + +VOID_TASK_IMPL_4(sylvan_enum_par, BDD, bdd, BDDSET, vars, enum_cb, cb, void*, context) +{ + CALL(sylvan_enum_par_do, bdd, vars, cb, context, 0); +} + +TASK_5(BDD, sylvan_collect_do, BDD, bdd, BDDSET, vars, sylvan_collect_cb, cb, void*, context, struct bdd_path*, path) +{ + if (bdd == sylvan_false) return sylvan_false; + + if (sylvan_set_isempty(vars)) { + /* compute length of path */ + int i=0; + struct bdd_path *pp; + for (pp = path; pp != NULL; pp = pp->prev) i++; + /* if length is 0 (enum called with empty vars??), return */ + if (i == 0) return WRAP(cb, context, NULL); + /* fill cube and vars with trace */ + uint8_t cube[i]; + int j=0; + for (pp = path; pp != NULL; pp = pp->prev) { + cube[i-j-1] = pp->val; + j++; + } + /* call callback */ + return WRAP(cb, context, cube); + } else { + BDD var = sylvan_var(vars); + vars = sylvan_set_next(vars); + BDD bdd_var = sylvan_var(bdd); + + /* if fails, then has variables not in */ + assert(var <= bdd_var); + + struct bdd_path pp1 = (struct bdd_path){path, var, 1}; + struct bdd_path pp0 = (struct bdd_path){path, var, 0}; + if (var < bdd_var) { + bdd_refs_spawn(SPAWN(sylvan_collect_do, bdd, vars, cb, context, &pp1)); + BDD low = bdd_refs_push(CALL(sylvan_collect_do, bdd, vars, cb, context, &pp0)); + BDD high = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_collect_do))); + BDD res = sylvan_or(low, high); + bdd_refs_pop(2); + return res; + } else if (var == bdd_var) { + bdd_refs_spawn(SPAWN(sylvan_collect_do, sylvan_high(bdd), vars, cb, context, &pp1)); + BDD low = bdd_refs_push(CALL(sylvan_collect_do, sylvan_low(bdd), vars, cb, context, &pp0)); + BDD high = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_collect_do))); + BDD res = sylvan_or(low, high); + bdd_refs_pop(2); + return res; + } else { + return sylvan_invalid; // unreachable + } + } +} + +TASK_IMPL_4(BDD, sylvan_collect, BDD, bdd, BDDSET, vars, sylvan_collect_cb, cb, void*, context) +{ + return CALL(sylvan_collect_do, bdd, vars, cb, context, 0); +} + +/** + * IMPLEMENTATION OF BDDSET + */ + +int +sylvan_set_in(BDDSET set, BDDVAR level) +{ + while (!sylvan_set_isempty(set)) { + bddnode_t n = GETNODE(set); + BDDVAR n_level = bddnode_getvariable(n); + if (n_level == level) return 1; + if (n_level > level) return 0; // BDDs are ordered + set = node_high(set, n); + } + + return 0; +} + +size_t +sylvan_set_count(BDDSET set) +{ + size_t result = 0; + for (;!sylvan_set_isempty(set);set = sylvan_set_next(set)) result++; + return result; +} + +void +sylvan_set_toarray(BDDSET set, BDDVAR *arr) +{ + size_t i = 0; + while (!sylvan_set_isempty(set)) { + bddnode_t n = GETNODE(set); + arr[i++] = bddnode_getvariable(n); + set = node_high(set, n); + } +} + +TASK_IMPL_2(BDDSET, sylvan_set_fromarray, BDDVAR*, arr, size_t, length) +{ + if (length == 0) return sylvan_set_empty(); + BDDSET sub = sylvan_set_fromarray(arr+1, length-1); + bdd_refs_push(sub); + BDDSET result = sylvan_set_add(sub, *arr); + bdd_refs_pop(1); + return result; +} + +void +sylvan_test_isset(BDDSET set) +{ + while (set != 
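A quick sketch of the BDDSET helpers above; names are illustrative and assert.h is assumed.

void varset_example(void)
{
    LACE_ME;
    BDDVAR in[4] = {1, 3, 5, 7};                 // input array must be ordered
    BDDSET set = sylvan_set_fromarray(in, 4);    // the cube x1 & x3 & x5 & x7
    sylvan_ref(set);

    assert(sylvan_set_count(set) == 4);
    assert(sylvan_set_in(set, 5));
    assert(!sylvan_set_in(set, 2));

    BDDVAR out[4];
    sylvan_set_toarray(set, out);                // out now holds 1, 3, 5, 7
    sylvan_deref(set);
}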
sylvan_false) { + assert(set != sylvan_true); + assert(llmsset_is_marked(nodes, set)); + bddnode_t n = GETNODE(set); + assert(node_low(set, n) == sylvan_true); + set = node_high(set, n); + } +} + +/** + * IMPLEMENTATION OF BDDMAP + */ + +BDDMAP +sylvan_map_add(BDDMAP map, BDDVAR key, BDD value) +{ + if (sylvan_map_isempty(map)) return sylvan_makenode(key, sylvan_map_empty(), value); + + bddnode_t n = GETNODE(map); + BDDVAR key_m = bddnode_getvariable(n); + + if (key_m < key) { + // add recursively and rebuild tree + BDDMAP low = sylvan_map_add(node_low(map, n), key, value); + BDDMAP result = sylvan_makenode(key_m, low, node_high(map, n)); + return result; + } else if (key_m > key) { + return sylvan_makenode(key, map, value); + } else { + // replace old + return sylvan_makenode(key, node_low(map, n), value); + } +} + +BDDMAP +sylvan_map_addall(BDDMAP map_1, BDDMAP map_2) +{ + // one of the maps is empty + if (sylvan_map_isempty(map_1)) return map_2; + if (sylvan_map_isempty(map_2)) return map_1; + + bddnode_t n_1 = GETNODE(map_1); + BDDVAR key_1 = bddnode_getvariable(n_1); + + bddnode_t n_2 = GETNODE(map_2); + BDDVAR key_2 = bddnode_getvariable(n_2); + + BDDMAP result; + if (key_1 < key_2) { + // key_1, recurse on n_1->low, map_2 + BDDMAP low = sylvan_map_addall(node_low(map_1, n_1), map_2); + result = sylvan_makenode(key_1, low, node_high(map_1, n_1)); + } else if (key_1 > key_2) { + // key_2, recurse on map_1, n_2->low + BDDMAP low = sylvan_map_addall(map_1, node_low(map_2, n_2)); + result = sylvan_makenode(key_2, low, node_high(map_2, n_2)); + } else { + // equal: key_2, recurse on n_1->low, n_2->low + BDDMAP low = sylvan_map_addall(node_low(map_1, n_1), node_low(map_2, n_2)); + result = sylvan_makenode(key_2, low, node_high(map_2, n_2)); + } + return result; +} + +BDDMAP +sylvan_map_remove(BDDMAP map, BDDVAR key) +{ + if (sylvan_map_isempty(map)) return map; + + bddnode_t n = GETNODE(map); + BDDVAR key_m = bddnode_getvariable(n); + + if (key_m < key) { + BDDMAP low = sylvan_map_remove(node_low(map, n), key); + BDDMAP result = sylvan_makenode(key_m, low, node_high(map, n)); + return result; + } else if (key_m > key) { + return map; + } else { + return node_low(map, n); + } +} + +BDDMAP +sylvan_map_removeall(BDDMAP map, BDDSET toremove) +{ + if (sylvan_map_isempty(map)) return map; + if (sylvan_set_isempty(toremove)) return map; + + bddnode_t n_1 = GETNODE(map); + BDDVAR key_1 = bddnode_getvariable(n_1); + + bddnode_t n_2 = GETNODE(toremove); + BDDVAR key_2 = bddnode_getvariable(n_2); + + if (key_1 < key_2) { + BDDMAP low = sylvan_map_removeall(node_low(map, n_1), toremove); + BDDMAP result = sylvan_makenode(key_1, low, node_high(map, n_1)); + return result; + } else if (key_1 > key_2) { + return sylvan_map_removeall(map, node_high(toremove, n_2)); + } else { + return sylvan_map_removeall(node_low(map, n_1), node_high(toremove, n_2)); + } +} + +int +sylvan_map_in(BDDMAP map, BDDVAR key) +{ + while (!sylvan_map_isempty(map)) { + bddnode_t n = GETNODE(map); + BDDVAR n_level = bddnode_getvariable(n); + if (n_level == key) return 1; + if (n_level > key) return 0; // BDDs are ordered + map = node_low(map, n); + } + + return 0; +} + +size_t +sylvan_map_count(BDDMAP map) +{ + size_t r=0; + while (!sylvan_map_isempty(map)) { r++; map=sylvan_map_next(map); } + return r; +} + +BDDMAP +sylvan_set_to_map(BDDSET set, BDD value) +{ + if (sylvan_set_isempty(set)) return sylvan_map_empty(); + bddnode_t set_n = GETNODE(set); + BDD sub = sylvan_set_to_map(node_high(set, set_n), value); + BDD result = 
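A corresponding sketch for the BDDMAP routines above; reference counting is omitted for brevity and the names are illustrative (assert.h and stdio.h assumed).

void map_example(BDD g0, BDD g1)
{
    BDDMAP map = sylvan_map_empty();
    map = sylvan_map_add(map, 0, g0);            // x0 := g0
    map = sylvan_map_add(map, 2, g1);            // x2 := g1

    assert(sylvan_map_count(map) == 2);
    assert(sylvan_map_in(map, 2));
    map = sylvan_map_remove(map, 0);             // returns a new map; the old one is unchanged

    for (BDDMAP it = map; !sylvan_map_isempty(it); it = sylvan_map_next(it)) {
        printf("x%u -> ...\n", sylvan_map_key(it));   // sylvan_map_value(it) gives the bound BDD
    }
}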
sylvan_makenode(sub, bddnode_getvariable(set_n), value); + return result; +} + +/** + * Determine the support of a BDD (all variables used in the BDD) + */ +TASK_IMPL_1(BDD, sylvan_support, BDD, bdd) +{ + if (bdd == sylvan_true || bdd == sylvan_false) return sylvan_set_empty(); // return empty set + + sylvan_gc_test(); + + sylvan_stats_count(BDD_SUPPORT); + + BDD result; + if (cache_get3(CACHE_BDD_SUPPORT, bdd, 0, 0, &result)) { + sylvan_stats_count(BDD_SUPPORT_CACHED); + return result; + } + + bddnode_t n = GETNODE(bdd); + BDD high, low, set; + + /* compute recursively */ + bdd_refs_spawn(SPAWN(sylvan_support, bddnode_getlow(n))); + high = bdd_refs_push(CALL(sylvan_support, bddnode_gethigh(n))); + low = bdd_refs_push(bdd_refs_sync(SYNC(sylvan_support))); + + /* take intersection of support of low and support of high */ + set = sylvan_and(low, high); + bdd_refs_pop(2); + + /* add current level to set */ + result = sylvan_makenode(bddnode_getvariable(n), sylvan_false, set); + + if (cache_put3(CACHE_BDD_SUPPORT, bdd, 0, 0, result)) sylvan_stats_count(BDD_SUPPORT_CACHEDPUT); + return result; +} + +static void +sylvan_unmark_rec(bddnode_t node) +{ + if (bddnode_getmark(node)) { + bddnode_setmark(node, 0); + if (!sylvan_isconst(bddnode_getlow(node))) sylvan_unmark_rec(GETNODE(bddnode_getlow(node))); + if (!sylvan_isconst(bddnode_gethigh(node))) sylvan_unmark_rec(GETNODE(bddnode_gethigh(node))); + } +} + +/** + * fprint, print + */ +void +sylvan_fprint(FILE *f, BDD bdd) +{ + sylvan_serialize_reset(); + size_t v = sylvan_serialize_add(bdd); + fprintf(f, "%s%zu,", bdd&sylvan_complement?"!":"", v); + sylvan_serialize_totext(f); +} + +void +sylvan_print(BDD bdd) +{ + sylvan_fprint(stdout, bdd); +} + +/** + * Output to .DOT files + */ + +/*** + * We keep a set [level -> [node]] using AVLset + */ +struct level_to_nodeset { + BDDVAR level; + avl_node_t *set; +}; + +AVL(level_to_nodeset, struct level_to_nodeset) +{ + if (left->level > right->level) return 1; + if (right->level > left->level) return -1; + return 0; +} + +AVL(nodeset, BDD) +{ + if (*left > *right) return 1; + if (*right > *left) return -1; + return 0; +} + +/* returns 1 if inserted, 0 if already existed */ +static int __attribute__((noinline)) +sylvan_dothelper_register(avl_node_t **set, BDD bdd) +{ + struct level_to_nodeset s, *ss; + s.level = sylvan_var(bdd); + ss = level_to_nodeset_search(*set, &s); + if (ss == NULL) { + s.set = NULL; + ss = level_to_nodeset_put(set, &s, NULL); + } + assert(ss != NULL); + return nodeset_insert(&ss->set, &bdd); +} + +static void +sylvan_fprintdot_rec(FILE *out, BDD bdd, avl_node_t **levels) +{ + bdd = BDD_STRIPMARK(bdd); + if (bdd == sylvan_false) return; + if (!sylvan_dothelper_register(levels, bdd)) return; + + BDD low = sylvan_low(bdd); + BDD high = sylvan_high(bdd); + fprintf(out, "\"%" PRIx64 "\" [label=\"%d\"];\n", bdd, sylvan_var(bdd)); + fprintf(out, "\"%" PRIx64 "\" -> \"%" PRIx64 "\" [style=dashed];\n", bdd, low); + fprintf(out, "\"%" PRIx64 "\" -> \"%" PRIx64 "\" [style=solid dir=both arrowtail=%s];\n", bdd, BDD_STRIPMARK(high), BDD_HASMARK(high) ? 
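A sketch of querying a BDD's support with the function above; support_example is an illustrative name, stdlib.h and stdio.h are assumed.

void support_example(BDD f)
{
    LACE_ME;
    BDDSET supp = sylvan_support(f);             // cube of every variable occurring in f
    sylvan_ref(supp);

    size_t n = sylvan_set_count(supp);
    BDDVAR *vars = (BDDVAR*)malloc(n * sizeof(BDDVAR));
    sylvan_set_toarray(supp, vars);
    for (size_t i = 0; i < n; i++) printf("%u ", vars[i]);
    printf("\n");

    free(vars);
    sylvan_deref(supp);
}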
"dot" : "none"); + sylvan_fprintdot_rec(out, low, levels); + sylvan_fprintdot_rec(out, high, levels); +} + +void +sylvan_fprintdot(FILE *out, BDD bdd) +{ + fprintf(out, "digraph \"DD\" {\n"); + fprintf(out, "graph [dpi = 300];\n"); + fprintf(out, "center = true;\n"); + fprintf(out, "edge [dir = forward];\n"); + fprintf(out, "0 [label=\"0\", style=filled, shape=box, height=0.3, width=0.3];\n"); + fprintf(out, "root [style=invis];\n"); + fprintf(out, "root -> \"%" PRIx64 "\" [style=solid dir=both arrowtail=%s];\n", BDD_STRIPMARK(bdd), BDD_HASMARK(bdd) ? "dot" : "none"); + + avl_node_t *levels = NULL; + sylvan_fprintdot_rec(out, bdd, &levels); + + if (levels != NULL) { + size_t levels_count = avl_count(levels); + struct level_to_nodeset *arr = level_to_nodeset_toarray(levels); + size_t i; + for (i=0;i \"%" PRIx64 "\" [style=dashed];\n", bdd, low); + fprintf(out, "\"%" PRIx64 "\" -> \"%" PRIx64 "\" [style=solid];\n", bdd, high); + sylvan_fprintdot_nc_rec(out, low, levels); + sylvan_fprintdot_nc_rec(out, high, levels); +} + +void +sylvan_fprintdot_nc(FILE *out, BDD bdd) +{ + fprintf(out, "digraph \"DD\" {\n"); + fprintf(out, "graph [dpi = 300];\n"); + fprintf(out, "center = true;\n"); + fprintf(out, "edge [dir = forward];\n"); + fprintf(out, "\"%" PRIx64 "\" [shape=box, label=\"F\", style=filled, shape=box, height=0.3, width=0.3];\n", sylvan_false); + fprintf(out, "\"%" PRIx64 "\" [shape=box, label=\"T\", style=filled, shape=box, height=0.3, width=0.3];\n", sylvan_true); + fprintf(out, "root [style=invis];\n"); + fprintf(out, "root -> \"%" PRIx64 "\" [style=solid];\n", bdd); + + avl_node_t *levels = NULL; + sylvan_fprintdot_nc_rec(out, bdd, &levels); + + if (levels != NULL) { + size_t levels_count = avl_count(levels); + struct level_to_nodeset *arr = level_to_nodeset_toarray(levels); + size_t i; + for (i=0;ibdd > right->bdd) return 1; + if (left->bdd < right->bdd) return -1; + return 0; +} + +// Define a AVL tree type with prefix 'sylvan_ser_reversed' holding +// nodes of struct sylvan_ser with the following compare() function... 
+AVL(sylvan_ser_reversed, struct sylvan_ser) +{ + if (left->assigned > right->assigned) return 1; + if (left->assigned < right->assigned) return -1; + return 0; +} + +// Initially, both sets are empty +static avl_node_t *sylvan_ser_set = NULL; +static avl_node_t *sylvan_ser_reversed_set = NULL; + +// Start counting (assigning numbers to BDDs) at 1 +static size_t sylvan_ser_counter = 1; +static size_t sylvan_ser_done = 0; + +// Given a BDD, assign unique numbers to all nodes +static size_t +sylvan_serialize_assign_rec(BDD bdd) +{ + if (sylvan_isnode(bdd)) { + bddnode_t n = GETNODE(bdd); + + struct sylvan_ser s, *ss; + s.bdd = BDD_STRIPMARK(bdd); + ss = sylvan_ser_search(sylvan_ser_set, &s); + if (ss == NULL) { + // assign dummy value + s.assigned = 0; + ss = sylvan_ser_put(&sylvan_ser_set, &s, NULL); + + // first assign recursively + sylvan_serialize_assign_rec(bddnode_getlow(n)); + sylvan_serialize_assign_rec(bddnode_gethigh(n)); + + // assign real value + ss->assigned = sylvan_ser_counter++; + + // put a copy in the reversed table + sylvan_ser_reversed_insert(&sylvan_ser_reversed_set, ss); + } + + return ss->assigned; + } + + return BDD_STRIPMARK(bdd); +} + +size_t +sylvan_serialize_add(BDD bdd) +{ + return BDD_TRANSFERMARK(bdd, sylvan_serialize_assign_rec(bdd)); +} + +void +sylvan_serialize_reset() +{ + sylvan_ser_free(&sylvan_ser_set); + sylvan_ser_free(&sylvan_ser_reversed_set); + sylvan_ser_counter = 1; + sylvan_ser_done = 0; +} + +size_t +sylvan_serialize_get(BDD bdd) +{ + if (!sylvan_isnode(bdd)) return bdd; + struct sylvan_ser s, *ss; + s.bdd = BDD_STRIPMARK(bdd); + ss = sylvan_ser_search(sylvan_ser_set, &s); + assert(ss != NULL); + return BDD_TRANSFERMARK(bdd, ss->assigned); +} + +BDD +sylvan_serialize_get_reversed(size_t value) +{ + if (!sylvan_isnode(value)) return value; + struct sylvan_ser s, *ss; + s.assigned = BDD_STRIPMARK(value); + ss = sylvan_ser_reversed_search(sylvan_ser_reversed_set, &s); + assert(ss != NULL); + return BDD_TRANSFERMARK(value, ss->bdd); +} + +void +sylvan_serialize_totext(FILE *out) +{ + fprintf(out, "["); + avl_iter_t *it = sylvan_ser_reversed_iter(sylvan_ser_reversed_set); + struct sylvan_ser *s; + + while ((s=sylvan_ser_reversed_iter_next(it))) { + BDD bdd = s->bdd; + bddnode_t n = GETNODE(bdd); + fprintf(out, "(%zu,%u,%zu,%zu,%u),", s->assigned, + bddnode_getvariable(n), + (size_t)bddnode_getlow(n), + (size_t)BDD_STRIPMARK(bddnode_gethigh(n)), + BDD_HASMARK(bddnode_gethigh(n)) ? 
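A round-trip sketch for the serialization layer defined here, following the SAVING/LOADING recipe documented in the header further down. The two-BDD file layout is made up for illustration; stdio.h is assumed.

void save_two_bdds(FILE *out, BDD f, BDD g)
{
    size_t key_f = sylvan_serialize_add(f);      // number all nodes reachable from f and g
    size_t key_g = sylvan_serialize_add(g);
    fwrite(&key_f, sizeof(size_t), 1, out);      // the caller keeps track of the keys
    fwrite(&key_g, sizeof(size_t), 1, out);
    sylvan_serialize_tofile(out);                // write the numbered nodes themselves
}

void load_two_bdds(FILE *in, BDD *f, BDD *g)     // typically in a later run
{
    size_t key_f, key_g;
    if (fread(&key_f, sizeof(size_t), 1, in) != 1) return;
    if (fread(&key_g, sizeof(size_t), 1, in) != 1) return;
    sylvan_serialize_fromfile(in);               // rebuild the stored nodes
    *f = sylvan_serialize_get_reversed(key_f);   // translate keys back to (new) BDD handles
    *g = sylvan_serialize_get_reversed(key_g);
}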
1 : 0); + } + + sylvan_ser_reversed_iter_free(it); + fprintf(out, "]"); +} + +void +sylvan_serialize_tofile(FILE *out) +{ + size_t count = avl_count(sylvan_ser_reversed_set); + assert(count >= sylvan_ser_done); + assert(count == sylvan_ser_counter-1); + count -= sylvan_ser_done; + fwrite(&count, sizeof(size_t), 1, out); + + struct sylvan_ser *s; + avl_iter_t *it = sylvan_ser_reversed_iter(sylvan_ser_reversed_set); + + /* Skip already written entries */ + size_t index = 0; + while (index < sylvan_ser_done && (s=sylvan_ser_reversed_iter_next(it))) { + index++; + assert(s->assigned == index); + } + + while ((s=sylvan_ser_reversed_iter_next(it))) { + index++; + assert(s->assigned == index); + + bddnode_t n = GETNODE(s->bdd); + + struct bddnode node; + bddnode_makenode(&node, bddnode_getvariable(n), sylvan_serialize_get(bddnode_getlow(n)), sylvan_serialize_get(bddnode_gethigh(n))); + + fwrite(&node, sizeof(struct bddnode), 1, out); + } + + sylvan_ser_done = sylvan_ser_counter-1; + sylvan_ser_reversed_iter_free(it); +} + +void +sylvan_serialize_fromfile(FILE *in) +{ + size_t count, i; + if (fread(&count, sizeof(size_t), 1, in) != 1) { + // TODO FIXME return error + printf("sylvan_serialize_fromfile: file format error, giving up\n"); + exit(-1); + } + + for (i=1; i<=count; i++) { + struct bddnode node; + if (fread(&node, sizeof(struct bddnode), 1, in) != 1) { + // TODO FIXME return error + printf("sylvan_serialize_fromfile: file format error, giving up\n"); + exit(-1); + } + + BDD low = sylvan_serialize_get_reversed(bddnode_getlow(&node)); + BDD high = sylvan_serialize_get_reversed(bddnode_gethigh(&node)); + + struct sylvan_ser s; + s.bdd = sylvan_makenode(bddnode_getvariable(&node), low, high); + s.assigned = ++sylvan_ser_done; // starts at 0 but we want 1-based... + + sylvan_ser_insert(&sylvan_ser_set, &s); + sylvan_ser_reversed_insert(&sylvan_ser_reversed_set, &s); + } +} + +/** + * Generate SHA2 structural hashes. + * Hashes are independent of location. + * Mainly useful for debugging purposes. + */ +static void +sylvan_sha2_rec(BDD bdd, SHA256_CTX *ctx) +{ + if (bdd == sylvan_true || bdd == sylvan_false) { + SHA256_Update(ctx, (void*)&bdd, sizeof(BDD)); + return; + } + + bddnode_t node = GETNODE(bdd); + if (bddnode_getmark(node) == 0) { + bddnode_setmark(node, 1); + uint32_t level = bddnode_getvariable(node); + if (BDD_STRIPMARK(bddnode_gethigh(node))) level |= 0x80000000; + SHA256_Update(ctx, (void*)&level, sizeof(uint32_t)); + sylvan_sha2_rec(bddnode_gethigh(node), ctx); + sylvan_sha2_rec(bddnode_getlow(node), ctx); + } +} + +void +sylvan_printsha(BDD bdd) +{ + sylvan_fprintsha(stdout, bdd); +} + +void +sylvan_fprintsha(FILE *f, BDD bdd) +{ + char buf[80]; + sylvan_getsha(bdd, buf); + fprintf(f, "%s", buf); +} + +void +sylvan_getsha(BDD bdd, char *target) +{ + SHA256_CTX ctx; + SHA256_Init(&ctx); + sylvan_sha2_rec(bdd, &ctx); + if (bdd != sylvan_true && bdd != sylvan_false) sylvan_unmark_rec(GETNODE(bdd)); + SHA256_End(&ctx, target); +} + +/** + * Debug tool to check that a BDD is properly ordered. + * Also that every BDD node is marked 'in-use' in the hash table. 
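The printing and hashing helpers above are easy to drive; a usage sketch with illustrative names (string.h and stdio.h assumed). The generated file can be rendered with Graphviz, e.g. dot -Tpdf f.dot -o f.pdf.

void dot_and_sha_example(BDD f, BDD g)
{
    FILE *out = fopen("f.dot", "w");
    if (out != NULL) {
        sylvan_fprintdot(out, f);        // DOT with complement edges (dotted arrow tails)
        fclose(out);
    }
    sylvan_printdot_nc(g);               // variant without complement edges, to stdout

    char hf[65], hg[65];                 // sylvan_getsha needs at least 65 bytes
    sylvan_getsha(f, hf);
    sylvan_getsha(g, hg);
    if (strcmp(hf, hg) == 0) printf("f and g have identical structural hashes\n");
}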
+ */ +TASK_2(int, sylvan_test_isbdd_rec, BDD, bdd, BDDVAR, parent_var) +{ + if (bdd == sylvan_true || bdd == sylvan_false) return 1; + assert(llmsset_is_marked(nodes, BDD_STRIPMARK(bdd))); + + sylvan_stats_count(BDD_ISBDD); + + uint64_t result; + if (cache_get3(CACHE_BDD_ISBDD, bdd, 0, 0, &result)) { + sylvan_stats_count(BDD_ISBDD_CACHED); + return result; + } + + bddnode_t n = GETNODE(bdd); + BDDVAR var = bddnode_getvariable(n); + if (var <= parent_var) { + result = 0; + } else { + SPAWN(sylvan_test_isbdd_rec, node_low(bdd, n), var); + result = (uint64_t)CALL(sylvan_test_isbdd_rec, node_high(bdd, n), var); + if (!SYNC(sylvan_test_isbdd_rec)) result = 0; + } + + if (cache_put3(CACHE_BDD_ISBDD, bdd, 0, 0, result)) sylvan_stats_count(BDD_ISBDD_CACHEDPUT); + return result; +} + +TASK_IMPL_1(int, sylvan_test_isbdd, BDD, bdd) +{ + if (bdd == sylvan_true) return 1; + if (bdd == sylvan_false) return 1; + + assert(llmsset_is_marked(nodes, BDD_STRIPMARK(bdd))); + + bddnode_t n = GETNODE(bdd); + BDDVAR var = bddnode_getvariable(n); + SPAWN(sylvan_test_isbdd_rec, node_low(bdd, n), var); + int result = CALL(sylvan_test_isbdd_rec, node_high(bdd, n), var); + if (!SYNC(sylvan_test_isbdd_rec)) result = 0; + return result; +} diff --git a/src/sylvan_bdd.h b/src/sylvan_bdd.h new file mode 100644 index 000000000..6cd4ef584 --- /dev/null +++ b/src/sylvan_bdd.h @@ -0,0 +1,423 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* Do not include this file directly. Instead, include sylvan.h */ + +#include + +#ifndef SYLVAN_BDD_H +#define SYLVAN_BDD_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +typedef uint64_t BDD; // low 40 bits used for index, highest bit for complement, rest 0 +// BDDSET uses the BDD node hash table. A BDDSET is an ordered BDD. +typedef uint64_t BDDSET; // encodes a set of variables (e.g. for exists etc.) +// BDDMAP also uses the BDD node hash table. A BDDMAP is *not* an ordered BDD. +typedef uint64_t BDDMAP; // encodes a function of variable->BDD (e.g. for substitute) +typedef uint32_t BDDVAR; // low 24 bits only + +#define sylvan_complement ((uint64_t)0x8000000000000000) +#define sylvan_false ((BDD)0x0000000000000000) +#define sylvan_true (sylvan_false|sylvan_complement) +#define sylvan_invalid ((BDD)0x7fffffffffffffff) + +#define sylvan_isconst(bdd) (bdd == sylvan_true || bdd == sylvan_false) +#define sylvan_isnode(bdd) (bdd != sylvan_true && bdd != sylvan_false) + +/** + * Initialize BDD functionality. + * + * Granularity (BDD only) determines usage of operation cache. Smallest value is 1: use the operation cache always. + * Higher values mean that the cache is used less often. Variables are grouped such that + * the cache is used when going to the next group, i.e., with granularity=3, variables [0,1,2] are in the + * first group, [3,4,5] in the next, etc. Then no caching occur between 0->1, 1->2, 0->2. Caching occurs + * on 0->3, 1->4, 2->3, etc. 
+ * + * A reasonable default is a granularity of 4-16, strongly depending on the structure of the BDDs. + */ +void sylvan_init_bdd(int granularity); + +/* Create a BDD representing just or the negation of */ +BDD sylvan_ithvar(BDDVAR var); +static inline BDD sylvan_nithvar(BDD var) { return sylvan_ithvar(var) ^ sylvan_complement; } + +/* Retrieve the of the BDD node */ +BDDVAR sylvan_var(BDD bdd); + +/* Follow and edges */ +BDD sylvan_low(BDD bdd); +BDD sylvan_high(BDD bdd); + +/* Add or remove external reference to BDD */ +BDD sylvan_ref(BDD a); +void sylvan_deref(BDD a); + +/* For use in custom mark functions */ +VOID_TASK_DECL_1(sylvan_gc_mark_rec, BDD); +#define sylvan_gc_mark_rec(mdd) CALL(sylvan_gc_mark_rec, mdd) + +/* Return the number of external references */ +size_t sylvan_count_refs(); + +/* Add or remove BDD pointers to protect (indirect external references) */ +void sylvan_protect(BDD* ptr); +void sylvan_unprotect(BDD* ptr); + +/* Return the number of protected BDD pointers */ +size_t sylvan_count_protected(); + +/* Mark BDD for "notify on dead" */ +#define sylvan_notify_ondead(bdd) llmsset_notify_ondead(nodes, bdd&~sylvan_complement) + +/* Unary, binary and if-then-else operations */ +#define sylvan_not(a) (((BDD)a)^sylvan_complement) +TASK_DECL_4(BDD, sylvan_ite, BDD, BDD, BDD, BDDVAR); +#define sylvan_ite(a,b,c) (CALL(sylvan_ite,a,b,c,0)) +TASK_DECL_3(BDD, sylvan_and, BDD, BDD, BDDVAR); +#define sylvan_and(a,b) (CALL(sylvan_and,a,b,0)) +TASK_DECL_3(BDD, sylvan_xor, BDD, BDD, BDDVAR); +#define sylvan_xor(a,b) (CALL(sylvan_xor,a,b,0)) +/* Do not use nested calls for xor/equiv parameter b! */ +#define sylvan_equiv(a,b) sylvan_not(sylvan_xor(a,b)) +#define sylvan_or(a,b) sylvan_not(sylvan_and(sylvan_not(a),sylvan_not(b))) +#define sylvan_nand(a,b) sylvan_not(sylvan_and(a,b)) +#define sylvan_nor(a,b) sylvan_not(sylvan_or(a,b)) +#define sylvan_imp(a,b) sylvan_not(sylvan_and(a,sylvan_not(b))) +#define sylvan_invimp(a,b) sylvan_not(sylvan_and(sylvan_not(a),b)) +#define sylvan_biimp sylvan_equiv +#define sylvan_diff(a,b) sylvan_and(a,sylvan_not(b)) +#define sylvan_less(a,b) sylvan_and(sylvan_not(a),b) + +/* Existential and Universal quantifiers */ +TASK_DECL_3(BDD, sylvan_exists, BDD, BDD, BDDVAR); +#define sylvan_exists(a, vars) (CALL(sylvan_exists, a, vars, 0)) +#define sylvan_forall(a, vars) (sylvan_not(CALL(sylvan_exists, sylvan_not(a), vars, 0))) + +/** + * Compute \exists v: A(...) \and B(...) + * Parameter vars is the cube (conjunction) of all v variables. + */ +TASK_DECL_4(BDD, sylvan_and_exists, BDD, BDD, BDDSET, BDDVAR); +#define sylvan_and_exists(a,b,vars) CALL(sylvan_and_exists,a,b,vars,0) + +/** + * Compute R(s,t) = \exists x: A(s,x) \and B(x,t) + * or R(s) = \exists x: A(s,x) \and B(x) + * Assumes s,t are interleaved with s even and t odd (s+1). + * Parameter vars is the cube of all s and/or t variables. + * Other variables in A are "ignored" (existential quantification) + * Other variables in B are kept + * Alternatively, vars=false means all variables are in vars + * + * Use this function to concatenate two relations --> --> + * or to take the 'previous' of a set --> S + */ +TASK_DECL_4(BDD, sylvan_relprev, BDD, BDD, BDDSET, BDDVAR); +#define sylvan_relprev(a,b,vars) CALL(sylvan_relprev,a,b,vars,0) + +/** + * Compute R(s) = \exists x: A(x) \and B(x,s) + * with support(result) = s, support(A) = s, support(B) = s+t + * Assumes s,t are interleaved with s even and t odd (s+1). + * Parameter vars is the cube of all s and/or t variables. 
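A minimal sketch of the primitives declared above: build (x0 AND x1) OR NOT x2 and quantify over x1. The function name and variable numbering are illustrative; sylvan.h is assumed.

void formula_example(void)
{
    LACE_ME;
    BDD a = sylvan_ithvar(0), b = sylvan_ithvar(1), c = sylvan_ithvar(2);

    BDD ab = sylvan_and(a, b);
    sylvan_protect(&ab);                        // indirect external reference, survives GC
    BDD f = sylvan_or(ab, sylvan_not(c));       // (x0 & x1) | !x2
    sylvan_protect(&f);

    BDDSET vars = sylvan_set_add(sylvan_set_empty(), 1);   // the set { x1 }
    sylvan_protect(&vars);
    BDD ex = sylvan_exists(f, vars);            // exists x1 . f
    BDD all = sylvan_forall(f, vars);           // forall x1 . f
    (void)ex; (void)all;

    sylvan_unprotect(&vars);
    sylvan_unprotect(&f);
    sylvan_unprotect(&ab);
}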
+ * Other variables in A are kept + * Other variables in B are "ignored" (existential quantification) + * Alternatively, vars=false means all variables are in vars + * + * Use this function to take the 'next' of a set S --> + */ +TASK_DECL_4(BDD, sylvan_relnext, BDD, BDD, BDDSET, BDDVAR); +#define sylvan_relnext(a,b,vars) CALL(sylvan_relnext,a,b,vars,0) + +/** + * Computes the transitive closure by traversing the BDD recursively. + * See Y. Matsunaga, P. C. McGeer, R. K. Brayton + * On Computing the Transitive Closure of a State Transition Relation + * 30th ACM Design Automation Conference, 1993. + * + * The input BDD must be a transition relation that only has levels of s,t + * with s,t interleaved with s even and t odd, i.e. + * s level 0,2,4 matches with t level 1,3,5 and so forth. + */ +TASK_DECL_2(BDD, sylvan_closure, BDD, BDDVAR); +#define sylvan_closure(a) CALL(sylvan_closure,a,0); + +/** + * Calculate a@b (a constrain b), such that (b -> a@b) = (b -> a) + * Special cases: + * - a@0 = 0 + * - a@1 = f + * - 0@b = 0 + * - 1@b = 1 + * - a@a = 1 + * - a@not(a) = 0 + */ +TASK_DECL_3(BDD, sylvan_constrain, BDD, BDD, BDDVAR); +#define sylvan_constrain(f,c) (CALL(sylvan_constrain, (f), (c), 0)) + +TASK_DECL_3(BDD, sylvan_restrict, BDD, BDD, BDDVAR); +#define sylvan_restrict(f,c) (CALL(sylvan_restrict, (f), (c), 0)) + +TASK_DECL_3(BDD, sylvan_compose, BDD, BDDMAP, BDDVAR); +#define sylvan_compose(f,m) (CALL(sylvan_compose, (f), (m), 0)) + +/** + * Calculate the support of a BDD. + * A variable v is in the support of a Boolean function f iff f[v<-0] != f[v<-1] + * It is also the set of all variables in the BDD nodes of the BDD. + */ +TASK_DECL_1(BDD, sylvan_support, BDD); +#define sylvan_support(bdd) (CALL(sylvan_support, bdd)) + +/** + * A set of BDD variables is a cube (conjunction) of variables in their positive form. + * Note 2015-06-10: This used to be a union (disjunction) of variables in their positive form. + */ +// empty bddset +#define sylvan_set_empty() sylvan_true +#define sylvan_set_isempty(set) (set == sylvan_true) +// add variables to the bddset +#define sylvan_set_add(set, var) sylvan_and(set, sylvan_ithvar(var)) +#define sylvan_set_addall(set, set_to_add) sylvan_and(set, set_to_add) +// remove variables from the bddset +#define sylvan_set_remove(set, var) sylvan_exists(set, var) +#define sylvan_set_removeall(set, set_to_remove) sylvan_exists(set, set_to_remove) +// iterate through all variables +#define sylvan_set_var(set) (sylvan_var(set)) +#define sylvan_set_next(set) (sylvan_high(set)) +int sylvan_set_in(BDDSET set, BDDVAR var); +size_t sylvan_set_count(BDDSET set); +void sylvan_set_toarray(BDDSET set, BDDVAR *arr); +// variables in arr should be ordered +TASK_DECL_2(BDDSET, sylvan_set_fromarray, BDDVAR*, size_t); +#define sylvan_set_fromarray(arr, length) ( CALL(sylvan_set_fromarray, arr, length) ) +void sylvan_test_isset(BDDSET set); + +/** + * BDDMAP maps BDDVAR-->BDD, implemented using BDD nodes. + * Based on disjunction of variables, but with high edges to BDDs instead of True terminals. + */ +// empty bddmap +static inline BDDMAP sylvan_map_empty() { return sylvan_false; } +static inline int sylvan_map_isempty(BDDMAP map) { return map == sylvan_false ? 
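The relnext/relprev pair above is what model-checking front ends build their image computations from; a hedged fixpoint sketch follows. The caller is assumed to keep trans and vars referenced, and reachable_states is an illustrative name.

BDD reachable_states(BDD initial, BDD trans, BDDSET vars)
{
    LACE_ME;
    BDD states = initial, prev = sylvan_false, next = sylvan_false;
    sylvan_protect(&states); sylvan_protect(&prev); sylvan_protect(&next);

    while (states != prev) {                          // BDDs are canonical, so == is semantic equality
        prev = states;
        next = sylvan_relnext(states, trans, vars);   // successors of the current frontier
        states = sylvan_or(states, next);
    }

    sylvan_unprotect(&next); sylvan_unprotect(&prev); sylvan_unprotect(&states);
    return states;                                    // caller should protect the result itself
}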
1 : 0; } +// add key-value pairs to the bddmap +BDDMAP sylvan_map_add(BDDMAP map, BDDVAR key, BDD value); +BDDMAP sylvan_map_addall(BDDMAP map_1, BDDMAP map_2); +// remove key-value pairs from the bddmap +BDDMAP sylvan_map_remove(BDDMAP map, BDDVAR key); +BDDMAP sylvan_map_removeall(BDDMAP map, BDDSET toremove); +// iterate through all pairs +static inline BDDVAR sylvan_map_key(BDDMAP map) { return sylvan_var(map); } +static inline BDD sylvan_map_value(BDDMAP map) { return sylvan_high(map); } +static inline BDDMAP sylvan_map_next(BDDMAP map) { return sylvan_low(map); } +// is a key in the map +int sylvan_map_in(BDDMAP map, BDDVAR key); +// count number of keys +size_t sylvan_map_count(BDDMAP map); +// convert a BDDSET (cube of variables) to a map, with all variables pointing on the value +BDDMAP sylvan_set_to_map(BDDSET set, BDD value); + +/** + * Node creation primitive. + * Careful: does not check ordering! + * Preferably only use when debugging! + */ +BDD sylvan_makenode(BDDVAR level, BDD low, BDD high); + +/** + * Write a DOT representation of a BDD + */ +void sylvan_printdot(BDD bdd); +void sylvan_fprintdot(FILE *out, BDD bdd); + +/** + * Write a DOT representation of a BDD. + * This variant does not print complement edges. + */ +void sylvan_printdot_nc(BDD bdd); +void sylvan_fprintdot_nc(FILE *out, BDD bdd); + +void sylvan_print(BDD bdd); +void sylvan_fprint(FILE *f, BDD bdd); + +void sylvan_printsha(BDD bdd); +void sylvan_fprintsha(FILE *f, BDD bdd); +void sylvan_getsha(BDD bdd, char *target); // target must be at least 65 bytes... + +/** + * Calculate number of satisfying variable assignments. + * The set of variables must be >= the support of the BDD. + */ + +TASK_DECL_3(double, sylvan_satcount, BDD, BDDSET, BDDVAR); +#define sylvan_satcount(bdd, variables) CALL(sylvan_satcount, bdd, variables, 0) + +/** + * Create a BDD cube representing the conjunction of variables in their positive or negative + * form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any). + * CHANGED 2014/09/19: vars is now a BDDSET (ordered!) + */ +BDD sylvan_cube(BDDSET variables, uint8_t *cube); +TASK_DECL_3(BDD, sylvan_union_cube, BDD, BDDSET, uint8_t*); +#define sylvan_union_cube(bdd, variables, cube) CALL(sylvan_union_cube, bdd, variables, cube) + +/** + * Pick one satisfying variable assignment randomly for which is true. + * The set must include all variables in the support of . + * + * The function will set the values of str, such that + * str[index] where index is the index in the set is set to + * 0 when the variable is negative, 1 when positive, or 2 when it could be either. + * + * This implies that str[i] will be set in the variable ordering as in . + * + * Returns 1 when succesful, or 0 when no assignment is found (i.e. bdd==sylvan_false). + */ +int sylvan_sat_one(BDD bdd, BDDSET variables, uint8_t* str); + +/** + * Pick one satisfying variable assignment randomly from the given . + * Functionally equivalent to performing sylvan_cube on the result of sylvan_sat_one. + * For the result: sylvan_and(res, bdd) = res. + */ +BDD sylvan_sat_one_bdd(BDD bdd); +#define sylvan_pick_cube sylvan_sat_one_bdd + +/** + * Enumerate all satisfying variable assignments from the given using variables . + * Calls with four parameters: a user-supplied context, the array of BDD variables in , + * the cube (array of values 0 and 1 for each variables in ) and the length of the two arrays. 
+ */ +LACE_TYPEDEF_CB(void, enum_cb, void*, BDDVAR*, uint8_t*, int); +VOID_TASK_DECL_4(sylvan_enum, BDD, BDDSET, enum_cb, void*); +#define sylvan_enum(bdd, vars, cb, context) CALL(sylvan_enum, bdd, vars, cb, context) +VOID_TASK_DECL_4(sylvan_enum_par, BDD, BDDSET, enum_cb, void*); +#define sylvan_enum_par(bdd, vars, cb, context) CALL(sylvan_enum_par, bdd, vars, cb, context) + +/** + * Enumerate all satisfyable variable assignments of the given using variables . + * Calls with two parameters: a user-supplied context and the cube (array of + * values 0 and 1 for each variable in ). + * The BDD that returns is pair-wise merged (using or) and returned. + */ +LACE_TYPEDEF_CB(BDD, sylvan_collect_cb, void*, uint8_t*); +TASK_DECL_4(BDD, sylvan_collect, BDD, BDDSET, sylvan_collect_cb, void*); +#define sylvan_collect(bdd, vars, cb, context) CALL(sylvan_collect, bdd, vars, cb, context) + +/** + * Compute the number of distinct paths to sylvan_true in the BDD + */ +TASK_DECL_2(double, sylvan_pathcount, BDD, BDDVAR); +#define sylvan_pathcount(bdd) (CALL(sylvan_pathcount, bdd, 0)) + +/** + * Compute the number of BDD nodes in the BDD + */ +size_t sylvan_nodecount(BDD a); + +/** + * SAVING: + * use sylvan_serialize_add on every BDD you want to store + * use sylvan_serialize_get to retrieve the key of every stored BDD + * use sylvan_serialize_tofile + * + * LOADING: + * use sylvan_serialize_fromfile (implies sylvan_serialize_reset) + * use sylvan_serialize_get_reversed for every key + * + * MISC: + * use sylvan_serialize_reset to free all allocated structures + * use sylvan_serialize_totext to write a textual list of tuples of all BDDs. + * format: [(,,,,),...] + * + * for the old sylvan_print functions, use sylvan_serialize_totext + */ +size_t sylvan_serialize_add(BDD bdd); +size_t sylvan_serialize_get(BDD bdd); +BDD sylvan_serialize_get_reversed(size_t value); +void sylvan_serialize_reset(); +void sylvan_serialize_totext(FILE *out); +void sylvan_serialize_tofile(FILE *out); +void sylvan_serialize_fromfile(FILE *in); + +/** + * For debugging + * if (part of) the BDD is not 'marked' in the nodes table, assertion fails + * if the BDD is not ordered, returns 0 + * if nicely ordered, returns 1 + */ +TASK_DECL_1(int, sylvan_test_isbdd, BDD); +#define sylvan_test_isbdd(bdd) CALL(sylvan_test_isbdd, bdd) + +/* Infrastructure for internal markings */ +typedef struct bdd_refs_internal +{ + size_t r_size, r_count; + size_t s_size, s_count; + BDD *results; + Task **spawns; +} *bdd_refs_internal_t; + +extern DECLARE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); + +static inline BDD +bdd_refs_push(BDD bdd) +{ + LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); + if (bdd_refs_key->r_count >= bdd_refs_key->r_size) { + bdd_refs_key->r_size *= 2; + bdd_refs_key->results = (BDD*)realloc(bdd_refs_key->results, sizeof(BDD) * bdd_refs_key->r_size); + } + bdd_refs_key->results[bdd_refs_key->r_count++] = bdd; + return bdd; +} + +static inline void +bdd_refs_pop(int amount) +{ + LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); + bdd_refs_key->r_count-=amount; +} + +static inline void +bdd_refs_spawn(Task *t) +{ + LOCALIZE_THREAD_LOCAL(bdd_refs_key, bdd_refs_internal_t); + if (bdd_refs_key->s_count >= bdd_refs_key->s_size) { + bdd_refs_key->s_size *= 2; + bdd_refs_key->spawns = (Task**)realloc(bdd_refs_key->spawns, sizeof(Task*) * bdd_refs_key->s_size); + } + bdd_refs_key->spawns[bdd_refs_key->s_count++] = t; +} + +static inline BDD +bdd_refs_sync(BDD result) +{ + LOCALIZE_THREAD_LOCAL(bdd_refs_key, 
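The bdd_refs_* helpers defined here are the per-worker stack that every recursive operation in sylvan_bdd.c uses to keep intermediate results alive across garbage collection. A hedged sketch of the usual pattern in a custom Lace task; my_copy simply rebuilds a BDD node by node and is illustrative only.

TASK_1(BDD, my_copy, BDD, dd)
{
    if (sylvan_isconst(dd)) return dd;
    sylvan_gc_test();                                            // cooperate with garbage collection

    bdd_refs_spawn(SPAWN(my_copy, sylvan_low(dd)));              // record the spawned task
    BDD high = bdd_refs_push(CALL(my_copy, sylvan_high(dd)));    // protect the intermediate result
    BDD low  = bdd_refs_push(bdd_refs_sync(SYNC(my_copy)));      // protect the synced result
    BDD result = sylvan_makenode(sylvan_var(dd), low, high);     // semantically equal to dd
    bdd_refs_pop(2);                                             // drop the two temporaries
    return result;
}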
bdd_refs_internal_t); + bdd_refs_key->s_count--; + return result; +} + +#include "sylvan_bdd_storm.h" + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/sylvan_bdd_storm.h b/src/sylvan_bdd_storm.h new file mode 100644 index 000000000..5806fba5d --- /dev/null +++ b/src/sylvan_bdd_storm.h @@ -0,0 +1,3 @@ +#define bdd_isnegated(dd) ((dd & sylvan_complement) ? 1 : 0) +#define bdd_regular(dd) (dd & ~sylvan_complement) +#define bdd_isterminal(dd) (dd == sylvan_false || dd == sylvan_true) \ No newline at end of file diff --git a/src/sylvan_cache.c b/src/sylvan_cache.c new file mode 100644 index 000000000..7bfb72afd --- /dev/null +++ b/src/sylvan_cache.c @@ -0,0 +1,220 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include // for errno +#include // for fprintf +#include // for uint32_t etc +#include // for exit +#include // for strerror +#include // for mmap + +#include + +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + +#ifndef compiler_barrier +#define compiler_barrier() { asm volatile("" ::: "memory"); } +#endif + +#ifndef cas +#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) +#endif + +/** + * This cache is designed to store a,b,c->res, with a,b,c,res 64-bit integers. + * + * Each cache bucket takes 32 bytes, 2 per cache line. + * Each cache status bucket takes 4 bytes, 16 per cache line. + * Therefore, size 2^N = 36*(2^N) bytes. 
+ */ + +struct __attribute__((packed)) cache_entry { + uint64_t a; + uint64_t b; + uint64_t c; + uint64_t res; +}; + +static size_t cache_size; // power of 2 +static size_t cache_max; // power of 2 +#if CACHE_MASK +static size_t cache_mask; // cache_size-1 +#endif +static cache_entry_t cache_table; +static uint32_t* cache_status; + +static uint64_t next_opid; + +uint64_t +cache_next_opid() +{ + return __sync_fetch_and_add(&next_opid, 1LL<<40); +} + +// status: 0x80000000 - bitlock +// 0x7fff0000 - hash (part of the 64-bit hash not used to position) +// 0x0000ffff - tag (every put increases tag field) + +/* Rotating 64-bit FNV-1a hash */ +static uint64_t +cache_hash(uint64_t a, uint64_t b, uint64_t c) +{ + const uint64_t prime = 1099511628211; + uint64_t hash = 14695981039346656037LLU; + hash = (hash ^ (a>>32)); + hash = (hash ^ a) * prime; + hash = (hash ^ b) * prime; + hash = (hash ^ c) * prime; + return hash; +} + +int +cache_get(uint64_t a, uint64_t b, uint64_t c, uint64_t *res) +{ + const uint64_t hash = cache_hash(a, b, c); +#if CACHE_MASK + volatile uint32_t *s_bucket = cache_status + (hash & cache_mask); + cache_entry_t bucket = cache_table + (hash & cache_mask); +#else + volatile uint32_t *s_bucket = cache_status + (hash % cache_size); + cache_entry_t bucket = cache_table + (hash % cache_size); +#endif + const uint32_t s = *s_bucket; + compiler_barrier(); + // abort if locked + if (s & 0x80000000) return 0; + // abort if different hash + if ((s ^ (hash>>32)) & 0x7fff0000) return 0; + // abort if key different + if (bucket->a != a || bucket->b != b || bucket->c != c) return 0; + *res = bucket->res; + compiler_barrier(); + // abort if status field changed after compiler_barrier() + return *s_bucket == s ? 1 : 0; +} + +int +cache_put(uint64_t a, uint64_t b, uint64_t c, uint64_t res) +{ + const uint64_t hash = cache_hash(a, b, c); +#if CACHE_MASK + volatile uint32_t *s_bucket = cache_status + (hash & cache_mask); + cache_entry_t bucket = cache_table + (hash & cache_mask); +#else + volatile uint32_t *s_bucket = cache_status + (hash % cache_size); + cache_entry_t bucket = cache_table + (hash % cache_size); +#endif + const uint32_t s = *s_bucket; + // abort if locked + if (s & 0x80000000) return 0; + // abort if hash identical -> no: in iscasmc this occasionally causes timeouts?! 
+ const uint32_t hash_mask = (hash>>32) & 0x7fff0000; + // if ((s & 0x7fff0000) == hash_mask) return 0; + // use cas to claim bucket + const uint32_t new_s = ((s+1) & 0x0000ffff) | hash_mask; + if (!cas(s_bucket, s, new_s | 0x80000000)) return 0; + // cas succesful: write data + bucket->a = a; + bucket->b = b; + bucket->c = c; + bucket->res = res; + compiler_barrier(); + // after compiler_barrier(), unlock status field + *s_bucket = new_s; + return 1; +} + +void +cache_create(size_t _cache_size, size_t _max_size) +{ +#if CACHE_MASK + // Cache size must be a power of 2 + if (__builtin_popcountll(_cache_size) != 1 || __builtin_popcountll(_max_size) != 1) { + fprintf(stderr, "cache_create: Table size must be a power of 2!\n"); + exit(1); + } +#endif + + cache_size = _cache_size; + cache_max = _max_size; +#if CACHE_MASK + cache_mask = cache_size - 1; +#endif + + if (cache_size > cache_max) { + fprintf(stderr, "cache_create: Table size must be <= max size!\n"); + exit(1); + } + + cache_table = (cache_entry_t)mmap(0, cache_max * sizeof(struct cache_entry), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + cache_status = (uint32_t*)mmap(0, cache_max * sizeof(uint32_t), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + + if (cache_table == (cache_entry_t)-1 || cache_status == (uint32_t*)-1) { + fprintf(stderr, "cache_create: Unable to allocate memory: %s!\n", strerror(errno)); + exit(1); + } + + next_opid = 512LL << 40; +} + +void +cache_free() +{ + munmap(cache_table, cache_max * sizeof(struct cache_entry)); + munmap(cache_status, cache_max * sizeof(uint32_t)); +} + +void +cache_clear() +{ + // a bit silly, but this works just fine, and does not require writing 0 everywhere... + cache_free(); + cache_create(cache_size, cache_max); +} + +void +cache_setsize(size_t size) +{ + // easy solution + cache_free(); + cache_create(size, cache_max); +} + +size_t +cache_getsize() +{ + return cache_size; +} + +size_t +cache_getused() +{ + size_t result = 0; + for (size_t i=0;i // for uint32_t etc + +#include + +#ifndef CACHE_H +#define CACHE_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifndef CACHE_MASK +#define CACHE_MASK 1 +#endif + +/** + * Operation cache + * + * Use cache_next_opid() at initialization time to generate unique "operation identifiers". + * You should store these operation identifiers as static globals in your implementation .C/.CPP file. 
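As the cache documentation above and below spells out, a custom operation reserves an operation identifier once and then goes through cache_get3/cache_put3. A short sketch; CACHE_MY_OP and the my_op_* names are hypothetical.

static uint64_t CACHE_MY_OP;                      // reserved once, stored as a static global

void my_op_init(void)
{
    CACHE_MY_OP = cache_next_opid();              // unique opid in the bits above the node index
}

int my_op_cached(BDD dd, uint64_t param, uint64_t *result)
{
    return cache_get3(CACHE_MY_OP, dd, param, 0, result);   // 1 on hit, 0 on miss
}

void my_op_store(BDD dd, uint64_t param, uint64_t result)
{
    cache_put3(CACHE_MY_OP, dd, param, 0, result);           // may be refused; that is harmless
}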
+ * + * For custom operations, just use the following functions: + * - cache_get3/cache_put3 for any operation with 1 BDD and 2 other values (that can be BDDs) + * int success = cache_get3(opid, dd1, value2, value3, &result); + * int success = cache_put3(opid, dd1, value2, value3, result); + * - cache_get4/cache_put4 for any operation with 4 BDDs + * int success = cache_get4(opid, dd1, dd2, dd3, dd4, &result); + * int success = cache_get4(opid, dd1, dd2, dd3, dd4, result); + * + * Notes: + * - The "result" is any 64-bit value + * - Use "0" for unused parameters + */ + +typedef struct cache_entry *cache_entry_t; + +/** + * Primitives for cache get/put + */ +int cache_get(uint64_t a, uint64_t b, uint64_t c, uint64_t *res); +int cache_put(uint64_t a, uint64_t b, uint64_t c, uint64_t res); + +/** + * Helper function to get next 'operation id' (during initialization of modules) + */ +uint64_t cache_next_opid(); + +/** + * dd must be MTBDD, d2/d3 can be anything + */ +static inline int __attribute__((unused)) +cache_get3(uint64_t opid, uint64_t dd, uint64_t d2, uint64_t d3, uint64_t *res) +{ + return cache_get(dd | opid, d2, d3, res); +} + +/** + * dd/dd2/dd3/dd4 must be MTBDDs + */ +static inline int __attribute__((unused)) +cache_get4(uint64_t opid, uint64_t dd, uint64_t dd2, uint64_t dd3, uint64_t dd4, uint64_t *res) +{ + uint64_t p2 = dd2 | ((dd4 & 0x00000000000fffff) << 40); // 20 bits and complement bit + if (dd4 & 0x8000000000000000) p2 |= 0x4000000000000000; + uint64_t p3 = dd3 | ((dd4 & 0x000000fffff00000) << 20); // 20 bits + + return cache_get3(opid, dd, p2, p3, res); +} + +/** + * dd must be MTBDD, d2/d3 can be anything + */ +static inline int __attribute__((unused)) +cache_put3(uint64_t opid, uint64_t dd, uint64_t d2, uint64_t d3, uint64_t res) +{ + return cache_put(dd | opid, d2, d3, res); +} + +/** + * dd/dd2/dd3/dd4 must be MTBDDs + */ +static inline int __attribute__((unused)) +cache_put4(uint64_t opid, uint64_t dd, uint64_t dd2, uint64_t dd3, uint64_t dd4, uint64_t res) +{ + uint64_t p2 = dd2 | ((dd4 & 0x00000000000fffff) << 40); // 20 bits and complement bit + if (dd4 & 0x8000000000000000) p2 |= 0x4000000000000000; + uint64_t p3 = dd3 | ((dd4 & 0x000000fffff00000) << 20); // 20 bits + + return cache_put3(opid, dd, p2, p3, res); +} +/** + * Functions for Sylvan for cache management + */ + +void cache_create(size_t _cache_size, size_t _max_size); + +void cache_free(); + +void cache_clear(); + +void cache_setsize(size_t size); + +size_t cache_getused(); + +size_t cache_getsize(); + +size_t cache_getmaxsize(); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/sylvan_common.c b/src/sylvan_common.c new file mode 100644 index 000000000..aa0374a7b --- /dev/null +++ b/src/sylvan_common.c @@ -0,0 +1,304 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include +#include + +#ifndef cas +#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new))) +#endif + +/** + * Static global variables + */ + +llmsset_t nodes; + +/** + * Retrieve nodes + */ + +llmsset_t +__sylvan_get_internal_data() +{ + return nodes; +} + +/** + * Calculate table usage (in parallel) + */ +VOID_TASK_IMPL_2(sylvan_table_usage, size_t*, filled, size_t*, total) +{ + size_t tot = llmsset_get_size(nodes); + if (filled != NULL) *filled = llmsset_count_marked(nodes); + if (total != NULL) *total = tot; +} + +/** + * Implementation of garbage collection + */ +static int gc_enabled = 1; +static volatile int gc; // variable used in cas switch to ensure only one gc at a time + +struct reg_gc_mark_entry +{ + struct reg_gc_mark_entry *next; + gc_mark_cb cb; + int order; +}; + +static struct reg_gc_mark_entry *gc_mark_register = NULL; + +void +sylvan_gc_add_mark(int order, gc_mark_cb cb) +{ + struct reg_gc_mark_entry *e = (struct reg_gc_mark_entry*)malloc(sizeof(struct reg_gc_mark_entry)); + e->cb = cb; + e->order = order; + if (gc_mark_register == NULL || gc_mark_register->order>order) { + e->next = gc_mark_register; + gc_mark_register = e; + return; + } + struct reg_gc_mark_entry *f = gc_mark_register; + for (;;) { + if (f->next == NULL) { + e->next = NULL; + f->next = e; + return; + } + if (f->next->order > order) { + e->next = f->next; + f->next = e; + return; + } + f = f->next; + } +} + +static gc_hook_cb gc_hook; + +void +sylvan_gc_set_hook(gc_hook_cb new_hook) +{ + gc_hook = new_hook; +} + +void +sylvan_gc_enable() +{ + gc_enabled = 1; +} + +void +sylvan_gc_disable() +{ + gc_enabled = 0; +} + +/* Mark hook for cache */ +VOID_TASK_0(sylvan_gc_mark_cache) +{ + /* We simply clear the cache. + * Alternatively, we could implement for example some strategy + * where part of the cache is cleared and part is marked + */ + cache_clear(); +} + +/* Default hook */ + +size_t +next_size(size_t n) +{ +#if SYLVAN_SIZE_FIBONACCI + size_t f1=1, f2=1; + for (;;) { + f2 += f1; + if (f2 > n) return f2; + f1 += f2; + if (f1 > n) return f1; + } +#else + return n*2; +#endif +} + +VOID_TASK_IMPL_0(sylvan_gc_aggressive_resize) +{ + /** + * Always resize when gc called + */ + size_t max_size = llmsset_get_max_size(nodes); + size_t size = llmsset_get_size(nodes); + if (size < max_size) { + size_t new_size = next_size(size); + if (new_size > max_size) new_size = max_size; + llmsset_set_size(nodes, new_size); + size_t cache_size = cache_getsize(); + size_t cache_max = cache_getmaxsize(); + if (cache_size < cache_max) { + new_size = next_size(cache_size); + if (new_size > cache_max) new_size = cache_max; + cache_setsize(new_size); + } + } +} + +VOID_TASK_IMPL_0(sylvan_gc_default_hook) +{ + /** + * Default behavior: + * if we can resize the nodes set, and if we use more than 50%, then increase size + */ + size_t max_size = llmsset_get_max_size(nodes); + size_t size = llmsset_get_size(nodes); + if (size < max_size) { + size_t marked = llmsset_count_marked(nodes); + if (marked*2 > size) { + size_t new_size = next_size(size); + if (new_size > max_size) new_size = max_size; + llmsset_set_size(nodes, new_size); + size_t cache_size = cache_getsize(); + size_t cache_max = cache_getmaxsize(); + if (cache_size < cache_max) { + new_size = next_size(cache_size); + if (new_size > cache_max) new_size = cache_max; + cache_setsize(new_size); + } + } + } +} + +VOID_TASK_0(sylvan_gc_call_hook) +{ + // call hook function (resizing, reordering, etc) + WRAP(gc_hook); +} + 
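A sketch of how client code can hook into this garbage-collection machinery: register an extra marking callback so a global BDD survives collection, and optionally switch to the aggressive resizing hook. my_global, my_gc_mark and my_gc_setup are illustrative names; the callbacks are Lace tasks passed via TASK(), as in sylvan_init_package below.

static BDD my_global = sylvan_false;              // some BDD kept outside the usual ref mechanisms

VOID_TASK_0(my_gc_mark)
{
    if (!sylvan_isconst(my_global)) sylvan_gc_mark_rec(my_global);   // mark its reachable nodes
}

void my_gc_setup(void)
{
    /* order 15 runs before unmarked nodes are destroyed (order 19)
       and before the table is rehashed (order 30) */
    sylvan_gc_add_mark(15, TASK(my_gc_mark));
    sylvan_gc_set_hook(TASK(sylvan_gc_aggressive_resize));   // grow the tables on every collection
}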
+VOID_TASK_0(sylvan_gc_rehash) +{ + // rehash marked nodes + llmsset_rehash(nodes); +} + +VOID_TASK_0(sylvan_gc_destroy_unmarked) +{ + llmsset_destroy_unmarked(nodes); +} + +VOID_TASK_0(sylvan_gc_go) +{ + sylvan_stats_count(SYLVAN_GC_COUNT); + sylvan_timer_start(SYLVAN_GC); + + // clear hash array + llmsset_clear(nodes); + + // call mark functions, hook and rehash + struct reg_gc_mark_entry *e = gc_mark_register; + while (e != NULL) { + WRAP(e->cb); + e = e->next; + } + + sylvan_timer_stop(SYLVAN_GC); +} + +/* Perform garbage collection */ +VOID_TASK_IMPL_0(sylvan_gc) +{ + if (!gc_enabled) return; + if (cas(&gc, 0, 1)) { + NEWFRAME(sylvan_gc_go); + gc = 0; + } else { + /* wait for new frame to appear */ + while (*(Task* volatile*)&(lace_newframe.t) == 0) {} + lace_yield(__lace_worker, __lace_dq_head); + } +} + +/** + * Package init and quit functions + */ +void +sylvan_init_package(size_t tablesize, size_t maxsize, size_t cachesize, size_t max_cachesize) +{ + if (tablesize > maxsize) tablesize = maxsize; + if (cachesize > max_cachesize) cachesize = max_cachesize; + + if (maxsize > 0x000003ffffffffff) { + fprintf(stderr, "sylvan_init_package error: tablesize must be <= 42 bits!\n"); + exit(1); + } + + nodes = llmsset_create(tablesize, maxsize); + cache_create(cachesize, max_cachesize); + + gc = 0; +#if SYLVAN_AGGRESSIVE_RESIZE + gc_hook = TASK(sylvan_gc_aggressive_resize); +#else + gc_hook = TASK(sylvan_gc_default_hook); +#endif + sylvan_gc_add_mark(10, TASK(sylvan_gc_mark_cache)); + sylvan_gc_add_mark(19, TASK(sylvan_gc_destroy_unmarked)); + sylvan_gc_add_mark(20, TASK(sylvan_gc_call_hook)); + sylvan_gc_add_mark(30, TASK(sylvan_gc_rehash)); + + LACE_ME; + sylvan_stats_init(); +} + +struct reg_quit_entry +{ + struct reg_quit_entry *next; + quit_cb cb; +}; + +static struct reg_quit_entry *quit_register = NULL; + +void +sylvan_register_quit(quit_cb cb) +{ + struct reg_quit_entry *e = (struct reg_quit_entry*)malloc(sizeof(struct reg_quit_entry)); + e->next = quit_register; + e->cb = cb; + quit_register = e; +} + +void +sylvan_quit() +{ + while (quit_register != NULL) { + struct reg_quit_entry *e = quit_register; + quit_register = e->next; + e->cb(); + free(e); + } + + while (gc_mark_register != NULL) { + struct reg_gc_mark_entry *e = gc_mark_register; + gc_mark_register = e->next; + free(e); + } + + cache_free(); + llmsset_free(nodes); +} diff --git a/src/sylvan_common.h b/src/sylvan_common.h new file mode 100644 index 000000000..7f512a904 --- /dev/null +++ b/src/sylvan_common.h @@ -0,0 +1,85 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
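Putting the lifecycle functions above together, a minimal program skeleton. The lace_init/lace_startup/lace_exit calls are assumed from the Lace scheduler and are not part of this hunk; the table sizes are arbitrary illustrative powers of two.

int main(void)
{
    lace_init(0, 1000000);                   // 0 = autodetect worker count (assumed Lace API)
    lace_startup(0, NULL, NULL);             // default stack size, no startup callback

    /* nodes table and cache start at 2^22 entries, may grow to 2^26;
       the maximum must fit in 42 bits (checked in sylvan_init_package) */
    sylvan_init_package(1ULL << 22, 1ULL << 26, 1ULL << 22, 1ULL << 26);
    sylvan_init_bdd(4);                      // granularity 4: within the 4-16 range the docs suggest

    /* ... build and use BDDs here ... */

    sylvan_quit();                           // runs registered quit callbacks, frees table and cache
    lace_exit();                             // assumed Lace API
    return 0;
}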
+ */ + +#ifndef SYLVAN_COMMON_H +#define SYLVAN_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/* Garbage collection test task - t */ +#define sylvan_gc_test() YIELD_NEWFRAME() + +// BDD operations +#define CACHE_BDD_ITE (0LL<<40) +#define CACHE_BDD_AND (1LL<<40) +#define CACHE_BDD_XOR (2LL<<40) +#define CACHE_BDD_EXISTS (3LL<<40) +#define CACHE_BDD_AND_EXISTS (4LL<<40) +#define CACHE_BDD_RELNEXT (5LL<<40) +#define CACHE_BDD_RELPREV (6LL<<40) +#define CACHE_BDD_SATCOUNT (7LL<<40) +#define CACHE_BDD_COMPOSE (8LL<<40) +#define CACHE_BDD_RESTRICT (9LL<<40) +#define CACHE_BDD_CONSTRAIN (10LL<<40) +#define CACHE_BDD_CLOSURE (11LL<<40) +#define CACHE_BDD_ISBDD (12LL<<40) +#define CACHE_BDD_SUPPORT (13LL<<40) +#define CACHE_BDD_PATHCOUNT (14LL<<40) + +// MDD operations +#define CACHE_MDD_RELPROD (20LL<<40) +#define CACHE_MDD_MINUS (21LL<<40) +#define CACHE_MDD_UNION (22LL<<40) +#define CACHE_MDD_INTERSECT (23LL<<40) +#define CACHE_MDD_PROJECT (24LL<<40) +#define CACHE_MDD_JOIN (25LL<<40) +#define CACHE_MDD_MATCH (26LL<<40) +#define CACHE_MDD_RELPREV (27LL<<40) +#define CACHE_MDD_SATCOUNT (28LL<<40) +#define CACHE_MDD_SATCOUNTL1 (29LL<<40) +#define CACHE_MDD_SATCOUNTL2 (30LL<<40) + +// MTBDD operations +#define CACHE_MTBDD_APPLY (40LL<<40) +#define CACHE_MTBDD_UAPPLY (41LL<<40) +#define CACHE_MTBDD_ABSTRACT (42LL<<40) +#define CACHE_MTBDD_ITE (43LL<<40) +#define CACHE_MTBDD_AND_EXISTS (44LL<<40) +#define CACHE_MTBDD_SUPPORT (45LL<<40) +#define CACHE_MTBDD_COMPOSE (46LL<<40) +#define CACHE_MTBDD_EQUAL_NORM (47LL<<40) +#define CACHE_MTBDD_EQUAL_NORM_REL (48LL<<40) +#define CACHE_MTBDD_MINIMUM (49LL<<40) +#define CACHE_MTBDD_MAXIMUM (50LL<<40) +#define CACHE_MTBDD_LEQ (51LL<<40) +#define CACHE_MTBDD_LESS (52LL<<40) +#define CACHE_MTBDD_GEQ (53LL<<40) +#define CACHE_MTBDD_GREATER (54LL<<40) +#define CACHE_MTBDD_NONZERO_COUNT (55LL<<40) + +/** + * Registration of quit functions + */ +typedef void (*quit_cb)(); +void sylvan_register_quit(quit_cb cb); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/sylvan_config.h b/src/sylvan_config.h new file mode 100644 index 000000000..cb234227d --- /dev/null +++ b/src/sylvan_config.h @@ -0,0 +1,30 @@ +/* Operation cache: use bitmasks for module (size must be power of 2!) */ +#ifndef CACHE_MASK +#define CACHE_MASK 1 +#endif + +/* Nodes table: use bitmasks for module (size must be power of 2!) */ +#ifndef LLMSSET_MASK +#define LLMSSET_MASK 1 +#endif + +/** + * Use Fibonacci sequence as resizing strategy. + * This MAY result in more conservative memory consumption, but is not + * great for performance. + * By default, powers of 2 should be used. + * If you set this, then set CACHE_MASK and LLMSSET_MASK to 0. + */ +#ifndef SYLVAN_SIZE_FIBONACCI +#define SYLVAN_SIZE_FIBONACCI 0 +#endif + +/* Enable/disable counters and timers */ +#ifndef SYLVAN_STATS +#define SYLVAN_STATS 0 +#endif + +/* Aggressive or conservative resizing strategy */ +#ifndef SYLVAN_AGGRESSIVE_RESIZE +#define SYLVAN_AGGRESSIVE_RESIZE 1 +#endif diff --git a/src/sylvan_gmp.c b/src/sylvan_gmp.c new file mode 100644 index 000000000..0437b1be8 --- /dev/null +++ b/src/sylvan_gmp.c @@ -0,0 +1,595 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +/** + * Helper function for hash + */ +#ifndef rotl64 +static inline uint64_t +rotl64(uint64_t x, int8_t r) +{ + return ((x<<r) | (x>>(64-r))); +} +#endif + +static uint64_t +gmp_hash(const uint64_t v, const uint64_t seed) +{ + /* Hash the mpq in pointer v + * A simpler way would be to hash the result of mpq_get_d. + * We just hash on the contents of the memory */ + + mpq_ptr x = (mpq_ptr)(size_t)v; + + const uint64_t prime = 1099511628211; + uint64_t hash = seed; + mp_limb_t *limbs; + + // hash "numerator" limbs + limbs = x[0]._mp_num._mp_d; + for (int i=0; i> 32); +} + +static int +gmp_equals(const uint64_t left, const uint64_t right) +{ + /* This function is called by the unique table when comparing a new + leaf with an existing leaf */ + mpq_ptr x = (mpq_ptr)(size_t)left; + mpq_ptr y = (mpq_ptr)(size_t)right; + + /* Just compare x and y */ + return mpq_equal(x, y) ? 1 : 0; +} + +static void +gmp_create(uint64_t *val) +{ + /* This function is called by the unique table when a leaf does not yet exist. + We make a copy, which will be stored in the hash table. */ + mpq_ptr x = (mpq_ptr)malloc(sizeof(__mpq_struct)); + mpq_init(x); + mpq_set(x, *(mpq_ptr*)val); + *(mpq_ptr*)val = x; +} + +static void +gmp_destroy(uint64_t val) +{ + /* This function is called by the unique table + when a leaf is removed during garbage collection.
*/ + mpq_clear((mpq_ptr)val); + free((void*)val); +} + +static uint32_t gmp_type; +static uint64_t CACHE_GMP_AND_EXISTS; + +/** + * Initialize gmp custom leaves + */ +void +gmp_init() +{ + /* Register custom leaf 3 */ + gmp_type = mtbdd_register_custom_leaf(gmp_hash, gmp_equals, gmp_create, gmp_destroy); + CACHE_GMP_AND_EXISTS = cache_next_opid(); +} + +/** + * Create GMP mpq leaf + */ +MTBDD +mtbdd_gmp(mpq_t val) +{ + mpq_canonicalize(val); + return mtbdd_makeleaf(gmp_type, (size_t)val); +} + +/** + * Operation "plus" for two mpq MTBDDs + * Interpret partial function as "0" + */ +TASK_IMPL_2(MTBDD, gmp_op_plus, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + /* Check for partial functions */ + if (a == mtbdd_false) return b; + if (b == mtbdd_false) return a; + + /* If both leaves, compute plus */ + if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); + + mpq_t mres; + mpq_init(mres); + mpq_add(mres, ma, mb); + MTBDD res = mtbdd_gmp(mres); + mpq_clear(mres); + return res; + } + + /* Commutative, so swap a,b for better cache performance */ + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +/** + * Operation "minus" for two mpq MTBDDs + * Interpret partial function as "0" + */ +TASK_IMPL_2(MTBDD, gmp_op_minus, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + /* Check for partial functions */ + if (a == mtbdd_false) return gmp_neg(b); + if (b == mtbdd_false) return a; + + /* If both leaves, compute plus */ + if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); + + mpq_t mres; + mpq_init(mres); + mpq_sub(mres, ma, mb); + MTBDD res = mtbdd_gmp(mres); + mpq_clear(mres); + return res; + } + + return mtbdd_invalid; +} + +/** + * Operation "times" for two mpq MTBDDs. + * One of the parameters can be a BDD, then it is interpreted as a filter. + * For partial functions, domain is intersection + */ +TASK_IMPL_2(MTBDD, gmp_op_times, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + /* Check for partial functions and for Boolean (filter) */ + if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false; + + /* If one of Boolean, interpret as filter */ + if (a == mtbdd_true) return b; + if (b == mtbdd_true) return a; + + /* Handle multiplication of leaves */ + if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); + + // compute result + mpq_t mres; + mpq_init(mres); + mpq_mul(mres, ma, mb); + MTBDD res = mtbdd_gmp(mres); + mpq_clear(mres); + return res; + } + + /* Commutative, so make "a" the lowest for better cache performance */ + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +/** + * Operation "divide" for two mpq MTBDDs. + * For partial functions, domain is intersection + */ +TASK_IMPL_2(MTBDD, gmp_op_divide, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + /* Check for partial functions */ + if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false; + + /* Handle division of leaves */ + if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); + + // compute result + mpq_t mres; + mpq_init(mres); + mpq_div(mres, ma, mb); + MTBDD res = mtbdd_gmp(mres); + mpq_clear(mres); + return res; + } + + return mtbdd_invalid; +} + +/** + * Operation "min" for two mpq MTBDDs. 
+ */ +TASK_IMPL_2(MTBDD, gmp_op_min, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + /* Handle partial functions */ + if (a == mtbdd_false) return b; + if (b == mtbdd_false) return a; + + /* Handle trivial case */ + if (a == b) return a; + + /* Compute result for leaves */ + if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); + int cmp = mpq_cmp(ma, mb); + return cmp < 0 ? a : b; + } + + /* For cache performance */ + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +/** + * Operation "max" for two mpq MTBDDs. + */ +TASK_IMPL_2(MTBDD, gmp_op_max, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + /* Handle partial functions */ + if (a == mtbdd_false) return b; + if (b == mtbdd_false) return a; + + /* Handle trivial case */ + if (a == b) return a; + + /* Compute result for leaves */ + if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) { + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); + int cmp = mpq_cmp(ma, mb); + return cmp > 0 ? a : b; + } + + /* For cache performance */ + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +/** + * Operation "neg" for one mpq MTBDD + */ +TASK_IMPL_2(MTBDD, gmp_op_neg, MTBDD, dd, size_t, p) +{ + /* Handle partial functions */ + if (dd == mtbdd_false) return mtbdd_false; + + /* Compute result for leaf */ + if (mtbdd_isleaf(dd)) { + mpq_ptr m = (mpq_ptr)mtbdd_getvalue(dd); + + mpq_t mres; + mpq_init(mres); + mpq_neg(mres, m); + MTBDD res = mtbdd_gmp(mres); + mpq_clear(mres); + return res; + } + + return mtbdd_invalid; + (void)p; +} + +/** + * Operation "abs" for one mpq MTBDD + */ +TASK_IMPL_2(MTBDD, gmp_op_abs, MTBDD, dd, size_t, p) +{ + /* Handle partial functions */ + if (dd == mtbdd_false) return mtbdd_false; + + /* Compute result for leaf */ + if (mtbdd_isleaf(dd)) { + mpq_ptr m = (mpq_ptr)mtbdd_getvalue(dd); + + mpq_t mres; + mpq_init(mres); + mpq_abs(mres, m); + MTBDD res = mtbdd_gmp(mres); + mpq_clear(mres); + return res; + } + + return mtbdd_invalid; + (void)p; +} + +/** + * The abstraction operators are called in either of two ways: + * - with k=0, then just calculate "a op b" + * - with k<>0, then just calculate "a := a op a", k times + */ + +TASK_IMPL_3(MTBDD, gmp_abstract_op_plus, MTBDD, a, MTBDD, b, int, k) +{ + if (k==0) { + return mtbdd_apply(a, b, TASK(gmp_op_plus)); + } else { + MTBDD res = a; + for (int i=0; i= value (double) to True, or False otherwise. + */ +TASK_2(MTBDD, gmp_op_threshold_d, MTBDD, a, size_t, svalue) +{ + /* Handle partial function */ + if (a == mtbdd_false) return mtbdd_false; + + /* Compute result */ + if (mtbdd_isleaf(a)) { + double value = *(double*)&svalue; + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + return mpq_get_d(ma) >= value ? mtbdd_true : mtbdd_false; + } + + return mtbdd_invalid; +} + +/** + * Convert to Boolean MTBDD, terminals > value (double) to True, or False otherwise. + */ +TASK_2(MTBDD, gmp_op_strict_threshold_d, MTBDD, a, size_t, svalue) +{ + /* Handle partial function */ + if (a == mtbdd_false) return mtbdd_false; + + /* Compute result */ + if (mtbdd_isleaf(a)) { + double value = *(double*)&svalue; + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + return mpq_get_d(ma) > value ? 
mtbdd_true : mtbdd_false; + } + + return mtbdd_invalid; +} + +TASK_IMPL_2(MTBDD, gmp_threshold_d, MTBDD, dd, double, d) +{ + return mtbdd_uapply(dd, TASK(gmp_op_threshold_d), *(size_t*)&d); +} + +TASK_IMPL_2(MTBDD, gmp_strict_threshold_d, MTBDD, dd, double, d) +{ + return mtbdd_uapply(dd, TASK(gmp_op_strict_threshold_d), *(size_t*)&d); +} + +/** + * Operation "threshold" for mpq MTBDDs. + * The second parameter must be a mpq leaf. + */ +TASK_IMPL_2(MTBDD, gmp_op_threshold, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + /* Check for partial functions */ + if (a == mtbdd_false) return mtbdd_false; + + /* Handle comparison of leaves */ + if (mtbdd_isleaf(a)) { + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); + int cmp = mpq_cmp(ma, mb); + return cmp >= 0 ? mtbdd_true : mtbdd_false; + } + + return mtbdd_invalid; +} + +/** + * Operation "strict threshold" for mpq MTBDDs. + * The second parameter must be a mpq leaf. + */ +TASK_IMPL_2(MTBDD, gmp_op_strict_threshold, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + /* Check for partial functions */ + if (a == mtbdd_false) return mtbdd_false; + + /* Handle comparison of leaves */ + if (mtbdd_isleaf(a)) { + mpq_ptr ma = (mpq_ptr)mtbdd_getvalue(a); + mpq_ptr mb = (mpq_ptr)mtbdd_getvalue(b); + int cmp = mpq_cmp(ma, mb); + return cmp > 0 ? mtbdd_true : mtbdd_false; + } + + return mtbdd_invalid; +} + +/** + * Multiply and , and abstract variables using summation. + * This is similar to the "and_exists" operation in BDDs. + */ +TASK_IMPL_3(MTBDD, gmp_and_exists, MTBDD, a, MTBDD, b, MTBDD, v) +{ + /* Check terminal cases */ + + /* If v == true, then is an empty set */ + if (v == mtbdd_true) return mtbdd_apply(a, b, TASK(gmp_op_times)); + + /* Try the times operator on a and b */ + MTBDD result = CALL(gmp_op_times, &a, &b); + if (result != mtbdd_invalid) { + /* Times operator successful, store reference (for garbage collection) */ + mtbdd_refs_push(result); + /* ... and perform abstraction */ + result = mtbdd_abstract(result, v, TASK(gmp_abstract_op_plus)); + mtbdd_refs_pop(1); + /* Note that the operation cache is used in mtbdd_abstract */ + return result; + } + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache. Note that we do this now, since the times operator might swap a and b (commutative) */ + if (cache_get3(CACHE_GMP_AND_EXISTS, a, b, v, &result)) return result; + + /* Now, v is not a constant, and either a or b is not a constant */ + + /* Get top variable */ + int la = mtbdd_isleaf(a); + int lb = mtbdd_isleaf(b); + mtbddnode_t na = la ? 0 : GETNODE(a); + mtbddnode_t nb = lb ? 0 : GETNODE(b); + uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); + uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); + uint32_t var = va < vb ? va : vb; + + mtbddnode_t nv = GETNODE(v); + uint32_t vv = mtbddnode_getvariable(nv); + + if (vv < var) { + /* Recursive, then abstract result */ + result = CALL(gmp_and_exists, a, b, node_gethigh(v, nv)); + mtbdd_refs_push(result); + result = mtbdd_apply(result, result, TASK(gmp_op_plus)); + mtbdd_refs_pop(1); + } else { + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + alow = (!la && va == var) ? node_getlow(a, na) : a; + ahigh = (!la && va == var) ? node_gethigh(a, na) : a; + blow = (!lb && vb == var) ? node_getlow(b, nb) : b; + bhigh = (!lb && vb == var) ? 
node_gethigh(b, nb) : b; + + if (vv == var) { + /* Recursive, then abstract result */ + mtbdd_refs_spawn(SPAWN(gmp_and_exists, ahigh, bhigh, node_gethigh(v, nv))); + MTBDD low = mtbdd_refs_push(CALL(gmp_and_exists, alow, blow, node_gethigh(v, nv))); + MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(gmp_and_exists))); + result = CALL(mtbdd_apply, low, high, TASK(gmp_op_plus)); + mtbdd_refs_pop(2); + } else /* vv > v */ { + /* Recursive, then create node */ + mtbdd_refs_spawn(SPAWN(gmp_and_exists, ahigh, bhigh, v)); + MTBDD low = mtbdd_refs_push(CALL(gmp_and_exists, alow, blow, v)); + MTBDD high = mtbdd_refs_sync(SYNC(gmp_and_exists)); + mtbdd_refs_pop(1); + result = mtbdd_makenode(var, low, high); + } + } + + /* Store in cache */ + cache_put3(CACHE_GMP_AND_EXISTS, a, b, v, result); + return result; +} diff --git a/src/sylvan_gmp.h b/src/sylvan_gmp.h new file mode 100644 index 000000000..fbf6bc2ad --- /dev/null +++ b/src/sylvan_gmp.h @@ -0,0 +1,182 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This is an implementation of GMP mpq custom leaves of MTBDDs + */ + +#ifndef SYLVAN_GMP_H +#define SYLVAN_GMP_H + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * Initialize GMP custom leaves + */ +void gmp_init(); + +/** + * Create MPQ leaf + */ +MTBDD mtbdd_gmp(mpq_t val); + +/** + * Operation "plus" for two mpq MTBDDs + */ +TASK_DECL_2(MTBDD, gmp_op_plus, MTBDD*, MTBDD*); +TASK_DECL_3(MTBDD, gmp_abstract_op_plus, MTBDD, MTBDD, int); + +/** + * Operation "minus" for two mpq MTBDDs + */ +TASK_DECL_2(MTBDD, gmp_op_minus, MTBDD*, MTBDD*); + +/** + * Operation "times" for two mpq MTBDDs + */ +TASK_DECL_2(MTBDD, gmp_op_times, MTBDD*, MTBDD*); +TASK_DECL_3(MTBDD, gmp_abstract_op_times, MTBDD, MTBDD, int); + +/** + * Operation "divide" for two mpq MTBDDs + */ +TASK_DECL_2(MTBDD, gmp_op_divide, MTBDD*, MTBDD*); + +/** + * Operation "min" for two mpq MTBDDs + */ +TASK_DECL_2(MTBDD, gmp_op_min, MTBDD*, MTBDD*); +TASK_DECL_3(MTBDD, gmp_abstract_op_min, MTBDD, MTBDD, int); + +/** + * Operation "max" for two mpq MTBDDs + */ +TASK_DECL_2(MTBDD, gmp_op_max, MTBDD*, MTBDD*); +TASK_DECL_3(MTBDD, gmp_abstract_op_max, MTBDD, MTBDD, int); + +/** + * Operation "negate" for one mpq MTBDD + */ +TASK_DECL_2(MTBDD, gmp_op_neg, MTBDD, size_t); + +/** + * Operation "abs" for one mpq MTBDD + */ +TASK_DECL_2(MTBDD, gmp_op_abs, MTBDD, size_t); + +/** + * Compute a + b + */ +#define gmp_plus(a, b) mtbdd_apply(a, b, TASK(gmp_op_plus)) + +/** + * Compute a + b + */ +#define gmp_minus(a, b) mtbdd_apply(a, b, TASK(gmp_op_minus)) + +/** + * Compute a * b + */ +#define gmp_times(a, b) mtbdd_apply(a, b, TASK(gmp_op_times)) + +/** + * Compute a * b + */ +#define gmp_divide(a, b) mtbdd_apply(a, b, TASK(gmp_op_divide)) + +/** + * Compute min(a, b) + */ +#define gmp_min(a, b) mtbdd_apply(a, b, TASK(gmp_op_min)) + +/** + * Compute max(a, b) + */ +#define gmp_max(a, b) mtbdd_apply(a, b, 
TASK(gmp_op_max)) + +/** + * Compute -a + */ +#define gmp_neg(a) mtbdd_uapply(a, TASK(gmp_op_neg), 0); + +/** + * Compute abs(a) + */ +#define gmp_abs(a) mtbdd_uapply(a, TASK(gmp_op_abs), 0); + +/** + * Abstract the variables in v from dd by taking the sum of all values + */ +#define gmp_abstract_plus(dd, v) mtbdd_abstract(dd, v, TASK(gmp_abstract_op_plus)) + +/** + * Abstract the variables in v from dd by taking the product of all values + */ +#define gmp_abstract_times(dd, v) mtbdd_abstract(dd, v, TASK(gmp_abstract_op_times)) + +/** + * Abstract the variables in v from dd by taking the minimum of all values + */ +#define gmp_abstract_min(dd, v) mtbdd_abstract(dd, v, TASK(gmp_abstract_op_min)) + +/** + * Abstract the variables in v from dd by taking the maximum of all values + */ +#define gmp_abstract_max(dd, v) mtbdd_abstract(dd, v, TASK(gmp_abstract_op_max)) + +/** + * Multiply a and b, and abstract the variables vars using summation. + * This is similar to the "and_exists" operation in BDDs. + */ +TASK_DECL_3(MTBDD, gmp_and_exists, MTBDD, MTBDD, MTBDD); +#define gmp_and_exists(a, b, vars) CALL(gmp_and_exists, a, b, vars) + +/** + * Convert dd to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise;
 * Parameter dd is the MTBDD to convert; parameter value is a GMP mpq leaf + */ +TASK_DECL_2(MTBDD, gmp_op_threshold, MTBDD*, MTBDD*); +#define gmp_threshold(dd, value) mtbdd_apply(dd, value, TASK(gmp_op_threshold)); + +/** + * Convert dd to a Boolean MTBDD, translate terminals > value to 1 and to 0 otherwise; + * Parameter dd
    is the MTBDD to convert; parameter is an GMP mpq leaf + */ +TASK_DECL_2(MTBDD, gmp_op_strict_threshold, MTBDD*, MTBDD*); +#define gmp_strict_threshold(dd, value) mtbdd_apply(dd, value, TASK(gmp_op_strict_threshold)); + +/** + * Convert to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, gmp_threshold_d, MTBDD, double); +#define gmp_threshold_d(dd, value) CALL(gmp_threshold_d, dd, value) + +/** + * Convert to a Boolean MTBDD, translate terminals > value to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, gmp_strict_threshold_d, MTBDD, double); +#define gmp_strict_threshold_d(dd, value) CALL(gmp_strict_threshold_d, dd, value) + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/sylvan_ldd.c b/src/sylvan_ldd.c new file mode 100644 index 000000000..814b7e61c --- /dev/null +++ b/src/sylvan_ldd.c @@ -0,0 +1,2560 @@ +/* + * Copyright 2011-2014 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/** + * MDD node structure + */ +typedef struct __attribute__((packed)) mddnode { + uint64_t a, b; +} * mddnode_t; // 16 bytes + +// RmRR RRRR RRRR VVVV | VVVV DcDD DDDD DDDD (little endian - in memory) +// VVVV RRRR RRRR RRRm | DDDD DDDD DDDc VVVV (big endian) + +// Ensure our mddnode is 16 bytes +typedef char __lddmc_check_mddnode_t_is_16_bytes[(sizeof(struct mddnode)==16) ? 1 : -1]; + +static inline uint32_t __attribute__((unused)) +mddnode_getvalue(mddnode_t n) +{ + return *(uint32_t*)((uint8_t*)n+6); +} + +static inline uint8_t __attribute__((unused)) +mddnode_getmark(mddnode_t n) +{ + return n->a & 1; +} + +static inline uint8_t __attribute__((unused)) +mddnode_getcopy(mddnode_t n) +{ + return n->b & 0x10000 ? 1 : 0; +} + +static inline uint64_t __attribute__((unused)) +mddnode_getright(mddnode_t n) +{ + return (n->a & 0x0000ffffffffffff) >> 1; +} + +static inline uint64_t __attribute__((unused)) +mddnode_getdown(mddnode_t n) +{ + return n->b >> 17; +} + +static inline void __attribute__((unused)) +mddnode_setvalue(mddnode_t n, uint32_t value) +{ + *(uint32_t*)((uint8_t*)n+6) = value; +} + +static inline void __attribute__((unused)) +mddnode_setmark(mddnode_t n, uint8_t mark) +{ + n->a = (n->a & 0xfffffffffffffffe) | (mark ? 
1 : 0); +} + +static inline void __attribute__((unused)) +mddnode_setright(mddnode_t n, uint64_t right) +{ + n->a = (n->a & 0xffff000000000001) | (right << 1); +} + +static inline void __attribute__((unused)) +mddnode_setdown(mddnode_t n, uint64_t down) +{ + n->b = (n->b & 0x000000000001ffff) | (down << 16); +} + +static inline void __attribute__((unused)) +mddnode_make(mddnode_t n, uint32_t value, uint64_t right, uint64_t down) +{ + n->a = right << 1; + n->b = down << 17; + *(uint32_t*)((uint8_t*)n+6) = value; +} + +static inline void __attribute__((unused)) +mddnode_makecopy(mddnode_t n, uint64_t right, uint64_t down) +{ + n->a = right << 1; + n->b = ((down << 1) | 1) << 16; +} + +#define GETNODE(mdd) ((mddnode_t)llmsset_index_to_ptr(nodes, mdd)) + +/** + * Implementation of garbage collection + */ + +/* Recursively mark MDD nodes as 'in use' */ +VOID_TASK_IMPL_1(lddmc_gc_mark_rec, MDD, mdd) +{ + if (mdd <= lddmc_true) return; + + if (llmsset_mark(nodes, mdd)) { + mddnode_t n = GETNODE(mdd); + SPAWN(lddmc_gc_mark_rec, mddnode_getright(n)); + CALL(lddmc_gc_mark_rec, mddnode_getdown(n)); + SYNC(lddmc_gc_mark_rec); + } +} + +/** + * External references + */ + +refs_table_t mdd_refs; + +MDD +lddmc_ref(MDD a) +{ + if (a == lddmc_true || a == lddmc_false) return a; + refs_up(&mdd_refs, a); + return a; +} + +void +lddmc_deref(MDD a) +{ + if (a == lddmc_true || a == lddmc_false) return; + refs_down(&mdd_refs, a); +} + +size_t +lddmc_count_refs() +{ + return refs_count(&mdd_refs); +} + +/* Called during garbage collection */ +VOID_TASK_0(lddmc_gc_mark_external_refs) +{ + // iterate through refs hash table, mark all found + size_t count=0; + uint64_t *it = refs_iter(&mdd_refs, 0, mdd_refs.refs_size); + while (it != NULL) { + SPAWN(lddmc_gc_mark_rec, refs_next(&mdd_refs, &it, mdd_refs.refs_size)); + count++; + } + while (count--) { + SYNC(lddmc_gc_mark_rec); + } +} + +/* Infrastructure for internal markings */ +DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); + +VOID_TASK_0(lddmc_refs_mark_task) +{ + LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); + size_t i, j=0; + for (i=0; ir_count; i++) { + if (j >= 40) { + while (j--) SYNC(lddmc_gc_mark_rec); + j=0; + } + SPAWN(lddmc_gc_mark_rec, lddmc_refs_key->results[i]); + j++; + } + for (i=0; is_count; i++) { + Task *t = lddmc_refs_key->spawns[i]; + if (!TASK_IS_STOLEN(t)) break; + if (TASK_IS_COMPLETED(t)) { + if (j >= 40) { + while (j--) SYNC(lddmc_gc_mark_rec); + j=0; + } + SPAWN(lddmc_gc_mark_rec, *(BDD*)TASK_RESULT(t)); + j++; + } + } + while (j--) SYNC(lddmc_gc_mark_rec); +} + +VOID_TASK_0(lddmc_refs_mark) +{ + TOGETHER(lddmc_refs_mark_task); +} + +VOID_TASK_0(lddmc_refs_init_task) +{ + lddmc_refs_internal_t s = (lddmc_refs_internal_t)malloc(sizeof(struct lddmc_refs_internal)); + s->r_size = 128; + s->r_count = 0; + s->s_size = 128; + s->s_count = 0; + s->results = (BDD*)malloc(sizeof(BDD) * 128); + s->spawns = (Task**)malloc(sizeof(Task*) * 128); + SET_THREAD_LOCAL(lddmc_refs_key, s); +} + +VOID_TASK_0(lddmc_refs_init) +{ + INIT_THREAD_LOCAL(lddmc_refs_key); + TOGETHER(lddmc_refs_init_task); + sylvan_gc_add_mark(10, TASK(lddmc_refs_mark)); +} + +/** + * Initialize and quit functions + */ + +static void +lddmc_quit() +{ + refs_free(&mdd_refs); +} + +void +sylvan_init_ldd() +{ + sylvan_register_quit(lddmc_quit); + sylvan_gc_add_mark(10, TASK(lddmc_gc_mark_external_refs)); + + // Sanity check + if (sizeof(struct mddnode) != 16) { + fprintf(stderr, "Invalid size of mdd nodes: %ld\n", sizeof(struct mddnode)); + exit(1); + } + + 
refs_create(&mdd_refs, 1024); + + LACE_ME; + CALL(lddmc_refs_init); +} + +/** + * Primitives + */ + +MDD +lddmc_makenode(uint32_t value, MDD ifeq, MDD ifneq) +{ + if (ifeq == lddmc_false) return ifneq; + + // check if correct (should be false, or next in value) + assert(ifneq != lddmc_true); + if (ifneq != lddmc_false) assert(value < mddnode_getvalue(GETNODE(ifneq))); + + struct mddnode n; + mddnode_make(&n, value, ifneq, ifeq); + + int created; + uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + lddmc_refs_push(ifeq); + lddmc_refs_push(ifneq); + LACE_ME; + sylvan_gc(); + lddmc_refs_pop(1); + + index = llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + fprintf(stderr, "MDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); + exit(1); + } + } + + if (created) sylvan_stats_count(LDD_NODES_CREATED); + else sylvan_stats_count(LDD_NODES_REUSED); + + return (MDD)index; +} + +MDD +lddmc_make_copynode(MDD ifeq, MDD ifneq) +{ + struct mddnode n; + mddnode_makecopy(&n, ifneq, ifeq); + + int created; + uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + lddmc_refs_push(ifeq); + lddmc_refs_push(ifneq); + LACE_ME; + sylvan_gc(); + lddmc_refs_pop(1); + + index = llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + fprintf(stderr, "MDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); + exit(1); + } + } + + if (created) sylvan_stats_count(LDD_NODES_CREATED); + else sylvan_stats_count(LDD_NODES_REUSED); + + return (MDD)index; +} + +MDD +lddmc_extendnode(MDD mdd, uint32_t value, MDD ifeq) +{ + if (mdd <= lddmc_true) return lddmc_makenode(value, ifeq, mdd); + + mddnode_t n = GETNODE(mdd); + if (mddnode_getcopy(n)) return lddmc_make_copynode(mddnode_getdown(n), lddmc_extendnode(mddnode_getright(n), value, ifeq)); + uint32_t n_value = mddnode_getvalue(n); + if (n_value < value) return lddmc_makenode(n_value, mddnode_getdown(n), lddmc_extendnode(mddnode_getright(n), value, ifeq)); + if (n_value == value) return lddmc_makenode(value, ifeq, mddnode_getright(n)); + /* (n_value > value) */ return lddmc_makenode(value, ifeq, mdd); +} + +uint32_t +lddmc_getvalue(MDD mdd) +{ + return mddnode_getvalue(GETNODE(mdd)); +} + +MDD +lddmc_getdown(MDD mdd) +{ + return mddnode_getdown(GETNODE(mdd)); +} + +MDD +lddmc_getright(MDD mdd) +{ + return mddnode_getright(GETNODE(mdd)); +} + +MDD +lddmc_follow(MDD mdd, uint32_t value) +{ + for (;;) { + if (mdd <= lddmc_true) return mdd; + const mddnode_t n = GETNODE(mdd); + if (!mddnode_getcopy(n)) { + const uint32_t v = mddnode_getvalue(n); + if (v == value) return mddnode_getdown(n); + if (v > value) return lddmc_false; + } + mdd = mddnode_getright(n); + } +} + +int +lddmc_iscopy(MDD mdd) +{ + if (mdd <= lddmc_true) return 0; + + mddnode_t n = GETNODE(mdd); + return mddnode_getcopy(n) ? 
1 : 0; +} + +MDD +lddmc_followcopy(MDD mdd) +{ + if (mdd <= lddmc_true) return lddmc_false; + + mddnode_t n = GETNODE(mdd); + if (mddnode_getcopy(n)) return mddnode_getdown(n); + else return lddmc_false; +} + +/** + * MDD operations + */ +static inline int +match_ldds(MDD *one, MDD *two) +{ + MDD m1 = *one, m2 = *two; + if (m1 == lddmc_false || m2 == lddmc_false) return 0; + mddnode_t n1 = GETNODE(m1), n2 = GETNODE(m2); + uint32_t v1 = mddnode_getvalue(n1), v2 = mddnode_getvalue(n2); + while (v1 != v2) { + if (v1 < v2) { + m1 = mddnode_getright(n1); + if (m1 == lddmc_false) return 0; + n1 = GETNODE(m1); + v1 = mddnode_getvalue(n1); + } else if (v1 > v2) { + m2 = mddnode_getright(n2); + if (m2 == lddmc_false) return 0; + n2 = GETNODE(m2); + v2 = mddnode_getvalue(n2); + } + } + *one = m1; + *two = m2; + return 1; +} + +TASK_IMPL_2(MDD, lddmc_union, MDD, a, MDD, b) +{ + /* Terminal cases */ + if (a == b) return a; + if (a == lddmc_false) return b; + if (b == lddmc_false) return a; + assert(a != lddmc_true && b != lddmc_true); // expecting same length + + /* Test gc */ + sylvan_gc_test(); + + sylvan_stats_count(LDD_UNION); + + /* Improve cache behavior */ + if (a < b) { MDD tmp=b; b=a; a=tmp; } + + /* Access cache */ + MDD result; + if (cache_get3(CACHE_MDD_UNION, a, b, 0, &result)) { + sylvan_stats_count(LDD_UNION_CACHED); + return result; + } + + /* Get nodes */ + mddnode_t na = GETNODE(a); + mddnode_t nb = GETNODE(b); + + const int na_copy = mddnode_getcopy(na) ? 1 : 0; + const int nb_copy = mddnode_getcopy(nb) ? 1 : 0; + const uint32_t na_value = mddnode_getvalue(na); + const uint32_t nb_value = mddnode_getvalue(nb); + + /* Perform recursive calculation */ + if (na_copy && nb_copy) { + lddmc_refs_spawn(SPAWN(lddmc_union, mddnode_getdown(na), mddnode_getdown(nb))); + MDD right = CALL(lddmc_union, mddnode_getright(na), mddnode_getright(nb)); + lddmc_refs_push(right); + MDD down = lddmc_refs_sync(SYNC(lddmc_union)); + lddmc_refs_pop(1); + result = lddmc_make_copynode(down, right); + } else if (na_copy) { + MDD right = CALL(lddmc_union, mddnode_getright(na), b); + result = lddmc_make_copynode(mddnode_getdown(na), right); + } else if (nb_copy) { + MDD right = CALL(lddmc_union, a, mddnode_getright(nb)); + result = lddmc_make_copynode(mddnode_getdown(nb), right); + } else if (na_value < nb_value) { + MDD right = CALL(lddmc_union, mddnode_getright(na), b); + result = lddmc_makenode(na_value, mddnode_getdown(na), right); + } else if (na_value == nb_value) { + lddmc_refs_spawn(SPAWN(lddmc_union, mddnode_getdown(na), mddnode_getdown(nb))); + MDD right = CALL(lddmc_union, mddnode_getright(na), mddnode_getright(nb)); + lddmc_refs_push(right); + MDD down = lddmc_refs_sync(SYNC(lddmc_union)); + lddmc_refs_pop(1); + result = lddmc_makenode(na_value, down, right); + } else /* na_value > nb_value */ { + MDD right = CALL(lddmc_union, a, mddnode_getright(nb)); + result = lddmc_makenode(nb_value, mddnode_getdown(nb), right); + } + + /* Write to cache */ + if (cache_put3(CACHE_MDD_UNION, a, b, 0, result)) sylvan_stats_count(LDD_UNION_CACHEDPUT); + + return result; +} + +TASK_IMPL_2(MDD, lddmc_minus, MDD, a, MDD, b) +{ + /* Terminal cases */ + if (a == b) return lddmc_false; + if (a == lddmc_false) return lddmc_false; + if (b == lddmc_false) return a; + assert(b != lddmc_true); + assert(a != lddmc_true); // Universe is unknown!! // Possibly depth issue? 
+ + /* Test gc */ + sylvan_gc_test(); + + sylvan_stats_count(LDD_MINUS); + + /* Access cache */ + MDD result; + if (cache_get3(CACHE_MDD_MINUS, a, b, 0, &result)) { + sylvan_stats_count(LDD_MINUS_CACHED); + return result; + } + + /* Get nodes */ + mddnode_t na = GETNODE(a); + mddnode_t nb = GETNODE(b); + uint32_t na_value = mddnode_getvalue(na); + uint32_t nb_value = mddnode_getvalue(nb); + + /* Perform recursive calculation */ + if (na_value < nb_value) { + MDD right = CALL(lddmc_minus, mddnode_getright(na), b); + result = lddmc_makenode(na_value, mddnode_getdown(na), right); + } else if (na_value == nb_value) { + lddmc_refs_spawn(SPAWN(lddmc_minus, mddnode_getright(na), mddnode_getright(nb))); + MDD down = CALL(lddmc_minus, mddnode_getdown(na), mddnode_getdown(nb)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_minus)); + lddmc_refs_pop(1); + result = lddmc_makenode(na_value, down, right); + } else /* na_value > nb_value */ { + result = CALL(lddmc_minus, a, mddnode_getright(nb)); + } + + /* Write to cache */ + if (cache_put3(CACHE_MDD_MINUS, a, b, 0, result)) sylvan_stats_count(LDD_MINUS_CACHEDPUT); + + return result; +} + +/* result: a plus b; res2: b minus a */ +TASK_IMPL_3(MDD, lddmc_zip, MDD, a, MDD, b, MDD*, res2) +{ + /* Terminal cases */ + if (a == b) { + *res2 = lddmc_false; + return a; + } + if (a == lddmc_false) { + *res2 = b; + return b; + } + if (b == lddmc_false) { + *res2 = lddmc_false; + return a; + } + + assert(a != lddmc_true && b != lddmc_true); // expecting same length + + /* Test gc */ + sylvan_gc_test(); + + /* Maybe not the ideal way */ + sylvan_stats_count(LDD_ZIP); + + /* Access cache */ + MDD result; + if (cache_get3(CACHE_MDD_UNION, a, b, 0, &result) && + cache_get3(CACHE_MDD_MINUS, b, a, 0, res2)) { + sylvan_stats_count(LDD_ZIP); + return result; + } + + /* Get nodes */ + mddnode_t na = GETNODE(a); + mddnode_t nb = GETNODE(b); + uint32_t na_value = mddnode_getvalue(na); + uint32_t nb_value = mddnode_getvalue(nb); + + /* Perform recursive calculation */ + if (na_value < nb_value) { + MDD right = CALL(lddmc_zip, mddnode_getright(na), b, res2); + result = lddmc_makenode(na_value, mddnode_getdown(na), right); + } else if (na_value == nb_value) { + MDD down2, right2; + lddmc_refs_spawn(SPAWN(lddmc_zip, mddnode_getdown(na), mddnode_getdown(nb), &down2)); + MDD right = CALL(lddmc_zip, mddnode_getright(na), mddnode_getright(nb), &right2); + lddmc_refs_push(right); + lddmc_refs_push(right2); + MDD down = lddmc_refs_sync(SYNC(lddmc_zip)); + lddmc_refs_pop(2); + result = lddmc_makenode(na_value, down, right); + *res2 = lddmc_makenode(na_value, down2, right2); + } else /* na_value > nb_value */ { + MDD right2; + MDD right = CALL(lddmc_zip, a, mddnode_getright(nb), &right2); + result = lddmc_makenode(nb_value, mddnode_getdown(nb), right); + *res2 = lddmc_makenode(nb_value, mddnode_getdown(nb), right2); + } + + /* Write to cache */ + int c1 = cache_put3(CACHE_MDD_UNION, a, b, 0, result); + int c2 = cache_put3(CACHE_MDD_MINUS, b, a, 0, *res2); + if (c1 && c2) sylvan_stats_count(LDD_ZIP_CACHEDPUT); + + return result; +} + +TASK_IMPL_2(MDD, lddmc_intersect, MDD, a, MDD, b) +{ + /* Terminal cases */ + if (a == b) return a; + if (a == lddmc_false || b == lddmc_false) return lddmc_false; + assert(a != lddmc_true && b != lddmc_true); + + /* Test gc */ + sylvan_gc_test(); + + sylvan_stats_count(LDD_INTERSECT); + + /* Get nodes */ + mddnode_t na = GETNODE(a); + mddnode_t nb = GETNODE(b); + uint32_t na_value = mddnode_getvalue(na); + uint32_t nb_value = 
mddnode_getvalue(nb); + + /* Skip nodes if possible */ + while (na_value != nb_value) { + if (na_value < nb_value) { + a = mddnode_getright(na); + if (a == lddmc_false) return lddmc_false; + na = GETNODE(a); + na_value = mddnode_getvalue(na); + } + if (nb_value < na_value) { + b = mddnode_getright(nb); + if (b == lddmc_false) return lddmc_false; + nb = GETNODE(b); + nb_value = mddnode_getvalue(nb); + } + } + + /* Access cache */ + MDD result; + if (cache_get3(CACHE_MDD_INTERSECT, a, b, 0, &result)) { + sylvan_stats_count(LDD_INTERSECT_CACHED); + return result; + } + + /* Perform recursive calculation */ + lddmc_refs_spawn(SPAWN(lddmc_intersect, mddnode_getright(na), mddnode_getright(nb))); + MDD down = CALL(lddmc_intersect, mddnode_getdown(na), mddnode_getdown(nb)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_intersect)); + lddmc_refs_pop(1); + result = lddmc_makenode(na_value, down, right); + + /* Write to cache */ + if (cache_put3(CACHE_MDD_INTERSECT, a, b, 0, result)) sylvan_stats_count(LDD_INTERSECT_CACHEDPUT); + + return result; +} + +// proj: -1 (rest 0), 0 (no match), 1 (match) +TASK_IMPL_3(MDD, lddmc_match, MDD, a, MDD, b, MDD, proj) +{ + if (a == b) return a; + if (a == lddmc_false || b == lddmc_false) return lddmc_false; + + mddnode_t p_node = GETNODE(proj); + uint32_t p_val = mddnode_getvalue(p_node); + if (p_val == (uint32_t)-1) return a; + + assert(a != lddmc_true); + if (p_val == 1) assert(b != lddmc_true); + + /* Test gc */ + sylvan_gc_test(); + + /* Skip nodes if possible */ + if (p_val == 1) { + if (!match_ldds(&a, &b)) return lddmc_false; + } + + sylvan_stats_count(LDD_MATCH); + + /* Access cache */ + MDD result; + if (cache_get3(CACHE_MDD_MATCH, a, b, proj, &result)) { + sylvan_stats_count(LDD_MATCH_CACHED); + return result; + } + + /* Perform recursive calculation */ + mddnode_t na = GETNODE(a); + MDD down; + if (p_val == 1) { + mddnode_t nb = GETNODE(b); + /* right = */ lddmc_refs_spawn(SPAWN(lddmc_match, mddnode_getright(na), mddnode_getright(nb), proj)); + down = CALL(lddmc_match, mddnode_getdown(na), mddnode_getdown(nb), mddnode_getdown(p_node)); + } else { + /* right = */ lddmc_refs_spawn(SPAWN(lddmc_match, mddnode_getright(na), b, proj)); + down = CALL(lddmc_match, mddnode_getdown(na), b, mddnode_getdown(p_node)); + } + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_match)); + lddmc_refs_pop(1); + result = lddmc_makenode(mddnode_getvalue(na), down, right); + + /* Write to cache */ + if (cache_put3(CACHE_MDD_MATCH, a, b, proj, result)) sylvan_stats_count(LDD_MATCH_CACHEDPUT); + + return result; +} + +TASK_4(MDD, lddmc_relprod_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj) +{ + return lddmc_makenode(val, CALL(lddmc_relprod, set, rel, proj), lddmc_false); +} + +// meta: -1 (end; rest not in rel), 0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write) +TASK_IMPL_3(MDD, lddmc_relprod, MDD, set, MDD, rel, MDD, meta) +{ + if (set == lddmc_false) return lddmc_false; + if (rel == lddmc_false) return lddmc_false; + + mddnode_t n_meta = GETNODE(meta); + uint32_t m_val = mddnode_getvalue(n_meta); + if (m_val == (uint32_t)-1) return set; + if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true); + + /* Skip nodes if possible */ + if (!mddnode_getcopy(GETNODE(rel))) { + if (m_val == 1 || m_val == 3) { + if (!match_ldds(&set, &rel)) return lddmc_false; + } + } + + /* Test gc */ + sylvan_gc_test(); + + sylvan_stats_count(LDD_RELPROD); + + /* Access cache */ + MDD result; + if (cache_get3(CACHE_MDD_RELPROD, set, rel, 
meta, &result)) { + sylvan_stats_count(LDD_RELPROD_CACHED); + return result; + } + + mddnode_t n_set = GETNODE(set); + mddnode_t n_rel = GETNODE(rel); + + /* Recursive operations */ + if (m_val == 0) { // not in rel + lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), rel, meta)); + MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_relprod)); + lddmc_refs_pop(1); + result = lddmc_makenode(mddnode_getvalue(n_set), down, right); + } else if (m_val == 1) { // read + // read layer: if not copy, then set&rel are already matched + lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getright(n_rel), meta)); // spawn next read in list + + // for this read, either it is copy ('for all') or it is normal match + if (mddnode_getcopy(n_rel)) { + // spawn for every value to copy (set) + int count = 0; + for (;;) { + // stay same level of set (for write) + lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta))); + count++; + set = mddnode_getright(n_set); + if (set == lddmc_false) break; + n_set = GETNODE(set); + } + + // sync+union (one by one) + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + } else { + // stay same level of set (for write) + result = CALL(lddmc_relprod, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta)); + } + + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); // sync next read in list + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } else if (m_val == 3) { // only-read + if (mddnode_getcopy(n_rel)) { + // copy on read ('for any value') + // result = union(result_with_copy, result_without_copy) + lddmc_refs_spawn(SPAWN(lddmc_relprod, set, mddnode_getright(n_rel), meta)); // spawn without_copy + + // spawn for every value to copy (set) + int count = 0; + for (;;) { + lddmc_refs_spawn(SPAWN(lddmc_relprod_help, mddnode_getvalue(n_set), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta))); + count++; + set = mddnode_getright(n_set); + if (set == lddmc_false) break; + n_set = GETNODE(set); + } + + // sync+union (one by one) + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_help)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + + // add result from without_copy + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } else { + // only-read, without copy + lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), mddnode_getright(n_rel), meta)); + MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_relprod)); + lddmc_refs_pop(1); + result = lddmc_makenode(mddnode_getvalue(n_set), down, right); + } + } else if (m_val == 2 || m_val == 4) { // write, only-write + if (m_val == 4) { + // only-write, so we need to include 'for all variables' + lddmc_refs_spawn(SPAWN(lddmc_relprod, mddnode_getright(n_set), rel, meta)); // next in set + } + + // spawn for every value to write (rel) + int count = 0; + for (;;) 
{ + uint32_t value; + if (mddnode_getcopy(n_rel)) value = mddnode_getvalue(n_set); + else value = mddnode_getvalue(n_rel); + lddmc_refs_spawn(SPAWN(lddmc_relprod_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta))); + count++; + rel = mddnode_getright(n_rel); + if (rel == lddmc_false) break; + n_rel = GETNODE(rel); + } + + // sync+union (one by one) + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_help)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + + if (m_val == 4) { + // sync+union with other variables + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + } + + /* Write to cache */ + if (cache_put3(CACHE_MDD_RELPROD, set, rel, meta, result)) sylvan_stats_count(LDD_RELPROD_CACHEDPUT); + + return result; +} + +TASK_5(MDD, lddmc_relprod_union_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj, MDD, un) +{ + return lddmc_makenode(val, CALL(lddmc_relprod_union, set, rel, proj, un), lddmc_false); +} + +// meta: -1 (end; rest not in rel), 0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write) +TASK_IMPL_4(MDD, lddmc_relprod_union, MDD, set, MDD, rel, MDD, meta, MDD, un) +{ + if (set == lddmc_false) return un; + if (rel == lddmc_false) return un; + if (un == lddmc_false) return CALL(lddmc_relprod, set, rel, meta); + + mddnode_t n_meta = GETNODE(meta); + uint32_t m_val = mddnode_getvalue(n_meta); + if (m_val == (uint32_t)-1) return CALL(lddmc_union, set, un); + + // check depths (this triggers on logic error) + if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true && un != lddmc_true); + + /* Skip nodes if possible */ + if (!mddnode_getcopy(GETNODE(rel))) { + if (m_val == 1 || m_val == 3) { + if (!match_ldds(&set, &rel)) return un; + } + } + + mddnode_t n_set = GETNODE(set); + mddnode_t n_rel = GETNODE(rel); + mddnode_t n_un = GETNODE(un); + + // in some cases, we know un.value < result.value + if (m_val == 0 || m_val == 3) { + // if m_val == 0, no read/write, then un.value < set.value? + // if m_val == 3, only read (write same), then un.value < set.value? 
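/* Fast path: when the head value of 'un' is below anything the relational product can contribute at this level, that un node is copied to the output unchanged and only its right sibling takes part in the recursive call. */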
+ uint32_t set_value = mddnode_getvalue(n_set); + uint32_t un_value = mddnode_getvalue(n_un); + if (un_value < set_value) { + MDD right = CALL(lddmc_relprod_union, set, rel, meta, mddnode_getright(n_un)); + if (right == mddnode_getright(n_un)) return un; + else return lddmc_makenode(mddnode_getvalue(n_un), mddnode_getdown(n_un), right); + } + } else if (m_val == 2 || m_val == 4) { + // if we write, then we only know for certain that un.value < result.value if + // the root of rel is not a copy node + if (!mddnode_getcopy(n_rel)) { + uint32_t rel_value = mddnode_getvalue(n_rel); + uint32_t un_value = mddnode_getvalue(n_un); + if (un_value < rel_value) { + MDD right = CALL(lddmc_relprod_union, set, rel, meta, mddnode_getright(n_un)); + if (right == mddnode_getright(n_un)) return un; + else return lddmc_makenode(mddnode_getvalue(n_un), mddnode_getdown(n_un), right); + } + } + } + + /* Test gc */ + sylvan_gc_test(); + + sylvan_stats_count(LDD_RELPROD_UNION); + + /* Access cache */ + MDD result; + if (cache_get4(CACHE_MDD_RELPROD, set, rel, meta, un, &result)) { + sylvan_stats_count(LDD_RELPROD_UNION_CACHED); + return result; + } + + /* Recursive operations */ + if (m_val == 0) { // not in rel + uint32_t set_value = mddnode_getvalue(n_set); + uint32_t un_value = mddnode_getvalue(n_un); + // set_value > un_value already checked above + if (set_value < un_value) { + lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, un)); + // going down, we don't need _union, since un does not contain this subtree + MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union)); + lddmc_refs_pop(1); + if (down == lddmc_false) result = right; + else result = lddmc_makenode(mddnode_getvalue(n_set), down, right); + } else /* set_value == un_value */ { + lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, mddnode_getright(n_un))); + MDD down = CALL(lddmc_relprod_union, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta), mddnode_getdown(n_un)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union)); + lddmc_refs_pop(1); + if (right == mddnode_getright(n_un) && down == mddnode_getdown(n_un)) result = un; + else result = lddmc_makenode(mddnode_getvalue(n_set), down, right); + } + } else if (m_val == 1) { // read + // read layer: if not copy, then set&rel are already matched + lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getright(n_rel), meta, un)); // spawn next read in list + + // for this read, either it is copy ('for all') or it is normal match + if (mddnode_getcopy(n_rel)) { + // spawn for every value in set (copy = for all) + int count = 0; + for (;;) { + // stay same level of set and un (for write) + lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), un)); + count++; + set = mddnode_getright(n_set); + if (set == lddmc_false) break; + n_set = GETNODE(set); + } + + // sync+union (one by one) + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + } else { + // stay same level of set and un (for write) + result = CALL(lddmc_relprod_union, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), un); + } + + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); // sync 
next read in list + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } else if (m_val == 3) { // only-read + // un < set already checked above + if (mddnode_getcopy(n_rel)) { + // copy on read ('for any value') + // result = union(result_with_copy, result_without_copy) + lddmc_refs_spawn(SPAWN(lddmc_relprod_union, set, mddnode_getright(n_rel), meta, un)); // spawn without_copy + + // spawn for every value to copy (set) + int count = 0; + result = lddmc_false; + for (;;) { + uint32_t set_value = mddnode_getvalue(n_set); + uint32_t un_value = mddnode_getvalue(n_un); + if (un_value < set_value) { + // this is a bit tricky + // the result of this will simply be "un_value, mddnode_getdown(n_un), false" which is intended + lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, un_value, lddmc_false, lddmc_false, mddnode_getdown(n_meta), mddnode_getdown(n_un))); + count++; + un = mddnode_getright(n_un); + if (un == lddmc_false) { + result = CALL(lddmc_relprod, set, rel, meta); + break; + } + n_un = GETNODE(un); + } else if (un_value > set_value) { + // tricky again. the result of this is a normal relprod + lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, set_value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), lddmc_false)); + count++; + set = mddnode_getright(n_set); + if (set == lddmc_false) { + result = un; + break; + } + n_set = GETNODE(set); + } else /* un_value == set_value */ { + lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, set_value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un))); + count++; + set = mddnode_getright(n_set); + un = mddnode_getright(n_un); + if (set == lddmc_false) { + result = un; + break; + } else if (un == lddmc_false) { + result = CALL(lddmc_relprod, set, rel, meta); + break; + } + n_set = GETNODE(set); + n_un = GETNODE(un); + } + } + + // sync+union (one by one) + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union_help)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + + // add result from without_copy + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } else { + // only-read, not a copy node + uint32_t set_value = mddnode_getvalue(n_set); + uint32_t un_value = mddnode_getvalue(n_un); + + // already did un_value < set_value + if (un_value > set_value) { + lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), mddnode_getright(n_rel), meta, un)); + MDD down = CALL(lddmc_relprod, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union)); + lddmc_refs_pop(1); + result = lddmc_makenode(mddnode_getvalue(n_set), down, right); + } else /* un_value == set_value */ { + lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), mddnode_getright(n_rel), meta, mddnode_getright(n_un))); + MDD down = CALL(lddmc_relprod_union, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_relprod_union)); + lddmc_refs_pop(1); + result = lddmc_makenode(mddnode_getvalue(n_set), down, right); + } + } + } else if (m_val == 2 || m_val == 4) { // write, only-write + if (m_val == 4) { + // only-write, so we need to 
include 'for all variables' + lddmc_refs_spawn(SPAWN(lddmc_relprod_union, mddnode_getright(n_set), rel, meta, un)); // next in set + } + + // spawn for every value to write (rel) + int count = 0; + for (;;) { + uint32_t value; + if (mddnode_getcopy(n_rel)) value = mddnode_getvalue(n_set); + else value = mddnode_getvalue(n_rel); + uint32_t un_value = mddnode_getvalue(n_un); + if (un_value < value) { + // the result of this will simply be "un_value, mddnode_getdown(n_un), false" which is intended + lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, un_value, lddmc_false, lddmc_false, mddnode_getdown(n_meta), mddnode_getdown(n_un))); + count++; + un = mddnode_getright(n_un); + if (un == lddmc_false) { + result = CALL(lddmc_relprod, set, rel, meta); + break; + } + n_un = GETNODE(un); + } else if (un_value > value) { + lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), lddmc_false)); + count++; + rel = mddnode_getright(n_rel); + if (rel == lddmc_false) { + result = un; + break; + } + n_rel = GETNODE(rel); + } else /* un_value == value */ { + lddmc_refs_spawn(SPAWN(lddmc_relprod_union_help, value, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_un))); + count++; + rel = mddnode_getright(n_rel); + un = mddnode_getright(n_un); + if (rel == lddmc_false) { + result = un; + break; + } else if (un == lddmc_false) { + result = CALL(lddmc_relprod, set, rel, meta); + break; + } + n_rel = GETNODE(rel); + n_un = GETNODE(un); + } + } + + // sync+union (one by one) + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union_help)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + + if (m_val == 4) { + // sync+union with other variables + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprod_union)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + } + + /* Write to cache */ + if (cache_put4(CACHE_MDD_RELPROD, set, rel, meta, un, result)) sylvan_stats_count(LDD_RELPROD_UNION_CACHEDPUT); + + return result; +} + +TASK_5(MDD, lddmc_relprev_help, uint32_t, val, MDD, set, MDD, rel, MDD, proj, MDD, uni) +{ + return lddmc_makenode(val, CALL(lddmc_relprev, set, rel, proj, uni), lddmc_false); +} + +/** + * Calculate all predecessors to a in uni according to rel[meta] + * follows the same semantics as relprod + * i.e. 
0 (not in rel), 1 (read), 2 (write), 3 (only-read), 4 (only-write), -1 (end; rest=0) + */ +TASK_IMPL_4(MDD, lddmc_relprev, MDD, set, MDD, rel, MDD, meta, MDD, uni) +{ + if (set == lddmc_false) return lddmc_false; + if (rel == lddmc_false) return lddmc_false; + if (uni == lddmc_false) return lddmc_false; + + mddnode_t n_meta = GETNODE(meta); + uint32_t m_val = mddnode_getvalue(n_meta); + if (m_val == (uint32_t)-1) { + if (set == uni) return set; + else return lddmc_intersect(set, uni); + } + + if (m_val != 0) assert(set != lddmc_true && rel != lddmc_true && uni != lddmc_true); + + /* Skip nodes if possible */ + if (m_val == 0) { + // not in rel: match set and uni ('intersect') + if (!match_ldds(&set, &uni)) return lddmc_false; + } else if (mddnode_getcopy(GETNODE(rel))) { + // read+copy: no matching (pre is everything in uni) + // write+copy: no matching (match after split: set and uni) + // only-read+copy: match set and uni + // only-write+copy: no matching (match after split: set and uni) + if (m_val == 3) { + if (!match_ldds(&set, &uni)) return lddmc_false; + } + } else if (m_val == 1) { + // read: match uni and rel + if (!match_ldds(&uni, &rel)) return lddmc_false; + } else if (m_val == 2) { + // write: match set and rel + if (!match_ldds(&set, &rel)) return lddmc_false; + } else if (m_val == 3) { + // only-read: match uni and set and rel + mddnode_t n_set = GETNODE(set); + mddnode_t n_rel = GETNODE(rel); + mddnode_t n_uni = GETNODE(uni); + uint32_t n_set_value = mddnode_getvalue(n_set); + uint32_t n_rel_value = mddnode_getvalue(n_rel); + uint32_t n_uni_value = mddnode_getvalue(n_uni); + while (n_uni_value != n_rel_value || n_rel_value != n_set_value) { + if (n_uni_value < n_rel_value || n_uni_value < n_set_value) { + uni = mddnode_getright(n_uni); + if (uni == lddmc_false) return lddmc_false; + n_uni = GETNODE(uni); + n_uni_value = mddnode_getvalue(n_uni); + } + if (n_set_value < n_rel_value || n_set_value < n_uni_value) { + set = mddnode_getright(n_set); + if (set == lddmc_false) return lddmc_false; + n_set = GETNODE(set); + n_set_value = mddnode_getvalue(n_set); + } + if (n_rel_value < n_set_value || n_rel_value < n_uni_value) { + rel = mddnode_getright(n_rel); + if (rel == lddmc_false) return lddmc_false; + n_rel = GETNODE(rel); + n_rel_value = mddnode_getvalue(n_rel); + } + } + } else if (m_val == 4) { + // only-write: match set and rel (then use whole universe) + if (!match_ldds(&set, &rel)) return lddmc_false; + } + + /* Test gc */ + sylvan_gc_test(); + + sylvan_stats_count(LDD_RELPREV); + + /* Access cache */ + MDD result; + if (cache_get4(CACHE_MDD_RELPREV, set, rel, meta, uni, &result)) { + sylvan_stats_count(LDD_RELPREV_CACHED); + return result; + } + + mddnode_t n_set = GETNODE(set); + mddnode_t n_rel = GETNODE(rel); + mddnode_t n_uni = GETNODE(uni); + + /* Recursive operations */ + if (m_val == 0) { // not in rel + // m_val == 0 : not in rel (intersection set and universe) + lddmc_refs_spawn(SPAWN(lddmc_relprev, mddnode_getright(n_set), rel, meta, mddnode_getright(n_uni))); + MDD down = CALL(lddmc_relprev, mddnode_getdown(n_set), rel, mddnode_getdown(n_meta), mddnode_getdown(n_uni)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_relprev)); + lddmc_refs_pop(1); + result = lddmc_makenode(mddnode_getvalue(n_set), down, right); + } else if (m_val == 1) { // read level + // result value is in case of copy: everything in uni! + // result value is in case of not-copy: match uni and rel! 
+ lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); // next in rel + if (mddnode_getcopy(n_rel)) { + // result is everything in uni + // spawn for every value to have been read (uni) + int count = 0; + for (;;) { + lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), uni)); + count++; + uni = mddnode_getright(n_uni); + if (uni == lddmc_false) break; + n_uni = GETNODE(uni); + } + + // sync+union (one by one) + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + } else { + // already matched + MDD down = CALL(lddmc_relprev, set, mddnode_getdown(n_rel), mddnode_getdown(n_meta), uni); + result = lddmc_makenode(mddnode_getvalue(n_uni), down, lddmc_false); + } + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } else if (m_val == 3) { // only-read level + // result value is in case of copy: match set and uni! (already done first match) + // result value is in case of not-copy: match set and uni and rel! + lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); // next in rel + if (mddnode_getcopy(n_rel)) { + // spawn for every matching set+uni + int count = 0; + for (;;) { + lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni))); + count++; + uni = mddnode_getright(n_uni); + if (!match_ldds(&set, &uni)) break; + n_set = GETNODE(set); + n_uni = GETNODE(uni); + } + + // sync+union (one by one) + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + } else { + // already matched + MDD down = CALL(lddmc_relprev, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)); + result = lddmc_makenode(mddnode_getvalue(n_uni), down, lddmc_false); + } + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } else if (m_val == 2) { // write level + // note: the read level has already matched the uni that was read. + // write+copy: only for the one set equal to uni... 
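        // (For write+copy, the value that was written is taken from uni, so the
        // lddmc_follow(set, ...) call below selects exactly the one matching branch of set.)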
+ // write: match set and rel (already done) + lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); + if (mddnode_getcopy(n_rel)) { + MDD down = lddmc_follow(set, mddnode_getvalue(n_uni)); + if (down != lddmc_false) { + result = CALL(lddmc_relprev, down, mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)); + } else { + result = lddmc_false; + } + } else { + result = CALL(lddmc_relprev, mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni)); + } + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } else if (m_val == 4) { // only-write level + // only-write+copy: match set and uni after spawn + // only-write: match set and rel (already done) + lddmc_refs_spawn(SPAWN(lddmc_relprev, set, mddnode_getright(n_rel), meta, uni)); + if (mddnode_getcopy(n_rel)) { + // spawn for every matching set+uni + int count = 0; + for (;;) { + if (!match_ldds(&set, &uni)) break; + n_set = GETNODE(set); + n_uni = GETNODE(uni); + lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni))); + count++; + uni = mddnode_getright(n_uni); + } + + // sync+union (one by one) + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + } else { + // spawn for every value in universe!! + int count = 0; + for (;;) { + lddmc_refs_spawn(SPAWN(lddmc_relprev_help, mddnode_getvalue(n_uni), mddnode_getdown(n_set), mddnode_getdown(n_rel), mddnode_getdown(n_meta), mddnode_getdown(n_uni))); + count++; + uni = mddnode_getright(n_uni); + if (uni == lddmc_false) break; + n_uni = GETNODE(uni); + } + + // sync+union (one by one) + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev_help)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + } + lddmc_refs_push(result); + MDD result2 = lddmc_refs_sync(SYNC(lddmc_relprev)); + lddmc_refs_push(result2); + result = CALL(lddmc_union, result, result2); + lddmc_refs_pop(2); + } + + /* Write to cache */ + if (cache_put4(CACHE_MDD_RELPREV, set, rel, meta, uni, result)) sylvan_stats_count(LDD_RELPREV_CACHEDPUT); + + return result; +} + +// Same 'proj' as project. 
So: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep) +TASK_IMPL_4(MDD, lddmc_join, MDD, a, MDD, b, MDD, a_proj, MDD, b_proj) +{ + if (a == lddmc_false || b == lddmc_false) return lddmc_false; + + /* Test gc */ + sylvan_gc_test(); + + mddnode_t n_a_proj = GETNODE(a_proj); + mddnode_t n_b_proj = GETNODE(b_proj); + uint32_t a_proj_val = mddnode_getvalue(n_a_proj); + uint32_t b_proj_val = mddnode_getvalue(n_b_proj); + + while (a_proj_val == 0 && b_proj_val == 0) { + a_proj = mddnode_getdown(n_a_proj); + b_proj = mddnode_getdown(n_b_proj); + n_a_proj = GETNODE(a_proj); + n_b_proj = GETNODE(b_proj); + a_proj_val = mddnode_getvalue(n_a_proj); + b_proj_val = mddnode_getvalue(n_b_proj); + } + + if (a_proj_val == (uint32_t)-2) return b; // no a left + if (b_proj_val == (uint32_t)-2) return a; // no b left + if (a_proj_val == (uint32_t)-1 && b_proj_val == (uint32_t)-1) return CALL(lddmc_intersect, a, b); + + // At this point, only proj_val {-1, 0, 1}; max one with -1; max one with 0. + const int keep_a = a_proj_val != 0; + const int keep_b = b_proj_val != 0; + + if (keep_a && keep_b) { + // If both 'keep', then match values + if (!match_ldds(&a, &b)) return lddmc_false; + } + + sylvan_stats_count(LDD_JOIN); + + /* Access cache */ + MDD result; + if (cache_get4(CACHE_MDD_JOIN, a, b, a_proj, b_proj, &result)) { + sylvan_stats_count(LDD_JOIN_CACHED); + return result; + } + + /* Perform recursive calculation */ + const mddnode_t na = GETNODE(a); + const mddnode_t nb = GETNODE(b); + uint32_t val; + MDD down; + + // Make copies (for cache) + MDD _a_proj = a_proj, _b_proj = b_proj; + if (keep_a) { + if (keep_b) { + val = mddnode_getvalue(nb); + lddmc_refs_spawn(SPAWN(lddmc_join, mddnode_getright(na), mddnode_getright(nb), a_proj, b_proj)); + if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj); + if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj); + down = CALL(lddmc_join, mddnode_getdown(na), mddnode_getdown(nb), a_proj, b_proj); + } else { + val = mddnode_getvalue(na); + lddmc_refs_spawn(SPAWN(lddmc_join, mddnode_getright(na), b, a_proj, b_proj)); + if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj); + if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj); + down = CALL(lddmc_join, mddnode_getdown(na), b, a_proj, b_proj); + } + } else { + val = mddnode_getvalue(nb); + lddmc_refs_spawn(SPAWN(lddmc_join, a, mddnode_getright(nb), a_proj, b_proj)); + if (a_proj_val != (uint32_t)-1) a_proj = mddnode_getdown(n_a_proj); + if (b_proj_val != (uint32_t)-1) b_proj = mddnode_getdown(n_b_proj); + down = CALL(lddmc_join, a, mddnode_getdown(nb), a_proj, b_proj); + } + + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_join)); + lddmc_refs_pop(1); + result = lddmc_makenode(val, down, right); + + /* Write to cache */ + if (cache_put4(CACHE_MDD_JOIN, a, b, _a_proj, _b_proj, result)) sylvan_stats_count(LDD_JOIN_CACHEDPUT); + + return result; +} + +// so: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep) +TASK_IMPL_2(MDD, lddmc_project, const MDD, mdd, const MDD, proj) +{ + if (mdd == lddmc_false) return lddmc_false; // projection of empty is empty + if (mdd == lddmc_true) return lddmc_true; // projection of universe is universe... + + mddnode_t p_node = GETNODE(proj); + uint32_t p_val = mddnode_getvalue(p_node); + if (p_val == (uint32_t)-1) return mdd; + if (p_val == (uint32_t)-2) return lddmc_true; // because we always end with true. 
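    // Hypothetical illustration of the projection semantics (values invented for this note):
    //   set  = { <1,2,3>, <1,4,5> }   e.g. lddmc_cube((uint32_t[]){1,2,3}, 3) unioned with the second tuple
    //   proj = levels 1, 0, -1        keep level 0, quantify level 1, keep the rest
    //   lddmc_project(set, proj) then yields { <1,3>, <1,5> }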
+ + sylvan_gc_test(); + + sylvan_stats_count(LDD_PROJECT); + + MDD result; + if (cache_get3(CACHE_MDD_PROJECT, mdd, proj, 0, &result)) { + sylvan_stats_count(LDD_PROJECT_CACHED); + return result; + } + + mddnode_t n = GETNODE(mdd); + + if (p_val == 1) { // keep + lddmc_refs_spawn(SPAWN(lddmc_project, mddnode_getright(n), proj)); + MDD down = CALL(lddmc_project, mddnode_getdown(n), mddnode_getdown(p_node)); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_project)); + lddmc_refs_pop(1); + result = lddmc_makenode(mddnode_getvalue(n), down, right); + } else { // quantify + if (mddnode_getdown(n) == lddmc_true) { // assume lowest level + result = lddmc_true; + } else { + int count = 0; + MDD p_down = mddnode_getdown(p_node), _mdd=mdd; + while (1) { + lddmc_refs_spawn(SPAWN(lddmc_project, mddnode_getdown(n), p_down)); + count++; + _mdd = mddnode_getright(n); + assert(_mdd != lddmc_true); + if (_mdd == lddmc_false) break; + n = GETNODE(_mdd); + } + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD down = lddmc_refs_sync(SYNC(lddmc_project)); + lddmc_refs_push(down); + result = CALL(lddmc_union, result, down); + lddmc_refs_pop(2); + } + } + } + + if (cache_put3(CACHE_MDD_PROJECT, mdd, proj, 0, result)) sylvan_stats_count(LDD_PROJECT_CACHEDPUT); + + return result; +} + +// so: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep) +TASK_IMPL_3(MDD, lddmc_project_minus, const MDD, mdd, const MDD, proj, MDD, avoid) +{ + // This implementation assumed "avoid" has correct depth + if (avoid == lddmc_true) return lddmc_false; + if (mdd == avoid) return lddmc_false; + if (mdd == lddmc_false) return lddmc_false; // projection of empty is empty + if (mdd == lddmc_true) return lddmc_true; // avoid != lddmc_true + + mddnode_t p_node = GETNODE(proj); + uint32_t p_val = mddnode_getvalue(p_node); + if (p_val == (uint32_t)-1) return lddmc_minus(mdd, avoid); + if (p_val == (uint32_t)-2) return lddmc_true; + + sylvan_gc_test(); + + sylvan_stats_count(LDD_PROJECT_MINUS); + + MDD result; + if (cache_get3(CACHE_MDD_PROJECT, mdd, proj, avoid, &result)) { + sylvan_stats_count(LDD_PROJECT_MINUS_CACHED); + return result; + } + + mddnode_t n = GETNODE(mdd); + + if (p_val == 1) { // keep + // move 'avoid' until it matches + uint32_t val = mddnode_getvalue(n); + MDD a_down = lddmc_false; + while (avoid != lddmc_false) { + mddnode_t a_node = GETNODE(avoid); + uint32_t a_val = mddnode_getvalue(a_node); + if (a_val > val) { + break; + } else if (a_val == val) { + a_down = mddnode_getdown(a_node); + break; + } + avoid = mddnode_getright(a_node); + } + lddmc_refs_spawn(SPAWN(lddmc_project_minus, mddnode_getright(n), proj, avoid)); + MDD down = CALL(lddmc_project_minus, mddnode_getdown(n), mddnode_getdown(p_node), a_down); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_project_minus)); + lddmc_refs_pop(1); + result = lddmc_makenode(val, down, right); + } else { // quantify + if (mddnode_getdown(n) == lddmc_true) { // assume lowest level + result = lddmc_true; + } else { + int count = 0; + MDD p_down = mddnode_getdown(p_node), _mdd=mdd; + while (1) { + lddmc_refs_spawn(SPAWN(lddmc_project_minus, mddnode_getdown(n), p_down, avoid)); + count++; + _mdd = mddnode_getright(n); + assert(_mdd != lddmc_true); + if (_mdd == lddmc_false) break; + n = GETNODE(_mdd); + } + result = lddmc_false; + while (count--) { + lddmc_refs_push(result); + MDD down = lddmc_refs_sync(SYNC(lddmc_project_minus)); + lddmc_refs_push(down); + result = CALL(lddmc_union, result, 
down); + lddmc_refs_pop(2); + } + } + } + + if (cache_put3(CACHE_MDD_PROJECT, mdd, proj, avoid, result)) sylvan_stats_count(LDD_PROJECT_MINUS_CACHEDPUT); + + return result; +} + +MDD +lddmc_union_cube(MDD a, uint32_t* values, size_t count) +{ + if (a == lddmc_false) return lddmc_cube(values, count); + if (a == lddmc_true) { + assert(count == 0); + return lddmc_true; + } + assert(count != 0); + + mddnode_t na = GETNODE(a); + uint32_t na_value = mddnode_getvalue(na); + + /* Only create a new node if something actually changed */ + + if (na_value < *values) { + MDD right = lddmc_union_cube(mddnode_getright(na), values, count); + if (right == mddnode_getright(na)) return a; // no actual change + return lddmc_makenode(na_value, mddnode_getdown(na), right); + } else if (na_value == *values) { + MDD down = lddmc_union_cube(mddnode_getdown(na), values+1, count-1); + if (down == mddnode_getdown(na)) return a; // no actual change + return lddmc_makenode(na_value, down, mddnode_getright(na)); + } else /* na_value > *values */ { + return lddmc_makenode(*values, lddmc_cube(values+1, count-1), a); + } +} + +MDD +lddmc_union_cube_copy(MDD a, uint32_t* values, int* copy, size_t count) +{ + if (a == lddmc_false) return lddmc_cube_copy(values, copy, count); + if (a == lddmc_true) { + assert(count == 0); + return lddmc_true; + } + assert(count != 0); + + mddnode_t na = GETNODE(a); + + /* Only create a new node if something actually changed */ + + int na_copy = mddnode_getcopy(na); + if (na_copy && *copy) { + MDD down = lddmc_union_cube_copy(mddnode_getdown(na), values+1, copy+1, count-1); + if (down == mddnode_getdown(na)) return a; // no actual change + return lddmc_make_copynode(down, mddnode_getright(na)); + } else if (na_copy) { + MDD right = lddmc_union_cube_copy(mddnode_getright(na), values, copy, count); + if (right == mddnode_getright(na)) return a; // no actual change + return lddmc_make_copynode(mddnode_getdown(na), right); + } else if (*copy) { + return lddmc_make_copynode(lddmc_cube_copy(values+1, copy+1, count-1), a); + } + + uint32_t na_value = mddnode_getvalue(na); + if (na_value < *values) { + MDD right = lddmc_union_cube_copy(mddnode_getright(na), values, copy, count); + if (right == mddnode_getright(na)) return a; // no actual change + return lddmc_makenode(na_value, mddnode_getdown(na), right); + } else if (na_value == *values) { + MDD down = lddmc_union_cube_copy(mddnode_getdown(na), values+1, copy+1, count-1); + if (down == mddnode_getdown(na)) return a; // no actual change + return lddmc_makenode(na_value, down, mddnode_getright(na)); + } else /* na_value > *values */ { + return lddmc_makenode(*values, lddmc_cube_copy(values+1, copy+1, count-1), a); + } +} + +int +lddmc_member_cube(MDD a, uint32_t* values, size_t count) +{ + while (1) { + if (a == lddmc_false) return 0; + if (a == lddmc_true) return 1; + assert(count > 0); // size mismatch + + a = lddmc_follow(a, *values); + values++; + count--; + } +} + +int +lddmc_member_cube_copy(MDD a, uint32_t* values, int* copy, size_t count) +{ + while (1) { + if (a == lddmc_false) return 0; + if (a == lddmc_true) return 1; + assert(count > 0); // size mismatch + + if (*copy) a = lddmc_followcopy(a); + else a = lddmc_follow(a, *values); + values++; + count--; + } +} + +MDD +lddmc_cube(uint32_t* values, size_t count) +{ + if (count == 0) return lddmc_true; + return lddmc_makenode(*values, lddmc_cube(values+1, count-1), lddmc_false); +} + +MDD +lddmc_cube_copy(uint32_t* values, int* copy, size_t count) +{ + if (count == 0) return lddmc_true; + if 
(*copy) return lddmc_make_copynode(lddmc_cube_copy(values+1, copy+1, count-1), lddmc_false); + else return lddmc_makenode(*values, lddmc_cube_copy(values+1, copy+1, count-1), lddmc_false); +} + +/** + * Count number of nodes for each level + */ + +static void +lddmc_nodecount_levels_mark(MDD mdd, size_t *variables) +{ + if (mdd <= lddmc_true) return; + mddnode_t n = GETNODE(mdd); + if (!mddnode_getmark(n)) { + mddnode_setmark(n, 1); + (*variables) += 1; + lddmc_nodecount_levels_mark(mddnode_getright(n), variables); + lddmc_nodecount_levels_mark(mddnode_getdown(n), variables+1); + } +} + +static void +lddmc_nodecount_levels_unmark(MDD mdd) +{ + if (mdd <= lddmc_true) return; + mddnode_t n = GETNODE(mdd); + if (mddnode_getmark(n)) { + mddnode_setmark(n, 0); + lddmc_nodecount_levels_unmark(mddnode_getright(n)); + lddmc_nodecount_levels_unmark(mddnode_getdown(n)); + } +} + +void +lddmc_nodecount_levels(MDD mdd, size_t *variables) +{ + lddmc_nodecount_levels_mark(mdd, variables); + lddmc_nodecount_levels_unmark(mdd); +} + +/** + * Count number of nodes in MDD + */ + +static size_t +lddmc_nodecount_mark(MDD mdd) +{ + if (mdd <= lddmc_true) return 0; + mddnode_t n = GETNODE(mdd); + if (mddnode_getmark(n)) return 0; + mddnode_setmark(n, 1); + return 1 + lddmc_nodecount_mark(mddnode_getdown(n)) + lddmc_nodecount_mark(mddnode_getright(n)); +} + +static void +lddmc_nodecount_unmark(MDD mdd) +{ + if (mdd <= lddmc_true) return; + mddnode_t n = GETNODE(mdd); + if (mddnode_getmark(n)) { + mddnode_setmark(n, 0); + lddmc_nodecount_unmark(mddnode_getright(n)); + lddmc_nodecount_unmark(mddnode_getdown(n)); + } +} + +size_t +lddmc_nodecount(MDD mdd) +{ + size_t result = lddmc_nodecount_mark(mdd); + lddmc_nodecount_unmark(mdd); + return result; +} + +/** + * CALCULATE NUMBER OF VAR ASSIGNMENTS THAT YIELD TRUE + */ + +TASK_IMPL_1(lddmc_satcount_double_t, lddmc_satcount_cached, MDD, mdd) +{ + if (mdd == lddmc_false) return 0.0; + if (mdd == lddmc_true) return 1.0; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + union { + lddmc_satcount_double_t d; + uint64_t s; + } hack; + + sylvan_stats_count(LDD_SATCOUNT); + + if (cache_get3(CACHE_MDD_SATCOUNT, mdd, 0, 0, &hack.s)) { + sylvan_stats_count(LDD_SATCOUNT_CACHED); + return hack.d; + } + + mddnode_t n = GETNODE(mdd); + + SPAWN(lddmc_satcount_cached, mddnode_getdown(n)); + lddmc_satcount_double_t right = CALL(lddmc_satcount_cached, mddnode_getright(n)); + hack.d = right + SYNC(lddmc_satcount_cached); + + if (cache_put3(CACHE_MDD_SATCOUNT, mdd, 0, 0, hack.s)) sylvan_stats_count(LDD_SATCOUNT_CACHEDPUT); + + return hack.d; +} + +TASK_IMPL_1(long double, lddmc_satcount, MDD, mdd) +{ + if (mdd == lddmc_false) return 0.0; + if (mdd == lddmc_true) return 1.0; + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + sylvan_stats_count(LDD_SATCOUNTL); + + union { + long double d; + struct { + uint64_t s1; + uint64_t s2; + } s; + } hack; + + if (cache_get3(CACHE_MDD_SATCOUNTL1, mdd, 0, 0, &hack.s.s1) && + cache_get3(CACHE_MDD_SATCOUNTL2, mdd, 0, 0, &hack.s.s2)) { + sylvan_stats_count(LDD_SATCOUNTL_CACHED); + return hack.d; + } + + mddnode_t n = GETNODE(mdd); + + SPAWN(lddmc_satcount, mddnode_getdown(n)); + long double right = CALL(lddmc_satcount, mddnode_getright(n)); + hack.d = right + SYNC(lddmc_satcount); + + int c1 = cache_put3(CACHE_MDD_SATCOUNTL1, mdd, 0, 0, hack.s.s1); + int c2 = cache_put3(CACHE_MDD_SATCOUNTL2, mdd, 0, 0, hack.s.s2); + if (c1 && c2) sylvan_stats_count(LDD_SATCOUNTL_CACHEDPUT); + + return hack.d; +} + +TASK_IMPL_5(MDD, 
lddmc_collect, MDD, mdd, lddmc_collect_cb, cb, void*, context, uint32_t*, values, size_t, count) +{ + if (mdd == lddmc_false) return lddmc_false; + if (mdd == lddmc_true) { + return WRAP(cb, values, count, context); + } + + mddnode_t n = GETNODE(mdd); + + lddmc_refs_spawn(SPAWN(lddmc_collect, mddnode_getright(n), cb, context, values, count)); + + uint32_t newvalues[count+1]; + if (count > 0) memcpy(newvalues, values, sizeof(uint32_t)*count); + newvalues[count] = mddnode_getvalue(n); + MDD down = CALL(lddmc_collect, mddnode_getdown(n), cb, context, newvalues, count+1); + + if (down == lddmc_false) { + MDD result = lddmc_refs_sync(SYNC(lddmc_collect)); + return result; + } + + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_collect)); + + if (right == lddmc_false) { + lddmc_refs_pop(1); + return down; + } else { + lddmc_refs_push(right); + MDD result = CALL(lddmc_union, down, right); + lddmc_refs_pop(2); + return result; + } +} + +VOID_TASK_5(_lddmc_sat_all_nopar, MDD, mdd, lddmc_enum_cb, cb, void*, context, uint32_t*, values, size_t, count) +{ + if (mdd == lddmc_false) return; + if (mdd == lddmc_true) { + WRAP(cb, values, count, context); + return; + } + + mddnode_t n = GETNODE(mdd); + values[count] = mddnode_getvalue(n); + CALL(_lddmc_sat_all_nopar, mddnode_getdown(n), cb, context, values, count+1); + CALL(_lddmc_sat_all_nopar, mddnode_getright(n), cb, context, values, count); +} + +VOID_TASK_IMPL_3(lddmc_sat_all_nopar, MDD, mdd, lddmc_enum_cb, cb, void*, context) +{ + // determine depth + size_t count=0; + MDD _mdd = mdd; + while (_mdd > lddmc_true) { + _mdd = mddnode_getdown(GETNODE(_mdd)); + assert(_mdd != lddmc_false); + count++; + } + + uint32_t values[count]; + CALL(_lddmc_sat_all_nopar, mdd, cb, context, values, 0); +} + +VOID_TASK_IMPL_5(lddmc_sat_all_par, MDD, mdd, lddmc_enum_cb, cb, void*, context, uint32_t*, values, size_t, count) +{ + if (mdd == lddmc_false) return; + if (mdd == lddmc_true) { + WRAP(cb, values, count, context); + return; + } + + mddnode_t n = GETNODE(mdd); + + SPAWN(lddmc_sat_all_par, mddnode_getright(n), cb, context, values, count); + + uint32_t newvalues[count+1]; + if (count > 0) memcpy(newvalues, values, sizeof(uint32_t)*count); + newvalues[count] = mddnode_getvalue(n); + CALL(lddmc_sat_all_par, mddnode_getdown(n), cb, context, newvalues, count+1); + + SYNC(lddmc_sat_all_par); +} + +struct lddmc_match_sat_info +{ + MDD mdd; + MDD match; + MDD proj; + size_t count; + uint32_t values[0]; +}; + +// proj: -1 (rest 0), 0 (no match), 1 (match) +VOID_TASK_3(lddmc_match_sat, struct lddmc_match_sat_info *, info, lddmc_enum_cb, cb, void*, context) +{ + MDD a = info->mdd, b = info->match, proj = info->proj; + + if (a == lddmc_false || b == lddmc_false) return; + + if (a == lddmc_true) { + assert(b == lddmc_true); + WRAP(cb, info->values, info->count, context); + return; + } + + mddnode_t p_node = GETNODE(proj); + uint32_t p_val = mddnode_getvalue(p_node); + if (p_val == (uint32_t)-1) { + assert(b == lddmc_true); + CALL(lddmc_sat_all_par, a, cb, context, info->values, info->count); + return; + } + + /* Get nodes */ + mddnode_t na = GETNODE(a); + mddnode_t nb = GETNODE(b); + uint32_t na_value = mddnode_getvalue(na); + uint32_t nb_value = mddnode_getvalue(nb); + + /* Skip nodes if possible */ + if (p_val == 1) { + while (na_value != nb_value) { + if (na_value < nb_value) { + a = mddnode_getright(na); + if (a == lddmc_false) return; + na = GETNODE(a); + na_value = mddnode_getvalue(na); + } + if (nb_value < na_value) { + b = mddnode_getright(nb); + if (b == 
lddmc_false) return; + nb = GETNODE(b); + nb_value = mddnode_getvalue(nb); + } + } + } + + struct lddmc_match_sat_info *ri = (struct lddmc_match_sat_info*)alloca(sizeof(struct lddmc_match_sat_info)+sizeof(uint32_t[info->count])); + struct lddmc_match_sat_info *di = (struct lddmc_match_sat_info*)alloca(sizeof(struct lddmc_match_sat_info)+sizeof(uint32_t[info->count+1])); + + ri->mdd = mddnode_getright(na); + di->mdd = mddnode_getdown(na); + ri->match = b; + di->match = mddnode_getdown(nb); + ri->proj = proj; + di->proj = mddnode_getdown(p_node); + ri->count = info->count; + di->count = info->count+1; + if (ri->count > 0) memcpy(ri->values, info->values, sizeof(uint32_t[info->count])); + if (di->count > 0) memcpy(di->values, info->values, sizeof(uint32_t[info->count])); + di->values[info->count] = na_value; + + SPAWN(lddmc_match_sat, ri, cb, context); + CALL(lddmc_match_sat, di, cb, context); + SYNC(lddmc_match_sat); +} + +VOID_TASK_IMPL_5(lddmc_match_sat_par, MDD, mdd, MDD, match, MDD, proj, lddmc_enum_cb, cb, void*, context) +{ + struct lddmc_match_sat_info i; + i.mdd = mdd; + i.match = match; + i.proj = proj; + i.count = 0; + CALL(lddmc_match_sat, &i, cb, context); +} + +int +lddmc_sat_one(MDD mdd, uint32_t* values, size_t count) +{ + if (mdd == lddmc_false) return 0; + if (mdd == lddmc_true) return 1; + assert(count != 0); + mddnode_t n = GETNODE(mdd); + *values = mddnode_getvalue(n); + return lddmc_sat_one(mddnode_getdown(n), values+1, count-1); +} + +MDD +lddmc_sat_one_mdd(MDD mdd) +{ + if (mdd == lddmc_false) return lddmc_false; + if (mdd == lddmc_true) return lddmc_true; + mddnode_t n = GETNODE(mdd); + MDD down = lddmc_sat_one_mdd(mddnode_getdown(n)); + return lddmc_makenode(mddnode_getvalue(n), down, lddmc_false); +} + +TASK_IMPL_4(MDD, lddmc_compose, MDD, mdd, lddmc_compose_cb, cb, void*, context, int, depth) +{ + if (depth == 0 || mdd == lddmc_false || mdd == lddmc_true) { + return WRAP(cb, mdd, context); + } else { + mddnode_t n = GETNODE(mdd); + lddmc_refs_spawn(SPAWN(lddmc_compose, mddnode_getright(n), cb, context, depth)); + MDD down = lddmc_compose(mddnode_getdown(n), cb, context, depth-1); + lddmc_refs_push(down); + MDD right = lddmc_refs_sync(SYNC(lddmc_compose)); + lddmc_refs_pop(1); + return lddmc_makenode(mddnode_getvalue(n), down, right); + } +} + +VOID_TASK_IMPL_4(lddmc_visit_seq, MDD, mdd, lddmc_visit_callbacks_t*, cbs, size_t, ctx_size, void*, context) +{ + if (WRAP(cbs->lddmc_visit_pre, mdd, context) == 0) return; + + void* context_down = alloca(ctx_size); + void* context_right = alloca(ctx_size); + WRAP(cbs->lddmc_visit_init_context, context_down, context, 1); + WRAP(cbs->lddmc_visit_init_context, context_right, context, 0); + + CALL(lddmc_visit_seq, mddnode_getdown(GETNODE(mdd)), cbs, ctx_size, context_down); + CALL(lddmc_visit_seq, mddnode_getright(GETNODE(mdd)), cbs, ctx_size, context_right); + + WRAP(cbs->lddmc_visit_post, mdd, context); +} + +VOID_TASK_IMPL_4(lddmc_visit_par, MDD, mdd, lddmc_visit_callbacks_t*, cbs, size_t, ctx_size, void*, context) +{ + if (WRAP(cbs->lddmc_visit_pre, mdd, context) == 0) return; + + void* context_down = alloca(ctx_size); + void* context_right = alloca(ctx_size); + WRAP(cbs->lddmc_visit_init_context, context_down, context, 1); + WRAP(cbs->lddmc_visit_init_context, context_right, context, 0); + + SPAWN(lddmc_visit_par, mddnode_getdown(GETNODE(mdd)), cbs, ctx_size, context_down); + CALL(lddmc_visit_par, mddnode_getright(GETNODE(mdd)), cbs, ctx_size, context_right); + SYNC(lddmc_visit_par); + + WRAP(cbs->lddmc_visit_post, mdd, 
context); +} + +/** + * GENERIC MARK/UNMARK METHODS + */ + +static inline int +lddmc_mark(mddnode_t node) +{ + if (mddnode_getmark(node)) return 0; + mddnode_setmark(node, 1); + return 1; +} + +static inline int +lddmc_unmark(mddnode_t node) +{ + if (mddnode_getmark(node)) { + mddnode_setmark(node, 0); + return 1; + } else { + return 0; + } +} + +static void +lddmc_unmark_rec(mddnode_t node) +{ + if (lddmc_unmark(node)) { + MDD node_right = mddnode_getright(node); + if (node_right > lddmc_true) lddmc_unmark_rec(GETNODE(node_right)); + MDD node_down = mddnode_getdown(node); + if (node_down > lddmc_true) lddmc_unmark_rec(GETNODE(node_down)); + } +} + +/************* + * DOT OUTPUT +*************/ + +static void +lddmc_fprintdot_rec(FILE* out, MDD mdd) +{ + // assert(mdd > lddmc_true); + + // check mark + mddnode_t n = GETNODE(mdd); + if (mddnode_getmark(n)) return; + mddnode_setmark(n, 1); + + // print the node + uint32_t val = mddnode_getvalue(n); + fprintf(out, "%" PRIu64 " [shape=record, label=\"", mdd); + if (mddnode_getcopy(n)) fprintf(out, " *"); + else fprintf(out, "<%u> %u", val, val); + MDD right = mddnode_getright(n); + while (right != lddmc_false) { + mddnode_t n2 = GETNODE(right); + uint32_t val2 = mddnode_getvalue(n2); + fprintf(out, "|<%u> %u", val2, val2); + right = mddnode_getright(n2); + // assert(right != lddmc_true); + } + fprintf(out, "\"];\n"); + + // recurse and print the edges + for (;;) { + MDD down = mddnode_getdown(n); + // assert(down != lddmc_false); + if (down > lddmc_true) { + lddmc_fprintdot_rec(out, down); + if (mddnode_getcopy(n)) { + fprintf(out, "%" PRIu64 ":c -> ", mdd); + } else { + fprintf(out, "%" PRIu64 ":%u -> ", mdd, mddnode_getvalue(n)); + } + if (mddnode_getcopy(GETNODE(down))) { + fprintf(out, "%" PRIu64 ":c [style=solid];\n", down); + } else { + fprintf(out, "%" PRIu64 ":%u [style=solid];\n", down, mddnode_getvalue(GETNODE(down))); + } + } + MDD right = mddnode_getright(n); + if (right == lddmc_false) break; + n = GETNODE(right); + } +} + +static void +lddmc_fprintdot_unmark(MDD mdd) +{ + if (mdd <= lddmc_true) return; + mddnode_t n = GETNODE(mdd); + if (mddnode_getmark(n)) { + mddnode_setmark(n, 0); + for (;;) { + lddmc_fprintdot_unmark(mddnode_getdown(n)); + mdd = mddnode_getright(n); + if (mdd == lddmc_false) return; + n = GETNODE(mdd); + } + } +} + +void +lddmc_fprintdot(FILE *out, MDD mdd) +{ + fprintf(out, "digraph \"DD\" {\n"); + fprintf(out, "graph [dpi = 300];\n"); + fprintf(out, "center = true;\n"); + fprintf(out, "edge [dir = forward];\n"); + + // Special case: false + if (mdd == lddmc_false) { + fprintf(out, "0 [shape=record, label=\"False\"];\n"); + fprintf(out, "}\n"); + return; + } + + // Special case: true + if (mdd == lddmc_true) { + fprintf(out, "1 [shape=record, label=\"True\"];\n"); + fprintf(out, "}\n"); + return; + } + + lddmc_fprintdot_rec(out, mdd); + lddmc_fprintdot_unmark(mdd); + + fprintf(out, "}\n"); +} + +void +lddmc_printdot(MDD mdd) +{ + lddmc_fprintdot(stdout, mdd); +} + +/** + * Some debug stuff + */ +void +lddmc_fprint(FILE *f, MDD mdd) +{ + lddmc_serialize_reset(); + size_t v = lddmc_serialize_add(mdd); + fprintf(f, "%zu,", v); + lddmc_serialize_totext(f); +} + +void +lddmc_print(MDD mdd) +{ + lddmc_fprint(stdout, mdd); +} + +/** + * SERIALIZATION + */ + +struct lddmc_ser { + MDD mdd; + size_t assigned; +}; + +// Define a AVL tree type with prefix 'lddmc_ser' holding +// nodes of struct lddmc_ser with the following compare() function... 
+AVL(lddmc_ser, struct lddmc_ser) +{ + if (left->mdd > right->mdd) return 1; + if (left->mdd < right->mdd) return -1; + return 0; +} + +// Define a AVL tree type with prefix 'lddmc_ser_reversed' holding +// nodes of struct lddmc_ser with the following compare() function... +AVL(lddmc_ser_reversed, struct lddmc_ser) +{ + if (left->assigned > right->assigned) return 1; + if (left->assigned < right->assigned) return -1; + return 0; +} + +// Initially, both sets are empty +static avl_node_t *lddmc_ser_set = NULL; +static avl_node_t *lddmc_ser_reversed_set = NULL; + +// Start counting (assigning numbers to MDDs) at 2 +static volatile size_t lddmc_ser_counter = 2; +static size_t lddmc_ser_done = 0; + +// Given a MDD, assign unique numbers to all nodes +static size_t +lddmc_serialize_assign_rec(MDD mdd) +{ + if (mdd <= lddmc_true) return mdd; + + mddnode_t n = GETNODE(mdd); + + struct lddmc_ser s, *ss; + s.mdd = mdd; + ss = lddmc_ser_search(lddmc_ser_set, &s); + if (ss == NULL) { + // assign dummy value + s.assigned = 0; + ss = lddmc_ser_put(&lddmc_ser_set, &s, NULL); + + // first assign recursively + lddmc_serialize_assign_rec(mddnode_getright(n)); + lddmc_serialize_assign_rec(mddnode_getdown(n)); + + // assign real value + ss->assigned = lddmc_ser_counter++; + + // put a copy in the reversed table + lddmc_ser_reversed_insert(&lddmc_ser_reversed_set, ss); + } + + return ss->assigned; +} + +size_t +lddmc_serialize_add(MDD mdd) +{ + return lddmc_serialize_assign_rec(mdd); +} + +void +lddmc_serialize_reset() +{ + lddmc_ser_free(&lddmc_ser_set); + lddmc_ser_free(&lddmc_ser_reversed_set); + lddmc_ser_counter = 2; + lddmc_ser_done = 0; +} + +size_t +lddmc_serialize_get(MDD mdd) +{ + if (mdd <= lddmc_true) return mdd; + struct lddmc_ser s, *ss; + s.mdd = mdd; + ss = lddmc_ser_search(lddmc_ser_set, &s); + assert(ss != NULL); + return ss->assigned; +} + +MDD +lddmc_serialize_get_reversed(size_t value) +{ + if ((MDD)value <= lddmc_true) return (MDD)value; + struct lddmc_ser s, *ss; + s.assigned = value; + ss = lddmc_ser_reversed_search(lddmc_ser_reversed_set, &s); + assert(ss != NULL); + return ss->mdd; +} + +void +lddmc_serialize_totext(FILE *out) +{ + avl_iter_t *it = lddmc_ser_reversed_iter(lddmc_ser_reversed_set); + struct lddmc_ser *s; + + fprintf(out, "["); + while ((s=lddmc_ser_reversed_iter_next(it))) { + MDD mdd = s->mdd; + mddnode_t n = GETNODE(mdd); + fprintf(out, "(%zu,v=%u,d=%zu,r=%zu),", s->assigned, + mddnode_getvalue(n), + lddmc_serialize_get(mddnode_getdown(n)), + lddmc_serialize_get(mddnode_getright(n))); + } + fprintf(out, "]"); + + lddmc_ser_reversed_iter_free(it); +} + +void +lddmc_serialize_tofile(FILE *out) +{ + size_t count = avl_count(lddmc_ser_reversed_set); + assert(count >= lddmc_ser_done); + assert(count == lddmc_ser_counter-2); + count -= lddmc_ser_done; + fwrite(&count, sizeof(size_t), 1, out); + + struct lddmc_ser *s; + avl_iter_t *it = lddmc_ser_reversed_iter(lddmc_ser_reversed_set); + + /* Skip already written entries */ + size_t index = 0; + while (index < lddmc_ser_done && (s=lddmc_ser_reversed_iter_next(it))) { + assert(s->assigned == index+2); + index++; + } + + while ((s=lddmc_ser_reversed_iter_next(it))) { + assert(s->assigned == index+2); + index++; + + mddnode_t n = GETNODE(s->mdd); + + struct mddnode node; + uint64_t right = lddmc_serialize_get(mddnode_getright(n)); + uint64_t down = lddmc_serialize_get(mddnode_getdown(n)); + if (mddnode_getcopy(n)) mddnode_makecopy(&node, right, down); + else mddnode_make(&node, mddnode_getvalue(n), right, down); + + 
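            // Children receive their serialization numbers before their parent in
            // lddmc_serialize_assign_rec, so the right/down identifiers written here always
            // refer to entries that appear earlier in the file; the asserts below check that.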
assert(right <= index); + assert(down <= index); + + fwrite(&node, sizeof(struct mddnode), 1, out); + } + + lddmc_ser_done = lddmc_ser_counter-2; + lddmc_ser_reversed_iter_free(it); +} + +void +lddmc_serialize_fromfile(FILE *in) +{ + size_t count, i; + if (fread(&count, sizeof(size_t), 1, in) != 1) { + // TODO FIXME return error + printf("sylvan_serialize_fromfile: file format error, giving up\n"); + exit(-1); + } + + for (i=1; i<=count; i++) { + struct mddnode node; + if (fread(&node, sizeof(struct mddnode), 1, in) != 1) { + // TODO FIXME return error + printf("sylvan_serialize_fromfile: file format error, giving up\n"); + exit(-1); + } + + assert(mddnode_getright(&node) <= lddmc_ser_done+1); + assert(mddnode_getdown(&node) <= lddmc_ser_done+1); + + MDD right = lddmc_serialize_get_reversed(mddnode_getright(&node)); + MDD down = lddmc_serialize_get_reversed(mddnode_getdown(&node)); + + struct lddmc_ser s; + if (mddnode_getcopy(&node)) s.mdd = lddmc_make_copynode(down, right); + else s.mdd = lddmc_makenode(mddnode_getvalue(&node), down, right); + s.assigned = lddmc_ser_done+2; // starts at 0 but we want 2-based... + lddmc_ser_done++; + + lddmc_ser_insert(&lddmc_ser_set, &s); + lddmc_ser_reversed_insert(&lddmc_ser_reversed_set, &s); + } +} + +static void +lddmc_sha2_rec(MDD mdd, SHA256_CTX *ctx) +{ + if (mdd <= lddmc_true) { + SHA256_Update(ctx, (void*)&mdd, sizeof(uint64_t)); + return; + } + + mddnode_t node = GETNODE(mdd); + if (lddmc_mark(node)) { + uint32_t val = mddnode_getvalue(node); + SHA256_Update(ctx, (void*)&val, sizeof(uint32_t)); + lddmc_sha2_rec(mddnode_getdown(node), ctx); + lddmc_sha2_rec(mddnode_getright(node), ctx); + } +} + +void +lddmc_printsha(MDD mdd) +{ + lddmc_fprintsha(stdout, mdd); +} + +void +lddmc_fprintsha(FILE *out, MDD mdd) +{ + char buf[80]; + lddmc_getsha(mdd, buf); + fprintf(out, "%s", buf); +} + +void +lddmc_getsha(MDD mdd, char *target) +{ + SHA256_CTX ctx; + SHA256_Init(&ctx); + lddmc_sha2_rec(mdd, &ctx); + if (mdd > lddmc_true) lddmc_unmark_rec(GETNODE(mdd)); + SHA256_End(&ctx, target); +} + +#ifndef NDEBUG +size_t +lddmc_test_ismdd(MDD mdd) +{ + if (mdd == lddmc_true) return 1; + if (mdd == lddmc_false) return 0; + + int first = 1; + size_t depth = 0; + + if (mdd != lddmc_false) { + mddnode_t n = GETNODE(mdd); + if (mddnode_getcopy(n)) { + mdd = mddnode_getright(n); + depth = lddmc_test_ismdd(mddnode_getdown(n)); + assert(depth >= 1); + } + } + + uint32_t value = 0; + while (mdd != lddmc_false) { + assert(llmsset_is_marked(nodes, mdd)); + + mddnode_t n = GETNODE(mdd); + uint32_t next_value = mddnode_getvalue(n); + assert(mddnode_getcopy(n) == 0); + if (first) { + first = 0; + depth = lddmc_test_ismdd(mddnode_getdown(n)); + assert(depth >= 1); + } else { + assert(value < next_value); + assert(depth == lddmc_test_ismdd(mddnode_getdown(n))); + } + + value = next_value; + mdd = mddnode_getright(n); + } + + return 1 + depth; +} +#endif diff --git a/src/sylvan_ldd.h b/src/sylvan_ldd.h new file mode 100644 index 000000000..f104309b2 --- /dev/null +++ b/src/sylvan_ldd.h @@ -0,0 +1,288 @@ +/* + * Copyright 2011-2014 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* Do not include this file directly. Instead, include sylvan.h */ + +#ifndef SYLVAN_LDD_H +#define SYLVAN_LDD_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + +typedef uint64_t MDD; // Note: low 40 bits only + +#define lddmc_false ((MDD)0) +#define lddmc_true ((MDD)1) + +/* Initialize LDD functionality */ +void sylvan_init_ldd(); + +/* Primitives */ +MDD lddmc_makenode(uint32_t value, MDD ifeq, MDD ifneq); +MDD lddmc_extendnode(MDD mdd, uint32_t value, MDD ifeq); +uint32_t lddmc_getvalue(MDD mdd); +MDD lddmc_getdown(MDD mdd); +MDD lddmc_getright(MDD mdd); +MDD lddmc_follow(MDD mdd, uint32_t value); + +/** + * Copy nodes in relations. + * A copy node represents 'read x, then write x' for every x. + * In a read-write relation, use copy nodes twice, once on read level, once on write level. + * Copy nodes are only supported by relprod, relprev and union. + */ + +/* Primitive for special 'copy node' (for relprod/relprev) */ +MDD lddmc_make_copynode(MDD ifeq, MDD ifneq); +int lddmc_iscopy(MDD mdd); +MDD lddmc_followcopy(MDD mdd); + +/* Add or remove external reference to MDD */ +MDD lddmc_ref(MDD a); +void lddmc_deref(MDD a); + +/* For use in custom mark functions */ +VOID_TASK_DECL_1(lddmc_gc_mark_rec, MDD) +#define lddmc_gc_mark_rec(mdd) CALL(lddmc_gc_mark_rec, mdd) + +/* Return the number of external references */ +size_t lddmc_count_refs(); + +/* Mark MDD for "notify on dead" */ +#define lddmc_notify_ondead(mdd) llmsset_notify_ondead(nodes, mdd) + +/* Sanity check - returns depth of MDD including 'true' terminal or 0 for empty set */ +#ifndef NDEBUG +size_t lddmc_test_ismdd(MDD mdd); +#endif + +/* Operations for model checking */ +TASK_DECL_2(MDD, lddmc_union, MDD, MDD); +#define lddmc_union(a, b) CALL(lddmc_union, a, b) + +TASK_DECL_2(MDD, lddmc_minus, MDD, MDD); +#define lddmc_minus(a, b) CALL(lddmc_minus, a, b) + +TASK_DECL_3(MDD, lddmc_zip, MDD, MDD, MDD*); +#define lddmc_zip(a, b, res) CALL(lddmc_zip, a, b, res) + +TASK_DECL_2(MDD, lddmc_intersect, MDD, MDD); +#define lddmc_intersect(a, b) CALL(lddmc_intersect, a, b) + +TASK_DECL_3(MDD, lddmc_match, MDD, MDD, MDD); +#define lddmc_match(a, b, proj) CALL(lddmc_match, a, b, proj) + +MDD lddmc_union_cube(MDD a, uint32_t* values, size_t count); +int lddmc_member_cube(MDD a, uint32_t* values, size_t count); +MDD lddmc_cube(uint32_t* values, size_t count); + +MDD lddmc_union_cube_copy(MDD a, uint32_t* values, int* copy, size_t count); +int lddmc_member_cube_copy(MDD a, uint32_t* values, int* copy, size_t count); +MDD lddmc_cube_copy(uint32_t* values, int* copy, size_t count); + +TASK_DECL_3(MDD, lddmc_relprod, MDD, MDD, MDD); +#define lddmc_relprod(a, b, proj) CALL(lddmc_relprod, a, b, proj) + +TASK_DECL_4(MDD, lddmc_relprod_union, MDD, MDD, MDD, MDD); +#define lddmc_relprod_union(a, b, meta, un) CALL(lddmc_relprod_union, a, b, meta, un) + +/** + * Calculate all predecessors to a in uni according to rel[proj] + * follows the same semantics as relprod + * i.e. 
0 (not in rel), 1 (read+write), 2 (read), 3 (write), -1 (end; rest=0) + */ +TASK_DECL_4(MDD, lddmc_relprev, MDD, MDD, MDD, MDD); +#define lddmc_relprev(a, rel, proj, uni) CALL(lddmc_relprev, a, rel, proj, uni) + +// so: proj: -2 (end; quantify rest), -1 (end; keep rest), 0 (quantify), 1 (keep) +TASK_DECL_2(MDD, lddmc_project, MDD, MDD); +#define lddmc_project(mdd, proj) CALL(lddmc_project, mdd, proj) + +TASK_DECL_3(MDD, lddmc_project_minus, MDD, MDD, MDD); +#define lddmc_project_minus(mdd, proj, avoid) CALL(lddmc_project_minus, mdd, proj, avoid) + +TASK_DECL_4(MDD, lddmc_join, MDD, MDD, MDD, MDD); +#define lddmc_join(a, b, a_proj, b_proj) CALL(lddmc_join, a, b, a_proj, b_proj) + +/* Write a DOT representation */ +void lddmc_printdot(MDD mdd); +void lddmc_fprintdot(FILE *out, MDD mdd); + +void lddmc_fprint(FILE *out, MDD mdd); +void lddmc_print(MDD mdd); + +void lddmc_printsha(MDD mdd); +void lddmc_fprintsha(FILE *out, MDD mdd); +void lddmc_getsha(MDD mdd, char *target); // at least 65 bytes... + +/** + * Calculate number of satisfying variable assignments. + * The set of variables must be >= the support of the MDD. + * (i.e. all variables in the MDD must be in variables) + * + * The cached version uses the operation cache, but is limited to 64-bit floating point numbers. + */ + +typedef double lddmc_satcount_double_t; +// if this line below gives an error, modify the above typedef until fixed ;) +typedef char __lddmc_check_float_is_8_bytes[(sizeof(lddmc_satcount_double_t) == sizeof(uint64_t))?1:-1]; + +TASK_DECL_1(lddmc_satcount_double_t, lddmc_satcount_cached, MDD); +#define lddmc_satcount_cached(mdd) CALL(lddmc_satcount_cached, mdd) + +TASK_DECL_1(long double, lddmc_satcount, MDD); +#define lddmc_satcount(mdd) CALL(lddmc_satcount, mdd) + +/** + * A callback for enumerating functions like sat_all_par, collect and match + * Example: + * TASK_3(void*, my_function, uint32_t*, values, size_t, count, void*, context) ... + * For collect, use: + * TASK_3(MDD, ...) + */ +LACE_TYPEDEF_CB(void, lddmc_enum_cb, uint32_t*, size_t, void*); +LACE_TYPEDEF_CB(MDD, lddmc_collect_cb, uint32_t*, size_t, void*); + +VOID_TASK_DECL_5(lddmc_sat_all_par, MDD, lddmc_enum_cb, void*, uint32_t*, size_t); +#define lddmc_sat_all_par(mdd, cb, context) CALL(lddmc_sat_all_par, mdd, cb, context, 0, 0) + +VOID_TASK_DECL_3(lddmc_sat_all_nopar, MDD, lddmc_enum_cb, void*); +#define lddmc_sat_all_nopar(mdd, cb, context) CALL(lddmc_sat_all_nopar, mdd, cb, context) + +TASK_DECL_5(MDD, lddmc_collect, MDD, lddmc_collect_cb, void*, uint32_t*, size_t); +#define lddmc_collect(mdd, cb, context) CALL(lddmc_collect, mdd, cb, context, 0, 0) + +VOID_TASK_DECL_5(lddmc_match_sat_par, MDD, MDD, MDD, lddmc_enum_cb, void*); +#define lddmc_match_sat_par(mdd, match, proj, cb, context) CALL(lddmc_match_sat_par, mdd, match, proj, cb, context) + +int lddmc_sat_one(MDD mdd, uint32_t *values, size_t count); +MDD lddmc_sat_one_mdd(MDD mdd); +#define lddmc_pick_cube lddmc_sat_one_mdd + +/** + * Callback functions for visiting nodes. + * lddmc_visit_seq sequentially visits nodes, down first, then right. 
+ * lddmc_visit_par visits nodes in parallel (down || right) + */ +LACE_TYPEDEF_CB(int, lddmc_visit_pre_cb, MDD, void*); // int pre(MDD, context) +LACE_TYPEDEF_CB(void, lddmc_visit_post_cb, MDD, void*); // void post(MDD, context) +LACE_TYPEDEF_CB(void, lddmc_visit_init_context_cb, void*, void*, int); // void init_context(context, parent, is_down) + +typedef struct lddmc_visit_node_callbacks { + lddmc_visit_pre_cb lddmc_visit_pre; + lddmc_visit_post_cb lddmc_visit_post; + lddmc_visit_init_context_cb lddmc_visit_init_context; +} lddmc_visit_callbacks_t; + +VOID_TASK_DECL_4(lddmc_visit_par, MDD, lddmc_visit_callbacks_t*, size_t, void*); +#define lddmc_visit_par(mdd, cbs, ctx_size, context) CALL(lddmc_visit_par, mdd, cbs, ctx_size, context); + +VOID_TASK_DECL_4(lddmc_visit_seq, MDD, lddmc_visit_callbacks_t*, size_t, void*); +#define lddmc_visit_seq(mdd, cbs, ctx_size, context) CALL(lddmc_visit_seq, mdd, cbs, ctx_size, context); + +size_t lddmc_nodecount(MDD mdd); +void lddmc_nodecount_levels(MDD mdd, size_t *variables); + +/** + * Functional composition + * For every node at depth , call function cb (MDD -> MDD). + * and replace the node by the result of the function + */ +LACE_TYPEDEF_CB(MDD, lddmc_compose_cb, MDD, void*); +TASK_DECL_4(MDD, lddmc_compose, MDD, lddmc_compose_cb, void*, int); +#define lddmc_compose(mdd, cb, context, depth) CALL(lddmc_compose, mdd, cb, context, depth) + +/** + * SAVING: + * use lddmc_serialize_add on every MDD you want to store + * use lddmc_serialize_get to retrieve the key of every stored MDD + * use lddmc_serialize_tofile + * + * LOADING: + * use lddmc_serialize_fromfile (implies lddmc_serialize_reset) + * use lddmc_serialize_get_reversed for every key + * + * MISC: + * use lddmc_serialize_reset to free all allocated structures + * use lddmc_serialize_totext to write a textual list of tuples of all MDDs. + * format: [(,,,,),...] 
+ * + * for the old lddmc_print functions, use lddmc_serialize_totext + */ +size_t lddmc_serialize_add(MDD mdd); +size_t lddmc_serialize_get(MDD mdd); +MDD lddmc_serialize_get_reversed(size_t value); +void lddmc_serialize_reset(); +void lddmc_serialize_totext(FILE *out); +void lddmc_serialize_tofile(FILE *out); +void lddmc_serialize_fromfile(FILE *in); + +/* Infrastructure for internal markings */ +typedef struct lddmc_refs_internal +{ + size_t r_size, r_count; + size_t s_size, s_count; + MDD *results; + Task **spawns; +} *lddmc_refs_internal_t; + +extern DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); + +static inline MDD +lddmc_refs_push(MDD ldd) +{ + LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); + if (lddmc_refs_key->r_count >= lddmc_refs_key->r_size) { + lddmc_refs_key->r_size *= 2; + lddmc_refs_key->results = (MDD*)realloc(lddmc_refs_key->results, sizeof(MDD) * lddmc_refs_key->r_size); + } + lddmc_refs_key->results[lddmc_refs_key->r_count++] = ldd; + return ldd; +} + +static inline void +lddmc_refs_pop(int amount) +{ + LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); + lddmc_refs_key->r_count-=amount; +} + +static inline void +lddmc_refs_spawn(Task *t) +{ + LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); + if (lddmc_refs_key->s_count >= lddmc_refs_key->s_size) { + lddmc_refs_key->s_size *= 2; + lddmc_refs_key->spawns = (Task**)realloc(lddmc_refs_key->spawns, sizeof(Task*) * lddmc_refs_key->s_size); + } + lddmc_refs_key->spawns[lddmc_refs_key->s_count++] = t; +} + +static inline MDD +lddmc_refs_sync(MDD result) +{ + LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t); + lddmc_refs_key->s_count--; + return result; +} + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/sylvan_mtbdd.c b/src/sylvan_mtbdd.c new file mode 100644 index 000000000..a4eb1bd62 --- /dev/null +++ b/src/sylvan_mtbdd.c @@ -0,0 +1,2542 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* Primitives */ +int +mtbdd_isleaf(MTBDD bdd) +{ + if (bdd == mtbdd_true || bdd == mtbdd_false) return 1; + return mtbddnode_isleaf(GETNODE(bdd)); +} + +// for nodes +uint32_t +mtbdd_getvar(MTBDD node) +{ + return mtbddnode_getvariable(GETNODE(node)); +} + +MTBDD +mtbdd_getlow(MTBDD mtbdd) +{ + return node_getlow(mtbdd, GETNODE(mtbdd)); +} + +MTBDD +mtbdd_gethigh(MTBDD mtbdd) +{ + return node_gethigh(mtbdd, GETNODE(mtbdd)); +} + +// for leaves +uint32_t +mtbdd_gettype(MTBDD leaf) +{ + return mtbddnode_gettype(GETNODE(leaf)); +} + +uint64_t +mtbdd_getvalue(MTBDD leaf) +{ + return mtbddnode_getvalue(GETNODE(leaf)); +} + +// for leaf type 0 (integer) +int64_t +mtbdd_getint64(MTBDD leaf) +{ + uint64_t value = mtbdd_getvalue(leaf); + return *(int64_t*)&value; +} + +// for leaf type 1 (double) +double +mtbdd_getdouble(MTBDD leaf) +{ + uint64_t value = mtbdd_getvalue(leaf); + return *(double*)&value; +} + +/** + * Implementation of garbage collection + */ + +/* Recursively mark MDD nodes as 'in use' */ +VOID_TASK_IMPL_1(mtbdd_gc_mark_rec, MDD, mtbdd) +{ + if (mtbdd == mtbdd_true) return; + if (mtbdd == mtbdd_false) return; + + if (llmsset_mark(nodes, mtbdd&(~mtbdd_complement))) { + mtbddnode_t n = GETNODE(mtbdd); + if (!mtbddnode_isleaf(n)) { + SPAWN(mtbdd_gc_mark_rec, mtbddnode_getlow(n)); + CALL(mtbdd_gc_mark_rec, mtbddnode_gethigh(n)); + SYNC(mtbdd_gc_mark_rec); + } + } +} + +/** + * External references + */ + +refs_table_t mtbdd_refs; +refs_table_t mtbdd_protected; +static int mtbdd_protected_created = 0; + +MDD +mtbdd_ref(MDD a) +{ + if (a == mtbdd_true || a == mtbdd_false) return a; + refs_up(&mtbdd_refs, a); + return a; +} + +void +mtbdd_deref(MDD a) +{ + if (a == mtbdd_true || a == mtbdd_false) return; + refs_down(&mtbdd_refs, a); +} + +size_t +mtbdd_count_refs() +{ + return refs_count(&mtbdd_refs); +} + +void +mtbdd_protect(MTBDD *a) +{ + if (!mtbdd_protected_created) { + // In C++, sometimes mtbdd_protect is called before Sylvan is initialized. Just create a table. 
+        protect_create(&mtbdd_protected, 4096);
+        mtbdd_protected_created = 1;
+    }
+    protect_up(&mtbdd_protected, (size_t)a);
+}
+
+void
+mtbdd_unprotect(MTBDD *a)
+{
+    if (mtbdd_protected.refs_table != NULL) protect_down(&mtbdd_protected, (size_t)a);
+}
+
+size_t
+mtbdd_count_protected()
+{
+    return protect_count(&mtbdd_protected);
+}
+
+/* Called during garbage collection */
+VOID_TASK_0(mtbdd_gc_mark_external_refs)
+{
+    // iterate through refs hash table, mark all found
+    size_t count=0;
+    uint64_t *it = refs_iter(&mtbdd_refs, 0, mtbdd_refs.refs_size);
+    while (it != NULL) {
+        SPAWN(mtbdd_gc_mark_rec, refs_next(&mtbdd_refs, &it, mtbdd_refs.refs_size));
+        count++;
+    }
+    while (count--) {
+        SYNC(mtbdd_gc_mark_rec);
+    }
+}
+
+VOID_TASK_0(mtbdd_gc_mark_protected)
+{
+    // iterate through refs hash table, mark all found
+    size_t count=0;
+    uint64_t *it = protect_iter(&mtbdd_protected, 0, mtbdd_protected.refs_size);
+    while (it != NULL) {
+        BDD *to_mark = (BDD*)protect_next(&mtbdd_protected, &it, mtbdd_protected.refs_size);
+        SPAWN(mtbdd_gc_mark_rec, *to_mark);
+        count++;
+    }
+    while (count--) {
+        SYNC(mtbdd_gc_mark_rec);
+    }
+}
+
+/* Infrastructure for internal markings */
+DECLARE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+
+VOID_TASK_0(mtbdd_refs_mark_task)
+{
+    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+    size_t i, j=0;
+    for (i=0; i<mtbdd_refs_key->r_count; i++) {
+        if (j >= 40) {
+            while (j--) SYNC(mtbdd_gc_mark_rec);
+            j=0;
+        }
+        SPAWN(mtbdd_gc_mark_rec, mtbdd_refs_key->results[i]);
+        j++;
+    }
+    for (i=0; i<mtbdd_refs_key->s_count; i++) {
+        Task *t = mtbdd_refs_key->spawns[i];
+        if (!TASK_IS_STOLEN(t)) break;
+        if (TASK_IS_COMPLETED(t)) {
+            if (j >= 40) {
+                while (j--) SYNC(mtbdd_gc_mark_rec);
+                j=0;
+            }
+            SPAWN(mtbdd_gc_mark_rec, *(BDD*)TASK_RESULT(t));
+            j++;
+        }
+    }
+    while (j--) SYNC(mtbdd_gc_mark_rec);
+}
+
+VOID_TASK_0(mtbdd_refs_mark)
+{
+    TOGETHER(mtbdd_refs_mark_task);
+}
+
+VOID_TASK_0(mtbdd_refs_init_task)
+{
+    mtbdd_refs_internal_t s = (mtbdd_refs_internal_t)malloc(sizeof(struct mtbdd_refs_internal));
+    s->r_size = 128;
+    s->r_count = 0;
+    s->s_size = 128;
+    s->s_count = 0;
+    s->results = (BDD*)malloc(sizeof(BDD) * 128);
+    s->spawns = (Task**)malloc(sizeof(Task*) * 128);
+    SET_THREAD_LOCAL(mtbdd_refs_key, s);
+}
+
+VOID_TASK_0(mtbdd_refs_init)
+{
+    INIT_THREAD_LOCAL(mtbdd_refs_key);
+    TOGETHER(mtbdd_refs_init_task);
+    sylvan_gc_add_mark(10, TASK(mtbdd_refs_mark));
+}
+
+/**
+ * Handling of custom leaves "registry"
+ */
+
+typedef struct
+{
+    mtbdd_hash_cb hash_cb;
+    mtbdd_equals_cb equals_cb;
+    mtbdd_create_cb create_cb;
+    mtbdd_destroy_cb destroy_cb;
+} customleaf_t;
+
+static customleaf_t *cl_registry;
+static size_t cl_registry_count;
+
+static void
+_mtbdd_create_cb(uint64_t *a, uint64_t *b)
+{
+    // for leaf
+    if ((*a & 0x4000000000000000) == 0) return; // huh?
+    uint32_t type = *a & 0xffffffff;
+    if (type >= cl_registry_count) return; // not in registry
+    customleaf_t *c = cl_registry + type;
+    if (c->create_cb == NULL) return; // not in registry
+    c->create_cb(b);
+}
+
+static void
+_mtbdd_destroy_cb(uint64_t a, uint64_t b)
+{
+    // for leaf
+    if ((a & 0x4000000000000000) == 0) return; // huh?
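    // (Bit 0x4000000000000000 of the first node word marks a leaf; for leaves the low
    // 32 bits hold the leaf type that selects the registered custom callbacks below.)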
+ uint32_t type = a & 0xffffffff; + if (type >= cl_registry_count) return; // not in registry + customleaf_t *c = cl_registry + type; + if (c->destroy_cb == NULL) return; // not in registry + c->destroy_cb(b); +} + +static uint64_t +_mtbdd_hash_cb(uint64_t a, uint64_t b, uint64_t seed) +{ + // for leaf + if ((a & 0x4000000000000000) == 0) return llmsset_hash(a, b, seed); + uint32_t type = a & 0xffffffff; + if (type >= cl_registry_count) return llmsset_hash(a, b, seed); + customleaf_t *c = cl_registry + type; + if (c->hash_cb == NULL) return llmsset_hash(a, b, seed); + return c->hash_cb(b, seed ^ a); +} + +static int +_mtbdd_equals_cb(uint64_t a, uint64_t b, uint64_t aa, uint64_t bb) +{ + // for leaf + if (a != aa) return 0; + if ((a & 0x4000000000000000) == 0) return b == bb ? 1 : 0; + if ((aa & 0x4000000000000000) == 0) return b == bb ? 1 : 0; + uint32_t type = a & 0xffffffff; + if (type >= cl_registry_count) return b == bb ? 1 : 0; + customleaf_t *c = cl_registry + type; + if (c->equals_cb == NULL) return b == bb ? 1 : 0; + return c->equals_cb(b, bb); +} + +uint32_t +mtbdd_register_custom_leaf(mtbdd_hash_cb hash_cb, mtbdd_equals_cb equals_cb, mtbdd_create_cb create_cb, mtbdd_destroy_cb destroy_cb) +{ + uint32_t type = cl_registry_count; + if (type == 0) type = 3; + if (cl_registry == NULL) { + cl_registry = (customleaf_t *)calloc(sizeof(customleaf_t), (type+1)); + cl_registry_count = type+1; + llmsset_set_custom(nodes, _mtbdd_hash_cb, _mtbdd_equals_cb, _mtbdd_create_cb, _mtbdd_destroy_cb); + } else if (cl_registry_count <= type) { + cl_registry = (customleaf_t *)realloc(cl_registry, sizeof(customleaf_t) * (type+1)); + memset(cl_registry + cl_registry_count, 0, sizeof(customleaf_t) * (type+1-cl_registry_count)); + cl_registry_count = type+1; + } + customleaf_t *c = cl_registry + type; + c->hash_cb = hash_cb; + c->equals_cb = equals_cb; + c->create_cb = create_cb; + c->destroy_cb = destroy_cb; + return type; +} + +/** + * Initialize and quit functions + */ + +static void +mtbdd_quit() +{ + refs_free(&mtbdd_refs); + if (mtbdd_protected_created) { + protect_free(&mtbdd_protected); + mtbdd_protected_created = 0; + } + if (cl_registry != NULL) { + free(cl_registry); + cl_registry = NULL; + cl_registry_count = 0; + } +} + +void +sylvan_init_mtbdd() +{ + sylvan_register_quit(mtbdd_quit); + sylvan_gc_add_mark(10, TASK(mtbdd_gc_mark_external_refs)); + sylvan_gc_add_mark(10, TASK(mtbdd_gc_mark_protected)); + + // Sanity check + if (sizeof(struct mtbddnode) != 16) { + fprintf(stderr, "Invalid size of mtbdd nodes: %ld\n", sizeof(struct mtbddnode)); + exit(1); + } + + refs_create(&mtbdd_refs, 1024); + if (!mtbdd_protected_created) { + protect_create(&mtbdd_protected, 4096); + mtbdd_protected_created = 1; + } + + LACE_ME; + CALL(mtbdd_refs_init); + + cl_registry = NULL; + cl_registry_count = 0; +} + +/** + * Primitives + */ +MTBDD +mtbdd_makeleaf(uint32_t type, uint64_t value) +{ + struct mtbddnode n; + mtbddnode_makeleaf(&n, type, value); + + int custom = type < cl_registry_count && cl_registry[type].hash_cb != NULL ? 1 : 0; + + int created; + uint64_t index = custom ? llmsset_lookupc(nodes, n.a, n.b, &created) : llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + LACE_ME; + + sylvan_gc(); + + index = custom ? 
llmsset_lookupc(nodes, n.a, n.b, &created) : llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + fprintf(stderr, "BDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); + exit(1); + } + } + + return (MTBDD)index; +} + +MTBDD +mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high) +{ + if (low == high) return low; + + // Normalization to keep canonicity + // low will have no mark + + struct mtbddnode n; + int mark, created; + + if (MTBDD_HASMARK(low)) { + mark = 1; + low = MTBDD_TOGGLEMARK(low); + high = MTBDD_TOGGLEMARK(high); + } else { + mark = 0; + } + + mtbddnode_makenode(&n, var, low, high); + + MTBDD result; + uint64_t index = llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + LACE_ME; + + mtbdd_refs_push(low); + mtbdd_refs_push(high); + sylvan_gc(); + mtbdd_refs_pop(2); + + index = llmsset_lookup(nodes, n.a, n.b, &created); + if (index == 0) { + fprintf(stderr, "BDD Unique table full, %zu of %zu buckets filled!\n", llmsset_count_marked(nodes), llmsset_get_size(nodes)); + exit(1); + } + } + + result = index; + return mark ? result | mtbdd_complement : result; +} + +/* Operations */ + +/** + * Calculate greatest common divisor + * Source: http://lemire.me/blog/archives/2013/12/26/fastest-way-to-compute-the-greatest-common-divisor/ + */ +uint32_t +gcd(uint32_t u, uint32_t v) +{ + int shift; + if (u == 0) return v; + if (v == 0) return u; + shift = __builtin_ctz(u | v); + u >>= __builtin_ctz(u); + do { + v >>= __builtin_ctz(v); + if (u > v) { + unsigned int t = v; + v = u; + u = t; + } + v = v - u; + } while (v != 0); + return u << shift; +} + +/** + * Create leaves of unsigned/signed integers and doubles + */ + +MTBDD +mtbdd_int64(int64_t value) +{ + return mtbdd_makeleaf(0, *(uint64_t*)&value); +} + +MTBDD +mtbdd_double(double value) +{ + return mtbdd_makeleaf(1, *(uint64_t*)&value); +} + +MTBDD +mtbdd_fraction(int64_t nom, uint64_t denom) +{ + if (nom == 0) return mtbdd_makeleaf(2, 1); + uint32_t c = gcd(nom < 0 ? -nom : nom, denom); + nom /= c; + denom /= c; + if (nom > 2147483647 || nom < -2147483647 || denom > 4294967295) fprintf(stderr, "mtbdd_fraction: fraction overflow\n"); + return mtbdd_makeleaf(2, (nom<<32)|denom); +} + +/** + * Create the cube of variables in arr. + */ +MTBDD +mtbdd_fromarray(uint32_t* arr, size_t length) +{ + if (length == 0) return mtbdd_true; + else if (length == 1) return mtbdd_makenode(*arr, mtbdd_false, mtbdd_true); + else return mtbdd_makenode(*arr, mtbdd_false, mtbdd_fromarray(arr+1, length-1)); +} + +/** + * Create a MTBDD cube representing the conjunction of variables in their positive or negative + * form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any). 
+ * Use cube[idx]==3 for "s=s'" in interleaved variables (matches with next variable) + * is the cube of variables + */ +MTBDD +mtbdd_cube(MTBDD variables, uint8_t *cube, MTBDD terminal) +{ + if (variables == mtbdd_true) return terminal; + mtbddnode_t n = GETNODE(variables); + + BDD result; + switch (*cube) { + case 0: + result = mtbdd_cube(node_gethigh(variables, n), cube+1, terminal); + result = mtbdd_makenode(mtbddnode_getvariable(n), result, mtbdd_false); + return result; + case 1: + result = mtbdd_cube(node_gethigh(variables, n), cube+1, terminal); + result = mtbdd_makenode(mtbddnode_getvariable(n), mtbdd_false, result); + return result; + case 2: + return mtbdd_cube(node_gethigh(variables, n), cube+1, terminal); + case 3: + { + MTBDD variables2 = node_gethigh(variables, n); + mtbddnode_t n2 = GETNODE(variables2); + uint32_t var2 = mtbddnode_getvariable(n2); + result = mtbdd_cube(node_gethigh(variables2, n2), cube+2, terminal); + BDD low = mtbdd_makenode(var2, result, mtbdd_false); + mtbdd_refs_push(low); + BDD high = mtbdd_makenode(var2, mtbdd_false, result); + mtbdd_refs_pop(1); + result = mtbdd_makenode(mtbddnode_getvariable(n), low, high); + return result; + } + default: + return mtbdd_false; // ? + } +} + +/** + * Same as mtbdd_cube, but also performs "or" with existing MTBDD, + * effectively adding an item to the set + */ +TASK_IMPL_4(MTBDD, mtbdd_union_cube, MTBDD, mtbdd, MTBDD, vars, uint8_t*, cube, MTBDD, terminal) +{ + /* Terminal cases */ + if (mtbdd == terminal) return terminal; + if (mtbdd == mtbdd_false) return mtbdd_cube(vars, cube, terminal); + if (vars == mtbdd_true) return terminal; + + sylvan_gc_test(); + + mtbddnode_t nv = GETNODE(vars); + uint32_t v = mtbddnode_getvariable(nv); + + mtbddnode_t na = GETNODE(mtbdd); + uint32_t va = mtbddnode_getvariable(na); + + if (va < v) { + MTBDD low = node_getlow(mtbdd, na); + MTBDD high = node_gethigh(mtbdd, na); + SPAWN(mtbdd_union_cube, high, vars, cube, terminal); + BDD new_low = mtbdd_union_cube(low, vars, cube, terminal); + mtbdd_refs_push(new_low); + BDD new_high = SYNC(mtbdd_union_cube); + mtbdd_refs_pop(1); + if (new_low != low || new_high != high) return mtbdd_makenode(va, new_low, new_high); + else return mtbdd; + } else if (va == v) { + MTBDD low = node_getlow(mtbdd, na); + MTBDD high = node_gethigh(mtbdd, na); + switch (*cube) { + case 0: + { + MTBDD new_low = mtbdd_union_cube(low, node_gethigh(vars, nv), cube+1, terminal); + if (new_low != low) return mtbdd_makenode(v, new_low, high); + else return mtbdd; + } + case 1: + { + MTBDD new_high = mtbdd_union_cube(high, node_gethigh(vars, nv), cube+1, terminal); + if (new_high != high) return mtbdd_makenode(v, low, new_high); + return mtbdd; + } + case 2: + { + SPAWN(mtbdd_union_cube, high, node_gethigh(vars, nv), cube+1, terminal); + MTBDD new_low = mtbdd_union_cube(low, node_gethigh(vars, nv), cube+1, terminal); + mtbdd_refs_push(new_low); + MTBDD new_high = SYNC(mtbdd_union_cube); + mtbdd_refs_pop(1); + if (new_low != low || new_high != high) return mtbdd_makenode(v, new_low, new_high); + return mtbdd; + } + case 3: + { + return mtbdd_false; // currently not implemented + } + default: + return mtbdd_false; + } + } else /* va > v */ { + switch (*cube) { + case 0: + { + MTBDD new_low = mtbdd_union_cube(mtbdd, node_gethigh(vars, nv), cube+1, terminal); + return mtbdd_makenode(v, new_low, mtbdd_false); + } + case 1: + { + MTBDD new_high = mtbdd_union_cube(mtbdd, node_gethigh(vars, nv), cube+1, terminal); + return mtbdd_makenode(v, mtbdd_false, new_high); + } + case 2: + 
{ + SPAWN(mtbdd_union_cube, mtbdd, node_gethigh(vars, nv), cube+1, terminal); + MTBDD new_low = mtbdd_union_cube(mtbdd, node_gethigh(vars, nv), cube+1, terminal); + mtbdd_refs_push(new_low); + MTBDD new_high = SYNC(mtbdd_union_cube); + mtbdd_refs_pop(1); + return mtbdd_makenode(v, new_low, new_high); + } + case 3: + { + return mtbdd_false; // currently not implemented + } + default: + return mtbdd_false; + } + } +} + +/** + * Apply a binary operation to and . + */ +TASK_IMPL_3(MTBDD, mtbdd_apply, MTBDD, a, MTBDD, b, mtbdd_apply_op, op) +{ + /* Check terminal case */ + MTBDD result = WRAP(op, &a, &b); + if (result != mtbdd_invalid) return result; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + if (cache_get3(CACHE_MTBDD_APPLY, a, b, (size_t)op, &result)) return result; + + /* Get top variable */ + int la = mtbdd_isleaf(a); + int lb = mtbdd_isleaf(b); + mtbddnode_t na, nb; + uint32_t va, vb; + if (!la) { + na = GETNODE(a); + va = mtbddnode_getvariable(na); + } else { + na = 0; + va = 0xffffffff; + } + if (!lb) { + nb = GETNODE(b); + vb = mtbddnode_getvariable(nb); + } else { + nb = 0; + vb = 0xffffffff; + } + uint32_t v = va < vb ? va : vb; + + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + if (!la && va == v) { + alow = node_getlow(a, na); + ahigh = node_gethigh(a, na); + } else { + alow = a; + ahigh = a; + } + if (!lb && vb == v) { + blow = node_getlow(b, nb); + bhigh = node_gethigh(b, nb); + } else { + blow = b; + bhigh = b; + } + + /* Recursive */ + mtbdd_refs_spawn(SPAWN(mtbdd_apply, ahigh, bhigh, op)); + MTBDD low = mtbdd_refs_push(CALL(mtbdd_apply, alow, blow, op)); + MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_apply)); + mtbdd_refs_pop(1); + result = mtbdd_makenode(v, low, high); + + /* Store in cache */ + cache_put3(CACHE_MTBDD_APPLY, a, b, (size_t)op, result); + return result; +} + +/** + * Apply a binary operation to and with parameter
p
    + */ +TASK_IMPL_5(MTBDD, mtbdd_applyp, MTBDD, a, MTBDD, b, size_t, p, mtbdd_applyp_op, op, uint64_t, opid) +{ + /* Check terminal case */ + MTBDD result = WRAP(op, &a, &b, p); + if (result != mtbdd_invalid) return result; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + if (cache_get3(opid, a, b, p, &result)) return result; + + /* Get top variable */ + int la = mtbdd_isleaf(a); + int lb = mtbdd_isleaf(b); + mtbddnode_t na, nb; + uint32_t va, vb; + if (!la) { + na = GETNODE(a); + va = mtbddnode_getvariable(na); + } else { + na = 0; + va = 0xffffffff; + } + if (!lb) { + nb = GETNODE(b); + vb = mtbddnode_getvariable(nb); + } else { + nb = 0; + vb = 0xffffffff; + } + uint32_t v = va < vb ? va : vb; + + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + if (!la && va == v) { + alow = node_getlow(a, na); + ahigh = node_gethigh(a, na); + } else { + alow = a; + ahigh = a; + } + if (!lb && vb == v) { + blow = node_getlow(b, nb); + bhigh = node_gethigh(b, nb); + } else { + blow = b; + bhigh = b; + } + + /* Recursive */ + mtbdd_refs_spawn(SPAWN(mtbdd_applyp, ahigh, bhigh, p, op, opid)); + MTBDD low = mtbdd_refs_push(CALL(mtbdd_applyp, alow, blow, p, op, opid)); + MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_applyp)); + mtbdd_refs_pop(1); + result = mtbdd_makenode(v, low, high); + + /* Store in cache */ + cache_put3(opid, a, b, p, result); + return result; +} + +/** + * Apply a unary operation to
dd
    . + */ +TASK_IMPL_3(MTBDD, mtbdd_uapply, MTBDD, dd, mtbdd_uapply_op, op, size_t, param) +{ + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_UAPPLY, dd, (size_t)op, param, &result)) return result; + + /* Check terminal case */ + result = WRAP(op, dd, param); + if (result != mtbdd_invalid) { + /* Store in cache */ + cache_put3(CACHE_MTBDD_UAPPLY, dd, (size_t)op, param, result); + return result; + } + + /* Get cofactors */ + mtbddnode_t ndd = GETNODE(dd); + MTBDD ddlow = node_getlow(dd, ndd); + MTBDD ddhigh = node_gethigh(dd, ndd); + + /* Recursive */ + mtbdd_refs_spawn(SPAWN(mtbdd_uapply, ddhigh, op, param)); + MTBDD low = mtbdd_refs_push(CALL(mtbdd_uapply, ddlow, op, param)); + MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_uapply)); + mtbdd_refs_pop(1); + result = mtbdd_makenode(mtbddnode_getvariable(ndd), low, high); + + /* Store in cache */ + cache_put3(CACHE_MTBDD_UAPPLY, dd, (size_t)op, param, result); + return result; +} + +TASK_2(MTBDD, mtbdd_uop_times_uint, MTBDD, a, size_t, k) +{ + if (a == mtbdd_false) return mtbdd_false; + if (a == mtbdd_true) return mtbdd_true; + + // a != constant + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + if (mtbddnode_gettype(na) == 0) { + int64_t v = mtbdd_getint64(a); + return mtbdd_int64(v*k); + } else if (mtbddnode_gettype(na) == 1) { + double d = mtbdd_getdouble(a); + return mtbdd_double(d*k); + } else if (mtbddnode_gettype(na) == 2) { + uint64_t v = mtbddnode_getvalue(na); + int64_t n = (int32_t)(v>>32); + uint32_t d = v; + uint32_t c = gcd(d, (uint32_t)k); + return mtbdd_fraction(n*(k/c), d/c); + } + } + + return mtbdd_invalid; +} + +TASK_2(MTBDD, mtbdd_uop_pow_uint, MTBDD, a, size_t, k) +{ + if (a == mtbdd_false) return mtbdd_false; + if (a == mtbdd_true) return mtbdd_true; + + // a != constant + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + if (mtbddnode_gettype(na) == 0) { + int64_t v = mtbdd_getint64(a); + return mtbdd_int64(pow(v, k)); + } else if (mtbddnode_gettype(na) == 1) { + double d = mtbdd_getdouble(a); + return mtbdd_double(pow(d, k)); + } else if (mtbddnode_gettype(na) == 2) { + uint64_t v = mtbddnode_getvalue(na); + return mtbdd_fraction(pow((int32_t)(v>>32), k), (uint32_t)v); + } + } + + return mtbdd_invalid; +} + +TASK_IMPL_3(MTBDD, mtbdd_abstract_op_plus, MTBDD, a, MTBDD, b, int, k) +{ + if (k==0) { + return mtbdd_apply(a, b, TASK(mtbdd_op_plus)); + } else { + uint64_t factor = 1ULL< from using the operation + */ +TASK_IMPL_3(MTBDD, mtbdd_abstract, MTBDD, a, MTBDD, v, mtbdd_abstract_op, op) +{ + /* Check terminal case */ + if (a == mtbdd_false) return mtbdd_false; + if (a == mtbdd_true) return mtbdd_true; + if (v == mtbdd_true) return a; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* a != constant, v != constant */ + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + /* Count number of variables */ + uint64_t k = 0; + while (v != mtbdd_true) { + k++; + v = node_gethigh(v, GETNODE(v)); + } + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_ABSTRACT, a, v | (k << 40), (size_t)op, &result)) return result; + + /* Compute result */ + result = WRAP(op, a, a, k); + + /* Store in cache */ + cache_put3(CACHE_MTBDD_ABSTRACT, a, v | (k << 40), (size_t)op, result); + return result; + } + + /* Possibly skip k variables */ + mtbddnode_t nv = GETNODE(v); + uint32_t var_a = mtbddnode_getvariable(na); + uint32_t var_v = mtbddnode_getvariable(nv); + uint64_t k = 0; + while (var_v < var_a) 
{ + k++; + v = node_gethigh(v, nv); + if (v == mtbdd_true) break; + nv = GETNODE(v); + var_v = mtbddnode_getvariable(nv); + } + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_ABSTRACT, a, v | (k << 40), (size_t)op, &result)) return result; + + /* Recursive */ + if (v == mtbdd_true) { + result = a; + } else if (var_a < var_v) { + mtbdd_refs_spawn(SPAWN(mtbdd_abstract, node_gethigh(a, na), v, op)); + MTBDD low = mtbdd_refs_push(CALL(mtbdd_abstract, node_getlow(a, na), v, op)); + MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_abstract)); + mtbdd_refs_pop(1); + result = mtbdd_makenode(var_a, low, high); + } else /* var_a == var_v */ { + mtbdd_refs_spawn(SPAWN(mtbdd_abstract, node_gethigh(a, na), node_gethigh(v, nv), op)); + MTBDD low = mtbdd_refs_push(CALL(mtbdd_abstract, node_getlow(a, na), node_gethigh(v, nv), op)); + MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(mtbdd_abstract))); + result = WRAP(op, low, high, 0); + mtbdd_refs_pop(2); + } + + if (k) { + mtbdd_refs_push(result); + result = WRAP(op, result, result, k); + mtbdd_refs_pop(1); + } + + /* Store in cache */ + cache_put3(CACHE_MTBDD_ABSTRACT, a, v | (k << 40), (size_t)op, result); + return result; +} + +/** + * Binary operation Plus (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDDs, mtbdd_false is interpreted as "0" or "0.0". + */ +TASK_IMPL_2(MTBDD, mtbdd_op_plus, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_false) return b; + if (b == mtbdd_false) return a; + + // Handle Boolean MTBDDs: interpret as Or + if (a == mtbdd_true) return mtbdd_true; + if (b == mtbdd_true) return mtbdd_true; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // both integer + return mtbdd_int64(*(int64_t*)(&val_a) + *(int64_t*)(&val_b)); + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + return mtbdd_double(*(double*)(&val_a) + *(double*)(&val_b)); + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + int64_t nom_a = (int32_t)(val_a>>32); + int64_t nom_b = (int32_t)(val_b>>32); + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + // common cases + if (nom_a == 0) return b; + if (nom_b == 0) return a; + // equalize denominators + uint32_t c = gcd(denom_a, denom_b); + nom_a *= denom_b/c; + nom_b *= denom_a/c; + denom_a *= denom_b/c; + // add + return mtbdd_fraction(nom_a + nom_b, denom_a); + } + } + + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +/** + * Binary operation Minus (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDDs, mtbdd_false is interpreted as "0" or "0.0". 
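// Worked example for the fraction case of mtbdd_op_plus above (mtbdd_op_minus
// below equalizes denominators the same way): adding 1/4 and 1/6 takes
// c = gcd(4,6) = 2, scales the numerators to 1*(6/2) = 3 and 1*(4/2) = 2 over
// the common denominator 4*(6/2) = 12, and builds mtbdd_fraction(5, 12), which
// the gcd reduction inside mtbdd_fraction leaves as 5/12.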
+ */ +TASK_IMPL_2(MTBDD, mtbdd_op_minus, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_false) return mtbdd_negate(b); + if (b == mtbdd_false) return a; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // both integer + return mtbdd_int64(*(int64_t*)(&val_a) - *(int64_t*)(&val_b)); + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + return mtbdd_double(*(double*)(&val_a) - *(double*)(&val_b)); + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + int64_t nom_a = (int32_t)(val_a>>32); + int64_t nom_b = (int32_t)(val_b>>32); + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + // common cases + if (nom_b == 0) return a; + // equalize denominators + uint32_t c = gcd(denom_a, denom_b); + nom_a *= denom_b/c; + nom_b *= denom_a/c; + denom_a *= denom_b/c; + // subtract + return mtbdd_fraction(nom_a - nom_b, denom_a); + } + } + + return mtbdd_invalid; +} + +/** + * Binary operation Times (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). + */ +TASK_IMPL_2(MTBDD, mtbdd_op_times, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false; + + // Handle Boolean MTBDDs: interpret as And + if (a == mtbdd_true) return b; + if (b == mtbdd_true) return a; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // both integer + return mtbdd_int64(*(int64_t*)(&val_a) * *(int64_t*)(&val_b)); + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + return mtbdd_double(*(double*)(&val_a) * *(double*)(&val_b)); + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + int64_t nom_a = (int32_t)(val_a>>32); + int64_t nom_b = (int32_t)(val_b>>32); + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + // multiply! + uint32_t c = gcd(nom_b < 0 ? -nom_b : nom_b, denom_a); + uint32_t d = gcd(nom_a < 0 ? -nom_a : nom_a, denom_b); + nom_a /= d; + denom_a /= c; + nom_a *= (nom_b/c); + denom_a *= (denom_b/d); + return mtbdd_fraction(nom_a, denom_a); + } + } + + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +/** + * Binary operation Minimum (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. 
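// Worked example for the fraction case of mtbdd_op_times above: multiplying
// 2/3 by 3/4 takes c = gcd(3,3) = 3 and d = gcd(2,4) = 2 to reduce crosswise
// first, so the result is built directly as mtbdd_fraction(1, 2) and the
// intermediate 6/12 is never formed.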
+ */ +TASK_IMPL_2(MTBDD, mtbdd_op_min, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_true) return b; + if (b == mtbdd_true) return a; + if (a == b) return a; + + // Special case where "false" indicates a partial function + if (a == mtbdd_false && b != mtbdd_false && mtbddnode_isleaf(GETNODE(b))) return b; + if (b == mtbdd_false && a != mtbdd_false && mtbddnode_isleaf(GETNODE(a))) return a; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // both integer + int64_t va = *(int64_t*)(&val_a); + int64_t vb = *(int64_t*)(&val_b); + return va < vb ? a : b; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double va = *(double*)&val_a; + double vb = *(double*)&val_b; + return va < vb ? a : b; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + int64_t nom_a = (int32_t)(val_a>>32); + int64_t nom_b = (int32_t)(val_b>>32); + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + // equalize denominators + uint32_t c = gcd(denom_a, denom_b); + nom_a *= denom_b/c; + nom_b *= denom_a/c; + // compute lowest + return nom_a < nom_b ? a : b; + } + } + + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +/** + * Binary operation Maximum (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. + */ +TASK_IMPL_2(MTBDD, mtbdd_op_max, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_true) return a; + if (b == mtbdd_true) return b; + if (a == mtbdd_false) return b; + if (b == mtbdd_false) return a; + if (a == b) return a; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // both integer + int64_t va = *(int64_t*)(&val_a); + int64_t vb = *(int64_t*)(&val_b); + return va > vb ? a : b; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double vval_a = *(double*)&val_a; + double vval_b = *(double*)&val_b; + return vval_a > vval_b ? a : b; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + int64_t nom_a = (int32_t)(val_a>>32); + int64_t nom_b = (int32_t)(val_b>>32); + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + // equalize denominators + uint32_t c = gcd(denom_a, denom_b); + nom_a *= denom_b/c; + nom_b *= denom_a/c; + // compute highest + return nom_a > nom_b ? a : b; + } + } + + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +TASK_IMPL_2(MTBDD, mtbdd_op_negate, MTBDD, a, size_t, k) +{ + // if a is false, then it is a partial function. Keep partial! 
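// These leaf operators are intended to be driven through mtbdd_apply, which
// calls the operator first and recurses on the cofactors whenever it returns
// mtbdd_invalid. A plausible call, assuming a Lace worker context:
//
//   MTBDD m = mtbdd_apply(a, b, TASK(mtbdd_op_min));
//
// Because of the mtbdd_false special cases above, min and max keep the defined
// operand wherever the other (partial) function is undefined.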
+ if (a == mtbdd_false) return mtbdd_false; + + // a != constant + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + if (mtbddnode_gettype(na) == 0) { + int64_t v = mtbdd_getint64(a); + return mtbdd_int64(-v); + } else if (mtbddnode_gettype(na) == 1) { + double d = mtbdd_getdouble(a); + return mtbdd_double(-d); + } else if (mtbddnode_gettype(na) == 2) { + uint64_t v = mtbddnode_getvalue(na); + return mtbdd_fraction(-(int32_t)(v>>32), (uint32_t)v); + } + } + + return mtbdd_invalid; + (void)k; // unused variable +} + +/** + * Compute IF THEN ELSE . + * must be a Boolean MTBDD (or standard BDD). + */ +TASK_IMPL_3(MTBDD, mtbdd_ite, MTBDD, f, MTBDD, g, MTBDD, h) +{ + /* Terminal cases */ + if (f == mtbdd_true) return g; + if (f == mtbdd_false) return h; + if (g == h) return g; + if (g == mtbdd_true && h == mtbdd_false) return f; + if (h == mtbdd_true && g == mtbdd_false) return MTBDD_TOGGLEMARK(f); + + // If all MTBDD's are Boolean, then there could be further optimizations (see sylvan_bdd.c) + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_ITE, f, g, h, &result)) return result; + + /* Get top variable */ + int lg = mtbdd_isleaf(g); + int lh = mtbdd_isleaf(h); + mtbddnode_t nf = GETNODE(f); + mtbddnode_t ng = lg ? 0 : GETNODE(g); + mtbddnode_t nh = lh ? 0 : GETNODE(h); + uint32_t vf = mtbddnode_getvariable(nf); + uint32_t vg = lg ? 0 : mtbddnode_getvariable(ng); + uint32_t vh = lh ? 0 : mtbddnode_getvariable(nh); + uint32_t v = vf; + if (!lg && vg < v) v = vg; + if (!lh && vh < v) v = vh; + + /* Get cofactors */ + MTBDD flow, fhigh, glow, ghigh, hlow, hhigh; + flow = (vf == v) ? node_getlow(f, nf) : f; + fhigh = (vf == v) ? node_gethigh(f, nf) : f; + glow = (!lg && vg == v) ? node_getlow(g, ng) : g; + ghigh = (!lg && vg == v) ? node_gethigh(g, ng) : g; + hlow = (!lh && vh == v) ? node_getlow(h, nh) : h; + hhigh = (!lh && vh == v) ? node_gethigh(h, nh) : h; + + /* Recursive calls */ + mtbdd_refs_spawn(SPAWN(mtbdd_ite, fhigh, ghigh, hhigh)); + MTBDD low = mtbdd_refs_push(CALL(mtbdd_ite, flow, glow, hlow)); + MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_ite)); + mtbdd_refs_pop(1); + result = mtbdd_makenode(v, low, high); + + /* Store in cache */ + cache_put3(CACHE_MTBDD_ITE, f, g, h, result); + return result; +} + +/** + * Monad that converts double/fraction to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; + */ +TASK_IMPL_2(MTBDD, mtbdd_op_threshold_double, MTBDD, a, size_t, svalue) +{ + /* We only expect "double" terminals, or false */ + if (a == mtbdd_false) return mtbdd_false; + if (a == mtbdd_true) return mtbdd_invalid; + + // a != constant + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + double value = *(double*)&svalue; + if (mtbddnode_gettype(na) == 1) { + return mtbdd_getdouble(a) >= value ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 2) { + double d = (double)mtbdd_getnumer(a); + d /= mtbdd_getdenom(a); + return d >= value ? 
mtbdd_true : mtbdd_false; + } + } + + return mtbdd_invalid; +} + +/** + * Monad that converts double/fraction to a Boolean BDD, translate terminals > value to 1 and to 0 otherwise; + */ +TASK_IMPL_2(MTBDD, mtbdd_op_strict_threshold_double, MTBDD, a, size_t, svalue) +{ + /* We only expect "double" terminals, or false */ + if (a == mtbdd_false) return mtbdd_false; + if (a == mtbdd_true) return mtbdd_invalid; + + // a != constant + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + double value = *(double*)&svalue; + if (mtbddnode_gettype(na) == 1) { + return mtbdd_getdouble(a) > value ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 2) { + double d = (double)mtbdd_getnumer(a); + d /= mtbdd_getdenom(a); + return d > value ? mtbdd_true : mtbdd_false; + } + } + + return mtbdd_invalid; +} + +TASK_IMPL_2(MTBDD, mtbdd_threshold_double, MTBDD, dd, double, d) +{ + return mtbdd_uapply(dd, TASK(mtbdd_op_threshold_double), *(size_t*)&d); +} + +TASK_IMPL_2(MTBDD, mtbdd_strict_threshold_double, MTBDD, dd, double, d) +{ + return mtbdd_uapply(dd, TASK(mtbdd_op_strict_threshold_double), *(size_t*)&d); +} + +/** + * Compare two Double MTBDDs, returns Boolean True if they are equal within some value epsilon + */ +TASK_4(MTBDD, mtbdd_equal_norm_d2, MTBDD, a, MTBDD, b, size_t, svalue, int*, shortcircuit) +{ + /* Check short circuit */ + if (*shortcircuit) return mtbdd_false; + + /* Check terminal case */ + if (a == b) return mtbdd_true; + if (a == mtbdd_false) return mtbdd_false; + if (b == mtbdd_false) return mtbdd_false; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + int la = mtbddnode_isleaf(na); + int lb = mtbddnode_isleaf(nb); + + if (la && lb) { + // assume Double MTBDD + double va = mtbdd_getdouble(a); + double vb = mtbdd_getdouble(b); + va -= vb; + if (va < 0) va = -va; + return (va < *(double*)&svalue) ? mtbdd_true : mtbdd_false; + } + + if (b < a) { + MTBDD t = a; + a = b; + b = t; + } + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_EQUAL_NORM, a, b, svalue, &result)) return result; + + /* Get top variable */ + uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); + uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); + uint32_t var = va < vb ? va : vb; + + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + alow = va == var ? node_getlow(a, na) : a; + ahigh = va == var ? node_gethigh(a, na) : a; + blow = vb == var ? node_getlow(b, nb) : b; + bhigh = vb == var ? node_gethigh(b, nb) : b; + + SPAWN(mtbdd_equal_norm_d2, ahigh, bhigh, svalue, shortcircuit); + result = CALL(mtbdd_equal_norm_d2, alow, blow, svalue, shortcircuit); + if (result == mtbdd_false) *shortcircuit = 1; + if (result != SYNC(mtbdd_equal_norm_d2)) result = mtbdd_false; + if (result == mtbdd_false) *shortcircuit = 1; + + /* Store in cache */ + cache_put3(CACHE_MTBDD_EQUAL_NORM, a, b, svalue, result); + return result; +} + +TASK_IMPL_3(MTBDD, mtbdd_equal_norm_d, MTBDD, a, MTBDD, b, double, d) +{ + /* the implementation checks shortcircuit in every task and if the two + MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ + int shortcircuit = 0; + return CALL(mtbdd_equal_norm_d2, a, b, *(size_t*)&d, &shortcircuit); +} + +/** + * Compare two Double MTBDDs, returns Boolean True if they are equal within some value epsilon + * This version computes the relative difference vs the value in a. 
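// The threshold and equal_norm operations above pass a double through the
// size_t operation parameter by reinterpreting its bits, e.g.
//
//   double eps = 1e-6;
//   size_t svalue = *(size_t*)&eps;   // same 64 bits, read back later as
//                                     // *(double*)&svalue
//
// which also makes the raw bit pattern of the double part of the cache key.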
+ */ +TASK_4(MTBDD, mtbdd_equal_norm_rel_d2, MTBDD, a, MTBDD, b, size_t, svalue, int*, shortcircuit) +{ + /* Check short circuit */ + if (*shortcircuit) return mtbdd_false; + + /* Check terminal case */ + if (a == b) return mtbdd_true; + if (a == mtbdd_false) return mtbdd_false; + if (b == mtbdd_false) return mtbdd_false; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + int la = mtbddnode_isleaf(na); + int lb = mtbddnode_isleaf(nb); + + if (la && lb) { + // assume Double MTBDD + double va = mtbdd_getdouble(a); + double vb = mtbdd_getdouble(b); + if (va == 0) return mtbdd_false; + va = (va - vb) / va; + if (va < 0) va = -va; + return (va < *(double*)&svalue) ? mtbdd_true : mtbdd_false; + } + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_EQUAL_NORM_REL, a, b, svalue, &result)) return result; + + /* Get top variable */ + uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); + uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); + uint32_t var = va < vb ? va : vb; + + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + alow = va == var ? node_getlow(a, na) : a; + ahigh = va == var ? node_gethigh(a, na) : a; + blow = vb == var ? node_getlow(b, nb) : b; + bhigh = vb == var ? node_gethigh(b, nb) : b; + + SPAWN(mtbdd_equal_norm_rel_d2, ahigh, bhigh, svalue, shortcircuit); + result = CALL(mtbdd_equal_norm_rel_d2, alow, blow, svalue, shortcircuit); + if (result == mtbdd_false) *shortcircuit = 1; + if (result != SYNC(mtbdd_equal_norm_rel_d2)) result = mtbdd_false; + if (result == mtbdd_false) *shortcircuit = 1; + + /* Store in cache */ + cache_put3(CACHE_MTBDD_EQUAL_NORM_REL, a, b, svalue, result); + return result; +} + +TASK_IMPL_3(MTBDD, mtbdd_equal_norm_rel_d, MTBDD, a, MTBDD, b, double, d) +{ + /* the implementation checks shortcircuit in every task and if the two + MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ + int shortcircuit = 0; + return CALL(mtbdd_equal_norm_rel_d2, a, b, *(size_t*)&d, &shortcircuit); +} + +/** + * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) <= b(s), mtbdd_false otherwise. + * For domains not in a / b, assume True. + */ +TASK_3(MTBDD, mtbdd_leq_rec, MTBDD, a, MTBDD, b, int*, shortcircuit) +{ + /* Check short circuit */ + if (*shortcircuit) return mtbdd_false; + + /* Check terminal case */ + if (a == b) return mtbdd_true; + + /* For partial functions, just return true */ + if (a == mtbdd_false) return mtbdd_true; + if (b == mtbdd_false) return mtbdd_true; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_LEQ, a, b, 0, &result)) return result; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + int la = mtbddnode_isleaf(na); + int lb = mtbddnode_isleaf(nb); + + if (la && lb) { + uint64_t va = mtbddnode_getvalue(na); + uint64_t vb = mtbddnode_getvalue(nb); + + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // type 0 = integer + result = *(int64_t*)(&va) <= *(int64_t*)(&vb) ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // type 1 = double + double vva = *(double*)&va; + double vvb = *(double*)&vb; + result = vva <= vvb ? 
mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // type 2 = fraction + int64_t nom_a = (int32_t)(va>>32); + int64_t nom_b = (int32_t)(vb>>32); + uint64_t da = va&0xffffffff; + uint64_t db = vb&0xffffffff; + // equalize denominators + uint32_t c = gcd(da, db); + nom_a *= db/c; + nom_b *= da/c; + result = nom_a <= nom_b ? mtbdd_true : mtbdd_false; + } + } else { + /* Get top variable */ + uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); + uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); + uint32_t var = va < vb ? va : vb; + + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + alow = va == var ? node_getlow(a, na) : a; + ahigh = va == var ? node_gethigh(a, na) : a; + blow = vb == var ? node_getlow(b, nb) : b; + bhigh = vb == var ? node_gethigh(b, nb) : b; + + SPAWN(mtbdd_leq_rec, ahigh, bhigh, shortcircuit); + result = CALL(mtbdd_leq_rec, alow, blow, shortcircuit); + if (result != SYNC(mtbdd_leq_rec)) result = mtbdd_false; + } + + if (result == mtbdd_false) *shortcircuit = 1; + + /* Store in cache */ + cache_put3(CACHE_MTBDD_LEQ, a, b, 0, result); + return result; +} + +TASK_IMPL_2(MTBDD, mtbdd_leq, MTBDD, a, MTBDD, b) +{ + /* the implementation checks shortcircuit in every task and if the two + MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ + int shortcircuit = 0; + return CALL(mtbdd_leq_rec, a, b, &shortcircuit); +} + +/** + * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) < b(s), mtbdd_false otherwise. + * For domains not in a / b, assume True. + */ +TASK_3(MTBDD, mtbdd_less_rec, MTBDD, a, MTBDD, b, int*, shortcircuit) +{ + /* Check short circuit */ + if (*shortcircuit) return mtbdd_false; + + /* Check terminal case */ + if (a == b) return mtbdd_false; + + /* For partial functions, just return true */ + if (a == mtbdd_false) return mtbdd_true; + if (b == mtbdd_false) return mtbdd_true; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_LESS, a, b, 0, &result)) return result; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + int la = mtbddnode_isleaf(na); + int lb = mtbddnode_isleaf(nb); + + if (la && lb) { + uint64_t va = mtbddnode_getvalue(na); + uint64_t vb = mtbddnode_getvalue(nb); + + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // type 0 = integer + result = *(int64_t*)(&va) < *(int64_t*)(&vb) ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // type 1 = double + double vva = *(double*)&va; + double vvb = *(double*)&vb; + result = vva < vvb ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // type 2 = fraction + int64_t nom_a = (int32_t)(va>>32); + int64_t nom_b = (int32_t)(vb>>32); + uint64_t da = va&0xffffffff; + uint64_t db = vb&0xffffffff; + // equalize denominators + uint32_t c = gcd(da, db); + nom_a *= db/c; + nom_b *= da/c; + result = nom_a < nom_b ? mtbdd_true : mtbdd_false; + } + } else { + /* Get top variable */ + uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); + uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); + uint32_t var = va < vb ? va : vb; + + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + alow = va == var ? node_getlow(a, na) : a; + ahigh = va == var ? node_gethigh(a, na) : a; + blow = vb == var ? node_getlow(b, nb) : b; + bhigh = vb == var ? 
node_gethigh(b, nb) : b; + + SPAWN(mtbdd_less_rec, ahigh, bhigh, shortcircuit); + result = CALL(mtbdd_less_rec, alow, blow, shortcircuit); + if (result != SYNC(mtbdd_less_rec)) result = mtbdd_false; + } + + if (result == mtbdd_false) *shortcircuit = 1; + + /* Store in cache */ + cache_put3(CACHE_MTBDD_LESS, a, b, 0, result); + return result; +} + +TASK_IMPL_2(MTBDD, mtbdd_less, MTBDD, a, MTBDD, b) +{ + /* the implementation checks shortcircuit in every task and if the two + MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ + int shortcircuit = 0; + return CALL(mtbdd_less_rec, a, b, &shortcircuit); +} + +/** + * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) >= b(s), mtbdd_false otherwise. + * For domains not in a / b, assume True. + */ +TASK_3(MTBDD, mtbdd_geq_rec, MTBDD, a, MTBDD, b, int*, shortcircuit) +{ + /* Check short circuit */ + if (*shortcircuit) return mtbdd_false; + + /* Check terminal case */ + if (a == b) return mtbdd_true; + + /* For partial functions, just return true */ + if (a == mtbdd_false) return mtbdd_true; + if (b == mtbdd_false) return mtbdd_true; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_GEQ, a, b, 0, &result)) return result; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + int la = mtbddnode_isleaf(na); + int lb = mtbddnode_isleaf(nb); + + if (la && lb) { + uint64_t va = mtbddnode_getvalue(na); + uint64_t vb = mtbddnode_getvalue(nb); + + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // type 0 = integer + result = *(int64_t*)(&va) >= *(int64_t*)(&vb) ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // type 1 = double + double vva = *(double*)&va; + double vvb = *(double*)&vb; + result = vva >= vvb ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // type 2 = fraction + int64_t nom_a = (int32_t)(va>>32); + int64_t nom_b = (int32_t)(vb>>32); + uint64_t da = va&0xffffffff; + uint64_t db = vb&0xffffffff; + // equalize denominators + uint32_t c = gcd(da, db); + nom_a *= db/c; + nom_b *= da/c; + result = nom_a >= nom_b ? mtbdd_true : mtbdd_false; + } + } else { + /* Get top variable */ + uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); + uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); + uint32_t var = va < vb ? va : vb; + + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + alow = va == var ? node_getlow(a, na) : a; + ahigh = va == var ? node_gethigh(a, na) : a; + blow = vb == var ? node_getlow(b, nb) : b; + bhigh = vb == var ? node_gethigh(b, nb) : b; + + SPAWN(mtbdd_geq_rec, ahigh, bhigh, shortcircuit); + result = CALL(mtbdd_geq_rec, alow, blow, shortcircuit); + if (result != SYNC(mtbdd_geq_rec)) result = mtbdd_false; + } + + if (result == mtbdd_false) *shortcircuit = 1; + + /* Store in cache */ + cache_put3(CACHE_MTBDD_GEQ, a, b, 0, result); + return result; +} + +TASK_IMPL_2(MTBDD, mtbdd_geq, MTBDD, a, MTBDD, b) +{ + /* the implementation checks shortcircuit in every task and if the two + MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ + int shortcircuit = 0; + return CALL(mtbdd_geq_rec, a, b, &shortcircuit); +} + +/** + * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) > b(s), mtbdd_false otherwise. + * For domains not in a / b, assume True. 
+ */ +TASK_3(MTBDD, mtbdd_greater_rec, MTBDD, a, MTBDD, b, int*, shortcircuit) +{ + /* Check short circuit */ + if (*shortcircuit) return mtbdd_false; + + /* Check terminal case */ + if (a == b) return mtbdd_false; + + /* For partial functions, just return true */ + if (a == mtbdd_false) return mtbdd_true; + if (b == mtbdd_false) return mtbdd_true; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_GREATER, a, b, 0, &result)) return result; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + int la = mtbddnode_isleaf(na); + int lb = mtbddnode_isleaf(nb); + + if (la && lb) { + uint64_t va = mtbddnode_getvalue(na); + uint64_t vb = mtbddnode_getvalue(nb); + + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + // type 0 = integer + result = *(int64_t*)(&va) > *(int64_t*)(&vb) ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // type 1 = double + double vva = *(double*)&va; + double vvb = *(double*)&vb; + result = vva > vvb ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // type 2 = fraction + int64_t nom_a = (int32_t)(va>>32); + int64_t nom_b = (int32_t)(vb>>32); + uint64_t da = va&0xffffffff; + uint64_t db = vb&0xffffffff; + // equalize denominators + uint32_t c = gcd(da, db); + nom_a *= db/c; + nom_b *= da/c; + result = nom_a > nom_b ? mtbdd_true : mtbdd_false; + } + } else { + /* Get top variable */ + uint32_t va = la ? 0xffffffff : mtbddnode_getvariable(na); + uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); + uint32_t var = va < vb ? va : vb; + + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + alow = va == var ? node_getlow(a, na) : a; + ahigh = va == var ? node_gethigh(a, na) : a; + blow = vb == var ? node_getlow(b, nb) : b; + bhigh = vb == var ? node_gethigh(b, nb) : b; + + SPAWN(mtbdd_greater_rec, ahigh, bhigh, shortcircuit); + result = CALL(mtbdd_greater_rec, alow, blow, shortcircuit); + if (result != SYNC(mtbdd_greater_rec)) result = mtbdd_false; + } + + if (result == mtbdd_false) *shortcircuit = 1; + + /* Store in cache */ + cache_put3(CACHE_MTBDD_GREATER, a, b, 0, result); + return result; +} + +TASK_IMPL_2(MTBDD, mtbdd_greater, MTBDD, a, MTBDD, b) +{ + /* the implementation checks shortcircuit in every task and if the two + MTBDDs are not equal module epsilon, then the computation tree quickly aborts */ + int shortcircuit = 0; + return CALL(mtbdd_greater_rec, a, b, &shortcircuit); +} + +/** + * Multiply and , and abstract variables using summation. + * This is similar to the "and_exists" operation in BDDs. + */ +TASK_IMPL_3(MTBDD, mtbdd_and_exists, MTBDD, a, MTBDD, b, MTBDD, v) +{ + /* Check terminal case */ + if (v == mtbdd_true) return mtbdd_apply(a, b, TASK(mtbdd_op_times)); + MTBDD result = CALL(mtbdd_op_times, &a, &b); + if (result != mtbdd_invalid) { + mtbdd_refs_push(result); + result = mtbdd_abstract(result, v, TASK(mtbdd_abstract_op_plus)); + mtbdd_refs_pop(1); + return result; + } + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + if (cache_get3(CACHE_MTBDD_AND_EXISTS, a, b, v, &result)) return result; + + /* Now, v is not a constant, and either a or b is not a constant */ + + /* Get top variable */ + int la = mtbdd_isleaf(a); + int lb = mtbdd_isleaf(b); + mtbddnode_t na = la ? 0 : GETNODE(a); + mtbddnode_t nb = lb ? 0 : GETNODE(b); + uint32_t va = la ? 
0xffffffff : mtbddnode_getvariable(na); + uint32_t vb = lb ? 0xffffffff : mtbddnode_getvariable(nb); + uint32_t var = va < vb ? va : vb; + + mtbddnode_t nv = GETNODE(v); + uint32_t vv = mtbddnode_getvariable(nv); + + if (vv < var) { + /* Recursive, then abstract result */ + result = CALL(mtbdd_and_exists, a, b, node_gethigh(v, nv)); + mtbdd_refs_push(result); + result = mtbdd_apply(result, result, TASK(mtbdd_op_plus)); + mtbdd_refs_pop(1); + } else { + /* Get cofactors */ + MTBDD alow, ahigh, blow, bhigh; + alow = (!la && va == var) ? node_getlow(a, na) : a; + ahigh = (!la && va == var) ? node_gethigh(a, na) : a; + blow = (!lb && vb == var) ? node_getlow(b, nb) : b; + bhigh = (!lb && vb == var) ? node_gethigh(b, nb) : b; + + if (vv == var) { + /* Recursive, then abstract result */ + mtbdd_refs_spawn(SPAWN(mtbdd_and_exists, ahigh, bhigh, node_gethigh(v, nv))); + MTBDD low = mtbdd_refs_push(CALL(mtbdd_and_exists, alow, blow, node_gethigh(v, nv))); + MTBDD high = mtbdd_refs_push(mtbdd_refs_sync(SYNC(mtbdd_and_exists))); + result = CALL(mtbdd_apply, low, high, TASK(mtbdd_op_plus)); + mtbdd_refs_pop(2); + } else /* vv > v */ { + /* Recursive, then create node */ + mtbdd_refs_spawn(SPAWN(mtbdd_and_exists, ahigh, bhigh, v)); + MTBDD low = mtbdd_refs_push(CALL(mtbdd_and_exists, alow, blow, v)); + MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_and_exists)); + mtbdd_refs_pop(1); + result = mtbdd_makenode(var, low, high); + } + } + + /* Store in cache */ + cache_put3(CACHE_MTBDD_AND_EXISTS, a, b, v, result); + return result; +} + +/** + * Calculate the support of a MTBDD, i.e. the cube of all variables that appear in the MTBDD nodes. + */ +TASK_IMPL_1(MTBDD, mtbdd_support, MTBDD, dd) +{ + /* Terminal case */ + if (mtbdd_isleaf(dd)) return mtbdd_true; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_SUPPORT, dd, 0, 0, &result)) return result; + + /* Recursive calls */ + mtbddnode_t n = GETNODE(dd); + mtbdd_refs_spawn(SPAWN(mtbdd_support, node_getlow(dd, n))); + MTBDD high = mtbdd_refs_push(CALL(mtbdd_support, node_gethigh(dd, n))); + MTBDD low = mtbdd_refs_push(mtbdd_refs_sync(SYNC(mtbdd_support))); + + /* Compute result */ + result = mtbdd_makenode(mtbddnode_getvariable(n), mtbdd_false, mtbdd_times(low, high)); + mtbdd_refs_pop(2); + + /* Write to cache */ + cache_put3(CACHE_MTBDD_SUPPORT, dd, 0, 0, result); + return result; +} + +/** + * Function composition, for each node with variable which has a pair in , + * replace the node by the result of mtbdd_ite(, , ). + * Each in must be a Boolean MTBDD. + */ +TASK_IMPL_2(MTBDD, mtbdd_compose, MTBDD, a, MTBDDMAP, map) +{ + /* Terminal case */ + if (mtbdd_isleaf(a) || mtbdd_map_isempty(map)) return a; + + /* Determine top level */ + mtbddnode_t n = GETNODE(a); + uint32_t v = mtbddnode_getvariable(n); + + /* Find in map */ + while (mtbdd_map_key(map) < v) { + map = mtbdd_map_next(map); + if (mtbdd_map_isempty(map)) return a; + } + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_COMPOSE, a, map, 0, &result)) return result; + + /* Recursive calls */ + mtbdd_refs_spawn(SPAWN(mtbdd_compose, node_getlow(a, n), map)); + MTBDD high = mtbdd_refs_push(CALL(mtbdd_compose, node_gethigh(a, n), map)); + MTBDD low = mtbdd_refs_push(mtbdd_refs_sync(SYNC(mtbdd_compose))); + + /* Calculate result */ + MTBDD r = mtbdd_map_key(map) == v ? 
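// mtbdd_and_exists (defined above) multiplies its arguments and sums the
// product over the variables in v in a single pass, the MTBDD counterpart of
// and_exists on BDDs. A typical use, with hypothetical names, is a symbolic
// matrix-vector product where v holds the variables being summed out:
//
//   MTBDD next = CALL(mtbdd_and_exists, matrix, vector, column_vars);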
mtbdd_map_value(map) : mtbdd_makenode(v, mtbdd_false, mtbdd_true); + mtbdd_refs_push(r); + result = CALL(mtbdd_ite, r, high, low); + mtbdd_refs_pop(3); + + /* Store in cache */ + cache_put3(CACHE_MTBDD_COMPOSE, a, map, 0, result); + return result; +} + +/** + * Compute minimum leaf in the MTBDD (for Integer, Double, Rational MTBDDs) + */ +TASK_IMPL_1(MTBDD, mtbdd_minimum, MTBDD, a) +{ + /* Check terminal case */ + if (a == mtbdd_false) return mtbdd_false; + mtbddnode_t na = GETNODE(a); + if (mtbddnode_isleaf(na)) return a; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_MINIMUM, a, 0, 0, &result)) return result; + + /* Call recursive */ + SPAWN(mtbdd_minimum, node_getlow(a, na)); + MTBDD high = CALL(mtbdd_minimum, node_gethigh(a, na)); + MTBDD low = SYNC(mtbdd_minimum); + + /* Determine lowest */ + mtbddnode_t nl = GETNODE(low); + mtbddnode_t nh = GETNODE(high); + + if (mtbddnode_gettype(nl) == 0 && mtbddnode_gettype(nh) == 0) { + result = mtbdd_getint64(low) < mtbdd_getint64(high) ? low : high; + } else if (mtbddnode_gettype(nl) == 1 && mtbddnode_gettype(nh) == 1) { + result = mtbdd_getdouble(low) < mtbdd_getdouble(high) ? low : high; + } else if (mtbddnode_gettype(nl) == 2 && mtbddnode_gettype(nh) == 2) { + // type 2 = fraction + int64_t nom_l = mtbdd_getnumer(low); + int64_t nom_h = mtbdd_getnumer(high); + uint64_t denom_l = mtbdd_getdenom(low); + uint64_t denom_h = mtbdd_getdenom(high); + // equalize denominators + uint32_t c = gcd(denom_l, denom_h); + nom_l *= denom_h/c; + nom_h *= denom_l/c; + result = nom_l < nom_h ? low : high; + } + + /* Store in cache */ + cache_put3(CACHE_MTBDD_MINIMUM, a, 0, 0, result); + return result; +} + +/** + * Compute maximum leaf in the MTBDD (for Integer, Double, Rational MTBDDs) + */ +TASK_IMPL_1(MTBDD, mtbdd_maximum, MTBDD, a) +{ + /* Check terminal case */ + if (a == mtbdd_false) return mtbdd_false; + mtbddnode_t na = GETNODE(a); + if (mtbddnode_isleaf(na)) return a; + + /* Maybe perform garbage collection */ + sylvan_gc_test(); + + /* Check cache */ + MTBDD result; + if (cache_get3(CACHE_MTBDD_MAXIMUM, a, 0, 0, &result)) return result; + + /* Call recursive */ + SPAWN(mtbdd_maximum, node_getlow(a, na)); + MTBDD high = CALL(mtbdd_maximum, node_gethigh(a, na)); + MTBDD low = SYNC(mtbdd_maximum); + + /* Determine highest */ + mtbddnode_t nl = GETNODE(low); + mtbddnode_t nh = GETNODE(high); + + if (mtbddnode_gettype(nl) == 0 && mtbddnode_gettype(nh) == 0) { + result = mtbdd_getint64(low) > mtbdd_getint64(high) ? low : high; + } else if (mtbddnode_gettype(nl) == 1 && mtbddnode_gettype(nh) == 1) { + result = mtbdd_getdouble(low) > mtbdd_getdouble(high) ? low : high; + } else if (mtbddnode_gettype(nl) == 2 && mtbddnode_gettype(nh) == 2) { + // type 2 = fraction + int64_t nom_l = mtbdd_getnumer(low); + int64_t nom_h = mtbdd_getnumer(high); + uint64_t denom_l = mtbdd_getdenom(low); + uint64_t denom_h = mtbdd_getdenom(high); + // equalize denominators + uint32_t c = gcd(denom_l, denom_h); + nom_l *= denom_h/c; + nom_h *= denom_l/c; + result = nom_l > nom_h ? low : high; + } + + /* Store in cache */ + cache_put3(CACHE_MTBDD_MAXIMUM, a, 0, 0, result); + return result; +} + +/** + * Calculate the number of satisfying variable assignments according to . 
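// Worked example: for nvars = 3 and the MTBDD of a single variable x0,
//
//   MTBDD dd = mtbdd_makenode(0, mtbdd_false, mtbdd_true);
//
// the count is 2^2 = 4, since every path to a non-false leaf is padded with
// the remaining unconstrained variables. The implementation below moves the
// double result in and out of the 64-bit operation cache through a
// double/uint64_t union.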
+ */ +TASK_IMPL_2(double, mtbdd_satcount, MTBDD, dd, size_t, nvars) +{ + /* Trivial cases */ + if (dd == mtbdd_false) return 0.0; + if (mtbdd_isleaf(dd)) return powl(2.0L, nvars); + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + union { + double d; + uint64_t s; + } hack; + + /* Consult cache */ + if (cache_get3(CACHE_BDD_SATCOUNT, dd, 0, nvars, &hack.s)) { + sylvan_stats_count(BDD_SATCOUNT_CACHED); + return hack.d; + } + + SPAWN(mtbdd_satcount, mtbdd_gethigh(dd), nvars-1); + double low = CALL(mtbdd_satcount, mtbdd_getlow(dd), nvars-1); + hack.d = low + SYNC(mtbdd_satcount); + + cache_put3(CACHE_BDD_SATCOUNT, dd, 0, nvars, hack.s); + return hack.d; +} + +MTBDD +mtbdd_enum_first(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb) +{ + if (dd == mtbdd_false) { + // the leaf dd is skipped + return mtbdd_false; + } else if (mtbdd_isleaf(dd)) { + // a leaf for which the filter returns 0 is skipped + if (filter_cb != NULL && filter_cb(dd) == 0) return mtbdd_false; + // ok, we have a leaf that is not skipped, go for it! + while (variables != mtbdd_true) { + *arr++ = 2; + variables = mtbdd_gethigh(variables); + } + return dd; + } else { + // if variables == true, then dd must be a leaf. But then this line is unreachable. + // if this assertion fails, then is not the support of
    . + assert(variables != mtbdd_true); + + // get next variable from + uint32_t v = mtbdd_getvar(variables); + variables = mtbdd_gethigh(variables); + + // check if MTBDD is on this variable + mtbddnode_t n = GETNODE(dd); + if (mtbddnode_getvariable(n) != v) { + *arr = 2; + return mtbdd_enum_first(dd, variables, arr+1, filter_cb); + } + + // first maybe follow low + MTBDD res = mtbdd_enum_first(node_getlow(dd, n), variables, arr+1, filter_cb); + if (res != mtbdd_false) { + *arr = 0; + return res; + } + + // if not low, try following high + res = mtbdd_enum_first(node_gethigh(dd, n), variables, arr+1, filter_cb); + if (res != mtbdd_false) { + *arr = 1; + return res; + } + + // we've tried low and high, return false + return mtbdd_false; + } +} + +MTBDD +mtbdd_enum_next(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb) +{ + if (mtbdd_isleaf(dd)) { + // we find the leaf in 'enum_next', then we've seen it before... + return mtbdd_false; + } else { + // if variables == true, then dd must be a leaf. But then this line is unreachable. + // if this assertion fails, then is not the support of
    . + assert(variables != mtbdd_true); + + variables = mtbdd_gethigh(variables); + + if (*arr == 0) { + // previous was low + mtbddnode_t n = GETNODE(dd); + MTBDD res = mtbdd_enum_next(node_getlow(dd, n), variables, arr+1, filter_cb); + if (res != mtbdd_false) { + return res; + } else { + // try to find new in high branch + res = mtbdd_enum_first(node_gethigh(dd, n), variables, arr+1, filter_cb); + if (res != mtbdd_false) { + *arr = 1; + return res; + } else { + return mtbdd_false; + } + } + } else if (*arr == 1) { + // previous was high + mtbddnode_t n = GETNODE(dd); + return mtbdd_enum_next(node_gethigh(dd, n), variables, arr+1, filter_cb); + } else { + // previous was either + return mtbdd_enum_next(dd, variables, arr+1, filter_cb); + } + } +} + +/** + * Helper function for recursive unmarking + */ +static void +mtbdd_unmark_rec(MTBDD mtbdd) +{ + mtbddnode_t n = GETNODE(mtbdd); + if (!mtbddnode_getmark(n)) return; + mtbddnode_setmark(n, 0); + if (mtbddnode_isleaf(n)) return; + mtbdd_unmark_rec(mtbddnode_getlow(n)); + mtbdd_unmark_rec(mtbddnode_gethigh(n)); +} + +/** + * Count number of leaves in MTBDD + */ + +static size_t +mtbdd_leafcount_mark(MTBDD mtbdd) +{ + if (mtbdd == mtbdd_true) return 0; // do not count true/false leaf + if (mtbdd == mtbdd_false) return 0; // do not count true/false leaf + mtbddnode_t n = GETNODE(mtbdd); + if (mtbddnode_getmark(n)) return 0; + mtbddnode_setmark(n, 1); + if (mtbddnode_isleaf(n)) return 1; // count leaf as 1 + return mtbdd_leafcount_mark(mtbddnode_getlow(n)) + mtbdd_leafcount_mark(mtbddnode_gethigh(n)); +} + +size_t +mtbdd_leafcount(MTBDD mtbdd) +{ + size_t result = mtbdd_leafcount_mark(mtbdd); + mtbdd_unmark_rec(mtbdd); + return result; +} + +/** + * Count number of nodes in MTBDD + */ + +static size_t +mtbdd_nodecount_mark(MTBDD mtbdd) +{ + if (mtbdd == mtbdd_true) return 0; // do not count true/false leaf + if (mtbdd == mtbdd_false) return 0; // do not count true/false leaf + mtbddnode_t n = GETNODE(mtbdd); + if (mtbddnode_getmark(n)) return 0; + mtbddnode_setmark(n, 1); + if (mtbddnode_isleaf(n)) return 1; // count leaf as 1 + return 1 + mtbdd_nodecount_mark(mtbddnode_getlow(n)) + mtbdd_nodecount_mark(mtbddnode_gethigh(n)); +} + +size_t +mtbdd_nodecount(MTBDD mtbdd) +{ + size_t result = mtbdd_nodecount_mark(mtbdd); + mtbdd_unmark_rec(mtbdd); + return result; +} + +TASK_2(int, mtbdd_test_isvalid_rec, MTBDD, dd, uint32_t, parent_var) +{ + // check if True/False leaf + if (dd == mtbdd_true || dd == mtbdd_false) return 1; + + // check if index is in array + uint64_t index = dd & (~mtbdd_complement); + assert(index > 1 && index < nodes->table_size); + if (index <= 1 || index >= nodes->table_size) return 0; + + // check if marked + int marked = llmsset_is_marked(nodes, index); + assert(marked); + if (marked == 0) return 0; + + // check if leaf + mtbddnode_t n = GETNODE(dd); + if (mtbddnode_isleaf(n)) return 1; // we're fine + + // check variable order + uint32_t var = mtbddnode_getvariable(n); + assert(var > parent_var); + if (var <= parent_var) return 0; + + // check cache + uint64_t result; + if (cache_get3(CACHE_BDD_ISBDD, dd, 0, 0, &result)) { + return result; + } + + // check recursively + SPAWN(mtbdd_test_isvalid_rec, node_getlow(dd, n), var); + result = (uint64_t)CALL(mtbdd_test_isvalid_rec, node_gethigh(dd, n), var); + if (!SYNC(mtbdd_test_isvalid_rec)) result = 0; + + // put in cache and return result + cache_put3(CACHE_BDD_ISBDD, dd, 0, 0, result); + return result; +} + +TASK_IMPL_1(int, mtbdd_test_isvalid, MTBDD, dd) +{ + // check if 
True/False leaf + if (dd == mtbdd_true || dd == mtbdd_false) return 1; + + // check if index is in array + uint64_t index = dd & (~mtbdd_complement); + assert(index > 1 && index < nodes->table_size); + if (index <= 1 || index >= nodes->table_size) return 0; + + // check if marked + int marked = llmsset_is_marked(nodes, index); + assert(marked); + if (marked == 0) return 0; + + // check if leaf + mtbddnode_t n = GETNODE(dd); + if (mtbddnode_isleaf(n)) return 1; // we're fine + + // check recursively + uint32_t var = mtbddnode_getvariable(n); + SPAWN(mtbdd_test_isvalid_rec, node_getlow(dd, n), var); + int result = CALL(mtbdd_test_isvalid_rec, node_gethigh(dd, n), var); + if (!SYNC(mtbdd_test_isvalid_rec)) result = 0; + return result; +} + +/** + * Export to .dot file + */ + +static void +mtbdd_fprintdot_rec(FILE *out, MTBDD mtbdd, print_terminal_label_cb cb) +{ + mtbddnode_t n = GETNODE(mtbdd); // also works for mtbdd_false + if (mtbddnode_getmark(n)) return; + mtbddnode_setmark(n, 1); + + if (mtbdd == mtbdd_true || mtbdd == mtbdd_false) { + fprintf(out, "0 [shape=box, style=filled, label=\"F\"];\n"); + } else if (mtbddnode_isleaf(n)) { + uint32_t type = mtbddnode_gettype(n); + uint64_t value = mtbddnode_getvalue(n); + fprintf(out, "%" PRIu64 " [shape=box, style=filled, label=\"", MTBDD_STRIPMARK(mtbdd)); + switch (type) { + case 0: + fprintf(out, "%" PRIu64, value); + break; + case 1: + fprintf(out, "%f", *(double*)&value); + break; + case 2: + fprintf(out, "%u/%u", (uint32_t)(value>>32), (uint32_t)value); + break; + default: + cb(out, type, value); + break; + } + fprintf(out, "\"];\n"); + } else { + fprintf(out, "%" PRIu64 " [label=\"%" PRIu32 "\"];\n", + MTBDD_STRIPMARK(mtbdd), mtbddnode_getvariable(n)); + + mtbdd_fprintdot_rec(out, mtbddnode_getlow(n), cb); + mtbdd_fprintdot_rec(out, mtbddnode_gethigh(n), cb); + + fprintf(out, "%" PRIu64 " -> %" PRIu64 " [style=dashed];\n", + MTBDD_STRIPMARK(mtbdd), mtbddnode_getlow(n)); + fprintf(out, "%" PRIu64 " -> %" PRIu64 " [style=solid dir=both arrowtail=%s];\n", + MTBDD_STRIPMARK(mtbdd), MTBDD_STRIPMARK(mtbddnode_gethigh(n)), + mtbddnode_getcomp(n) ? "dot" : "none"); + } +} + +void +mtbdd_fprintdot(FILE *out, MTBDD mtbdd, print_terminal_label_cb cb) +{ + fprintf(out, "digraph \"DD\" {\n"); + fprintf(out, "graph [dpi = 300];\n"); + fprintf(out, "center = true;\n"); + fprintf(out, "edge [dir = forward];\n"); + fprintf(out, "root [style=invis];\n"); + fprintf(out, "root -> %" PRIu64 " [style=solid dir=both arrowtail=%s];\n", + MTBDD_STRIPMARK(mtbdd), MTBDD_HASMARK(mtbdd) ? "dot" : "none"); + + mtbdd_fprintdot_rec(out, mtbdd, cb); + mtbdd_unmark_rec(mtbdd); + + fprintf(out, "}\n"); +} + +/** + * Return 1 if the map contains the key, 0 otherwise. + */ +int +mtbdd_map_contains(MTBDDMAP map, uint32_t key) +{ + while (!mtbdd_map_isempty(map)) { + mtbddnode_t n = GETNODE(map); + uint32_t k = mtbddnode_getvariable(n); + if (k == key) return 1; + if (k > key) return 0; + map = node_getlow(map, n); + } + + return 0; +} + +/** + * Retrieve the number of keys in the map. + */ +size_t +mtbdd_map_count(MTBDDMAP map) +{ + size_t r = 0; + + while (!mtbdd_map_isempty(map)) { + r++; + map = mtbdd_map_next(map); + } + + return r; +} + +/** + * Add the pair to the map, overwrites if key already in map. 
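// An MTBDDMAP is itself stored in MTBDD nodes: the node variable is the key,
// the high edge holds the mapped value and the low edge holds the rest of the
// map, which is what the node_getlow/node_gethigh calls below rely on.
// Usage sketch:
//
//   MTBDDMAP map = mtbdd_map_add(mtbdd_map_empty(), 2, mtbdd_double(0.5));
//   int      hit = mtbdd_map_contains(map, 2);   // 1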
+ */ +MTBDDMAP +mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value) +{ + if (mtbdd_map_isempty(map)) return mtbdd_makenode(key, mtbdd_map_empty(), value); + + mtbddnode_t n = GETNODE(map); + uint32_t k = mtbddnode_getvariable(n); + + if (k < key) { + // add recursively and rebuild tree + MTBDDMAP low = mtbdd_map_add(node_getlow(map, n), key, value); + return mtbdd_makenode(k, low, node_gethigh(map, n)); + } else if (k > key) { + return mtbdd_makenode(key, map, value); + } else { + // replace old + return mtbdd_makenode(key, node_getlow(map, n), value); + } +} + +/** + * Add all values from map2 to map1, overwrites if key already in map1. + */ +MTBDDMAP +mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2) +{ + if (mtbdd_map_isempty(map1)) return map2; + if (mtbdd_map_isempty(map2)) return map1; + + mtbddnode_t n1 = GETNODE(map1); + mtbddnode_t n2 = GETNODE(map2); + uint32_t k1 = mtbddnode_getvariable(n1); + uint32_t k2 = mtbddnode_getvariable(n2); + + MTBDDMAP result; + if (k1 < k2) { + MTBDDMAP low = mtbdd_map_addall(node_getlow(map1, n1), map2); + result = mtbdd_makenode(k1, low, node_gethigh(map1, n1)); + } else if (k1 > k2) { + MTBDDMAP low = mtbdd_map_addall(map1, node_getlow(map2, n2)); + result = mtbdd_makenode(k2, low, node_gethigh(map2, n2)); + } else { + MTBDDMAP low = mtbdd_map_addall(node_getlow(map1, n1), node_getlow(map2, n2)); + result = mtbdd_makenode(k2, low, node_gethigh(map2, n2)); + } + + return result; +} + +/** + * Remove the key from the map and return the result + */ +MTBDDMAP +mtbdd_map_remove(MTBDDMAP map, uint32_t key) +{ + if (mtbdd_map_isempty(map)) return map; + + mtbddnode_t n = GETNODE(map); + uint32_t k = mtbddnode_getvariable(n); + + if (k < key) { + MTBDDMAP low = mtbdd_map_remove(node_getlow(map, n), key); + return mtbdd_makenode(k, low, node_gethigh(map, n)); + } else if (k > key) { + return map; + } else { + return node_getlow(map, n); + } +} + +/** + * Remove all keys in the cube from the map and return the result + */ +MTBDDMAP +mtbdd_map_removeall(MTBDDMAP map, MTBDD variables) +{ + if (mtbdd_map_isempty(map)) return map; + if (variables == mtbdd_true) return map; + + mtbddnode_t n1 = GETNODE(map); + mtbddnode_t n2 = GETNODE(variables); + uint32_t k1 = mtbddnode_getvariable(n1); + uint32_t k2 = mtbddnode_getvariable(n2); + + if (k1 < k2) { + MTBDDMAP low = mtbdd_map_removeall(node_getlow(map, n1), variables); + return mtbdd_makenode(k1, low, node_gethigh(map, n1)); + } else if (k1 > k2) { + return mtbdd_map_removeall(map, node_gethigh(variables, n2)); + } else { + return mtbdd_map_removeall(node_getlow(map, n1), node_gethigh(variables, n2)); + } +} + +#include "sylvan_mtbdd_storm.c" diff --git a/src/sylvan_mtbdd.h b/src/sylvan_mtbdd.h new file mode 100644 index 000000000..1a7de3f57 --- /dev/null +++ b/src/sylvan_mtbdd.h @@ -0,0 +1,608 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This is an implementation of Multi-Terminal Binary Decision Diagrams. 
+ * They encode functions from Boolean variables to any domain. + * + * Three domains are supported by default: Boolean, Integer and Real. + * Boolean MTBDDs are identical to BDDs (as supported by the bdd subpackage). + * Integer MTBDDs are encoded using "int64_t" terminals. + * Real MTBDDs are encoded using "double" terminals. + * + * Labels of Boolean variables of MTBDD nodes are 24-bit integers. + * + * Custom terminals are supported. + * + * Terminal type "0" is the Integer type, type "1" is the Real type. + * Type "2" is the Fraction type, consisting of two 32-bit integers (numerator and denominator). + * For non-Boolean MTBDDs, mtbdd_false is used for partial functions, i.e. mtbdd_false + * indicates that the function is not defined for a certain input. + */ + +/* Do not include this file directly. Instead, include sylvan.h */ + +#ifndef SYLVAN_MTBDD_H +#define SYLVAN_MTBDD_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * An MTBDD is a 64-bit value. The low 40 bits are an index into the unique table. + * The highest 1 bit is the complement edge, indicating negation. + * For Boolean MTBDDs, this means "not X", for Integer and Real MTBDDs, this means "-X". + */ +typedef uint64_t MTBDD; +typedef MTBDD MTBDDMAP; + +/** + * mtbdd_true is only used in Boolean MTBDDs. mtbdd_false has multiple roles (see above). + */ +#define mtbdd_complement ((MTBDD)0x8000000000000000LL) +#define mtbdd_false ((MTBDD)0) +#define mtbdd_true (mtbdd_false|mtbdd_complement) +#define mtbdd_invalid ((MTBDD)0xffffffffffffffffLL) + +/** + * Initialize MTBDD functionality. + * This initializes internal and external referencing datastructures, + * and registers them in the garbage collection framework. + */ +void sylvan_init_mtbdd(); + +/** + * Create a MTBDD terminal of type <type> and value <value>. + * For custom types, the value could be a pointer to some external struct. + */ +MTBDD mtbdd_makeleaf(uint32_t type, uint64_t value); + +/** + * Create an internal MTBDD node of Boolean variable <var>, with low edge <low> and high edge <high>. + * <var> is a 24-bit integer. + */ +MTBDD mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high); + +/** + * Returns 1 if the MTBDD is a terminal, or 0 otherwise. + */ +int mtbdd_isleaf(MTBDD mtbdd); +#define mtbdd_isnode(mtbdd) (mtbdd_isleaf(mtbdd) ? 0 : 1) + +/** + * For MTBDD terminals, returns <type> and <value> + */ +uint32_t mtbdd_gettype(MTBDD terminal); +uint64_t mtbdd_getvalue(MTBDD terminal); + +/** + * For internal MTBDD nodes, returns <var>, <low> and <high> + */ +uint32_t mtbdd_getvar(MTBDD node); +MTBDD mtbdd_getlow(MTBDD node); +MTBDD mtbdd_gethigh(MTBDD node); + +/** + * Compute the complement of the MTBDD. + * For Boolean MTBDDs, this means "not X". + */ +#define mtbdd_hascomp(dd) ((dd & mtbdd_complement) ? 1 : 0) +#define mtbdd_comp(dd) (dd ^ mtbdd_complement) +#define mtbdd_not(dd) (dd ^ mtbdd_complement) + +/** + * Create terminals representing int64_t (type 0), double (type 1), or fraction (type 2) values + */ +MTBDD mtbdd_int64(int64_t value); +MTBDD mtbdd_double(double value); +MTBDD mtbdd_fraction(int64_t numer, uint64_t denom); + +/** + * Get the value of a terminal (for Integer, Real and Fraction terminals, types 0, 1 and 2) + */ +int64_t mtbdd_getint64(MTBDD terminal); +double mtbdd_getdouble(MTBDD terminal); +#define mtbdd_getnumer(terminal) ((int32_t)(mtbdd_getvalue(terminal)>>32)) +#define mtbdd_getdenom(terminal) ((uint32_t)(mtbdd_getvalue(terminal)&0xffffffff)) + +/** + * Create the conjunction of variables in arr. + * I.e. arr[0] \and arr[1] \and ... 
\and arr[length-1] + */ +MTBDD mtbdd_fromarray(uint32_t* arr, size_t length); + +/** + * Create a MTBDD cube representing the conjunction of variables in their positive or negative + * form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any). + * Use cube[idx]==3 for "s=s'" in interleaved variables (matches with next variable) + * is the cube of variables (var1 \and var2 \and ... \and varn) + */ +MTBDD mtbdd_cube(MTBDD variables, uint8_t *cube, MTBDD terminal); + +/** + * Same as mtbdd_cube, but extends with the assignment \to . + * If already assigns a value to the cube, the new value is taken. + * Does not support cube[idx]==3. + */ +#define mtbdd_union_cube(mtbdd, variables, cube, terminal) CALL(mtbdd_union_cube, mtbdd, variables, cube, terminal) +TASK_DECL_4(BDD, mtbdd_union_cube, MTBDD, MTBDD, uint8_t*, MTBDD); + +/** + * Count the number of satisfying assignments (minterms) leading to a non-false leaf + */ +TASK_DECL_2(double, mtbdd_satcount, MTBDD, size_t); +#define mtbdd_satcount(dd, nvars) CALL(mtbdd_satcount, dd, nvars) + +/** + * Count the number of MTBDD leaves (excluding mtbdd_false and mtbdd_true) in the MTBDD + */ +size_t mtbdd_leafcount(MTBDD mtbdd); + +/** + * Count the number of MTBDD nodes and terminals (excluding mtbdd_false and mtbdd_true) in a MTBDD + */ +size_t mtbdd_nodecount(MTBDD mtbdd); + +/** + * Callback function types for binary ("dyadic") and unary ("monadic") operations. + * The callback function returns either the MTBDD that is the result of applying op to the MTBDDs, + * or mtbdd_invalid if op cannot be applied. + * The binary function may swap the two parameters (if commutative) to improve caching. + * The unary function is allowed an extra parameter (be careful of caching) + */ +LACE_TYPEDEF_CB(MTBDD, mtbdd_apply_op, MTBDD*, MTBDD*); +LACE_TYPEDEF_CB(MTBDD, mtbdd_applyp_op, MTBDD*, MTBDD*, size_t); +LACE_TYPEDEF_CB(MTBDD, mtbdd_uapply_op, MTBDD, size_t); + +/** + * Apply a binary operation to and . + * Callback is consulted before the cache, thus the application to terminals is not cached. + */ +TASK_DECL_3(MTBDD, mtbdd_apply, MTBDD, MTBDD, mtbdd_apply_op); +#define mtbdd_apply(a, b, op) CALL(mtbdd_apply, a, b, op) + +/** + * Apply a binary operation with id to and with parameter

<p>. + * Callback is consulted before the cache, thus the application to terminals is not cached. + */ +TASK_DECL_5(MTBDD, mtbdd_applyp, MTBDD, MTBDD, size_t, mtbdd_applyp_op, uint64_t); +#define mtbdd_applyp(a, b, p, op, opid) CALL(mtbdd_applyp, a, b, p, op, opid) + +/** + * Apply a unary operation <op> to <dd>

    . + * Callback is consulted after the cache, thus the application to a terminal is cached. + */ +TASK_DECL_3(MTBDD, mtbdd_uapply, MTBDD, mtbdd_uapply_op, size_t); +#define mtbdd_uapply(dd, op, param) CALL(mtbdd_uapply, dd, op, param) + +/** + * Callback function types for abstraction. + * MTBDD mtbdd_abstract_op(MTBDD a, MTBDD b, int k). + * The function is either called with k==0 (apply to two arguments) or k>0 (k skipped BDD variables) + * k == 0 => res := apply op to a and b + * k > 0 => res := apply op to op(a, a, k-1) and op(a, a, k-1) + */ +LACE_TYPEDEF_CB(MTBDD, mtbdd_abstract_op, MTBDD, MTBDD, int); + +/** + * Abstract the variables in from using the binary operation . + */ +TASK_DECL_3(MTBDD, mtbdd_abstract, MTBDD, MTBDD, mtbdd_abstract_op); +#define mtbdd_abstract(a, v, op) CALL(mtbdd_abstract, a, v, op) + +/** + * Unary operation Negate. + * Supported domains: Integer, Real, Fraction + */ +TASK_DECL_2(MTBDD, mtbdd_op_negate, MTBDD, size_t); + +/** + * Binary operation Plus (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDDs, mtbdd_false is interpreted as "0" or "0.0". + */ +TASK_DECL_2(MTBDD, mtbdd_op_plus, MTBDD*, MTBDD*); +TASK_DECL_3(MTBDD, mtbdd_abstract_op_plus, MTBDD, MTBDD, int); + +/** + * Binary operation Minus (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDDs, mtbdd_false is interpreted as "0" or "0.0". + */ +TASK_DECL_2(MTBDD, mtbdd_op_minus, MTBDD*, MTBDD*); + +/** + * Binary operation Times (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). + */ +TASK_DECL_2(MTBDD, mtbdd_op_times, MTBDD*, MTBDD*); +TASK_DECL_3(MTBDD, mtbdd_abstract_op_times, MTBDD, MTBDD, int); + +/** + * Binary operation Minimum (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. + */ +TASK_DECL_2(MTBDD, mtbdd_op_min, MTBDD*, MTBDD*); +TASK_DECL_3(MTBDD, mtbdd_abstract_op_min, MTBDD, MTBDD, int); + +/** + * Binary operation Maximum (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. 
+ */ +TASK_DECL_2(MTBDD, mtbdd_op_max, MTBDD*, MTBDD*); +TASK_DECL_3(MTBDD, mtbdd_abstract_op_max, MTBDD, MTBDD, int); + +/** + * Compute -a + */ +#define mtbdd_negate(a) mtbdd_uapply(a, TASK(mtbdd_op_negate), 0) + +/** + * Compute a + b + */ +#define mtbdd_plus(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_plus)) + +/** + * Compute a - b + */ +#define mtbdd_minus(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_minus)) + +/** + * Compute a * b + */ +#define mtbdd_times(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_times)) + +/** + * Compute min(a, b) + */ +#define mtbdd_min(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_min)) + +/** + * Compute max(a, b) + */ +#define mtbdd_max(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_max)) + +/** + * Abstract the variables in <v> from <dd> by taking the sum of all values + */ +#define mtbdd_abstract_plus(dd, v) mtbdd_abstract(dd, v, TASK(mtbdd_abstract_op_plus)) + +/** + * Abstract the variables in <v> from <dd> by taking the product of all values + */ +#define mtbdd_abstract_times(dd, v) mtbdd_abstract(dd, v, TASK(mtbdd_abstract_op_times)) + +/** + * Abstract the variables in <v> from <dd> by taking the minimum of all values + */ +#define mtbdd_abstract_min(dd, v) mtbdd_abstract(dd, v, TASK(mtbdd_abstract_op_min)) + +/** + * Abstract the variables in <v> from <dd> by taking the maximum of all values + */ +#define mtbdd_abstract_max(dd, v) mtbdd_abstract(dd, v, TASK(mtbdd_abstract_op_max)) + +/** + * Compute IF <f> THEN <g> ELSE <h>. + * <f> must be a Boolean MTBDD (or standard BDD). + */ +TASK_DECL_3(MTBDD, mtbdd_ite, MTBDD, MTBDD, MTBDD); +#define mtbdd_ite(f, g, h) CALL(mtbdd_ite, f, g, h); + +/** + * Multiply <a> and <b>, and abstract variables <vars> using summation. + * This is similar to the "and_exists" operation in BDDs. + */ +TASK_DECL_3(MTBDD, mtbdd_and_exists, MTBDD, MTBDD, MTBDD); +#define mtbdd_and_exists(a, b, vars) CALL(mtbdd_and_exists, a, b, vars) + +/** + * Monad that converts double to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, mtbdd_op_threshold_double, MTBDD, size_t) + +/** + * Monad that converts double to a Boolean MTBDD, translate terminals > value to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, mtbdd_op_strict_threshold_double, MTBDD, size_t) + +/** + * Convert double to a Boolean MTBDD, translate terminals >= value to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, mtbdd_threshold_double, MTBDD, double); +#define mtbdd_threshold_double(dd, value) CALL(mtbdd_threshold_double, dd, value) + +/** + * Convert double to a Boolean MTBDD, translate terminals > value to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, mtbdd_strict_threshold_double, MTBDD, double); +#define mtbdd_strict_threshold_double(dd, value) CALL(mtbdd_strict_threshold_double, dd, value) + +/** + * For two Double MTBDDs, calculate whether they are equal modulo some value epsilon + * i.e. abs(a-b) < e + */ +TASK_DECL_3(MTBDD, mtbdd_equal_norm_d, MTBDD, MTBDD, double); +#define mtbdd_equal_norm_d(a, b, epsilon) CALL(mtbdd_equal_norm_d, a, b, epsilon) + +/** + * For two Double MTBDDs, calculate whether they are equal modulo some value epsilon + * This version computes the relative difference vs the value in a. + * i.e. abs((a-b)/a) < e + */ +TASK_DECL_3(MTBDD, mtbdd_equal_norm_rel_d, MTBDD, MTBDD, double); +#define mtbdd_equal_norm_rel_d(a, b, epsilon) CALL(mtbdd_equal_norm_rel_d, a, b, epsilon) + +/** + * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) <= b(s), mtbdd_false otherwise. + * For domains not in a / b, assume True. 
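 + * Example (sketch): if a maps every assignment to 0.3 and b maps every assignment to 0.5, then mtbdd_leq(a, b) == mtbdd_true.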
+ */ +TASK_DECL_2(MTBDD, mtbdd_leq, MTBDD, MTBDD); +#define mtbdd_leq(a, b) CALL(mtbdd_leq, a, b) + +/** + * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) < b(s), mtbdd_false otherwise. + * For domains not in a / b, assume True. + */ +TASK_DECL_2(MTBDD, mtbdd_less, MTBDD, MTBDD); +#define mtbdd_less(a, b) CALL(mtbdd_less, a, b) + +/** + * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) >= b(s), mtbdd_false otherwise. + * For domains not in a / b, assume True. + */ +TASK_DECL_2(MTBDD, mtbdd_geq, MTBDD, MTBDD); +#define mtbdd_geq(a, b) CALL(mtbdd_geq, a, b) + +/** + * For two MTBDDs a, b, return mtbdd_true if all common assignments a(s) > b(s), mtbdd_false otherwise. + * For domains not in a / b, assume True. + */ +TASK_DECL_2(MTBDD, mtbdd_greater, MTBDD, MTBDD); +#define mtbdd_greater(a, b) CALL(mtbdd_greater, a, b) + +/** + * Calculate the support of a MTBDD, i.e. the cube of all variables that appear in the MTBDD nodes. + */ +TASK_DECL_1(MTBDD, mtbdd_support, MTBDD); +#define mtbdd_support(dd) CALL(mtbdd_support, dd) + +/** + * Function composition, for each node with variable which has a pair in , + * replace the node by the result of mtbdd_ite(, , ). + * Each in must be a Boolean MTBDD. + */ +TASK_DECL_2(MTBDD, mtbdd_compose, MTBDD, MTBDDMAP); +#define mtbdd_compose(dd, map) CALL(mtbdd_compose, dd, map) + +/** + * Compute minimal leaf in the MTBDD (for Integer, Double, Rational MTBDDs) + */ +TASK_DECL_1(MTBDD, mtbdd_minimum, MTBDD); +#define mtbdd_minimum(dd) CALL(mtbdd_minimum, dd) + +/** + * Compute maximal leaf in the MTBDD (for Integer, Double, Rational MTBDDs) + */ +TASK_DECL_1(MTBDD, mtbdd_maximum, MTBDD); +#define mtbdd_maximum(dd) CALL(mtbdd_maximum, dd) + +/** + * Given a MTBDD
<dd> and a cube of variables <variables> expected in <dd>, + * mtbdd_enum_first and mtbdd_enum_next enumerate the unique paths in <dd>
    that lead to a non-False leaf. + * + * The function returns the leaf (or mtbdd_false if no new path is found) and encodes the path + * in the supplied array : 0 for a low edge, 1 for a high edge, and 2 if the variable is skipped. + * + * The supplied array must be large enough for all variables in . + * + * Usage: + * MTBDD leaf = mtbdd_enum_first(dd, variables, arr, NULL); + * while (leaf != mtbdd_false) { + * .... // do something with arr/leaf + * leaf = mtbdd_enum_next(dd, variables, arr, NULL); + * } + * + * The callback is an optional function that returns 0 when the given terminal node should be skipped. + */ +typedef int (*mtbdd_enum_filter_cb)(MTBDD); +MTBDD mtbdd_enum_first(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb); +MTBDD mtbdd_enum_next(MTBDD dd, MTBDD variables, uint8_t *arr, mtbdd_enum_filter_cb filter_cb); + +/** + * For debugging. + * Tests if all nodes in the MTBDD are correctly ``marked'' in the nodes table. + * Tests if variables in the internal nodes appear in-order. + * In Debug mode, this will cause assertion failures instead of returning 0. + * Returns 1 if all is fine, or 0 otherwise. + */ +TASK_DECL_1(int, mtbdd_test_isvalid, MTBDD); +#define mtbdd_test_isvalid(mtbdd) CALL(mtbdd_test_isvalid, mtbdd) + +/** + * Write a DOT representation of a MTBDD + * The callback function is required for custom terminals. + */ +typedef void (*print_terminal_label_cb)(FILE *out, uint32_t type, uint64_t value); +void mtbdd_fprintdot(FILE *out, MTBDD mtbdd, print_terminal_label_cb cb); +#define mtbdd_printdot(mtbdd, cb) mtbdd_fprintdot(stdout, mtbdd, cb) + +/** + * MTBDDMAP, maps uint32_t variables to MTBDDs. + * A MTBDDMAP node has variable level, low edge going to the next MTBDDMAP, high edge to the mapped MTBDD + */ +#define mtbdd_map_empty() mtbdd_false +#define mtbdd_map_isempty(map) (map == mtbdd_false ? 1 : 0) +#define mtbdd_map_key(map) mtbdd_getvar(map) +#define mtbdd_map_value(map) mtbdd_gethigh(map) +#define mtbdd_map_next(map) mtbdd_getlow(map) + +/** + * Return 1 if the map contains the key, 0 otherwise. + */ +int mtbdd_map_contains(MTBDDMAP map, uint32_t key); + +/** + * Retrieve the number of keys in the map. + */ +size_t mtbdd_map_count(MTBDDMAP map); + +/** + * Add the pair to the map, overwrites if key already in map. + */ +MTBDDMAP mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value); + +/** + * Add all values from map2 to map1, overwrites if key already in map1. + */ +MTBDDMAP mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2); + +/** + * Remove the key from the map and return the result + */ +MTBDDMAP mtbdd_map_remove(MTBDDMAP map, uint32_t key); + +/** + * Remove all keys in the cube from the map and return the result + */ +MTBDDMAP mtbdd_map_removeall(MTBDDMAP map, MTBDD variables); + +/** + * Custom node types + * Overrides standard hash/equality/notify_on_dead behavior + * hash(value, seed) return hash version + * equals(value1, value2) return 1 if equal, 0 if not equal + * create(&value) replace value by new value for object allocation + * destroy(value) + * NOTE: equals(value1, value2) must imply: hash(value1, seed) == hash(value2,seed) + * NOTE: new value of create must imply: equals(old, new) + */ +typedef uint64_t (*mtbdd_hash_cb)(uint64_t, uint64_t); +typedef int (*mtbdd_equals_cb)(uint64_t, uint64_t); +typedef void (*mtbdd_create_cb)(uint64_t*); +typedef void (*mtbdd_destroy_cb)(uint64_t); + +/** + * Registry callback handlers for . 
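 + * A hypothetical registration sketch (my_hash, my_equals, my_create and my_destroy are user-supplied callbacks matching the typedefs above):
 + * uint32_t my_leaf_type = mtbdd_register_custom_leaf(my_hash, my_equals, my_create, my_destroy);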
+ */ +uint32_t mtbdd_register_custom_leaf(mtbdd_hash_cb hash_cb, mtbdd_equals_cb equals_cb, mtbdd_create_cb create_cb, mtbdd_destroy_cb destroy_cb); + +/** + * Garbage collection + * Sylvan supplies two default methods to handle references to nodes, but the user + * is encouraged to implement custom handling. Simply add a handler using sylvan_gc_add_mark + * and let the handler call mtbdd_gc_mark_rec for every MTBDD that should be saved + * during garbage collection. + */ + +/** + * Call mtbdd_gc_mark_rec for every mtbdd you want to keep in your custom mark functions. + */ +VOID_TASK_DECL_1(mtbdd_gc_mark_rec, MTBDD); +#define mtbdd_gc_mark_rec(mtbdd) CALL(mtbdd_gc_mark_rec, mtbdd) + +/** + * Default external referencing. During garbage collection, MTBDDs marked with mtbdd_ref will + * be kept in the forest. + * It is recommended to prefer mtbdd_protect and mtbdd_unprotect. + */ +MTBDD mtbdd_ref(MTBDD a); +void mtbdd_deref(MTBDD a); +size_t mtbdd_count_refs(); + +/** + * Default external pointer referencing. During garbage collection, the pointers are followed and the MTBDD + * that they refer to are kept in the forest. + */ +void mtbdd_protect(MTBDD* ptr); +void mtbdd_unprotect(MTBDD* ptr); +size_t mtbdd_count_protected(); + +/** + * If sylvan_set_ondead is set to a callback, then this function marks MTBDDs (terminals). + * When they are dead after the mark phase in garbage collection, the callback is called for marked MTBDDs. + * The ondead callback can either perform cleanup or resurrect dead terminals. + */ +#define mtbdd_notify_ondead(dd) llmsset_notify_ondead(nodes, dd&~mtbdd_complement) + +/** + * Infrastructure for internal references (per-thread, e.g. during MTBDD operations) + * Use mtbdd_refs_push and mtbdd_refs_pop to put MTBDDs on a thread-local reference stack. + * Use mtbdd_refs_spawn and mtbdd_refs_sync around SPAWN and SYNC operations when the result + * of the spawned Task is a MTBDD that must be kept during garbage collection. 
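 + * A sketch of the typical pattern inside a Lace TASK (the operands a0, b0, a1, b1 are illustrative):
 + * mtbdd_refs_spawn(SPAWN(mtbdd_apply, a1, b1, op)); MTBDD low = mtbdd_refs_push(CALL(mtbdd_apply, a0, b0, op)); MTBDD high = mtbdd_refs_sync(SYNC(mtbdd_apply)); mtbdd_refs_pop(1);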
+ */ +typedef struct mtbdd_refs_internal +{ + size_t r_size, r_count; + size_t s_size, s_count; + MTBDD *results; + Task **spawns; +} *mtbdd_refs_internal_t; + +extern DECLARE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); + +static inline MTBDD +mtbdd_refs_push(MTBDD mtbdd) +{ + LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); + if (mtbdd_refs_key->r_count >= mtbdd_refs_key->r_size) { + mtbdd_refs_key->r_size *= 2; + mtbdd_refs_key->results = (MTBDD*)realloc(mtbdd_refs_key->results, sizeof(MTBDD) * mtbdd_refs_key->r_size); + } + mtbdd_refs_key->results[mtbdd_refs_key->r_count++] = mtbdd; + return mtbdd; +} + +static inline void +mtbdd_refs_pop(int amount) +{ + LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); + mtbdd_refs_key->r_count-=amount; +} + +static inline void +mtbdd_refs_spawn(Task *t) +{ + LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); + if (mtbdd_refs_key->s_count >= mtbdd_refs_key->s_size) { + mtbdd_refs_key->s_size *= 2; + mtbdd_refs_key->spawns = (Task**)realloc(mtbdd_refs_key->spawns, sizeof(Task*) * mtbdd_refs_key->s_size); + } + mtbdd_refs_key->spawns[mtbdd_refs_key->s_count++] = t; +} + +static inline MTBDD +mtbdd_refs_sync(MTBDD result) +{ + LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t); + mtbdd_refs_key->s_count--; + return result; +} + +#include "sylvan_mtbdd_storm.h" + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/src/sylvan_mtbdd_int.h b/src/sylvan_mtbdd_int.h new file mode 100644 index 000000000..940250b9a --- /dev/null +++ b/src/sylvan_mtbdd_int.h @@ -0,0 +1,128 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Internals for MTBDDs + */ + +#ifndef SYLVAN_MTBDD_INT_H +#define SYLVAN_MTBDD_INT_H + +/** + * MTBDD node structure + */ +typedef struct __attribute__((packed)) mtbddnode { + uint64_t a, b; +} * mtbddnode_t; // 16 bytes + +#define GETNODE(mtbdd) ((mtbddnode_t)llmsset_index_to_ptr(nodes, mtbdd&0x000000ffffffffff)) + +/** + * Complement handling macros + */ +#define MTBDD_HASMARK(s) (s&mtbdd_complement?1:0) +#define MTBDD_TOGGLEMARK(s) (s^mtbdd_complement) +#define MTBDD_STRIPMARK(s) (s&~mtbdd_complement) +#define MTBDD_TRANSFERMARK(from, to) (to ^ (from & mtbdd_complement)) +// Equal under mark +#define MTBDD_EQUALM(a, b) ((((a)^(b))&(~mtbdd_complement))==0) + +// Leaf: a = L=1, M, type; b = value +// Node: a = L=0, C, M, high; b = variable, low +// Only complement edge on "high" + +static inline int +mtbddnode_isleaf(mtbddnode_t n) +{ + return n->a & 0x4000000000000000 ? 1 : 0; +} + +static inline uint32_t +mtbddnode_gettype(mtbddnode_t n) +{ + return n->a & 0x00000000ffffffff; +} + +static inline uint64_t +mtbddnode_getvalue(mtbddnode_t n) +{ + return n->b; +} + +static inline int +mtbddnode_getcomp(mtbddnode_t n) +{ + return n->a & 0x8000000000000000 ? 
1 : 0; +} + +static inline uint64_t +mtbddnode_getlow(mtbddnode_t n) +{ + return n->b & 0x000000ffffffffff; // 40 bits +} + +static inline uint64_t +mtbddnode_gethigh(mtbddnode_t n) +{ + return n->a & 0x800000ffffffffff; // 40 bits plus high bit of first +} + +static inline uint32_t +mtbddnode_getvariable(mtbddnode_t n) +{ + return (uint32_t)(n->b >> 40); +} + +static inline int +mtbddnode_getmark(mtbddnode_t n) +{ + return n->a & 0x2000000000000000 ? 1 : 0; +} + +static inline void +mtbddnode_setmark(mtbddnode_t n, int mark) +{ + if (mark) n->a |= 0x2000000000000000; + else n->a &= 0xdfffffffffffffff; +} + +static inline void +mtbddnode_makeleaf(mtbddnode_t n, uint32_t type, uint64_t value) +{ + n->a = 0x4000000000000000 | (uint64_t)type; + n->b = value; +} + +static inline void +mtbddnode_makenode(mtbddnode_t n, uint32_t var, uint64_t low, uint64_t high) +{ + n->a = high; + n->b = ((uint64_t)var)<<40 | low; +} + +static MTBDD +node_getlow(MTBDD mtbdd, mtbddnode_t node) +{ + return MTBDD_TRANSFERMARK(mtbdd, mtbddnode_getlow(node)); +} + +static MTBDD +node_gethigh(MTBDD mtbdd, mtbddnode_t node) +{ + return MTBDD_TRANSFERMARK(mtbdd, mtbddnode_gethigh(node)); +} + +#endif diff --git a/src/sylvan_mtbdd_storm.c b/src/sylvan_mtbdd_storm.c new file mode 100644 index 000000000..dab4860c0 --- /dev/null +++ b/src/sylvan_mtbdd_storm.c @@ -0,0 +1,514 @@ +/** + * Generate SHA2 structural hashes. + * Hashes are independent of location. + * Mainly useful for debugging purposes. + */ +static void +mtbdd_sha2_rec(MTBDD mtbdd, SHA256_CTX *ctx) +{ + if (mtbdd == sylvan_true || mtbdd == sylvan_false) { + SHA256_Update(ctx, (void*)&mtbdd, sizeof(MTBDD)); + return; + } + + mtbddnode_t node = GETNODE(mtbdd); + if (mtbddnode_isleaf(node)) { + uint64_t val = mtbddnode_getvalue(node); + SHA256_Update(ctx, (void*)&val, sizeof(uint64_t)); + } else if (mtbddnode_getmark(node) == 0) { + mtbddnode_setmark(node, 1); + uint32_t level = mtbddnode_getvariable(node); + if (MTBDD_STRIPMARK(mtbddnode_gethigh(node))) level |= 0x80000000; + SHA256_Update(ctx, (void*)&level, sizeof(uint32_t)); + mtbdd_sha2_rec(mtbddnode_gethigh(node), ctx); + mtbdd_sha2_rec(mtbddnode_getlow(node), ctx); + } +} + +void +mtbdd_getsha(MTBDD mtbdd, char *target) +{ + SHA256_CTX ctx; + SHA256_Init(&ctx); + mtbdd_sha2_rec(mtbdd, &ctx); + if (mtbdd != sylvan_true && mtbdd != sylvan_false) mtbdd_unmark_rec(mtbdd); + SHA256_End(&ctx, target); +} + +/** + * Binary operation Times (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Integer or Double. + * If either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). + */ +TASK_IMPL_2(MTBDD, mtbdd_op_divide, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false; + + // Do not handle Boolean MTBDDs... 
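 + // The leaf cases below dispatch on the terminal type (0 = int64, 1 = double, 2 = fraction, see sylvan_mtbdd.h); if the operands are not handled here, mtbdd_invalid is returned so that mtbdd_apply recurses into the children.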
+ + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + int64_t va = *(int64_t*)(&val_a); + int64_t vb = *(int64_t*)(&val_b); + + if (va == 0) return a; + else if (vb == 0) return b; + else { + MTBDD result; + if (va == 1) result = b; + else if (vb == 1) result = a; + else result = mtbdd_int64(va*vb); + return result; + } + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double vval_a = *(double*)&val_a; + double vval_b = *(double*)&val_b; + if (vval_a == 0.0) return a; + else if (vval_b == 0.0) return b; + else { + MTBDD result; + if (vval_a == 0.0 || vval_b == 1.0) result = a; + result = mtbdd_double(vval_a / vval_b); + return result; + } + } + else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + uint64_t nom_a = val_a>>32; + uint64_t nom_b = val_b>>32; + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + // multiply! + uint32_t c = gcd(denom_b, denom_a); + uint32_t d = gcd(nom_a, nom_b); + nom_a /= d; + denom_a /= c; + nom_a *= (denom_b/c); + denom_a *= (nom_b/d); + // compute result + MTBDD result = mtbdd_fraction(nom_a, denom_a); + return result; + } + } + + return mtbdd_invalid; +} + +/** + * Binary operation Equals (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). + */ +TASK_IMPL_2(MTBDD, mtbdd_op_equals, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_false && b == mtbdd_false) return mtbdd_true; + if (a == mtbdd_true && b == mtbdd_true) return mtbdd_true; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + int64_t va = *(int64_t*)(&val_a); + int64_t vb = *(int64_t*)(&val_b); + if (va == vb) return mtbdd_true; + return mtbdd_false; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double vval_a = *(double*)&val_a; + double vval_b = *(double*)&val_b; + if (vval_a == vval_b) return mtbdd_true; + return mtbdd_false; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + uint64_t nom_a = val_a>>32; + uint64_t nom_b = val_b>>32; + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + if (nom_a == nom_b && denom_a == denom_b) return mtbdd_true; + return mtbdd_false; + } + } + + if (a < b) { + *pa = b; + *pb = a; + } + + return mtbdd_invalid; +} + +/** + * Binary operation Equals (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). 
+ */ +TASK_IMPL_2(MTBDD, mtbdd_op_less, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_false && b == mtbdd_false) return mtbdd_true; + if (a == mtbdd_true && b == mtbdd_true) return mtbdd_true; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + int64_t va = *(int64_t*)(&val_a); + int64_t vb = *(int64_t*)(&val_b); + if (va < vb) return mtbdd_true; + return mtbdd_false; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double vval_a = *(double*)&val_a; + double vval_b = *(double*)&val_b; + if (vval_a < vval_b) return mtbdd_true; + return mtbdd_false; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + uint64_t nom_a = val_a>>32; + uint64_t nom_b = val_b>>32; + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + return nom_a * denom_b < nom_b * denom_a ? mtbdd_true : mtbdd_false; + } + } + + return mtbdd_invalid; +} + +/** + * Binary operation Equals (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). + */ +TASK_IMPL_2(MTBDD, mtbdd_op_less_or_equal, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + if (a == mtbdd_false && b == mtbdd_false) return mtbdd_true; + if (a == mtbdd_true && b == mtbdd_true) return mtbdd_true; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + int64_t va = *(int64_t*)(&val_a); + int64_t vb = *(int64_t*)(&val_b); + return va <= vb ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double vval_a = *(double*)&val_a; + double vval_b = *(double*)&val_b; + if (vval_a <= vval_b) return mtbdd_true; + return mtbdd_false; + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + // both fraction + uint64_t nom_a = val_a>>32; + uint64_t nom_b = val_b>>32; + uint64_t denom_a = val_a&0xffffffff; + uint64_t denom_b = val_b&0xffffffff; + nom_a *= denom_b; + nom_b *= denom_a; + return nom_a <= nom_b ? mtbdd_true : mtbdd_false; + } + } + + return mtbdd_invalid; +} + +/** + * Binary operation Pow (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). 
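 + * (Note: the implementation below only handles Double leaves; Integer and Fraction operands currently hit assert(0).)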
+ */ +TASK_IMPL_2(MTBDD, mtbdd_op_pow, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + assert(0); + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double vval_a = *(double*)&val_a; + double vval_b = *(double*)&val_b; + return mtbdd_double(pow(vval_a, vval_b)); + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + assert(0); + } + } + + return mtbdd_invalid; +} + +/** + * Binary operation Mod (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). + */ +TASK_IMPL_2(MTBDD, mtbdd_op_mod, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + assert(0); + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double vval_a = *(double*)&val_a; + double vval_b = *(double*)&val_b; + return mtbdd_double(fmod(vval_a, vval_b)); + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + assert(0); + } + } + + return mtbdd_invalid; +} + +/** + * Binary operation Log (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). + */ +TASK_IMPL_2(MTBDD, mtbdd_op_logxy, MTBDD*, pa, MTBDD*, pb) +{ + MTBDD a = *pa, b = *pb; + + mtbddnode_t na = GETNODE(a); + mtbddnode_t nb = GETNODE(b); + + if (mtbddnode_isleaf(na) && mtbddnode_isleaf(nb)) { + uint64_t val_a = mtbddnode_getvalue(na); + uint64_t val_b = mtbddnode_getvalue(nb); + if (mtbddnode_gettype(na) == 0 && mtbddnode_gettype(nb) == 0) { + assert(0); + } else if (mtbddnode_gettype(na) == 1 && mtbddnode_gettype(nb) == 1) { + // both double + double vval_a = *(double*)&val_a; + double vval_b = *(double*)&val_b; + return mtbdd_double(log(vval_a) / log(vval_b)); + } else if (mtbddnode_gettype(na) == 2 && mtbddnode_gettype(nb) == 2) { + assert(0); + } + } + + return mtbdd_invalid; +} + +TASK_IMPL_2(MTBDD, mtbdd_op_not_zero, MTBDD, a, size_t, v) +{ + /* We only expect "double" terminals, or false */ + if (a == mtbdd_false) return mtbdd_false; + if (a == mtbdd_true) return mtbdd_true; + + // a != constant + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + if (mtbddnode_gettype(na) == 0) { + return mtbdd_getint64(a) != 0 ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 1) { + return mtbdd_getdouble(a) != 0.0 ? mtbdd_true : mtbdd_false; + } else if (mtbddnode_gettype(na) == 2) { + return mtbdd_getnumer(a) != 0 ? mtbdd_true : mtbdd_false; + } + } + + // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). 
+ (void)v; + + return mtbdd_invalid; +} + +TASK_IMPL_1(MTBDD, mtbdd_not_zero, MTBDD, dd) +{ + return mtbdd_uapply(dd, TASK(mtbdd_op_not_zero), 0); +} + +TASK_IMPL_2(MTBDD, mtbdd_op_floor, MTBDD, a, size_t, v) +{ + /* We only expect "double" terminals, or false */ + if (a == mtbdd_false) return mtbdd_false; + if (a == mtbdd_true) return mtbdd_true; + + // a != constant + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + if (mtbddnode_gettype(na) == 0) { + return a; + } else if (mtbddnode_gettype(na) == 1) { + MTBDD result = mtbdd_double(floor(mtbdd_getdouble(a))); + return result; + } else if (mtbddnode_gettype(na) == 2) { + MTBDD result = mtbdd_fraction(mtbdd_getnumer(a) / mtbdd_getdenom(a), 1); + return result; + } + } + + // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). + (void)v; + + return mtbdd_invalid; +} + +TASK_IMPL_1(MTBDD, mtbdd_floor, MTBDD, dd) +{ + return mtbdd_uapply(dd, TASK(mtbdd_op_floor), 0); +} + +TASK_IMPL_2(MTBDD, mtbdd_op_ceil, MTBDD, a, size_t, v) +{ + /* We only expect "double" terminals, or false */ + if (a == mtbdd_false) return mtbdd_false; + if (a == mtbdd_true) return mtbdd_true; + + // a != constant + mtbddnode_t na = GETNODE(a); + + if (mtbddnode_isleaf(na)) { + if (mtbddnode_gettype(na) == 0) { + return a; + } else if (mtbddnode_gettype(na) == 1) { + MTBDD result = mtbdd_double(ceil(mtbdd_getdouble(a))); + return result; + } else if (mtbddnode_gettype(na) == 2) { + MTBDD result = mtbdd_fraction(mtbdd_getnumer(a) / mtbdd_getdenom(a) + 1, 1); + return result; + } + } + + // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). + (void)v; + + return mtbdd_invalid; +} + +TASK_IMPL_1(MTBDD, mtbdd_ceil, MTBDD, dd) +{ + return mtbdd_uapply(dd, TASK(mtbdd_op_ceil), 0); +} + +TASK_IMPL_2(MTBDD, mtbdd_op_bool_to_double, MTBDD, a, size_t, v) +{ + /* We only expect "double" terminals, or false */ + if (a == mtbdd_false) return mtbdd_double(0); + if (a == mtbdd_true) return mtbdd_double(1.0); + + // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). + (void)v; + + return mtbdd_invalid; +} + +TASK_IMPL_1(MTBDD, mtbdd_bool_to_double, MTBDD, dd) +{ + return mtbdd_uapply(dd, TASK(mtbdd_op_bool_to_double), 0); +} + +TASK_IMPL_2(MTBDD, mtbdd_op_bool_to_int64, MTBDD, a, size_t, v) +{ + /* We only expect "double" terminals, or false */ + if (a == mtbdd_false) return mtbdd_int64(0); + if (a == mtbdd_true) return mtbdd_int64(1); + + // Ugly hack to get rid of the error "unused variable v" (because there is no version of uapply without a parameter). + (void)v; + + return mtbdd_invalid; +} + +TASK_IMPL_1(MTBDD, mtbdd_bool_to_int64, MTBDD, dd) +{ + return mtbdd_uapply(dd, TASK(mtbdd_op_bool_to_int64), 0); +} + +/** + * Calculate the number of satisfying variable assignments according to . + */ +TASK_IMPL_2(double, mtbdd_non_zero_count, MTBDD, dd, size_t, nvars) +{ + /* Trivial cases */ + if (dd == mtbdd_false) return 0.0; + + mtbddnode_t na = GETNODE(dd); + + if (mtbdd_isleaf(dd)) { + if (mtbddnode_gettype(na) == 0) { + return mtbdd_getint64(dd) != 0 ? powl(2.0L, nvars) : 0.0; + } else if (mtbddnode_gettype(na) == 1) { + return mtbdd_getdouble(dd) != 0 ? powl(2.0L, nvars) : 0.0; + } else if (mtbddnode_gettype(na) == 2) { + return mtbdd_getnumer(dd) != 0 ? 
powl(2.0L, nvars) : 0.0; + } + } + + /* Perhaps execute garbage collection */ + sylvan_gc_test(); + + union { + double d; + uint64_t s; + } hack; + + /* Consult cache */ + if (cache_get3(CACHE_MTBDD_NONZERO_COUNT, dd, 0, nvars, &hack.s)) { + sylvan_stats_count(CACHE_MTBDD_NONZERO_COUNT); + return hack.d; + } + + SPAWN(mtbdd_non_zero_count, mtbdd_gethigh(dd), nvars-1); + double low = CALL(mtbdd_non_zero_count, mtbdd_getlow(dd), nvars-1); + hack.d = low + SYNC(mtbdd_non_zero_count); + + cache_put3(CACHE_MTBDD_NONZERO_COUNT, dd, 0, nvars, hack.s); + return hack.d; +} + +int mtbdd_iszero(MTBDD dd) { + if (mtbdd_gettype(dd) == 0) { + return mtbdd_getint64(dd) == 0; + } else if (mtbdd_gettype(dd) == 1) { + return mtbdd_getdouble(dd) == 0; + } else if (mtbdd_gettype(dd) == 2) { + return mtbdd_getnumer(dd) == 0; + } + return 0; +} + +int mtbdd_isnonzero(MTBDD dd) { + return mtbdd_iszero(dd) ? 0 : 1; +} \ No newline at end of file diff --git a/src/sylvan_mtbdd_storm.h b/src/sylvan_mtbdd_storm.h new file mode 100644 index 000000000..38fa6668b --- /dev/null +++ b/src/sylvan_mtbdd_storm.h @@ -0,0 +1,111 @@ +void mtbdd_getsha(MTBDD mtbdd, char *target); // target must be at least 65 bytes... + +/** + * Binary operation Divide (for MTBDDs of same type) + * Only for MTBDDs where all leaves are Integer or Double. + * If either operand is mtbdd_false (not defined), + * then the result is mtbdd_false (i.e. not defined). + */ +TASK_DECL_2(MTBDD, mtbdd_op_divide, MTBDD*, MTBDD*); +#define mtbdd_divide(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_divide)) + +/** + * Binary operation equals (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. + */ +TASK_DECL_2(MTBDD, mtbdd_op_equals, MTBDD*, MTBDD*); +#define mtbdd_equals(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_equals)) + +/** + * Binary operation Less (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. + */ +TASK_DECL_2(MTBDD, mtbdd_op_less, MTBDD*, MTBDD*); +#define mtbdd_less_as_bdd(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_less)) + +/** + * Binary operation Less (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. + */ +TASK_DECL_2(MTBDD, mtbdd_op_less_or_equal, MTBDD*, MTBDD*); +#define mtbdd_less_or_equal_as_bdd(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_less_or_equal)) + +/** + * Binary operation Pow (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Integer, Double or a Fraction. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. + */ +TASK_DECL_2(MTBDD, mtbdd_op_pow, MTBDD*, MTBDD*); +#define mtbdd_pow(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_pow)) + +/** + * Binary operation Mod (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Integer, Double or a Fraction. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. 
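 + * (Note: as with Pow above, the implementation only handles Double leaves, computing fmod(a, b); other leaf types hit assert(0).)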
+ */ +TASK_DECL_2(MTBDD, mtbdd_op_mod, MTBDD*, MTBDD*); +#define mtbdd_mod(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_mod)) + +/** + * Binary operation Log (for MTBDDs of same type) + * Only for MTBDDs where either all leaves are Double or a Fraction. + * For Integer/Double MTBDD, if either operand is mtbdd_false (not defined), + * then the result is the other operand. + */ +TASK_DECL_2(MTBDD, mtbdd_op_logxy, MTBDD*, MTBDD*); +#define mtbdd_logxy(a, b) mtbdd_apply(a, b, TASK(mtbdd_op_logxy)) + +/** + * Monad that converts double to a Boolean MTBDD, translate terminals != 0 to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, mtbdd_op_not_zero, MTBDD, size_t) +TASK_DECL_1(MTBDD, mtbdd_not_zero, MTBDD) +#define mtbdd_not_zero(dd) CALL(mtbdd_not_zero, dd) + +/** + * Monad that floors all values Double and Fraction values. + */ +TASK_DECL_2(MTBDD, mtbdd_op_floor, MTBDD, size_t) +TASK_DECL_1(MTBDD, mtbdd_floor, MTBDD) +#define mtbdd_floor(dd) CALL(mtbdd_floor, dd) + +/** + * Monad that ceils all values Double and Fraction values. + */ +TASK_DECL_2(MTBDD, mtbdd_op_ceil, MTBDD, size_t) +TASK_DECL_1(MTBDD, mtbdd_ceil, MTBDD) +#define mtbdd_ceil(dd) CALL(mtbdd_ceil, dd) + +/** + * Monad that converts Boolean to a Double MTBDD, translate terminals true to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, mtbdd_op_bool_to_double, MTBDD, size_t) +TASK_DECL_1(MTBDD, mtbdd_bool_to_double, MTBDD) +#define mtbdd_bool_to_double(dd) CALL(mtbdd_bool_to_double, dd) + +/** + * Monad that converts Boolean to a uint MTBDD, translate terminals true to 1 and to 0 otherwise; + */ +TASK_DECL_2(MTBDD, mtbdd_op_bool_to_int64, MTBDD, size_t) +TASK_DECL_1(MTBDD, mtbdd_bool_to_int64, MTBDD) +#define mtbdd_bool_to_int64(dd) CALL(mtbdd_bool_to_int64, dd) + +/** + * Count the number of assignments (minterms) leading to a non-zero + */ +TASK_DECL_2(double, mtbdd_non_zero_count, MTBDD, size_t); +#define mtbdd_non_zero_count(dd, nvars) CALL(mtbdd_non_zero_count, dd, nvars) + +// Checks whether the given MTBDD (does represents a zero leaf. +int mtbdd_iszero(MTBDD); +int mtbdd_isnonzero(MTBDD); + +#define mtbdd_regular(dd) (dd & ~mtbdd_complement) diff --git a/src/sylvan_obj.cpp b/src/sylvan_obj.cpp new file mode 100644 index 000000000..a573c7a49 --- /dev/null +++ b/src/sylvan_obj.cpp @@ -0,0 +1,1039 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +using namespace sylvan; + +/*** + * Implementation of class Bdd + */ + +int +Bdd::operator==(const Bdd& other) const +{ + return bdd == other.bdd; +} + +int +Bdd::operator!=(const Bdd& other) const +{ + return bdd != other.bdd; +} + +Bdd +Bdd::operator=(const Bdd& right) +{ + bdd = right.bdd; + return *this; +} + +int +Bdd::operator<=(const Bdd& other) const +{ + // TODO: better implementation, since we are not interested in the BDD result + LACE_ME; + BDD r = sylvan_ite(this->bdd, sylvan_not(other.bdd), sylvan_false); + return r == sylvan_false; +} + +int +Bdd::operator>=(const Bdd& other) const +{ + // TODO: better implementation, since we are not interested in the BDD result + return other <= *this; +} + +int +Bdd::operator<(const Bdd& other) const +{ + return bdd != other.bdd && *this <= other; +} + +int +Bdd::operator>(const Bdd& other) const +{ + return bdd != other.bdd && *this >= other; +} + +Bdd +Bdd::operator!() const +{ + return Bdd(sylvan_not(bdd)); +} + +Bdd +Bdd::operator~() const +{ + return Bdd(sylvan_not(bdd)); +} + +Bdd +Bdd::operator*(const Bdd& other) const +{ + LACE_ME; + return Bdd(sylvan_and(bdd, other.bdd)); +} + +Bdd +Bdd::operator*=(const Bdd& other) +{ + LACE_ME; + bdd = sylvan_and(bdd, other.bdd); + return *this; +} + +Bdd +Bdd::operator&(const Bdd& other) const +{ + LACE_ME; + return Bdd(sylvan_and(bdd, other.bdd)); +} + +Bdd +Bdd::operator&=(const Bdd& other) +{ + LACE_ME; + bdd = sylvan_and(bdd, other.bdd); + return *this; +} + +Bdd +Bdd::operator+(const Bdd& other) const +{ + LACE_ME; + return Bdd(sylvan_or(bdd, other.bdd)); +} + +Bdd +Bdd::operator+=(const Bdd& other) +{ + LACE_ME; + bdd = sylvan_or(bdd, other.bdd); + return *this; +} + +Bdd +Bdd::operator|(const Bdd& other) const +{ + LACE_ME; + return Bdd(sylvan_or(bdd, other.bdd)); +} + +Bdd +Bdd::operator|=(const Bdd& other) +{ + LACE_ME; + bdd = sylvan_or(bdd, other.bdd); + return *this; +} + +Bdd +Bdd::operator^(const Bdd& other) const +{ + LACE_ME; + return Bdd(sylvan_xor(bdd, other.bdd)); +} + +Bdd +Bdd::operator^=(const Bdd& other) +{ + LACE_ME; + bdd = sylvan_xor(bdd, other.bdd); + return *this; +} + +Bdd +Bdd::operator-(const Bdd& other) const +{ + LACE_ME; + return Bdd(sylvan_and(bdd, sylvan_not(other.bdd))); +} + +Bdd +Bdd::operator-=(const Bdd& other) +{ + LACE_ME; + bdd = sylvan_and(bdd, sylvan_not(other.bdd)); + return *this; +} + +Bdd +Bdd::AndAbstract(const Bdd &g, const BddSet &cube) const +{ + LACE_ME; + return sylvan_and_exists(bdd, g.bdd, cube.set.bdd); +} + +Bdd +Bdd::ExistAbstract(const BddSet &cube) const +{ + LACE_ME; + return sylvan_exists(bdd, cube.set.bdd); +} + +Bdd +Bdd::UnivAbstract(const BddSet &cube) const +{ + LACE_ME; + return sylvan_forall(bdd, cube.set.bdd); +} + +Bdd +Bdd::Ite(const Bdd &g, const Bdd &h) const +{ + LACE_ME; + return sylvan_ite(bdd, g.bdd, h.bdd); +} + +Bdd +Bdd::And(const Bdd &g) const +{ + LACE_ME; + return sylvan_and(bdd, g.bdd); +} + +Bdd +Bdd::Or(const Bdd &g) const +{ + LACE_ME; + return sylvan_or(bdd, g.bdd); +} + +Bdd +Bdd::Nand(const Bdd &g) const +{ + LACE_ME; + return sylvan_nand(bdd, g.bdd); +} + +Bdd +Bdd::Nor(const Bdd &g) const +{ + LACE_ME; + return sylvan_nor(bdd, g.bdd); +} + +Bdd +Bdd::Xor(const Bdd &g) const +{ + LACE_ME; + return sylvan_xor(bdd, g.bdd); +} + +Bdd +Bdd::Xnor(const Bdd &g) const +{ + LACE_ME; + return sylvan_equiv(bdd, g.bdd); +} + +int +Bdd::Leq(const Bdd &g) const +{ + // TODO: better implementation, since we are not interested in the BDD result + LACE_ME; + BDD r = sylvan_ite(bdd, 
sylvan_not(g.bdd), sylvan_false); + return r == sylvan_false; +} + +Bdd +Bdd::RelPrev(const Bdd& relation, const BddSet& cube) const +{ + LACE_ME; + return sylvan_relprev(relation.bdd, bdd, cube.set.bdd); +} + +Bdd +Bdd::RelNext(const Bdd &relation, const BddSet &cube) const +{ + LACE_ME; + return sylvan_relnext(bdd, relation.bdd, cube.set.bdd); +} + +Bdd +Bdd::Closure() const +{ + LACE_ME; + return sylvan_closure(bdd); +} + +Bdd +Bdd::Constrain(const Bdd &c) const +{ + LACE_ME; + return sylvan_constrain(bdd, c.bdd); +} + +Bdd +Bdd::Restrict(const Bdd &c) const +{ + LACE_ME; + return sylvan_restrict(bdd, c.bdd); +} + +Bdd +Bdd::Compose(const BddMap &m) const +{ + LACE_ME; + return sylvan_compose(bdd, m.bdd); +} + +Bdd +Bdd::Permute(const std::vector& from, const std::vector& to) const +{ + LACE_ME; + + /* Create a map */ + BddMap map; + for (int i=from.size()-1; i>=0; i--) { + map.put(from[i], Bdd::bddVar(to[i])); + } + + return sylvan_compose(bdd, map.bdd); +} + +Bdd +Bdd::Support() const +{ + LACE_ME; + return sylvan_support(bdd); +} + +BDD +Bdd::GetBDD() const +{ + return bdd; +} + +void +Bdd::PrintDot(FILE *out) const +{ + sylvan_fprintdot(out, bdd); +} + +void +Bdd::GetShaHash(char *string) const +{ + sylvan_getsha(bdd, string); +} + +std::string +Bdd::GetShaHash() const +{ + char buf[65]; + sylvan_getsha(bdd, buf); + return std::string(buf); +} + +double +Bdd::SatCount(const BddSet &variables) const +{ + LACE_ME; + return sylvan_satcount(bdd, variables.set.bdd); +} + +double +Bdd::SatCount(size_t nvars) const +{ + LACE_ME; + // Note: the mtbdd_satcount can be called without initializing the MTBDD module. + return mtbdd_satcount(bdd, nvars); +} + +void +Bdd::PickOneCube(const BddSet &variables, uint8_t *values) const +{ + LACE_ME; + sylvan_sat_one(bdd, variables.set.bdd, values); +} + +std::vector +Bdd::PickOneCube(const BddSet &variables) const +{ + std::vector result = std::vector(); + + BDD bdd = this->bdd; + BDD vars = variables.set.bdd; + + if (bdd == sylvan_false) return result; + + for (; !sylvan_set_isempty(vars); vars = sylvan_set_next(vars)) { + uint32_t var = sylvan_set_var(vars); + if (bdd == sylvan_true) { + // pick 0 + result.push_back(false); + } else { + if (sylvan_var(bdd) != var) { + // pick 0 + result.push_back(false); + } else { + if (sylvan_low(bdd) == sylvan_false) { + // pick 1 + result.push_back(true); + bdd = sylvan_high(bdd); + } else { + // pick 0 + result.push_back(false); + bdd = sylvan_low(bdd); + } + } + } + } + + return result; +} + +Bdd +Bdd::PickOneCube() const +{ + LACE_ME; + return Bdd(sylvan_sat_one_bdd(bdd)); +} + +Bdd +Bdd::UnionCube(const BddSet &variables, uint8_t *values) const +{ + LACE_ME; + return sylvan_union_cube(bdd, variables.set.bdd, values); +} + +Bdd +Bdd::UnionCube(const BddSet &variables, std::vector values) const +{ + LACE_ME; + uint8_t *data = values.data(); + return sylvan_union_cube(bdd, variables.set.bdd, data); +} + +/** + * @brief Generate a cube representing a set of variables + */ +Bdd +Bdd::VectorCube(const std::vector variables) +{ + Bdd result = Bdd::bddOne(); + for (int i=variables.size()-1; i>=0; i--) { + result *= variables[i]; + } + return result; +} + +/** + * @brief Generate a cube representing a set of variables + */ +Bdd +Bdd::VariablesCube(std::vector variables) +{ + BDD result = sylvan_true; + for (int i=variables.size()-1; i>=0; i--) { + result = sylvan_makenode(variables[i], sylvan_false, result); + } + return result; +} + +size_t +Bdd::NodeCount() const +{ + return sylvan_nodecount(bdd); +} + +Bdd 
+Bdd::bddOne() +{ + return sylvan_true; +} + +Bdd +Bdd::bddZero() +{ + return sylvan_false; +} + +Bdd +Bdd::bddVar(uint32_t index) +{ + LACE_ME; + return sylvan_ithvar(index); +} + +Bdd +Bdd::bddCube(const BddSet &variables, uint8_t *values) +{ + LACE_ME; + return sylvan_cube(variables.set.bdd, values); +} + +Bdd +Bdd::bddCube(const BddSet &variables, std::vector values) +{ + LACE_ME; + uint8_t *data = values.data(); + return sylvan_cube(variables.set.bdd, data); +} + +int +Bdd::isConstant() const +{ + return bdd == sylvan_true || bdd == sylvan_false; +} + +int +Bdd::isTerminal() const +{ + return bdd == sylvan_true || bdd == sylvan_false; +} + +int +Bdd::isOne() const +{ + return bdd == sylvan_true; +} + +int +Bdd::isZero() const +{ + return bdd == sylvan_false; +} + +uint32_t +Bdd::TopVar() const +{ + return sylvan_var(bdd); +} + +Bdd +Bdd::Then() const +{ + return Bdd(sylvan_high(bdd)); +} + +Bdd +Bdd::Else() const +{ + return Bdd(sylvan_low(bdd)); +} + +/*** + * Implementation of class BddMap + */ + +BddMap::BddMap(uint32_t key_variable, const Bdd value) +{ + bdd = sylvan_map_add(sylvan_map_empty(), key_variable, value.bdd); +} + + +BddMap +BddMap::operator+(const Bdd& other) const +{ + return BddMap(sylvan_map_addall(bdd, other.bdd)); +} + +BddMap +BddMap::operator+=(const Bdd& other) +{ + bdd = sylvan_map_addall(bdd, other.bdd); + return *this; +} + +BddMap +BddMap::operator-(const Bdd& other) const +{ + return BddMap(sylvan_map_removeall(bdd, other.bdd)); +} + +BddMap +BddMap::operator-=(const Bdd& other) +{ + bdd = sylvan_map_removeall(bdd, other.bdd); + return *this; +} + +void +BddMap::put(uint32_t key, Bdd value) +{ + bdd = sylvan_map_add(bdd, key, value.bdd); +} + +void +BddMap::removeKey(uint32_t key) +{ + bdd = sylvan_map_remove(bdd, key); +} + +size_t +BddMap::size() const +{ + return sylvan_map_count(bdd); +} + +int +BddMap::isEmpty() const +{ + return sylvan_map_isempty(bdd); +} + + +/*** + * Implementation of class Mtbdd + */ + +Mtbdd +Mtbdd::int64Terminal(int64_t value) +{ + return mtbdd_int64(value); +} + +Mtbdd +Mtbdd::doubleTerminal(double value) +{ + return mtbdd_double(value); +} + +Mtbdd +Mtbdd::fractionTerminal(int64_t nominator, uint64_t denominator) +{ + return mtbdd_fraction(nominator, denominator); +} + +Mtbdd +Mtbdd::terminal(uint32_t type, uint64_t value) +{ + return mtbdd_makeleaf(type, value); +} + +Mtbdd +Mtbdd::mtbddVar(uint32_t variable) +{ + return mtbdd_makenode(variable, mtbdd_false, mtbdd_true); +} + +Mtbdd +Mtbdd::mtbddOne() +{ + return mtbdd_true; +} + +Mtbdd +Mtbdd::mtbddZero() +{ + return mtbdd_false; +} + +Mtbdd +Mtbdd::mtbddCube(const BddSet &variables, uint8_t *values, const Mtbdd &terminal) +{ + LACE_ME; + return mtbdd_cube(variables.set.bdd, values, terminal.mtbdd); +} + +Mtbdd +Mtbdd::mtbddCube(const BddSet &variables, std::vector values, const Mtbdd &terminal) +{ + LACE_ME; + uint8_t *data = values.data(); + return mtbdd_cube(variables.set.bdd, data, terminal.mtbdd); +} + +int +Mtbdd::isTerminal() const +{ + return mtbdd_isleaf(mtbdd); +} + +int +Mtbdd::isLeaf() const +{ + return mtbdd_isleaf(mtbdd); +} + +int +Mtbdd::isOne() const +{ + return mtbdd == mtbdd_true; +} + +int +Mtbdd::isZero() const +{ + return mtbdd == mtbdd_false; +} + +uint32_t +Mtbdd::TopVar() const +{ + return mtbdd_getvar(mtbdd); +} + +Mtbdd +Mtbdd::Then() const +{ + return mtbdd_isnode(mtbdd) ? mtbdd_gethigh(mtbdd) : mtbdd; +} + +Mtbdd +Mtbdd::Else() const +{ + return mtbdd_isnode(mtbdd) ? 
mtbdd_getlow(mtbdd) : mtbdd; +} + +Mtbdd +Mtbdd::Negate() const +{ + LACE_ME; + return mtbdd_negate(mtbdd); +} + +Mtbdd +Mtbdd::Apply(const Mtbdd &other, mtbdd_apply_op op) const +{ + LACE_ME; + return mtbdd_apply(mtbdd, other.mtbdd, op); +} + +Mtbdd +Mtbdd::UApply(mtbdd_uapply_op op, size_t param) const +{ + LACE_ME; + return mtbdd_uapply(mtbdd, op, param); +} + +Mtbdd +Mtbdd::Abstract(const BddSet &variables, mtbdd_abstract_op op) const +{ + LACE_ME; + return mtbdd_abstract(mtbdd, variables.set.bdd, op); +} + +Mtbdd +Mtbdd::Ite(const Mtbdd &g, const Mtbdd &h) const +{ + LACE_ME; + return mtbdd_ite(mtbdd, g.mtbdd, h.mtbdd); +} + +Mtbdd +Mtbdd::Plus(const Mtbdd &other) const +{ + LACE_ME; + return mtbdd_plus(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::Times(const Mtbdd &other) const +{ + LACE_ME; + return mtbdd_times(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::Min(const Mtbdd &other) const +{ + LACE_ME; + return mtbdd_min(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::Max(const Mtbdd &other) const +{ + LACE_ME; + return mtbdd_max(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::AbstractPlus(const BddSet &variables) const +{ + LACE_ME; + return mtbdd_abstract_plus(mtbdd, variables.set.bdd); +} + +Mtbdd +Mtbdd::AbstractTimes(const BddSet &variables) const +{ + LACE_ME; + return mtbdd_abstract_times(mtbdd, variables.set.bdd); +} + +Mtbdd +Mtbdd::AbstractMin(const BddSet &variables) const +{ + LACE_ME; + return mtbdd_abstract_min(mtbdd, variables.set.bdd); +} + +Mtbdd +Mtbdd::AbstractMax(const BddSet &variables) const +{ + LACE_ME; + return mtbdd_abstract_max(mtbdd, variables.set.bdd); +} + +Mtbdd +Mtbdd::AndExists(const Mtbdd &other, const BddSet &variables) const +{ + LACE_ME; + return mtbdd_and_exists(mtbdd, other.mtbdd, variables.set.bdd); +} + +int +Mtbdd::operator==(const Mtbdd& other) const +{ + return mtbdd == other.mtbdd; +} + +int +Mtbdd::operator!=(const Mtbdd& other) const +{ + return mtbdd != other.mtbdd; +} + +Mtbdd +Mtbdd::operator=(const Mtbdd& right) +{ + mtbdd = right.mtbdd; + return *this; +} + +Mtbdd +Mtbdd::operator!() const +{ + return mtbdd_not(mtbdd); +} + +Mtbdd +Mtbdd::operator~() const +{ + return mtbdd_not(mtbdd); +} + +Mtbdd +Mtbdd::operator*(const Mtbdd& other) const +{ + LACE_ME; + return mtbdd_times(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::operator*=(const Mtbdd& other) +{ + LACE_ME; + mtbdd = mtbdd_times(mtbdd, other.mtbdd); + return *this; +} + +Mtbdd +Mtbdd::operator+(const Mtbdd& other) const +{ + LACE_ME; + return mtbdd_plus(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::operator+=(const Mtbdd& other) +{ + LACE_ME; + mtbdd = mtbdd_plus(mtbdd, other.mtbdd); + return *this; +} + +Mtbdd +Mtbdd::operator-(const Mtbdd& other) const +{ + LACE_ME; + return mtbdd_minus(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::operator-=(const Mtbdd& other) +{ + LACE_ME; + mtbdd = mtbdd_minus(mtbdd, other.mtbdd); + return *this; +} + +Mtbdd +Mtbdd::MtbddThreshold(double value) const +{ + LACE_ME; + return mtbdd_threshold_double(mtbdd, value); +} + +Mtbdd +Mtbdd::MtbddStrictThreshold(double value) const +{ + LACE_ME; + return mtbdd_strict_threshold_double(mtbdd, value); +} + +Bdd +Mtbdd::BddThreshold(double value) const +{ + LACE_ME; + return mtbdd_threshold_double(mtbdd, value); +} + +Bdd +Mtbdd::BddStrictThreshold(double value) const +{ + LACE_ME; + return mtbdd_strict_threshold_double(mtbdd, value); +} + +Mtbdd +Mtbdd::Support() const +{ + LACE_ME; + return mtbdd_support(mtbdd); +} + +MTBDD +Mtbdd::GetMTBDD() const +{ + return mtbdd; +} + +Mtbdd +Mtbdd::Compose(MtbddMap &m) const +{ + LACE_ME; + return 
mtbdd_compose(mtbdd, m.mtbdd); +} + +Mtbdd +Mtbdd::Permute(const std::vector& from, const std::vector& to) const +{ + LACE_ME; + + /* Create a map */ + MtbddMap map; + for (int i=from.size()-1; i>=0; i--) { + map.put(from[i], Bdd::bddVar(to[i])); + } + + return mtbdd_compose(mtbdd, map.mtbdd); +} + +double +Mtbdd::SatCount(size_t nvars) const +{ + LACE_ME; + return mtbdd_satcount(mtbdd, nvars); +} + +double +Mtbdd::SatCount(const BddSet &variables) const +{ + return SatCount(sylvan_set_count(variables.set.bdd)); +} + +size_t +Mtbdd::NodeCount() const +{ + LACE_ME; + return mtbdd_nodecount(mtbdd); +} + + +/*** + * Implementation of class MtbddMap + */ + +MtbddMap::MtbddMap(uint32_t key_variable, Mtbdd value) +{ + mtbdd = mtbdd_map_add(mtbdd_map_empty(), key_variable, value.mtbdd); +} + +MtbddMap +MtbddMap::operator+(const Mtbdd& other) const +{ + return MtbddMap(mtbdd_map_addall(mtbdd, other.mtbdd)); +} + +MtbddMap +MtbddMap::operator+=(const Mtbdd& other) +{ + mtbdd = mtbdd_map_addall(mtbdd, other.mtbdd); + return *this; +} + +MtbddMap +MtbddMap::operator-(const Mtbdd& other) const +{ + return MtbddMap(mtbdd_map_removeall(mtbdd, other.mtbdd)); +} + +MtbddMap +MtbddMap::operator-=(const Mtbdd& other) +{ + mtbdd = mtbdd_map_removeall(mtbdd, other.mtbdd); + return *this; +} + +void +MtbddMap::put(uint32_t key, Mtbdd value) +{ + mtbdd = mtbdd_map_add(mtbdd, key, value.mtbdd); +} + +void +MtbddMap::removeKey(uint32_t key) +{ + mtbdd = mtbdd_map_remove(mtbdd, key); +} + +size_t +MtbddMap::size() +{ + return mtbdd_map_count(mtbdd); +} + +int +MtbddMap::isEmpty() +{ + return mtbdd_map_isempty(mtbdd); +} + + +/*** + * Implementation of class Sylvan + */ + +void +Sylvan::initPackage(size_t initialTableSize, size_t maxTableSize, size_t initialCacheSize, size_t maxCacheSize) +{ + sylvan_init_package(initialTableSize, maxTableSize, initialCacheSize, maxCacheSize); +} + +void +Sylvan::initBdd(int granularity) +{ + sylvan_init_bdd(granularity); +} + +void +Sylvan::initMtbdd() +{ + sylvan_init_mtbdd(); +} + +void +Sylvan::quitPackage() +{ + sylvan_quit(); +} + +#include "sylvan_obj_storm.cpp" diff --git a/src/sylvan_obj.hpp b/src/sylvan_obj.hpp new file mode 100644 index 000000000..e7f77f6ca --- /dev/null +++ b/src/sylvan_obj.hpp @@ -0,0 +1,855 @@ +/* + * Copyright 2011-2015 Formal Methods and Tools, University of Twente + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SYLVAN_OBJ_H +#define SYLVAN_OBJ_H + +#include +#include + +#include +#include + +namespace sylvan { + +class BddSet; +class BddMap; +class Mtbdd; + +class Bdd { + friend class Sylvan; + friend class BddSet; + friend class BddMap; + friend class Mtbdd; + +public: + Bdd() { bdd = sylvan_false; sylvan_protect(&bdd); } + Bdd(const BDD from) : bdd(from) { sylvan_protect(&bdd); } + Bdd(const Bdd &from) : bdd(from.bdd) { sylvan_protect(&bdd); } + Bdd(const uint32_t var) { bdd = sylvan_ithvar(var); sylvan_protect(&bdd); } + ~Bdd() { sylvan_unprotect(&bdd); } + + /** + * @brief Creates a Bdd representing just the variable index in its positive form + * The variable index must be a 0<=index<=2^23 (we use 24 bits internally) + */ + static Bdd bddVar(uint32_t index); + + /** + * @brief Returns the Bdd representing "True" + */ + static Bdd bddOne(); + + /** + * @brief Returns the Bdd representing "False" + */ + static Bdd bddZero(); + + /** + * @brief Returns the Bdd representing a cube of variables, according to the given values. + * @param variables the variables that will be in the cube in their positive or negative form + * @param values a character array describing how the variables will appear in the result + * The length of string must be equal to the number of variables in the cube. + * For every ith char in string, if it is 0, the corresponding variable will appear in its negative form, + * if it is 1, it will appear in its positive form, and if it is 2, it will appear as "any", thus it will + * be skipped. + */ + static Bdd bddCube(const BddSet &variables, unsigned char *values); + + /** + * @brief Returns the Bdd representing a cube of variables, according to the given values. + * @param variables the variables that will be in the cube in their positive or negative form + * @param string a character array describing how the variables will appear in the result + * The length of string must be equal to the number of variables in the cube. + * For every ith char in string, if it is 0, the corresponding variable will appear in its negative form, + * if it is 1, it will appear in its positive form, and if it is 2, it will appear as "any", thus it will + * be skipped. 
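+ *
+ * A possible call, for illustration only (the variable indices 1, 2, 3 and the value pattern are arbitrary example values; the element type uint8_t is inferred from the implementation above):
+ *
+ *   BddSet vars;
+ *   vars.add(1); vars.add(2); vars.add(3);
+ *   std::vector<uint8_t> values = {1, 0, 2};   // x1 positive, x2 negative, x3 skipped
+ *   Bdd cube = Bdd::bddCube(vars, values);     // represents x1 AND (NOT x2)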
+ */ + static Bdd bddCube(const BddSet &variables, std::vector values); + + int operator==(const Bdd& other) const; + int operator!=(const Bdd& other) const; + Bdd operator=(const Bdd& right); + int operator<=(const Bdd& other) const; + int operator>=(const Bdd& other) const; + int operator<(const Bdd& other) const; + int operator>(const Bdd& other) const; + Bdd operator!() const; + Bdd operator~() const; + Bdd operator*(const Bdd& other) const; + Bdd operator*=(const Bdd& other); + Bdd operator&(const Bdd& other) const; + Bdd operator&=(const Bdd& other); + Bdd operator+(const Bdd& other) const; + Bdd operator+=(const Bdd& other); + Bdd operator|(const Bdd& other) const; + Bdd operator|=(const Bdd& other); + Bdd operator^(const Bdd& other) const; + Bdd operator^=(const Bdd& other); + Bdd operator-(const Bdd& other) const; + Bdd operator-=(const Bdd& other); + + /** + * @brief Returns non-zero if this Bdd is bddOne() or bddZero() + */ + int isConstant() const; + + /** + * @brief Returns non-zero if this Bdd is bddOne() or bddZero() + */ + int isTerminal() const; + + /** + * @brief Returns non-zero if this Bdd is bddOne() + */ + int isOne() const; + + /** + * @brief Returns non-zero if this Bdd is bddZero() + */ + int isZero() const; + + /** + * @brief Returns the top variable index of this Bdd (the variable in the root node) + */ + uint32_t TopVar() const; + + /** + * @brief Follows the high edge ("then") of the root node of this Bdd + */ + Bdd Then() const; + + /** + * @brief Follows the low edge ("else") of the root node of this Bdd + */ + Bdd Else() const; + + /** + * @brief Computes \exists cube: f \and g + */ + Bdd AndAbstract(const Bdd& g, const BddSet& cube) const; + + /** + * @brief Computes \exists cube: f + */ + Bdd ExistAbstract(const BddSet& cube) const; + + /** + * @brief Computes \forall cube: f + */ + Bdd UnivAbstract(const BddSet& cube) const; + + /** + * @brief Computes if f then g else h + */ + Bdd Ite(const Bdd& g, const Bdd& h) const; + + /** + * @brief Computes f \and g + */ + Bdd And(const Bdd& g) const; + + /** + * @brief Computes f \or g + */ + Bdd Or(const Bdd& g) const; + + /** + * @brief Computes \not (f \and g) + */ + Bdd Nand(const Bdd& g) const; + + /** + * @brief Computes \not (f \or g) + */ + Bdd Nor(const Bdd& g) const; + + /** + * @brief Computes f \xor g + */ + Bdd Xor(const Bdd& g) const; + + /** + * @brief Computes \not (f \xor g), i.e. f \equiv g + */ + Bdd Xnor(const Bdd& g) const; + + /** + * @brief Returns whether all elements in f are also in g + */ + int Leq(const Bdd& g) const; + + /** + * @brief Computes the reverse application of a transition relation to this set. + * @param relation the transition relation to apply + * @param cube the variables that are in the transition relation + * This function assumes that s,t are interleaved with s even and t odd (s+1). + * Other variables in the relation are ignored (by existential quantification) + * Set cube to "false" (illegal cube) to assume all encountered variables are in s,t + * + * Use this function to concatenate two relations --> --> + * or to take the 'previous' of a set --> S + */ + Bdd RelPrev(const Bdd& relation, const BddSet& cube) const; + + /** + * @brief Computes the application of a transition relation to this set. + * @param relation the transition relation to apply + * @param cube the variables that are in the transition relation + * This function assumes that s,t are interleaved with s even and t odd (s+1). 
+ * Other variables in the relation are ignored (by existential quantification) + * Set cube to "false" (illegal cube) to assume all encountered variables are in s,t + * + * Use this function to take the 'next' of a set S --> + */ + Bdd RelNext(const Bdd& relation, const BddSet& cube) const; + + /** + * @brief Computes the transitive closure by traversing the BDD recursively. + * See Y. Matsunaga, P. C. McGeer, R. K. Brayton + * On Computing the Transitive Closre of a State Transition Relation + * 30th ACM Design Automation Conference, 1993. + */ + Bdd Closure() const; + + /** + * @brief Computes the constrain f @ c + */ + Bdd Constrain(const Bdd &c) const; + + /** + * @brief Computes the BDD restrict according to Coudert and Madre's algorithm (ICCAD90). + */ + Bdd Restrict(const Bdd &c) const; + + /** + * @brief Functional composition. Whenever a variable v in the map m is found in the BDD, + * it is substituted by the associated function. + * You can also use this function to implement variable reordering. + */ + Bdd Compose(const BddMap &m) const; + + /** + * @brief Substitute all variables in the array from by the corresponding variables in to. + */ + Bdd Permute(const std::vector& from, const std::vector& to) const; + + /** + * @brief Computes the support of a Bdd. + */ + Bdd Support() const; + + /** + * @brief Gets the BDD of this Bdd (for C functions) + */ + BDD GetBDD() const; + + /** + * @brief Writes .dot file of this Bdd. Not thread-safe! + */ + void PrintDot(FILE *out) const; + + /** + * @brief Gets a SHA2 hash that describes the structure of this Bdd. + * @param string a character array of at least 65 characters (includes zero-termination) + * This hash is 64 characters long and is independent of the memory locations of BDD nodes. + */ + void GetShaHash(char *string) const; + + std::string GetShaHash() const; + + /** + * @brief Computes the number of satisfying variable assignments, using variables in cube. + */ + double SatCount(const BddSet &cube) const; + + /** + * @brief Compute the number of satisfying variable assignments, using the given number of variables. + */ + double SatCount(const size_t nvars) const; + + /** + * @brief Gets one satisfying assignment according to the variables. + * @param variables The set of variables to be assigned, must include the support of the Bdd. + */ + void PickOneCube(const BddSet &variables, uint8_t *string) const; + + /** + * @brief Gets one satisfying assignment according to the variables. + * @param variables The set of variables to be assigned, must include the support of the Bdd. + * Returns an empty vector when either this Bdd equals bddZero() or the cube is empty. + */ + std::vector PickOneCube(const BddSet &variables) const; + + /** + * @brief Gets a cube that satisfies this Bdd. + */ + Bdd PickOneCube() const; + + /** + * @brief Faster version of: *this + Sylvan::bddCube(variables, values); + */ + Bdd UnionCube(const BddSet &variables, uint8_t *values) const; + + /** + * @brief Faster version of: *this + Sylvan::bddCube(variables, values); + */ + Bdd UnionCube(const BddSet &variables, std::vector values) const; + + /** + * @brief Generate a cube representing a set of variables + */ + static Bdd VectorCube(const std::vector variables); + + /** + * @brief Generate a cube representing a set of variables + * @param variables An sorted set of variable indices + */ + static Bdd VariablesCube(const std::vector variables); + + /** + * @brief Gets the number of nodes in this Bdd. Not thread-safe! 
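+ *
+ * For the relational operations declared above, a forward-reachability loop might be sketched as follows (illustrative only; `init` and `trans` are assumed Bdds, and the variable numbering follows the even/odd convention described there):
+ *
+ *   BddSet relVars;
+ *   relVars.add(0); relVars.add(1); relVars.add(2); relVars.add(3);
+ *   Bdd reached = init, frontier = init;
+ *   while (frontier != Bdd::bddZero()) {
+ *       frontier = frontier.RelNext(trans, relVars) - reached;  // successors not yet seen
+ *       reached += frontier;                                    // operator+= is "or"
+ *   }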
+ */ + size_t NodeCount() const; + +#include "sylvan_obj_bdd_storm.hpp" + +private: + BDD bdd; +}; + +class BddSet +{ + friend class Bdd; + friend class Mtbdd; + Bdd set; + +public: + /** + * @brief Create a new empty set. + */ + BddSet() : set(Bdd::bddOne()) {} + + /** + * @brief Wrap the BDD cube in a set. + */ + BddSet(const Bdd &other) : set(other) {} + + /** + * @brief Create a copy of the set . + */ + BddSet(const BddSet &other) : set(other.set) {} + + /** + * @brief Add the variable to this set. + */ + void add(uint32_t variable) { + set *= Bdd::bddVar(variable); + } + + /** + * @brief Add all variables in the set to this set. + */ + void add(BddSet &other) { + set *= other.set; + } + + /** + * @brief Remove the variable from this set. + */ + void remove(uint32_t variable) { + set = set.ExistAbstract(Bdd::bddVar(variable)); + } + + /** + * @brief Remove all variables in the set from this set. + */ + void remove(BddSet &other) { + set = set.ExistAbstract(other.set); + } + + /** + * @brief Retrieve the head of the set. (The first variable.) + */ + uint32_t TopVar() const { + return set.TopVar(); + } + + /** + * @brief Retrieve the tail of the set. (The set containing all but the first variables.) + */ + BddSet Next() const { + Bdd then = set.Then(); + return BddSet(then); + } + + /** + * @brief Return true if this set is empty, or false otherwise. + */ + bool isEmpty() const { + return set.isOne(); + } + + /** + * @brief Return true if this set contains the variable , or false otherwise. + */ + bool contains(uint32_t variable) const { + if (isEmpty()) return false; + else if (TopVar() == variable) return true; + else return Next().contains(variable); + } + + /** + * @brief Return the number of variables in this set. + */ + size_t size() const { + if (isEmpty()) return 0; + else return 1 + Next().size(); + } + + /** + * @brief Create a set containing the variables in . + * It is advised to have the variables in in ascending order. + */ + static BddSet fromArray(BDDVAR *arr, size_t length) { + BddSet set; + for (size_t i = 0; i < length; i++) { + set.add(arr[length-i-1]); + } + return set; + } + + /** + * @brief Create a set containing the variables in . + * It is advised to have the variables in in ascending order. + */ + static BddSet fromVector(const std::vector variables) { + BddSet set; + for (int i=variables.size()-1; i>=0; i--) { + set.set *= variables[i]; + } + return set; + } + + /** + * @brief Create a set containing the variables in . + * It is advised to have the variables in in ascending order. + */ + static BddSet fromVector(const std::vector variables) { + BddSet set; + for (int i=variables.size()-1; i>=0; i--) { + set.add(variables[i]); + } + return set; + } + + /** + * @brief Write all variables in this set to . + * @param arr An array of at least size this.size(). + */ + void toArray(BDDVAR *arr) const { + if (!isEmpty()) { + *arr = TopVar(); + Next().toArray(arr+1); + } + } + + /** + * @brief Return the vector of all variables in this set. 
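+ *
+ * Example (illustrative; the indices 2 and 5 are arbitrary):
+ *
+ *   BddSet s;
+ *   s.add(2); s.add(5);
+ *   s.contains(5);           // true
+ *   s.size();                // 2
+ *   auto v = s.toVector();   // the variables of the set, starting at the top of the cube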
+ */ + std::vector toVector() const { + std::vector result; + Bdd x = set; + while (!x.isOne()) { + result.push_back(x.TopVar()); + x = x.Then(); + } + return result; + } +}; + +class BddMap +{ + friend class Bdd; + BDD bdd; + BddMap(const BDD from) : bdd(from) { sylvan_protect(&bdd); } + BddMap(const Bdd &from) : bdd(from.bdd) { sylvan_protect(&bdd); } +public: + BddMap() : bdd(sylvan_map_empty()) { sylvan_protect(&bdd); } + ~BddMap() { sylvan_unprotect(&bdd); } + + BddMap(uint32_t key_variable, const Bdd value); + + BddMap operator+(const Bdd& other) const; + BddMap operator+=(const Bdd& other); + BddMap operator-(const Bdd& other) const; + BddMap operator-=(const Bdd& other); + + /** + * @brief Adds a key-value pair to the map + */ + void put(uint32_t key, Bdd value); + + /** + * @brief Removes a key-value pair from the map + */ + void removeKey(uint32_t key); + + /** + * @brief Returns the number of key-value pairs in this map + */ + size_t size() const; + + /** + * @brief Returns non-zero when this map is empty + */ + int isEmpty() const; +}; + +class MtbddMap; + +class Mtbdd { + friend class Sylvan; + friend class MtbddMap; + +public: + Mtbdd() { mtbdd = sylvan_false; mtbdd_protect(&mtbdd); } + Mtbdd(const MTBDD from) : mtbdd(from) { mtbdd_protect(&mtbdd); } + Mtbdd(const Mtbdd &from) : mtbdd(from.mtbdd) { mtbdd_protect(&mtbdd); } + Mtbdd(const Bdd &from) : mtbdd(from.bdd) { mtbdd_protect(&mtbdd); } + ~Mtbdd() { mtbdd_unprotect(&mtbdd); } + + /** + * @brief Creates a Mtbdd leaf representing the int64 value + */ + static Mtbdd int64Terminal(int64_t value); + + /** + * @brief Creates a Mtbdd leaf representing the floating-point value + */ + static Mtbdd doubleTerminal(double value); + + /** + * @brief Creates a Mtbdd leaf representing the fraction value / + * Internally, Sylvan uses 32-bit values and reports overflows to stderr. + */ + static Mtbdd fractionTerminal(int64_t nominator, uint64_t denominator); + + /** + * @brief Creates a Mtbdd leaf of type holding value + * This is useful for custom Mtbdd types. + */ + static Mtbdd terminal(uint32_t type, uint64_t value); + + /** + * @brief Creates a Boolean Mtbdd representing jsut the variable index in its positive form + * The variable index must be 0<=index<=2^23 (Sylvan uses 24 bits internally) + */ + static Mtbdd mtbddVar(uint32_t variable); + + /** + * @brief Returns the Boolean Mtbdd representing "True" + */ + static Mtbdd mtbddOne(); + + /** + * @brief Returns the Boolean Mtbdd representing "False" + */ + static Mtbdd mtbddZero(); + + /** + * @brief Returns the Mtbdd representing a cube of variables, according to the given values. + * @param variables the variables that will be in the cube in their positive or negative form + * @param values a character array describing how the variables will appear in the result + * @param terminal the leaf of the cube + * The length of string must be equal to the number of variables in the cube. + * For every ith char in string, if it is 0, the corresponding variable will appear in its negative form, + * if it is 1, it will appear in its positive form, and if it is 2, it will appear as "any", thus it will + * be skipped. + */ + static Mtbdd mtbddCube(const BddSet &variables, unsigned char *values, const Mtbdd &terminal); + + /** + * @brief Returns the Mtbdd representing a cube of variables, according to the given values. 
+ * @param variables the variables that will be in the cube in their positive or negative form + * @param values a character array describing how the variables will appear in the result + * @param terminal the leaf of the cube + * The length of string must be equal to the number of variables in the cube. + * For every ith char in string, if it is 0, the corresponding variable will appear in its negative form, + * if it is 1, it will appear in its positive form, and if it is 2, it will appear as "any", thus it will + * be skipped. + */ + static Mtbdd mtbddCube(const BddSet &variables, std::vector values, const Mtbdd &terminal); + + int operator==(const Mtbdd& other) const; + int operator!=(const Mtbdd& other) const; + Mtbdd operator=(const Mtbdd& right); + Mtbdd operator!() const; + Mtbdd operator~() const; + Mtbdd operator*(const Mtbdd& other) const; + Mtbdd operator*=(const Mtbdd& other); + Mtbdd operator+(const Mtbdd& other) const; + Mtbdd operator+=(const Mtbdd& other); + Mtbdd operator-(const Mtbdd& other) const; + Mtbdd operator-=(const Mtbdd& other); + + // not implemented (compared to Bdd): <=, >=, <, >, &, &=, |, |=, ^, ^= + + /** + * @brief Returns non-zero if this Mtbdd is a leaf + */ + int isTerminal() const; + + /** + * @brief Returns non-zero if this Mtbdd is a leaf + */ + int isLeaf() const; + + /** + * @brief Returns non-zero if this Mtbdd is mtbddOne() + */ + int isOne() const; + + /** + * @brief Returns non-zero if this Mtbdd is mtbddZero() + */ + int isZero() const; + + /** + * @brief Returns the top variable index of this Mtbdd (the variable in the root node) + */ + uint32_t TopVar() const; + + /** + * @brief Follows the high edge ("then") of the root node of this Mtbdd + */ + Mtbdd Then() const; + + /** + * @brief Follows the low edge ("else") of the root node of this Mtbdd + */ + Mtbdd Else() const; + + /** + * @brief Returns the negation of the MTBDD (every terminal negative) + * Do not use this for Boolean MTBDDs, only for Integer/Double/Fraction MTBDDs. + */ + Mtbdd Negate() const; + + /** + * @brief Applies the binary operation + */ + Mtbdd Apply(const Mtbdd &other, mtbdd_apply_op op) const; + + /** + * @brief Applies the unary operation with parameter + */ + Mtbdd UApply(mtbdd_uapply_op op, size_t param) const; + + /** + * @brief Computes the abstraction on variables using operator . 
+ * See also: AbstractPlus, AbstractTimes, AbstractMin, AbstractMax + */ + Mtbdd Abstract(const BddSet &variables, mtbdd_abstract_op op) const; + + /** + * @brief Computes if f then g else h + * This Mtbdd must be a Boolean Mtbdd + */ + Mtbdd Ite(const Mtbdd &g, const Mtbdd &h) const; + + /** + * @brief Computes f + g + */ + Mtbdd Plus(const Mtbdd &other) const; + + /** + * @brief Computes f * g + */ + Mtbdd Times(const Mtbdd &other) const; + + /** + * @brief Computes min(f, g) + */ + Mtbdd Min(const Mtbdd &other) const; + + /** + * @brief Computes max(f, g) + */ + Mtbdd Max(const Mtbdd &other) const; + + /** + * @brief Computes abstraction by summation (existential quantification) + */ + Mtbdd AbstractPlus(const BddSet &variables) const; + + /** + * @brief Computes abstraction by multiplication (universal quantification) + */ + Mtbdd AbstractTimes(const BddSet &variables) const; + + /** + * @brief Computes abstraction by minimum + */ + Mtbdd AbstractMin(const BddSet &variables) const; + + /** + * @brief Computes abstraction by maximum + */ + Mtbdd AbstractMax(const BddSet &variables) const; + + /** + * @brief Computes abstraction by summation of f \times g + */ + Mtbdd AndExists(const Mtbdd &other, const BddSet &variables) const; + + /** + * @brief Convert floating-point/fraction Mtbdd to a Boolean Mtbdd, leaf >= value ? true : false + */ + Mtbdd MtbddThreshold(double value) const; + + /** + * @brief Convert floating-point/fraction Mtbdd to a Boolean Mtbdd, leaf > value ? true : false + */ + Mtbdd MtbddStrictThreshold(double value) const; + + /** + * @brief Convert floating-point/fraction Mtbdd to a Boolean Mtbdd, leaf >= value ? true : false + * Same as MtbddThreshold (Bdd = Boolean Mtbdd) + */ + Bdd BddThreshold(double value) const; + + /** + * @brief Convert floating-point/fraction Mtbdd to a Boolean Mtbdd, leaf > value ? true : false + * Same as MtbddStrictThreshold (Bdd = Boolean Mtbdd) + */ + Bdd BddStrictThreshold(double value) const; + + /** + * @brief Computes the support of a Mtbdd. + */ + Mtbdd Support() const; + + /** + * @brief Gets the MTBDD of this Mtbdd (for C functions) + */ + MTBDD GetMTBDD() const; + + /** + * @brief Functional composition. Whenever a variable v in the map m is found in the MTBDD, + * it is substituted by the associated function (which should be a Boolean MTBDD) + * You can also use this function to implement variable reordering. + */ + Mtbdd Compose(MtbddMap &m) const; + + /** + * @brief Substitute all variables in the array from by the corresponding variables in to. + */ + Mtbdd Permute(const std::vector& from, const std::vector& to) const; + + /** + * @brief Compute the number of satisfying variable assignments, using variables in cube. + */ + double SatCount(const BddSet &variables) const; + + /** + * @brief Compute the number of satisfying variable assignments, using the given number of variables. + */ + double SatCount(const size_t nvars) const; + + /** + * @brief Gets the number of nodes in this Bdd. Not thread-safe! 
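+ *
+ * The arithmetic and abstraction operations above combine naturally; a matrix-vector product over MTBDDs could be sketched as follows (`M`, `v` and the column-variable set `cols` are assumptions, not taken from this file):
+ *
+ *   Mtbdd product = M.Times(v);                  // point-wise multiplication
+ *   Mtbdd result  = product.AbstractPlus(cols);  // sum out the column variables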
+ */ + size_t NodeCount() const; + +#include "sylvan_obj_mtbdd_storm.hpp" + +private: + MTBDD mtbdd; +}; + +class MtbddMap +{ + friend class Mtbdd; + MTBDD mtbdd; + MtbddMap(MTBDD from) : mtbdd(from) { mtbdd_protect(&mtbdd); } + MtbddMap(Mtbdd &from) : mtbdd(from.mtbdd) { mtbdd_protect(&mtbdd); } +public: + MtbddMap() : mtbdd(mtbdd_map_empty()) { mtbdd_protect(&mtbdd); } + ~MtbddMap() { mtbdd_unprotect(&mtbdd); } + + MtbddMap(uint32_t key_variable, Mtbdd value); + + MtbddMap operator+(const Mtbdd& other) const; + MtbddMap operator+=(const Mtbdd& other); + MtbddMap operator-(const Mtbdd& other) const; + MtbddMap operator-=(const Mtbdd& other); + + /** + * @brief Adds a key-value pair to the map + */ + void put(uint32_t key, Mtbdd value); + + /** + * @brief Removes a key-value pair from the map + */ + void removeKey(uint32_t key); + + /** + * @brief Returns the number of key-value pairs in this map + */ + size_t size(); + + /** + * @brief Returns non-zero when this map is empty + */ + int isEmpty(); +}; + +class Sylvan { +public: + /** + * @brief Initializes the Sylvan framework; call this only once in your program. + * @param initialTableSize The initial size of the nodes table. Must be a power of two. + * @param maxTableSize The maximum size of the nodes table. Must be a power of two. + * @param initialCacheSize The initial size of the operation cache. Must be a power of two. + * @param maxCacheSize The maximum size of the operation cache. Must be a power of two. + */ + static void initPackage(size_t initialTableSize, size_t maxTableSize, size_t initialCacheSize, size_t maxCacheSize); + + /** + * @brief Initializes the BDD module of the Sylvan framework. + * @param granularity determines operation cache behavior; for higher values (2+) it will use the operation cache less often. + * Values of 3-7 may result in better performance, since occasionally not using the operation cache is fine in practice. + * A granularity of 1 means that every BDD operation will be cached at every variable level. + */ + static void initBdd(int granularity); + + /** + * @brief Initializes the MTBDD module of the Sylvan framework. + */ + static void initMtbdd(); + + /** + * @brief Frees all memory in use by Sylvan. + * Warning: if you have any Bdd objects which are not bddZero() or bddOne() after this, your program may crash! 
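+ *
+ * A minimal lifecycle sketch, mirroring the C tests shipped alongside this wrapper (the table and cache sizes are example values):
+ *
+ *   lace_init(1, 0);                 // Lace must be initialized before Sylvan
+ *   lace_startup(0, NULL, NULL);
+ *   Sylvan::initPackage(1LL<<16, 1LL<<16, 1LL<<16, 1LL<<16);
+ *   Sylvan::initBdd(1);
+ *   Sylvan::initMtbdd();
+ *   { Bdd v1 = Bdd::bddVar(1); }     // let all Bdd objects go out of scope ...
+ *   Sylvan::quitPackage();           // ... before freeing Sylvan's memory
+ *   lace_exit();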
+ */ + static void quitPackage(); +}; + +} + +#endif diff --git a/src/sylvan_obj_bdd_storm.hpp b/src/sylvan_obj_bdd_storm.hpp new file mode 100644 index 000000000..393ce988c --- /dev/null +++ b/src/sylvan_obj_bdd_storm.hpp @@ -0,0 +1,3 @@ + Mtbdd toDoubleMtbdd() const; + Mtbdd toInt64Mtbdd() const; + Mtbdd Ite(Mtbdd const& thenDd, Mtbdd const& elseDd) const; diff --git a/src/sylvan_obj_mtbdd_storm.hpp b/src/sylvan_obj_mtbdd_storm.hpp new file mode 100644 index 000000000..26e5ea4be --- /dev/null +++ b/src/sylvan_obj_mtbdd_storm.hpp @@ -0,0 +1,51 @@ + /** + * @brief Computes f - g + */ + Mtbdd Minus(const Mtbdd &other) const; + + /** + * @brief Computes f / g + */ + Mtbdd Divide(const Mtbdd &other) const; + + Bdd NotZero() const; + + Bdd Equals(const Mtbdd& other) const; + + Bdd Less(const Mtbdd& other) const; + + Bdd LessOrEqual(const Mtbdd& other) const; + + Mtbdd Minimum() const; + + Mtbdd Maximum() const; + + bool EqualNorm(const Mtbdd& other, double epsilon) const; + + bool EqualNormRel(const Mtbdd& other, double epsilon) const; + + Mtbdd Floor() const; + + Mtbdd Ceil() const; + + Mtbdd Pow(const Mtbdd& other) const; + + Mtbdd Mod(const Mtbdd& other) const; + + Mtbdd Logxy(const Mtbdd& other) const; + + size_t CountLeaves() const; + + /** + * @brief Compute the number of non-zero variable assignments, using variables in cube. + */ + double NonZeroCount(size_t variableCount) const; + + bool isValid() const; + + /** + * @brief Writes .dot file of this Bdd. Not thread-safe! + */ + void PrintDot(FILE *out) const; + + std::string GetShaHash() const; diff --git a/src/sylvan_obj_storm.cpp b/src/sylvan_obj_storm.cpp new file mode 100644 index 000000000..27706dcdd --- /dev/null +++ b/src/sylvan_obj_storm.cpp @@ -0,0 +1,141 @@ +Mtbdd +Bdd::toDoubleMtbdd() const { + LACE_ME; + return mtbdd_bool_to_double(bdd); +} + +Mtbdd +Bdd::toInt64Mtbdd() const { + LACE_ME; + return mtbdd_bool_to_int64(bdd); +} + +Mtbdd +Bdd::Ite(Mtbdd const& thenDd, Mtbdd const& elseDd) const { + LACE_ME; + return mtbdd_ite(bdd, thenDd.GetMTBDD(), elseDd.GetMTBDD()); +} + +Mtbdd +Mtbdd::Minus(const Mtbdd &other) const +{ + LACE_ME; + return mtbdd_minus(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::Divide(const Mtbdd &other) const +{ + LACE_ME; + return mtbdd_divide(mtbdd, other.mtbdd); +} + +Bdd +Mtbdd::NotZero() const +{ + LACE_ME; + return mtbdd_not_zero(mtbdd); +} + +Bdd +Mtbdd::Equals(const Mtbdd& other) const { + LACE_ME; + return mtbdd_equals(mtbdd, other.mtbdd); +} + +Bdd +Mtbdd::Less(const Mtbdd& other) const { + LACE_ME; + return mtbdd_less_as_bdd(mtbdd, other.mtbdd); +} + +Bdd +Mtbdd::LessOrEqual(const Mtbdd& other) const { + LACE_ME; + return mtbdd_less_or_equal_as_bdd(mtbdd, other.mtbdd); +} + +bool +Mtbdd::EqualNorm(const Mtbdd& other, double epsilon) const { + LACE_ME; + return mtbdd_equal_norm_d(mtbdd, other.mtbdd, epsilon); +} + +bool +Mtbdd::EqualNormRel(const Mtbdd& other, double epsilon) const { + LACE_ME; + return mtbdd_equal_norm_rel_d(mtbdd, other.mtbdd, epsilon); +} + +Mtbdd +Mtbdd::Floor() const { + LACE_ME; + return mtbdd_floor(mtbdd); +} + +Mtbdd +Mtbdd::Ceil() const { + LACE_ME; + return mtbdd_ceil(mtbdd); +} + +Mtbdd +Mtbdd::Pow(const Mtbdd& other) const { + LACE_ME; + return mtbdd_pow(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::Mod(const Mtbdd& other) const { + LACE_ME; + return mtbdd_mod(mtbdd, other.mtbdd); +} + +Mtbdd +Mtbdd::Logxy(const Mtbdd& other) const { + LACE_ME; + return mtbdd_logxy(mtbdd, other.mtbdd); +} + +size_t +Mtbdd::CountLeaves() const { + LACE_ME; + return mtbdd_leafcount(mtbdd); +} + 
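+
+/*
+ * Illustrative sketch (not a definitive usage pattern): how some of the
+ * storm-specific helpers defined in this file might be combined. `probs` and
+ * `previous` are assumed double-valued Mtbdds.
+ *
+ *   Bdd  support   = probs.NotZero();                  // states with a non-zero leaf
+ *   Bdd  likely    = probs.BddStrictThreshold(0.5);    // leaves > 0.5, as a Bdd
+ *   bool converged = probs.EqualNorm(previous, 1e-6);  // leaf-wise comparison up to epsilon
+ */
+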
+double +Mtbdd::NonZeroCount(size_t variableCount) const { + LACE_ME; + return mtbdd_non_zero_count(mtbdd, variableCount); +} + +bool +Mtbdd::isValid() const { + LACE_ME; + return mtbdd_test_isvalid(mtbdd) == 1; +} + +Mtbdd +Mtbdd::Minimum() const { + LACE_ME; + return mtbdd_minimum(mtbdd); +} + +Mtbdd +Mtbdd::Maximum() const { + LACE_ME; + return mtbdd_maximum(mtbdd); +} + +void +Mtbdd::PrintDot(FILE *out) const { + mtbdd_fprintdot(out, mtbdd, NULL); +} + +std::string +Mtbdd::GetShaHash() const { + char buf[65]; + mtbdd_getsha(mtbdd, buf); + return std::string(buf); +} + diff --git a/src/tls.h b/src/tls.h new file mode 100644 index 000000000..80fdfe7e5 --- /dev/null +++ b/src/tls.h @@ -0,0 +1,35 @@ +/* + * Written by Josh Dybnis and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + * + * A platform independant wrapper around thread-local storage. On platforms that don't support + * __thread variables (e.g. Mac OS X), we have to use the pthreads library for thread-local storage + */ +#include + +#ifndef TLS_H +#define TLS_H + +#ifdef __ELF__ // use gcc thread-local storage (i.e. __thread variables) +#define DECLARE_THREAD_LOCAL(name, type) __thread type name +#define INIT_THREAD_LOCAL(name) +#define SET_THREAD_LOCAL(name, value) name = value +#define LOCALIZE_THREAD_LOCAL(name, type) + +#else//!__ELF__ + +#include + +#define DECLARE_THREAD_LOCAL(name, type) pthread_key_t name##_KEY + +#define INIT_THREAD_LOCAL(name) \ + do { \ + if (pthread_key_create(&name##_KEY, NULL) != 0) { assert(0); } \ + } while (0) + +#define SET_THREAD_LOCAL(name, value) pthread_setspecific(name##_KEY, (void *)(size_t)value); + +#define LOCALIZE_THREAD_LOCAL(name, type) type name = (type)(size_t)pthread_getspecific(name##_KEY) + +#endif//__ELF__ +#endif//TLS_H diff --git a/test/.gitignore b/test/.gitignore new file mode 100644 index 000000000..e04430786 --- /dev/null +++ b/test/.gitignore @@ -0,0 +1,5 @@ +test +cmake_install.cmake +CMakeFiles +*.o +.libs diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt new file mode 100644 index 000000000..a331904e8 --- /dev/null +++ b/test/CMakeLists.txt @@ -0,0 +1,15 @@ +cmake_minimum_required(VERSION 2.6) +project(sylvan C CXX) +enable_testing() + +add_executable(sylvan_test main.c) +target_link_libraries(sylvan_test sylvan) + +add_executable(test_basic test_basic.c) +target_link_libraries(test_basic sylvan) + +add_executable(test_cxx test_cxx.cpp) +target_link_libraries(test_cxx sylvan stdc++) + +add_test(test_cxx test_cxx) +add_test(test_basic test_basic) diff --git a/test/main.c b/test/main.c new file mode 100644 index 000000000..941f60d2c --- /dev/null +++ b/test/main.c @@ -0,0 +1,350 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "test_assert.h" + +#include "llmsset.h" +#include "sylvan.h" + +#define BLACK "\33[22;30m" +#define GRAY "\33[01;30m" +#define RED "\33[22;31m" +#define LRED "\33[01;31m" +#define GREEN "\33[22;32m" +#define LGREEN "\33[01;32m" +#define BLUE "\33[22;34m" +#define LBLUE "\33[01;34m" +#define BROWN "\33[22;33m" +#define YELLOW "\33[01;33m" +#define CYAN "\33[22;36m" +#define LCYAN "\33[22;36m" +#define MAGENTA "\33[22;35m" +#define LMAGENTA "\33[01;35m" +#define NC "\33[0m" +#define BOLD "\33[1m" +#define ULINE "\33[4m" //underline +#define BLINK "\33[5m" +#define INVERT "\33[7m" + +__thread uint64_t seed = 1; + +uint64_t +xorshift_rand(void) +{ + uint64_t x = seed; + if (seed == 0) seed = rand(); + x ^= x >> 12; + 
x ^= x << 25; + x ^= x >> 27; + seed = x; + return x * 2685821657736338717LL; +} + +double +uniform_deviate(uint64_t seed) +{ + return seed * (1.0 / (0xffffffffffffffffL + 1.0)); +} + +int +rng(int low, int high) +{ + return low + uniform_deviate(xorshift_rand()) * (high-low); +} + +static inline BDD +make_random(int i, int j) +{ + if (i == j) return rng(0, 2) ? sylvan_true : sylvan_false; + + BDD yes = make_random(i+1, j); + BDD no = make_random(i+1, j); + BDD result = sylvan_invalid; + + switch(rng(0, 4)) { + case 0: + result = no; + sylvan_deref(yes); + break; + case 1: + result = yes; + sylvan_deref(no); + break; + case 2: + result = sylvan_ref(sylvan_makenode(i, yes, no)); + sylvan_deref(no); + sylvan_deref(yes); + break; + case 3: + default: + result = sylvan_ref(sylvan_makenode(i, no, yes)); + sylvan_deref(no); + sylvan_deref(yes); + break; + } + + return result; +} + +/** GC testing */ +VOID_TASK_2(gctest_fill, int, levels, int, width) +{ + if (levels > 1) { + int i; + for (i=0; i,<2,2,3,5,4,3>,<2,2,3,5,4,2>,<2,3,3,5,4,3>,<2,3,4,4,4,3>} + MDD proj = lddmc_cube((uint32_t[]){1,1,-2},3); + b = lddmc_cube((uint32_t[]){1,2}, 2); + b = lddmc_union_cube(b, (uint32_t[]){2,2}, 2); + b = lddmc_union_cube(b, (uint32_t[]){2,3}, 2); + test_assert(lddmc_project(a, proj)==b); + test_assert(lddmc_project_minus(a, proj, lddmc_false)==b); + test_assert(lddmc_project_minus(a, proj, b)==lddmc_false); + + // Test relprod + + a = lddmc_cube((uint32_t[]){1},1); + b = lddmc_cube((uint32_t[]){1,2},2); + proj = lddmc_cube((uint32_t[]){1,2,-1}, 3); + test_assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj)); + test_assert(lddmc_cube((uint32_t[]){3},1) == lddmc_relprod(a, lddmc_cube((uint32_t[]){1,3},2), proj)); + a = lddmc_union_cube(a, (uint32_t[]){2},1); + test_assert(lddmc_satcount(a) == 2); + test_assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj)); + b = lddmc_union_cube(b, (uint32_t[]){2,2},2); + test_assert(lddmc_cube((uint32_t[]){2},1) == lddmc_relprod(a, b, proj)); + b = lddmc_union_cube(b, (uint32_t[]){2,3},2); + test_assert(lddmc_satcount(lddmc_relprod(a, b, proj)) == 2); + test_assert(lddmc_union(lddmc_cube((uint32_t[]){2},1),lddmc_cube((uint32_t[]){3},1)) == lddmc_relprod(a, b, proj)); + + // Test relprev + MDD universe = lddmc_union(lddmc_cube((uint32_t[]){1},1), lddmc_cube((uint32_t[]){2},1)); + a = lddmc_cube((uint32_t[]){2},1); + b = lddmc_cube((uint32_t[]){1,2},2); + test_assert(lddmc_cube((uint32_t[]){1},1) == lddmc_relprev(a, b, proj, universe)); + test_assert(lddmc_cube((uint32_t[]){1},1) == lddmc_relprev(a, b, proj, lddmc_cube((uint32_t[]){1},1))); + a = lddmc_cube((uint32_t[]){1},1); + MDD next = lddmc_relprod(a, b, proj); + test_assert(lddmc_relprev(next, b, proj, a) == a); + + // Random tests + + MDD rnd1, rnd2; + + int i; + for (i=0; i<200; i++) { + int depth = rng(1, 20); + rnd1 = CALL(random_ldd, depth, rng(0, 30)); + rnd2 = CALL(random_ldd, depth, rng(0, 30)); + test_assert(rnd1 != lddmc_true); + test_assert(rnd2 != lddmc_true); + test_assert(lddmc_intersect(rnd1,rnd2) == lddmc_intersect(rnd2,rnd1)); + test_assert(lddmc_union(rnd1,rnd2) == lddmc_union(rnd2,rnd1)); + MDD tmp = lddmc_union(lddmc_minus(rnd1, rnd2), lddmc_minus(rnd2, rnd1)); + test_assert(lddmc_intersect(tmp, lddmc_intersect(rnd1, rnd2)) == lddmc_false); + test_assert(lddmc_union(tmp, lddmc_intersect(rnd1, rnd2)) == lddmc_union(rnd1, rnd2)); + test_assert(lddmc_minus(rnd1,rnd2) == lddmc_minus(rnd1, lddmc_intersect(rnd1,rnd2))); + } + + // Test file stuff + for (i=0; i<10; i++) { + FILE *f 
= fopen("__lddmc_test_bdd", "w+"); + int N = 20; + MDD rnd[N]; + size_t a[N]; + char sha[N][65]; + int j; + for (j=0;j 1) sscanf(argv[1], "%d", &threads); + + if (runtests(threads)) exit(1); + printf(NC); + exit(0); +} diff --git a/test/test_assert.h b/test/test_assert.h new file mode 100644 index 000000000..8cd18501a --- /dev/null +++ b/test/test_assert.h @@ -0,0 +1,13 @@ +#ifndef test_assert +#define test_assert(expr) do { \ + if (!(expr)) \ + { \ + fprintf(stderr, \ + "file %s: line %d (%s): precondition `%s' failed.\n", \ + __FILE__, \ + __LINE__, \ + __PRETTY_FUNCTION__, \ + #expr); \ + return 1; \ + } } while(0) +#endif diff --git a/test/test_basic.c b/test/test_basic.c new file mode 100644 index 000000000..f6cffcc15 --- /dev/null +++ b/test/test_basic.c @@ -0,0 +1,331 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "llmsset.h" +#include "sylvan.h" +#include "test_assert.h" + +__thread uint64_t seed = 1; + +uint64_t +xorshift_rand(void) +{ + uint64_t x = seed; + if (seed == 0) seed = rand(); + x ^= x >> 12; + x ^= x << 25; + x ^= x >> 27; + seed = x; + return x * 2685821657736338717LL; +} + +double +uniform_deviate(uint64_t seed) +{ + return seed * (1.0 / (0xffffffffffffffffL + 1.0)); +} + +int +rng(int low, int high) +{ + return low + uniform_deviate(xorshift_rand()) * (high-low); +} + +static inline BDD +make_random(int i, int j) +{ + if (i == j) return rng(0, 2) ? sylvan_true : sylvan_false; + + BDD yes = make_random(i+1, j); + BDD no = make_random(i+1, j); + BDD result = sylvan_invalid; + + switch(rng(0, 4)) { + case 0: + result = no; + sylvan_deref(yes); + break; + case 1: + result = yes; + sylvan_deref(no); + break; + case 2: + result = sylvan_ref(sylvan_makenode(i, yes, no)); + sylvan_deref(no); + sylvan_deref(yes); + break; + case 3: + default: + result = sylvan_ref(sylvan_makenode(i, no, yes)); + sylvan_deref(no); + sylvan_deref(yes); + break; + } + + return result; +} + +int testEqual(BDD a, BDD b) +{ + if (a == b) return 1; + + if (a == sylvan_invalid) { + fprintf(stderr, "a is invalid!\n"); + return 0; + } + + if (b == sylvan_invalid) { + fprintf(stderr, "b is invalid!\n"); + return 0; + } + + fprintf(stderr, "a and b are not equal!\n"); + + sylvan_fprint(stderr, a);fprintf(stderr, "\n"); + sylvan_fprint(stderr, b);fprintf(stderr, "\n"); + + return 0; +} + +int +test_bdd() +{ + test_assert(sylvan_makenode(sylvan_ithvar(1), sylvan_true, sylvan_true) == sylvan_not(sylvan_makenode(sylvan_ithvar(1), sylvan_false, sylvan_false))); + test_assert(sylvan_makenode(sylvan_ithvar(1), sylvan_false, sylvan_true) == sylvan_not(sylvan_makenode(sylvan_ithvar(1), sylvan_true, sylvan_false))); + test_assert(sylvan_makenode(sylvan_ithvar(1), sylvan_true, sylvan_false) == sylvan_not(sylvan_makenode(sylvan_ithvar(1), sylvan_false, sylvan_true))); + test_assert(sylvan_makenode(sylvan_ithvar(1), sylvan_false, sylvan_false) == sylvan_not(sylvan_makenode(sylvan_ithvar(1), sylvan_true, sylvan_true))); + + return 0; +} + +int +test_cube() +{ + LACE_ME; + BDDSET vars = sylvan_set_fromarray(((BDDVAR[]){1,2,3,4,6,8}), 6); + + uint8_t cube[6], check[6]; + int i, j; + for (i=0;i<6;i++) cube[i] = rng(0,3); + BDD bdd = sylvan_cube(vars, cube); + + sylvan_sat_one(bdd, vars, check); + for (i=0; i<6;i++) test_assert(cube[i] == check[i] || (cube[i] == 2 && check[i] == 0)); + + BDD picked = sylvan_pick_cube(bdd); + test_assert(testEqual(sylvan_and(picked, bdd), picked)); + + BDD t1 = sylvan_cube(vars, ((uint8_t[]){1,1,2,2,0,0})); + BDD t2 = 
sylvan_cube(vars, ((uint8_t[]){1,1,1,0,0,2})); + test_assert(testEqual(sylvan_union_cube(t1, vars, ((uint8_t[]){1,1,1,0,0,2})), sylvan_or(t1, t2))); + t2 = sylvan_cube(vars, ((uint8_t[]){2,2,2,1,1,0})); + test_assert(testEqual(sylvan_union_cube(t1, vars, ((uint8_t[]){2,2,2,1,1,0})), sylvan_or(t1, t2))); + t2 = sylvan_cube(vars, ((uint8_t[]){1,1,1,0,0,0})); + test_assert(testEqual(sylvan_union_cube(t1, vars, ((uint8_t[]){1,1,1,0,0,0})), sylvan_or(t1, t2))); + + bdd = make_random(1, 16); + for (j=0;j<10;j++) { + for (i=0;i<6;i++) cube[i] = rng(0,3); + BDD c = sylvan_cube(vars, cube); + test_assert(sylvan_union_cube(bdd, vars, cube) == sylvan_or(bdd, c)); + } + + for (i=0;i<10;i++) { + picked = sylvan_pick_cube(bdd); + test_assert(testEqual(sylvan_and(picked, bdd), picked)); + } + return 0; +} + +static int +test_operators() +{ + // We need to test: xor, and, or, nand, nor, imp, biimp, invimp, diff, less + LACE_ME; + + //int i; + BDD a = sylvan_ithvar(1); + BDD b = sylvan_ithvar(2); + BDD one = make_random(1, 12); + BDD two = make_random(6, 24); + + // Test or + test_assert(testEqual(sylvan_or(a, b), sylvan_makenode(1, b, sylvan_true))); + test_assert(testEqual(sylvan_or(a, b), sylvan_or(b, a))); + test_assert(testEqual(sylvan_or(one, two), sylvan_or(two, one))); + + // Test and + test_assert(testEqual(sylvan_and(a, b), sylvan_makenode(1, sylvan_false, b))); + test_assert(testEqual(sylvan_and(a, b), sylvan_and(b, a))); + test_assert(testEqual(sylvan_and(one, two), sylvan_and(two, one))); + + // Test xor + test_assert(testEqual(sylvan_xor(a, b), sylvan_makenode(1, b, sylvan_not(b)))); + test_assert(testEqual(sylvan_xor(a, b), sylvan_xor(a, b))); + test_assert(testEqual(sylvan_xor(a, b), sylvan_xor(b, a))); + test_assert(testEqual(sylvan_xor(one, two), sylvan_xor(two, one))); + test_assert(testEqual(sylvan_xor(a, b), sylvan_ite(a, sylvan_not(b), b))); + + // Test diff + test_assert(testEqual(sylvan_diff(a, b), sylvan_diff(a, b))); + test_assert(testEqual(sylvan_diff(a, b), sylvan_diff(a, sylvan_and(a, b)))); + test_assert(testEqual(sylvan_diff(a, b), sylvan_and(a, sylvan_not(b)))); + test_assert(testEqual(sylvan_diff(a, b), sylvan_ite(b, sylvan_false, a))); + test_assert(testEqual(sylvan_diff(one, two), sylvan_diff(one, two))); + test_assert(testEqual(sylvan_diff(one, two), sylvan_diff(one, sylvan_and(one, two)))); + test_assert(testEqual(sylvan_diff(one, two), sylvan_and(one, sylvan_not(two)))); + test_assert(testEqual(sylvan_diff(one, two), sylvan_ite(two, sylvan_false, one))); + + // Test biimp + test_assert(testEqual(sylvan_biimp(a, b), sylvan_makenode(1, sylvan_not(b), b))); + test_assert(testEqual(sylvan_biimp(a, b), sylvan_biimp(b, a))); + test_assert(testEqual(sylvan_biimp(one, two), sylvan_biimp(two, one))); + + // Test nand / and + test_assert(testEqual(sylvan_not(sylvan_and(a, b)), sylvan_nand(b, a))); + test_assert(testEqual(sylvan_not(sylvan_and(one, two)), sylvan_nand(two, one))); + + // Test nor / or + test_assert(testEqual(sylvan_not(sylvan_or(a, b)), sylvan_nor(b, a))); + test_assert(testEqual(sylvan_not(sylvan_or(one, two)), sylvan_nor(two, one))); + + // Test xor / biimp + test_assert(testEqual(sylvan_xor(a, b), sylvan_not(sylvan_biimp(b, a)))); + test_assert(testEqual(sylvan_xor(one, two), sylvan_not(sylvan_biimp(two, one)))); + + // Test imp + test_assert(testEqual(sylvan_imp(a, b), sylvan_ite(a, b, sylvan_true))); + test_assert(testEqual(sylvan_imp(one, two), sylvan_ite(one, two, sylvan_true))); + test_assert(testEqual(sylvan_imp(one, two), sylvan_not(sylvan_diff(one, 
two)))); + test_assert(testEqual(sylvan_invimp(one, two), sylvan_not(sylvan_less(one, two)))); + test_assert(testEqual(sylvan_imp(a, b), sylvan_invimp(b, a))); + test_assert(testEqual(sylvan_imp(one, two), sylvan_invimp(two, one))); + + return 0; +} + +int +test_relprod() +{ + LACE_ME; + + BDDVAR vars[] = {0,2,4}; + BDDVAR all_vars[] = {0,1,2,3,4,5}; + + BDDSET vars_set = sylvan_set_fromarray(vars, 3); + BDDSET all_vars_set = sylvan_set_fromarray(all_vars, 6); + + BDD s, t, next, prev; + BDD zeroes, ones; + + // transition relation: 000 --> 111 and !000 --> 000 + t = sylvan_false; + t = sylvan_union_cube(t, all_vars_set, ((uint8_t[]){0,1,0,1,0,1})); + t = sylvan_union_cube(t, all_vars_set, ((uint8_t[]){1,0,2,0,2,0})); + t = sylvan_union_cube(t, all_vars_set, ((uint8_t[]){2,0,1,0,2,0})); + t = sylvan_union_cube(t, all_vars_set, ((uint8_t[]){2,0,2,0,1,0})); + + s = sylvan_cube(vars_set, (uint8_t[]){0,0,1}); + zeroes = sylvan_cube(vars_set, (uint8_t[]){0,0,0}); + ones = sylvan_cube(vars_set, (uint8_t[]){1,1,1}); + + next = sylvan_relnext(s, t, all_vars_set); + prev = sylvan_relprev(t, next, all_vars_set); + test_assert(next == zeroes); + test_assert(prev == sylvan_not(zeroes)); + + next = sylvan_relnext(next, t, all_vars_set); + prev = sylvan_relprev(t, next, all_vars_set); + test_assert(next == ones); + test_assert(prev == zeroes); + + t = sylvan_cube(all_vars_set, (uint8_t[]){0,0,0,0,0,1}); + test_assert(sylvan_relprev(t, s, all_vars_set) == zeroes); + test_assert(sylvan_relprev(t, sylvan_not(s), all_vars_set) == sylvan_false); + test_assert(sylvan_relnext(s, t, all_vars_set) == sylvan_false); + test_assert(sylvan_relnext(zeroes, t, all_vars_set) == s); + + t = sylvan_cube(all_vars_set, (uint8_t[]){0,0,0,0,0,2}); + test_assert(sylvan_relprev(t, s, all_vars_set) == zeroes); + test_assert(sylvan_relprev(t, zeroes, all_vars_set) == zeroes); + test_assert(sylvan_relnext(sylvan_not(zeroes), t, all_vars_set) == sylvan_false); + + return 0; +} + +int +test_compose() +{ + LACE_ME; + + BDD a = sylvan_ithvar(1); + BDD b = sylvan_ithvar(2); + + BDD a_or_b = sylvan_or(a, b); + + BDD one = make_random(3, 16); + BDD two = make_random(8, 24); + + BDDMAP map = sylvan_map_empty(); + + map = sylvan_map_add(map, 1, one); + map = sylvan_map_add(map, 2, two); + + test_assert(sylvan_map_key(map) == 1); + test_assert(sylvan_map_value(map) == one); + test_assert(sylvan_map_key(sylvan_map_next(map)) == 2); + test_assert(sylvan_map_value(sylvan_map_next(map)) == two); + + test_assert(testEqual(one, sylvan_compose(a, map))); + test_assert(testEqual(two, sylvan_compose(b, map))); + + test_assert(testEqual(sylvan_or(one, two), sylvan_compose(a_or_b, map))); + + map = sylvan_map_add(map, 2, one); + test_assert(testEqual(sylvan_compose(a_or_b, map), one)); + + map = sylvan_map_add(map, 1, two); + test_assert(testEqual(sylvan_or(one, two), sylvan_compose(a_or_b, map))); + + test_assert(testEqual(sylvan_and(one, two), sylvan_compose(sylvan_and(a, b), map))); + return 0; +} + +int runtests() +{ + // we are not testing garbage collection + sylvan_gc_disable(); + + if (test_bdd()) return 1; + for (int j=0;j<10;j++) if (test_cube()) return 1; + for (int j=0;j<10;j++) if (test_relprod()) return 1; + for (int j=0;j<10;j++) if (test_compose()) return 1; + for (int j=0;j<10;j++) if (test_operators()) return 1; + return 0; +} + +int main() +{ + // Standard Lace initialization with 1 worker + lace_init(1, 0); + lace_startup(0, NULL, NULL); + + // Simple Sylvan initialization, also initialize BDD support + 
sylvan_init_package(1LL<<20, 1LL<<20, 1LL<<16, 1LL<<16); + sylvan_init_bdd(1); + + int res = runtests(); + + sylvan_quit(); + lace_exit(); + + return res; +} diff --git a/test/test_cxx.cpp b/test/test_cxx.cpp new file mode 100644 index 000000000..c1d984b3f --- /dev/null +++ b/test/test_cxx.cpp @@ -0,0 +1,51 @@ +/** + * Just a small test file to ensure that Sylvan can compile in C++ + */ + +#include +#include +#include + +#include "test_assert.h" + +using namespace sylvan; + +int runtest() +{ + Bdd one = Bdd::bddOne(); + Bdd zero = Bdd::bddZero(); + + test_assert(one != zero); + test_assert(one == !zero); + + Bdd v1 = Bdd::bddVar(1); + Bdd v2 = Bdd::bddVar(2); + + Bdd t = v1 + v2; + + BddMap map; + map.put(2, t); + + test_assert(v2.Compose(map) == (v1 + v2)); + test_assert((t * v2) == v2); + + return 0; +} + +int main() +{ + // Standard Lace initialization with 1 worker + lace_init(1, 0); + lace_startup(0, NULL, NULL); + + // Simple Sylvan initialization, also initialize BDD support + sylvan_init_package(1LL<<16, 1LL<<16, 1LL<<16, 1LL<<16); + sylvan_init_bdd(1); + + int res = runtest(); + + sylvan_quit(); + lace_exit(); + + return res; +} From 1d56cf04304f7269197819c9747bfc7c027d3973 Mon Sep 17 00:00:00 2001 From: dehnert Date: Sat, 7 May 2016 18:19:39 +0200 Subject: [PATCH 3/3] backported a bugfix Former-commit-id: 219e8e0e3bf74e234c2fc23ea7b4b3a6e97dc679 --- .../expressions/ExprtkExpressionEvaluator.cpp | 24 +++++++++---------- .../expressions/ExprtkExpressionEvaluator.h | 5 ++-- 2 files changed, 15 insertions(+), 14 deletions(-) mode change 100644 => 100755 src/storage/expressions/ExprtkExpressionEvaluator.cpp mode change 100644 => 100755 src/storage/expressions/ExprtkExpressionEvaluator.h diff --git a/src/storage/expressions/ExprtkExpressionEvaluator.cpp b/src/storage/expressions/ExprtkExpressionEvaluator.cpp old mode 100644 new mode 100755 index 4e79a8dc0..1d225ec12 --- a/src/storage/expressions/ExprtkExpressionEvaluator.cpp +++ b/src/storage/expressions/ExprtkExpressionEvaluator.cpp @@ -23,10 +23,10 @@ namespace storm { template bool ExprtkExpressionEvaluatorBase::asBool(Expression const& expression) const { - BaseExpression const* expressionPtr = expression.getBaseExpressionPointer().get(); - auto const& expressionPair = this->compiledExpressions.find(expression.getBaseExpressionPointer().get()); + std::shared_ptr expressionPtr = expression.getBaseExpressionPointer(); + auto const& expressionPair = this->compiledExpressions.find(expression.getBaseExpressionPointer()); if (expressionPair == this->compiledExpressions.end()) { - CompiledExpressionType const& compiledExpression = this->getCompiledExpression(expressionPtr); + CompiledExpressionType const& compiledExpression = this->getCompiledExpression(expression); return compiledExpression.value() == ValueType(1); } return expressionPair->second.value() == ValueType(1); @@ -34,22 +34,22 @@ namespace storm { template int_fast64_t ExprtkExpressionEvaluatorBase::asInt(Expression const& expression) const { - BaseExpression const* expressionPtr = expression.getBaseExpressionPointer().get(); - auto const& expressionPair = this->compiledExpressions.find(expression.getBaseExpressionPointer().get()); + std::shared_ptr expressionPtr = expression.getBaseExpressionPointer(); + auto const& expressionPair = this->compiledExpressions.find(expression.getBaseExpressionPointer()); if (expressionPair == this->compiledExpressions.end()) { - CompiledExpressionType const& compiledExpression = this->getCompiledExpression(expressionPtr); + 
CompiledExpressionType const& compiledExpression = this->getCompiledExpression(expression); return static_cast(compiledExpression.value()); } return static_cast(expressionPair->second.value()); } template - typename ExprtkExpressionEvaluatorBase::CompiledExpressionType& ExprtkExpressionEvaluatorBase::getCompiledExpression(BaseExpression const* expression) const { - std::pair result = this->compiledExpressions.emplace(expression, CompiledExpressionType()); + typename ExprtkExpressionEvaluatorBase::CompiledExpressionType& ExprtkExpressionEvaluatorBase::getCompiledExpression(storm::expressions::Expression const& expression) const { + std::pair result = this->compiledExpressions.emplace(expression.getBaseExpressionPointer(), CompiledExpressionType()); CompiledExpressionType& compiledExpression = result.first->second; compiledExpression.register_symbol_table(symbolTable); bool parsingOk = parser.compile(ToExprtkStringVisitor().toString(expression), compiledExpression); - STORM_LOG_ASSERT(parsingOk, "Expression was not properly parsed by ExprTk: " << *expression); + STORM_LOG_ASSERT(parsingOk, "Expression was not properly parsed by ExprTk: " << expression); return compiledExpression; } @@ -73,10 +73,10 @@ namespace storm { } double ExprtkExpressionEvaluator::asRational(Expression const& expression) const { - BaseExpression const* expressionPtr = expression.getBaseExpressionPointer().get(); - auto const& expressionPair = this->compiledExpressions.find(expression.getBaseExpressionPointer().get()); + std::shared_ptr expressionPtr = expression.getBaseExpressionPointer(); + auto const& expressionPair = this->compiledExpressions.find(expression.getBaseExpressionPointer()); if (expressionPair == this->compiledExpressions.end()) { - CompiledExpressionType const& compiledExpression = this->getCompiledExpression(expressionPtr); + CompiledExpressionType const& compiledExpression = this->getCompiledExpression(expression); return static_cast(compiledExpression.value()); } return static_cast(expressionPair->second.value()); diff --git a/src/storage/expressions/ExprtkExpressionEvaluator.h b/src/storage/expressions/ExprtkExpressionEvaluator.h old mode 100644 new mode 100755 index 77fc94993..2ae6800ba --- a/src/storage/expressions/ExprtkExpressionEvaluator.h +++ b/src/storage/expressions/ExprtkExpressionEvaluator.h @@ -1,6 +1,7 @@ #ifndef STORM_STORAGE_EXPRESSIONS_EXPRTKEXPRESSIONEVALUATOR_H_ #define STORM_STORAGE_EXPRESSIONS_EXPRTKEXPRESSIONEVALUATOR_H_ +#include #include #include @@ -26,14 +27,14 @@ namespace storm { protected: typedef double ValueType; typedef exprtk::expression CompiledExpressionType; - typedef std::unordered_map CacheType; + typedef std::unordered_map, CompiledExpressionType> CacheType; /*! * Adds a compiled version of the given expression to the internal storage. * * @param expression The expression that is to be compiled. */ - CompiledExpressionType& getCompiledExpression(BaseExpression const* expression) const; + CompiledExpressionType& getCompiledExpression(storm::expressions::Expression const& expression) const; // The parser used. mutable exprtk::parser parser;
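            // Illustrative sketch of the caching behaviour above (`evaluator` and `expr` are an
            // assumed ExprtkExpressionEvaluator and Expression): because the cache is keyed on the
            // shared BaseExpression pointer, repeated evaluations reuse the compiled expression.
            //
            //   evaluator.asInt(expr);   // compiled via getCompiledExpression(expr) and cached
            //   evaluator.asInt(expr);   // cache hit in compiledExpressions, no recompilation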