diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7c55d776d..75d2a807c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,13 @@ The releases of major and minor versions contain an overview of changes since th
 
 Version 1.1.x
 -------------
+Long-run average computation via value iteration, LP-based MDP model checking, and a separate binary for parametric model checking.
+
+### Version 1.1.1
+- C++ API changes: building a model now takes a BuilderOptions object instead of an extended list of Booleans and no longer depends on the settings.
+- CLI-related functionality moved from storm-lib to storm-cli-utilities.
+- storm-pars: support for well-definedness constraints in MDPs.
+- Symbolic (MT/BDD) bisimulation.
 
 ### Version 1.1.0 (2017/8)
 
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 722b36d4d..32e5ff8b2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -33,7 +33,7 @@ MARK_AS_ADVANCED(STORM_FORCE_POPCNT)
 option(USE_BOOST_STATIC_LIBRARIES "Sets whether the Boost libraries should be linked statically." OFF)
 option(STORM_USE_INTELTBB "Sets whether the Intel TBB libraries should be used." OFF)
 option(STORM_USE_GUROBI "Sets whether Gurobi should be used." OFF)
-set(USE_CARL ON)
+set(STORM_CARL_DIR_HINT "" CACHE STRING "A hint to the directory where the preferred CArL version can be found. If CArL cannot be found there, the OS's default paths are searched.")
 option(STORM_FORCE_SHIPPED_CARL "Sets whether the shipped version of carl is to be used no matter whether carl is found or not." OFF)
 MARK_AS_ADVANCED(STORM_FORCE_SHIPPED_CARL)
 option(USE_SMTRAT "Sets whether SMT-RAT should be included." OFF)
@@ -49,6 +49,8 @@ export_option(STORM_USE_CLN_EA)
 option(STORM_USE_CLN_RF "Sets whether CLN instead of GMP numbers should be used for rational functions." ON)
 export_option(STORM_USE_CLN_RF)
 option(BUILD_SHARED_LIBS "Build the Storm library dynamically" OFF)
+option(STORM_DEBUG_CUDD "Build CUDD in debug mode." OFF)
+MARK_AS_ADVANCED(STORM_DEBUG_CUDD)
 set(BOOST_ROOT "" CACHE STRING "A hint to the root directory of Boost (optional).")
 set(GUROBI_ROOT "" CACHE STRING "A hint to the root directory of Gurobi (optional).")
 set(Z3_ROOT "" CACHE STRING "A hint to the root directory of Z3 (optional).")
diff --git a/resources/3rdparty/CMakeLists.txt b/resources/3rdparty/CMakeLists.txt
index 243c0b0e5..ce379dc0e 100644
--- a/resources/3rdparty/CMakeLists.txt
+++ b/resources/3rdparty/CMakeLists.txt
@@ -99,16 +99,24 @@ list(APPEND STORM_DEP_TARGETS ExprTk)
 
 # Use the shipped version of Sparsepp
 message (STATUS "Storm - Including Sparsepp.")
-include_directories("${PROJECT_SOURCE_DIR}/resources/3rdparty/sparsepp")
-
-# Add sparsepp.h to the headers that are copied to the include directory in thebuild directory.
-add_custom_command(
-	OUTPUT ${CMAKE_BINARY_DIR}/include/resources/3rdparty/sparsepp/sparsepp.h
-	COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_BINARY_DIR}/include/resources/3rdparty/sparsepp
-	COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_SOURCE_DIR}/resources/3rdparty/sparsepp/sparsepp.h ${CMAKE_BINARY_DIR}/include/resources/3rdparty/sparsepp/sparsepp.h
-	DEPENDS ${PROJECT_SOURCE_DIR}/resources/3rdparty/sparsepp/sparsepp.h
-)
-list(APPEND STORM_RESOURCES_HEADERS "${CMAKE_BINARY_DIR}/include/resources/3rdparty/sparsepp/sparsepp.h")
+set(SPARSEPP_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/resources/3rdparty/sparsepp/sparsepp")
+file(GLOB SPARSEPP_HEADERS "${SPARSEPP_INCLUDE_DIR}/*.h")
+
+# Add the sparsepp headers to the headers that are copied to the include directory in the build directory.
+set(SPARSEPP_BINDIR_DIR ${CMAKE_BINARY_DIR}/include/resources/3rdparty/sparsepp)
+include_directories("${SPARSEPP_BINDIR_DIR}")
+foreach(HEADER ${SPARSEPP_HEADERS})
+    string(REGEX REPLACE "${PROJECT_SOURCE_DIR}/src/?" "" RELATIVE_HEADER_PATH ${HEADER})
+    string(REGEX MATCH "(.*)[/\\]" RELATIVE_DIRECTORY ${RELATIVE_HEADER_PATH})
+    string(REGEX REPLACE "${RELATIVE_DIRECTORY}/?" "" HEADER_FILENAME ${RELATIVE_HEADER_PATH})
+    add_custom_command(
+        OUTPUT ${SPARSEPP_BINDIR_DIR}/sparsepp/${HEADER_FILENAME}
+        COMMAND ${CMAKE_COMMAND} -E make_directory ${SPARSEPP_BINDIR_DIR}/sparsepp
+        COMMAND ${CMAKE_COMMAND} -E copy ${HEADER} ${SPARSEPP_BINDIR_DIR}/sparsepp/${HEADER_FILENAME}
+        DEPENDS ${SPARSEPP_INCLUDE_DIR}/${HEADER_FILENAME}
+    )
+    list(APPEND SPARSEPP_BINDIR_HEADERS ${SPARSEPP_BINDIR_DIR}/sparsepp/${HEADER_FILENAME})
+endforeach()
 
 #############################################################
 ##
@@ -202,109 +210,107 @@ set(STORM_HAVE_CARL OFF)
 set(CARL_MINYEAR 17)
 set(CARL_MINMONTH 08)
 set(CARL_MINPATCH 0)
-if(USE_CARL)
-	if (NOT STORM_FORCE_SHIPPED_CARL)
-    	find_package(carl QUIET)
-	endif()
-    if(carl_FOUND AND NOT STORM_FORCE_SHIPPED_CARL)
-        get_target_property(carlLOCATION lib_carl LOCATION)
-        if(${carlLOCATION} STREQUAL "carlLOCATION-NOTFOUND")
-            message(SEND_ERROR "Library location for carl is not found, did you build carl?")
-        elseif(EXISTS ${carlLOCATION})
-            #empty on purpose
-        else()
-            message(SEND_ERROR "File ${carlLOCATION} does not exist, did you build carl?")
-        endif()
-        if(${carl_MINORYEARVERSION} LESS ${CARL_MINYEAR})
+if (NOT STORM_FORCE_SHIPPED_CARL)
+    if (NOT "${STORM_CARL_DIR_HINT}" STREQUAL "")
+        find_package(carl QUIET PATHS ${STORM_CARL_DIR_HINT} NO_DEFAULT_PATH)
+    endif()
+    if (NOT carl_FOUND)
+        find_package(carl QUIET)
+    endif()
+endif()
+if(carl_FOUND AND NOT STORM_FORCE_SHIPPED_CARL)
+    get_target_property(carlLOCATION lib_carl LOCATION)
+    if(${carlLOCATION} STREQUAL "carlLOCATION-NOTFOUND")
+        message(SEND_ERROR "Library location for carl is not found, did you build carl?")
+    elseif(EXISTS ${carlLOCATION})
+        #empty on purpose
+    else()
+        message(SEND_ERROR "File ${carlLOCATION} does not exist, did you build carl?")
+    endif()
+    if(${carl_MINORYEARVERSION} LESS ${CARL_MINYEAR})
+        message(SEND_ERROR "Carl outdated, require ${CARL_MINYEAR}.${CARL_MINMONTH}.${CARL_MINPATCH}, have ${carl_VERSION}")
+    elseif(${carl_MINORYEARVERSION} EQUAL ${CARL_MINYEAR})
+        if(${carl_MINORMONTHVERSION} LESS ${CARL_MINMONTH})
             message(SEND_ERROR "Carl outdated, require ${CARL_MINYEAR}.${CARL_MINMONTH}.${CARL_MINPATCH}, have ${carl_VERSION}")
-        elseif(${carl_MINORYEARVERSION} EQUAL ${CARL_MINYEAR})
-            if(${carl_MINORMONTHVERSION} LESS ${CARL_MINMONTH})
+        elseif(${carl_MINORMONTHVERSION} EQUAL ${CARL_MINMONTH})
+            if(${carl_MAINTENANCEVERSION} LESS ${CARL_MINPATCH})
                 message(SEND_ERROR "Carl outdated, require ${CARL_MINYEAR}.${CARL_MINMONTH}.${CARL_MINPATCH}, have ${carl_VERSION}")
-             elseif(${carl_MINORMONTHVERSION} EQUAL ${CARL_MINMONTH})
-                if(${carl_MAINTENANCEVERSION} LESS ${CARL_MINPATCH})
-                    message(SEND_ERROR "Carl outdated, require ${CARL_MINYEAR}.${CARL_MINMONTH}.${CARL_MINPATCH}, have ${carl_VERSION}")
-                endif()
             endif()
         endif()
+    endif()
 
-        set(STORM_SHIPPED_CARL OFF)
-        set(STORM_HAVE_CARL ON)
-        message(STATUS "Storm - Use system version of carl.")
-        message(STATUS "Storm - Linking with preinstalled carl ${carl_VERSION} (include: ${carl_INCLUDE_DIR}, library ${carl_LIBRARIES}, CARL_USE_CLN_NUMBERS: ${CARL_USE_CLN_NUMBERS}, CARL_USE_GINAC: ${CARL_USE_GINAC}).")
-        set(STORM_HAVE_CLN ${CARL_USE_CLN_NUMBERS})
-        set(STORM_HAVE_GINAC ${CARL_USE_GINAC})
-
-    else()
-		set(STORM_SHIPPED_CARL ON)
-		# The first external project will be built at *configure stage*
-		message("START CARL CONFIG PROCESS")
-		file(MAKE_DIRECTORY ${STORM_3RDPARTY_BINARY_DIR}/carl_download) 
-		execute_process(
-		COMMAND ${CMAKE_COMMAND} ${STORM_3RDPARTY_SOURCE_DIR}/carl "-DSTORM_3RDPARTY_BINARY_DIR=${STORM_3RDPARTY_BINARY_DIR}" "-DBoost_LIBRARY_DIRS=${Boost_LIBRARY_DIRS}" "-DBoost_INCLUDE_DIRS=${Boost_INCLUDE_DIRS}"
+    set(STORM_SHIPPED_CARL OFF)
+    set(STORM_HAVE_CARL ON)
+    message(STATUS "Storm - Use system version of carl.")
+    message(STATUS "Storm - Linking with preinstalled carl ${carl_VERSION} (include: ${carl_INCLUDE_DIR}, library ${carl_LIBRARIES}, CARL_USE_CLN_NUMBERS: ${CARL_USE_CLN_NUMBERS}, CARL_USE_GINAC: ${CARL_USE_GINAC}).")
+    set(STORM_HAVE_CLN ${CARL_USE_CLN_NUMBERS})
+    set(STORM_HAVE_GINAC ${CARL_USE_GINAC})
+else()
+	set(STORM_SHIPPED_CARL ON)
+	# The first external project will be built at *configure stage*
+	message("START CARL CONFIG PROCESS")
+	file(MAKE_DIRECTORY ${STORM_3RDPARTY_BINARY_DIR}/carl_download) 
+	execute_process(
+	COMMAND ${CMAKE_COMMAND} ${STORM_3RDPARTY_SOURCE_DIR}/carl "-DSTORM_3RDPARTY_BINARY_DIR=${STORM_3RDPARTY_BINARY_DIR}" "-DBoost_LIBRARY_DIRS=${Boost_LIBRARY_DIRS}" "-DBoost_INCLUDE_DIRS=${Boost_INCLUDE_DIRS}"
+	WORKING_DIRECTORY ${STORM_3RDPARTY_BINARY_DIR}/carl_download
+	OUTPUT_VARIABLE carlconfig_out
+	RESULT_VARIABLE carlconfig_result)
+								
+	if(NOT carlconfig_result)
+		message("${carlconfig_out}")
+	endif()
+	execute_process(
+		COMMAND ${CMAKE_COMMAND} --build . --target carl-config
 		WORKING_DIRECTORY ${STORM_3RDPARTY_BINARY_DIR}/carl_download
 		OUTPUT_VARIABLE carlconfig_out
-		RESULT_VARIABLE carlconfig_result)
-								
+		RESULT_VARIABLE carlconfig_result
+		)
 		if(NOT carlconfig_result)
-			message("${carlconfig_out}")
-		endif()
-		execute_process(
-			COMMAND ${CMAKE_COMMAND} --build . --target carl-config
-			WORKING_DIRECTORY ${STORM_3RDPARTY_BINARY_DIR}/carl_download
-			OUTPUT_VARIABLE carlconfig_out
-			RESULT_VARIABLE carlconfig_result
-			)
-			if(NOT carlconfig_result)
-			message("${carlconfig_out}")
-		endif()
-    	message("END CARL CONFIG PROCESS")
+		message("${carlconfig_out}")
+	endif()
+    message("END CARL CONFIG PROCESS")
     	
-        message(STATUS "Storm - Using shipped version of carl.")
-        ExternalProject_Add(
-                carl
-                SOURCE_DIR ${STORM_3RDPARTY_BINARY_DIR}/carl
-                CONFIGURE_COMMAND ""
-                BUILD_IN_SOURCE 1
-                BUILD_COMMAND make lib_carl
-                INSTALL_COMMAND make install
-                LOG_BUILD ON
-				LOG_INSTALL ON
-                BUILD_BYPRODUCTS ${STORM_3RDPARTY_BINARY_DIR}/carl/lib/libcarl${DYNAMIC_EXT}
-        )
-        include(${STORM_3RDPARTY_BINARY_DIR}/carl/carlConfig.cmake)
-
-        set(STORM_HAVE_CLN ${CARL_USE_CLN_NUMBERS})
-        set(STORM_HAVE_GINAC ${CARL_USE_GINAC})
-
-		add_dependencies(resources carl)
-        set(carl_INCLUDE_DIR "${STORM_3RDPARTY_BINARY_DIR}/carl/include/")
-		set(carl_LIBRARIES ${STORM_3RDPARTY_BINARY_DIR}/carl/lib/libcarl${DYNAMIC_EXT})
-        set(STORM_HAVE_CARL ON)
-
-        message(STATUS "Storm - Linking with shipped carl ${carl_VERSION} (include: ${carl_INCLUDE_DIR}, library ${carl_LIBRARIES}, CARL_USE_CLN_NUMBERS: ${CARL_USE_CLN_NUMBERS}, CARL_USE_GINAC: ${CARL_USE_GINAC}).")
-
-        
-        # install the carl dynamic library if we build it
-        install(FILES ${STORM_3RDPARTY_BINARY_DIR}/carl/lib/libcarl.${carl_VERSION}${DYNAMIC_EXT} DESTINATION lib)
-    endif()
+    message(STATUS "Storm - Using shipped version of carl.")
+    ExternalProject_Add(
+            carl
+            SOURCE_DIR ${STORM_3RDPARTY_BINARY_DIR}/carl
+            CONFIGURE_COMMAND ""
+            BUILD_IN_SOURCE 1
+            BUILD_COMMAND make lib_carl
+            INSTALL_COMMAND make install
+            LOG_BUILD ON
+			LOG_INSTALL ON
+            BUILD_BYPRODUCTS ${STORM_3RDPARTY_BINARY_DIR}/carl/lib/libcarl${DYNAMIC_EXT}
+    )
+    include(${STORM_3RDPARTY_BINARY_DIR}/carl/carlConfig.cmake)
 
-    if(STORM_USE_CLN_RF AND NOT STORM_HAVE_CLN)
-		message(FATAL_ERROR "Cannot use CLN numbers if carl is build without.")
-	endif()
-    if(STORM_USE_CLN_RF AND NOT STORM_HAVE_GINAC)
-        message(FATAL_ERROR "Cannot use CLN numbers if carl is build without ginac.")
-    endif()
+    set(STORM_HAVE_CLN ${CARL_USE_CLN_NUMBERS})
+    set(STORM_HAVE_GINAC ${CARL_USE_GINAC})
 
+	add_dependencies(resources carl)
+    set(carl_INCLUDE_DIR "${STORM_3RDPARTY_BINARY_DIR}/carl/include/")
+	set(carl_LIBRARIES ${STORM_3RDPARTY_BINARY_DIR}/carl/lib/libcarl${DYNAMIC_EXT})
+    set(STORM_HAVE_CARL ON)
 
-    #The library that needs symbols must be first, then the library that resolves the symbol.
+    message(STATUS "Storm - Linking with shipped carl ${carl_VERSION} (include: ${carl_INCLUDE_DIR}, library ${carl_LIBRARIES}, CARL_USE_CLN_NUMBERS: ${CARL_USE_CLN_NUMBERS}, CARL_USE_GINAC: ${CARL_USE_GINAC}).")
+        
+    # install the carl dynamic library if we built it
+    install(FILES ${STORM_3RDPARTY_BINARY_DIR}/carl/lib/libcarl.${carl_VERSION}${DYNAMIC_EXT} DESTINATION lib)
+endif()
 
-    list(APPEND STORM_DEP_IMP_TARGETS lib_carl)
-    if(STORM_USE_CLN_EA OR STORM_USE_CLN_RF)
-        list(APPEND STORM_DEP_IMP_TARGETS GINAC_SHARED CLN_SHARED)
-    endif()
-    list(APPEND STORM_DEP_IMP_TARGETS GMPXX_SHARED GMP_SHARED)
+if(STORM_USE_CLN_RF AND NOT STORM_HAVE_CLN)
+	message(FATAL_ERROR "Cannot use CLN numbers if carl is built without them.")
+endif()
+if(STORM_USE_CLN_RF AND NOT STORM_HAVE_GINAC)
+    message(FATAL_ERROR "Cannot use CLN numbers if carl is built without ginac.")
+endif()
 
+# The library that needs symbols must be first, then the library that resolves the symbol.
+list(APPEND STORM_DEP_IMP_TARGETS lib_carl)
+if(STORM_USE_CLN_EA OR STORM_USE_CLN_RF)
+    list(APPEND STORM_DEP_IMP_TARGETS GINAC_SHARED CLN_SHARED)
 endif()
+list(APPEND STORM_DEP_IMP_TARGETS GMPXX_SHARED GMP_SHARED)
 
 
 #############################################################
@@ -410,12 +416,18 @@ else()
     set(sylvan_dep lib_carl)
 endif()
 
+if (STORM_DEBUG_SYLVAN)
+    set(SYLVAN_BUILD_TYPE "Debug")
+else()
+    set(SYLVAN_BUILD_TYPE "Release")
+endif()
+
 ExternalProject_Add(
         sylvan
         DOWNLOAD_COMMAND ""
         PREFIX "sylvan"
         SOURCE_DIR ${STORM_3RDPARTY_SOURCE_DIR}/sylvan
-        CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DGMP_LOCATION=${GMP_LIB_LOCATION}  -DGMP_INCLUDE=${GMP_INCLUDE_DIR}  -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DSYLVAN_BUILD_DOCS=OFF -DSYLVAN_BUILD_EXAMPLES=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DUSE_CARL=ON -Dcarl_INCLUDE_DIR=${carl_INCLUDE_DIR} -DSYLVAN_PORTABLE=${STORM_PORTABLE} -Dcarl_LIBRARIES=${carl_LIBRARIES} -DBUILD_SHARED_LIBS=OFF -DSYLVAN_BUILD_TESTS=OFF
+        CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DGMP_LOCATION=${GMP_LIB_LOCATION}  -DGMP_INCLUDE=${GMP_INCLUDE_DIR}  -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DSYLVAN_BUILD_DOCS=OFF -DSYLVAN_BUILD_EXAMPLES=OFF -DCMAKE_BUILD_TYPE=${SYLVAN_BUILD_TYPE} -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DUSE_CARL=ON -Dcarl_INCLUDE_DIR=${carl_INCLUDE_DIR} -DSYLVAN_PORTABLE=${STORM_PORTABLE} -Dcarl_LIBRARIES=${carl_LIBRARIES} -DBUILD_SHARED_LIBS=OFF -DSYLVAN_BUILD_TESTS=OFF
         BINARY_DIR ${STORM_3RDPARTY_BINARY_DIR}/sylvan
         BUILD_IN_SOURCE 0
         INSTALL_COMMAND ""
@@ -668,4 +680,4 @@ if(ENABLE_CUDA)
     include_directories("${PROJECT_SOURCE_DIR}/cuda/kernels/")
 endif()
 
-add_custom_target(copy_resources_headers DEPENDS ${CMAKE_BINARY_DIR}/include/resources/3rdparty/sparsepp/sparsepp.h ${CMAKE_BINARY_DIR}/include/resources/3rdparty/sparsepp/sparsepp.h)
+add_custom_target(copy_resources_headers DEPENDS ${SPARSEPP_BINDIR_HEADERS})
diff --git a/resources/3rdparty/cudd-3.0.0/cudd/cudd.h b/resources/3rdparty/cudd-3.0.0/cudd/cudd.h
index 20acf5854..6f63f7a70 100644
--- a/resources/3rdparty/cudd-3.0.0/cudd/cudd.h
+++ b/resources/3rdparty/cudd-3.0.0/cudd/cudd.h
@@ -503,6 +503,9 @@ typedef void (*DD_TOHFP)(DdManager *, void *);
 extern "C" {
 #endif
 
+// Make this visible to the outside.
+extern DdNode * cuddUniqueInter(DdManager *unique, int index, DdNode *T, DdNode *E);
+    
 extern DdNode * Cudd_addNewVar(DdManager *dd);
 extern DdNode * Cudd_addNewVarAtLevel(DdManager *dd, int level);
 extern DdNode * Cudd_bddNewVar(DdManager *dd);
diff --git a/resources/3rdparty/cudd-3.0.0/cudd/cuddAddAbs.c b/resources/3rdparty/cudd-3.0.0/cudd/cuddAddAbs.c
index 1f8414017..a1d7e6105 100644
--- a/resources/3rdparty/cudd-3.0.0/cudd/cuddAddAbs.c
+++ b/resources/3rdparty/cudd-3.0.0/cudd/cuddAddAbs.c
@@ -1024,6 +1024,10 @@ cuddAddMinAbstractRepresentativeRecur(
         return(res1);
     }
     
+    if ((res = cuddCacheLookup2(manager, Cudd_addMinAbstractRepresentative, f, cube)) != NULL) {
+        return(res);
+    }
+    
     /* Abstract a variable that does not appear in f. */
     if (cuddI(manager,f->index) > cuddI(manager,cube->index)) {
         res = cuddAddMinAbstractRepresentativeRecur(manager, f, cuddT(cube));
@@ -1044,10 +1048,6 @@ cuddAddMinAbstractRepresentativeRecur(
        	return(res1);
     }
     
-    if ((res = cuddCacheLookup2(manager, Cudd_addMinAbstractRepresentative, f, cube)) != NULL) {
-        return(res);
-    }
-    
     
     E = cuddE(f);
     T = cuddT(f);
@@ -1211,6 +1211,10 @@ cuddAddMaxAbstractRepresentativeRecur(
             
     }
     
+    if ((res = cuddCacheLookup2(manager, Cudd_addMaxAbstractRepresentative, f, cube)) != NULL) {
+        return(res);
+    }
+    
     /* Abstract a variable that does not appear in f. */
     if (cuddI(manager,f->index) > cuddI(manager,cube->index)) {
         res = cuddAddMaxAbstractRepresentativeRecur(manager, f, cuddT(cube));
@@ -1231,10 +1235,6 @@ cuddAddMaxAbstractRepresentativeRecur(
        	return(res1);
     }
     
-    if ((res = cuddCacheLookup2(manager, Cudd_addMaxAbstractRepresentative, f, cube)) != NULL) {
-        return(res);
-    }
-    
     
     E = cuddE(f);
     T = cuddT(f);
diff --git a/resources/3rdparty/cudd-3.0.0/cudd/cuddAddApply.c b/resources/3rdparty/cudd-3.0.0/cudd/cuddAddApply.c
index 01fc45fe6..c0b6cebe0 100644
--- a/resources/3rdparty/cudd-3.0.0/cudd/cuddAddApply.c
+++ b/resources/3rdparty/cudd-3.0.0/cudd/cuddAddApply.c
@@ -350,7 +350,9 @@ Cudd_addMinus(
 
     F = *f; G = *g;
     if (F == G) return(DD_ZERO(dd));
-    if (F == DD_ZERO(dd)) return(cuddAddNegateRecur(dd,G));
+    // CHANGED BY CHRISTIAN DEHNERT.
+    // Commented out this case to avoid issues with dynamic reordering (fix suggested by Fabio Somenzi).
+    // if (F == DD_ZERO(dd)) return(cuddAddNegateRecur(dd,G));
     if (G == DD_ZERO(dd)) return(F);
     if (cuddIsConstant(F) && cuddIsConstant(G)) {
 	value = cuddV(F)-cuddV(G);
diff --git a/resources/3rdparty/cudd-3.0.0/cudd/cuddBddAbs.c b/resources/3rdparty/cudd-3.0.0/cudd/cuddBddAbs.c
index 8c3026b9d..d640d2147 100644
--- a/resources/3rdparty/cudd-3.0.0/cudd/cuddBddAbs.c
+++ b/resources/3rdparty/cudd-3.0.0/cudd/cuddBddAbs.c
@@ -563,6 +563,10 @@ cuddBddExistAbstractRepresentativeRecur(
     }
     /* From now on, cube and f are non-constant. */
     
+    /* Check the cache. */
+    if (F->ref != 1 && (res = cuddCacheLookup2(manager, Cudd_bddExistAbstractRepresentative, f, cube)) != NULL) {
+        return(res);
+    }
     
     /* Abstract a variable that does not appear in f. */
     if (manager->perm[F->index] > manager->perm[cube->index]) {
@@ -586,11 +590,6 @@ cuddBddExistAbstractRepresentativeRecur(
        	return(res1);
     }
     
-    /* Check the cache. */
-    if (F->ref != 1 && (res = cuddCacheLookup2(manager, Cudd_bddExistAbstractRepresentative, f, cube)) != NULL) {
-        return(res);
-    }
-    
     /* Compute the cofactors of f. */
     T = cuddT(F); E = cuddE(F);
     if (f != F) {
diff --git a/resources/3rdparty/cudd-3.0.0/cudd/cuddExport.c b/resources/3rdparty/cudd-3.0.0/cudd/cuddExport.c
index 4107b2d8b..e1e76bbdd 100644
--- a/resources/3rdparty/cudd-3.0.0/cudd/cuddExport.c
+++ b/resources/3rdparty/cudd-3.0.0/cudd/cuddExport.c
@@ -448,8 +448,9 @@ Cudd_DumpDot(
 		scan = nodelist[j];
 		while (scan != NULL) {
 		    if (st_is_member(visited,scan)) {
-			retval = fprintf(fp,"\"%#" PRIxPTR "\";\n",
-			    ((mask & (ptruint) scan) / sizeof(DdNode)));
+//			retval = fprintf(fp,"\"%#" PRIxPTR "\";\n",
+//			    ((mask & (ptruint) scan) / sizeof(DdNode)));
+            retval = fprintf(fp,"\"%p\";\n", (ptruint) scan);
 			if (retval == EOF) goto failure;
 		    }
 		    scan = scan->next;
@@ -470,8 +471,9 @@ Cudd_DumpDot(
 	scan = nodelist[j];
 	while (scan != NULL) {
 	    if (st_is_member(visited,scan)) {
-		retval = fprintf(fp,"\"%#" PRIxPTR "\";\n",
-		    ((mask & (ptruint) scan) / sizeof(DdNode)));
+//		retval = fprintf(fp,"\"%#" PRIxPTR "\";\n",
+//		    ((mask & (ptruint) scan) / sizeof(DdNode)));
+        retval = fprintf(fp,"\"%p\";\n", Cudd_Regular(scan));
 		if (retval == EOF) goto failure;
 	    }
 	    scan = scan->next;
@@ -491,11 +493,13 @@ Cudd_DumpDot(
 	if (retval == EOF) goto failure;
 	/* Account for the possible complement on the root. */
 	if (Cudd_IsComplement(f[i])) {
-	    retval = fprintf(fp," -> \"%#" PRIxPTR "\" [style = dotted];\n",
-		((mask & (ptruint) f[i]) / sizeof(DdNode)));
+//	    retval = fprintf(fp," -> \"%#" PRIxPTR "\" [style = dotted];\n",
+//		((mask & (ptruint) f[i]) / sizeof(DdNode)));
+        retval = fprintf(fp," -> \"%p\" [style = dotted];\n", (ptruint)Cudd_Regular(f[i]));
 	} else {
-	    retval = fprintf(fp," -> \"%#" PRIxPTR "\" [style = solid];\n",
-		((mask & (ptruint) f[i]) / sizeof(DdNode)));
+//	    retval = fprintf(fp," -> \"%p#" PRIxPTR "\" [style = solid];\n",
+//		((mask & (ptruint) f[i]) / sizeof(DdNode)));
+        retval = fprintf(fp," -> \"%p\" [style = solid];\n", (ptruint)Cudd_Regular(f[i]));
 	}
 	if (retval == EOF) goto failure;
     }
@@ -509,25 +513,28 @@ Cudd_DumpDot(
 		scan = nodelist[j];
 		while (scan != NULL) {
 		    if (st_is_member(visited,scan)) {
-			retval = fprintf(fp,
-			    "\"%#" PRIxPTR "\" -> \"%#" PRIxPTR "\";\n",
-			    ((mask & (ptruint) scan) / sizeof(DdNode)),
-			    ((mask & (ptruint) cuddT(scan)) / sizeof(DdNode)));
+//			retval = fprintf(fp,
+//			    "\"%#" PRIxPTR "\" -> \"%#" PRIxPTR "\";\n",
+//			    ((mask & (ptruint) scan) / sizeof(DdNode)),
+//			    ((mask & (ptruint) cuddT(scan)) / sizeof(DdNode)));
+            retval = fprintf(fp, "\"%p\" -> \"%p\";\n", (ptruint)Cudd_Regular(scan), (ptruint)Cudd_Regular(cuddT(scan)));
 			if (retval == EOF) goto failure;
 			if (Cudd_IsComplement(cuddE(scan))) {
-			    retval = fprintf(fp,
-				"\"%#" PRIxPTR "\" -> \"%#" PRIxPTR
-                                             "\" [style = dotted];\n",
-				((mask & (ptruint) scan) / sizeof(DdNode)),
-				((mask & (ptruint) cuddE(scan)) /
-				sizeof(DdNode)));
+//			    retval = fprintf(fp,
+//				"\"%#" PRIxPTR "\" -> \"%#" PRIxPTR
+//                                             "\" [style = dotted];\n",
+//				((mask & (ptruint) scan) / sizeof(DdNode)),
+//				((mask & (ptruint) cuddE(scan)) /
+//				sizeof(DdNode)));
+                retval = fprintf(fp, "\"%p\" -> \"%p\" [style = dotted];\n", (ptruint)Cudd_Regular(scan), (ptruint)Cudd_Regular(cuddE(scan)));
 			} else {
-			    retval = fprintf(fp,
-				"\"%#" PRIxPTR "\" -> \"%#" PRIxPTR
-                                             "\" [style = dashed];\n",
-				((mask & (ptruint) scan) / sizeof(DdNode)),
-				((mask & (ptruint) cuddE(scan)) /
-				sizeof(DdNode)));
+//			    retval = fprintf(fp,
+//				"\"%#" PRIxPTR "\" -> \"%#" PRIxPTR
+//                                             "\" [style = dashed];\n",
+//				((mask & (ptruint) scan) / sizeof(DdNode)),
+//				((mask & (ptruint) cuddE(scan)) /
+//				sizeof(DdNode)));
+                retval = fprintf(fp, "\"%p\" -> \"%p\" [style = dashed];\n", (ptruint)Cudd_Regular(scan), (ptruint)Cudd_Regular(cuddE(scan)));
 			}
 			if (retval == EOF) goto failure;
 		    }
@@ -544,8 +551,9 @@ Cudd_DumpDot(
 	scan = nodelist[j];
 	while (scan != NULL) {
 	    if (st_is_member(visited,scan)) {
-		retval = fprintf(fp,"\"%#" PRIxPTR "\" [label = \"%g\"];\n",
-		    ((mask & (ptruint) scan) / sizeof(DdNode)), cuddV(scan));
+//		retval = fprintf(fp,"\"%#" PRIxPTR "\" [label = \"%g\"];\n",
+//		    ((mask & (ptruint) scan) / sizeof(DdNode)), cuddV(scan));
+        retval = fprintf(fp,"\"%p\" [label = \"%g\"];\n", (ptruint)Cudd_Regular(scan), cuddV(scan));
 		if (retval == EOF) goto failure;
 	    }
 	    scan = scan->next;
diff --git a/resources/3rdparty/include_cudd.cmake b/resources/3rdparty/include_cudd.cmake
index e4bcebcb9..3030f44b3 100644
--- a/resources/3rdparty/include_cudd.cmake
+++ b/resources/3rdparty/include_cudd.cmake
@@ -16,7 +16,14 @@ endif()
 
 set(CUDD_LIB_DIR ${STORM_3RDPARTY_BINARY_DIR}/cudd-3.0.0/lib)
 
-set(STORM_CUDD_FLAGS "CFLAGS=-O3 -w -DPIC -DHAVE_IEEE_754 -fno-common -ffast-math -fno-finite-math-only")
+# create CUDD compilation flags
+if (NOT STORM_DEBUG_CUDD)
+	set(STORM_CUDD_FLAGS "-O3")
+else()
+	message(WARNING "Building CUDD in DEBUG mode.")
+	set(STORM_CUDD_FLAGS "-O0 -g")
+endif()
+set(STORM_CUDD_FLAGS "CFLAGS=${STORM_CUDD_FLAGS} -w -DPIC -DHAVE_IEEE_754 -fno-common -ffast-math -fno-finite-math-only")
 if (NOT STORM_PORTABLE)
 	set(STORM_CUDD_FLAGS "${STORM_CUDD_FLAGS} -march=native")
 endif()
diff --git a/resources/3rdparty/sparsepp/.gitignore b/resources/3rdparty/sparsepp/.gitignore
new file mode 100755
index 000000000..cd2946ad7
--- /dev/null
+++ b/resources/3rdparty/sparsepp/.gitignore
@@ -0,0 +1,47 @@
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+
+# =========================
+# Operating System Files
+# =========================
+
+# OSX
+# =========================
+
+.DS_Store
+.AppleDouble
+.LSOverride
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
diff --git a/resources/3rdparty/sparsepp/.travis.yml b/resources/3rdparty/sparsepp/.travis.yml
new file mode 100755
index 000000000..c8d240b98
--- /dev/null
+++ b/resources/3rdparty/sparsepp/.travis.yml
@@ -0,0 +1,14 @@
+language: cpp
+
+os:
+  - linux
+  - osx
+
+compiler:
+  - clang
+  - gcc
+
+dist: trusty
+sudo: false
+
+script: cd tests && make && make test
diff --git a/resources/3rdparty/sparsepp/CHANGELOG.md b/resources/3rdparty/sparsepp/CHANGELOG.md
new file mode 100755
index 000000000..c491ed950
--- /dev/null
+++ b/resources/3rdparty/sparsepp/CHANGELOG.md
@@ -0,0 +1,16 @@
+# 0.95
+
+* not single header anymore (this was just too much of a hassle).
+* custom allocator not quite ready yet. Checked in, but still using old allocator (easy to toggle - line 15 of spp_config.h)
+
+
+# 0.90
+
+* stable release (single header)
+* known issues:
+   -  memory usage can be excessive in Windows
+
+      sparsepp has a very simple default allocator based on the system malloc/realloc/free implementation,
+      and the default Windows realloc() appears to fragment the memory, causing significantly higher 
+      memory usage than on linux. To solve this issue, I am working on a new allocator which will 
+      remedy the problem.
diff --git a/resources/3rdparty/sparsepp/LICENSE b/resources/3rdparty/sparsepp/LICENSE
old mode 100644
new mode 100755
diff --git a/resources/3rdparty/sparsepp/README.md b/resources/3rdparty/sparsepp/README.md
old mode 100644
new mode 100755
index 241b116b0..7cf36b83e
--- a/resources/3rdparty/sparsepp/README.md
+++ b/resources/3rdparty/sparsepp/README.md
@@ -8,7 +8,7 @@ Sparsepp is derived from Google's excellent [sparsehash](https://github.com/spar
 - **Extremely low memory usage** (typically about one byte overhead per entry).
 - **Very efficient**, typically faster than your compiler's unordered map/set or Boost's.
 - **C++11 support** (if supported by compiler).
-- **Single header** implementation - just copy `sparsepp.h` to your project and include it.
+- ~~Single header~~ not anymore
 - **Tested** on Windows (vs2010-2015, g++), linux (g++, clang++) and MacOS (clang++).
 
 We believe Sparsepp provides an unparalleled combination of performance and memory usage, and will outperform your compiler's unordered_map on both counts. Only Google's `dense_hash_map` is consistently faster, at the cost of much greater memory usage (especially when the final size of the map is not known in advance). 
@@ -20,7 +20,7 @@ For a detailed comparison of various hash implementations, including Sparsepp, p
 ```c++
 #include <iostream>
 #include <string>
-#include <sparsepp.h>
+#include <sparsepp/spp.h>
 
 using spp::sparse_hash_map;
  
@@ -50,9 +50,7 @@ int main()
 
 ## Installation
 
-Since the full Sparsepp implementation is contained in a single header file `sparsepp.h`, the installation consist in copying this header file wherever it will be convenient to include in your project(s). 
-
-Optionally, a second header file `spp_utils.h` is provided, which implements only the spp::hash_combine() functionality. This is useful when we want to specify a hash function for a user-defined class in an header file, without including the full `sparsepp.h` header (this is demonstrated in [example 2](#example-2---providing-a-hash-function-for-a-user-defined-class) below).
+No compilation is needed, as this is a header-only library. The installation consists of copying the sparsepp directory wherever it will be convenient to include in your project(s). Also make sure the path to this directory is provided to the compiler with the `-I` option.
 
 ## Warning - iterator invalidation on erase/insert
 
@@ -62,7 +60,7 @@ Optionally, a second header file `spp_utils.h` is provided, which implements onl
 
 ## Usage
 
-As shown in the example above, you need to include the header file: `#include <sparsepp.h>`
+As shown in the example above, you need to include the header file: `#include <sparsepp/spp.h>`
 
 This provides the implementation for the following classes:
 
@@ -100,6 +98,16 @@ These classes provide the same interface as std::unordered_map and std::unordere
 
 - Since items are not grouped into buckets, Bucket APIs have been adapted: `max_bucket_count` is equivalent to `max_size`, and `bucket_count` returns the sparsetable size, which is normally at least twice the number of items inserted into the hash_map.
 
+## Memory allocator on Windows (when building with Visual Studio)
+
+When building with the Microsoft compiler, we provide a custom allocator because the default one (from the Visual C++ runtime) fragments memory when reallocating. 
+
+This is desirable *only* when creating large sparsepp hash maps. If you create lots of small hash_maps, memory usage may increase instead of decreasing as expected.  The reason is that, for each instance of a hash_map, the custom memory allocator creates a new memory space to allocate from, which is typically 4K, so it may be a big waste if just a few items are allocated.
+
+In order to use the custom spp allocator, define the following preprocessor variable before including `<sparsepp/spp.h>`:
+
+`#define SPP_USE_SPP_ALLOC 1`
+
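+For instance, a translation unit that opts into the custom allocator might look like the following minimal sketch (only `SPP_USE_SPP_ALLOC` is taken from the description above; the map and its contents are illustrative):
+
+```c++
+// Define SPP_USE_SPP_ALLOC before the first include of spp.h so that the
+// custom spp allocator is selected instead of the default allocator based
+// on the system malloc/realloc/free.
+#define SPP_USE_SPP_ALLOC 1
+#include <sparsepp/spp.h>
+
+int main()
+{
+    // A single large map is the intended use case for the custom allocator.
+    spp::sparse_hash_map<int, int> big_map;
+    for (int i = 0; i < 1000000; ++i)
+        big_map[i] = 2 * i;
+}
+```
+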
 ## Integer keys, and other hash function considerations.
 
 1. For basic integer types, sparsepp provides a default hash function which does some mixing of the bits of the keys (see [Integer Hashing](http://burtleburtle.net/bob/hash/integer.html)). This prevents a pathological case where inserted keys are sequential (1, 2, 3, 4, ...), and the lookup on non-present keys becomes very slow. 
@@ -107,7 +115,7 @@ These classes provide the same interface as std::unordered_map and std::unordere
    Of course, the user of sparsepp may provide its own hash function,  as shown below:
    
    ```c++
-   #include <sparsepp.h>
+   #include <sparsepp/spp.h>
    
    struct Hash64 {
        size_t operator()(uint64_t k) const { return (k ^ 14695981039346656037ULL) * 1099511628211ULL; }
@@ -125,7 +133,7 @@ These classes provide the same interface as std::unordered_map and std::unordere
    
    ```
 
-2. When the user provides its own hash function, for example when inserting custom classes into a hash map, sometimes the resulting hash keys have similar low order bits and cause many collisions, decreasing the efficiency of the hash map. To address this use case, sparsepp provides an optional 'mixing' of the hash key (see [Integer Hash Function](https://gist.github.com/badboy/6267743) which can be enabled by defining the proprocessor macro: SPP_HASH_MIX. 
+2. When the user provides their own hash function, for example when inserting custom classes into a hash map, the resulting hash keys sometimes have similar low-order bits and cause many collisions, decreasing the efficiency of the hash map. To address this use case, sparsepp provides an optional 'mixing' of the hash key (see [Integer Hash Function](https://gist.github.com/badboy/6267743)), which can be enabled by defining the preprocessor macro SPP_MIX_HASH, as sketched below.
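+
+   A minimal sketch of enabling the mixing (only the `SPP_MIX_HASH` macro is taken from the paragraph above; the weak hash functor is illustrative):
+
+   ```c++
+   // Define SPP_MIX_HASH before including spp.h so the optional mixing
+   // described above is applied on top of the user-provided hash.
+   #define SPP_MIX_HASH 1
+   #include <cstddef>
+   #include <cstdint>
+   #include <sparsepp/spp.h>
+
+   // A deliberately weak hash whose low-order bits are always zero.
+   struct ShiftHash {
+       std::size_t operator()(std::uint64_t k) const { return static_cast<std::size_t>(k << 8); }
+   };
+
+   int main()
+   {
+       spp::sparse_hash_map<std::uint64_t, int, ShiftHash> m;
+       m[42] = 1;
+   }
+   ```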
 
 ## Example 2 - providing a hash function for a user-defined class
 
@@ -135,7 +143,7 @@ In order to use a sparse_hash_set or sparse_hash_map, a hash function should be
 #include <iostream>
 #include <functional>
 #include <string>
-#include "sparsepp.h"
+#include <sparsepp/spp.h>
 
 using std::string;
 
@@ -179,11 +187,11 @@ int main()
 
 The `std::hash` specialization for `Person` combines the hash values for both first and last name using the convenient spp::hash_combine function, and returns the combined hash value. 
 
-spp::hash_combine is provided by the header `sparsepp.h`. However, class definitions often appear in header files, and it is desirable to limit the size of headers included in such header files, so we provide the very small header `spp_utils.h` for that purpose:
+spp::hash_combine is provided by the header `sparsepp/spp.h`. However, class definitions often appear in header files, and it is desirable to limit the size of headers included in such header files, so we provide the very small header `sparsepp/spp_utils.h` for that purpose:
 
 ```c++
 #include <string>
-#include "spp_utils.h"
+#include <sparsepp/spp_utils.h>
 
 using std::string;
  
@@ -231,12 +239,12 @@ This support is implemented in the following APIs:
     bool unserialize(Serializer serializer, INPUT *stream);
 ```
 
-The following example demontrates how a simple sparse_hash_map can be written to a file, and then read back. The serializer we use read and writes to a file using the stdio APIs, but it would be equally simple to write a serialized using the stream APIS:
+The following example demonstrates how a simple sparse_hash_map can be written to a file, and then read back. The serializer we use reads from and writes to a file using the stdio APIs, but it would be equally simple to write a serializer using the stream APIs:
 
 ```c++
 #include <cstdio>
 
-#include "sparsepp.h"
+#include <sparsepp/spp.h>
 
 using spp::sparse_hash_map;
 using namespace std;
@@ -319,5 +327,12 @@ int main(int argc, char* argv[])
 }
 ```
 
+## Thread safety
+
+Sparsepp follows the thread safety rules of the Standard C++ library. In particular (see the sketch after this list):
+
+- A single sparsepp hash table is thread safe for reading from multiple threads. For example, given a hash table A, it is safe to read A from thread 1 and from thread 2 simultaneously.
 
+- If a single hash table is being written to by one thread, then all reads and writes to that hash table on the same or other threads must be protected. For example, given a hash table A, if thread 1 is writing to A, then thread 2 must be prevented from reading from or writing to A.
 
+- It is safe to read and write to one instance of a type even if another thread is reading or writing to a different instance of the same type. For example, given hash tables A and B of the same type, it is safe if A is being written in thread 1 and B is being read in thread 2.
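+
+A minimal sketch of the second rule, using a mutex to protect concurrent writes (the mutex and thread setup are illustrative and not part of sparsepp):
+
+```c++
+#include <mutex>
+#include <string>
+#include <thread>
+#include <sparsepp/spp.h>
+
+int main()
+{
+    spp::sparse_hash_map<std::string, int> ages;
+    std::mutex guard;  // serializes all writes to 'ages'
+
+    auto writer = [&](const std::string& name, int age) {
+        std::lock_guard<std::mutex> lock(guard);
+        ages[name] = age;
+    };
+
+    std::thread t1(writer, "John", 12);
+    std::thread t2(writer, "Jane", 13);
+    t1.join();
+    t2.join();
+}
+```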
diff --git a/resources/3rdparty/sparsepp/bench.md b/resources/3rdparty/sparsepp/bench.md
old mode 100644
new mode 100755
diff --git a/resources/3rdparty/sparsepp/docs/.gitignore b/resources/3rdparty/sparsepp/docs/.gitignore
old mode 100644
new mode 100755
diff --git a/resources/3rdparty/sparsepp/examples/emplace.cc b/resources/3rdparty/sparsepp/examples/emplace.cc
new file mode 100755
index 000000000..e4c04dcfd
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/emplace.cc
@@ -0,0 +1,128 @@
+#include <map>
+#include <unordered_map>
+#include <string>
+#include <iostream>
+#include <chrono>
+#include <vector>
+#include <sparsepp/spp.h>
+
+#include <sstream>
+
+namespace patch
+{
+    template <typename T> std::string to_string(const T& n)
+    {
+        std::ostringstream stm;
+        stm << n;
+        return stm.str();
+    }
+}
+
+#if defined(SPP_NO_CXX11_RVALUE_REFERENCES)
+    #warning "problem: we expect spp will detect we have rvalue support"
+#endif
+
+template <typename T>
+using milliseconds = std::chrono::duration<T, std::milli>;
+
+class custom_type
+{
+    std::string one = "one";
+    std::string two = "two";
+    std::uint32_t three = 3;
+    std::uint64_t four = 4;
+    std::uint64_t five = 5;
+public:
+    custom_type() = default;
+    // Make object movable and non-copyable
+    custom_type(custom_type &&) = default;
+    custom_type& operator=(custom_type &&) = default;
+    // should be automatically deleted per http://www.slideshare.net/ripplelabs/howard-hinnant-accu2014
+    //custom_type(custom_type const&) = delete;
+    //custom_type& operator=(custom_type const&) = delete;
+};
+
+void test(std::size_t iterations, std::size_t container_size)
+{
+    std::clog << "bench: iterations: " << iterations <<  " / container_size: "  << container_size << "\n";
+    {
+        std::size_t count = 0;
+        auto t1 = std::chrono::high_resolution_clock::now();
+        for (std::size_t i=0; i<iterations; ++i)
+        {
+            std::unordered_map<std::string,custom_type> m;
+            m.reserve(container_size);
+            for (std::size_t j=0; j<container_size; ++j)
+                m.emplace(patch::to_string(j),custom_type());
+            count += m.size();
+        }
+        auto t2 = std::chrono::high_resolution_clock::now();
+        auto elapsed = milliseconds<double>(t2 - t1).count();
+        if (count != iterations*container_size)
+            std::clog << "  invalid count: " << count << "\n";
+        std::clog << "  std::unordered_map:     " << std::fixed << int(elapsed) << " ms\n";
+    }
+
+    {
+        std::size_t count = 0;
+        auto t1 = std::chrono::high_resolution_clock::now();
+        for (std::size_t i=0; i<iterations; ++i)
+        {
+            std::map<std::string,custom_type> m;
+            for (std::size_t j=0; j<container_size; ++j)
+                m.emplace(patch::to_string(j),custom_type());
+            count += m.size();
+        }
+        auto t2 = std::chrono::high_resolution_clock::now();
+        auto elapsed = milliseconds<double>(t2 - t1).count();
+        if (count != iterations*container_size)
+            std::clog << "  invalid count: " << count << "\n";
+        std::clog << "  std::map:               " << std::fixed << int(elapsed) << " ms\n";
+    }
+
+    {
+        std::size_t count = 0;
+        auto t1 = std::chrono::high_resolution_clock::now();
+        for (std::size_t i=0; i<iterations; ++i)
+        {
+            std::vector<std::pair<std::string,custom_type>> m;
+            m.reserve(container_size);
+            for (std::size_t j=0; j<container_size; ++j)
+                m.emplace_back(patch::to_string(j),custom_type());
+            count += m.size();
+        }
+        auto t2 = std::chrono::high_resolution_clock::now();
+        auto elapsed = milliseconds<double>(t2 - t1).count();
+        if (count != iterations*container_size)
+            std::clog << "  invalid count: " << count << "\n";
+        std::clog << "  std::vector<std::pair>: " << std::fixed << int(elapsed) << " ms\n";
+    }
+
+    {
+        std::size_t count = 0;
+        auto t1 = std::chrono::high_resolution_clock::now();
+        for (std::size_t i=0; i<iterations; ++i)
+        {
+            spp::sparse_hash_map<std::string,custom_type> m;
+            m.reserve(container_size);
+            for (std::size_t j=0; j<container_size; ++j)
+                m.emplace(patch::to_string(j),custom_type());
+            count += m.size();
+        }
+        auto t2 = std::chrono::high_resolution_clock::now();
+        auto elapsed = milliseconds<double>(t2 - t1).count();
+        if (count != iterations*container_size)
+            std::clog << "  invalid count: " << count << "\n";
+        std::clog << "  spp::sparse_hash_map:   " << std::fixed << int(elapsed) << " ms\n";
+    }
+
+}
+
+int main()
+{
+    std::size_t iterations = 100000;
+
+    test(iterations,1);
+    test(iterations,10);
+    test(iterations,50);
+}
diff --git a/resources/3rdparty/sparsepp/examples/hash_std.cc b/resources/3rdparty/sparsepp/examples/hash_std.cc
new file mode 100755
index 000000000..e0738df58
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/hash_std.cc
@@ -0,0 +1,47 @@
+#include <iostream>
+#include <string>
+#include <sparsepp/spp.h>
+
+using std::string;
+
+struct Person
+{
+    bool operator==(const Person &o) const
+    { 
+        return _first == o._first && _last == o._last; 
+    }
+
+    string _first;
+    string _last;
+};
+
+namespace std
+{
+// inject specialization of std::hash for Person into namespace std
+// ----------------------------------------------------------------
+template<>
+struct hash<Person>
+{
+    std::size_t operator()(Person const &p) const
+    {
+        std::size_t seed = 0;
+        spp::hash_combine(seed, p._first);
+        spp::hash_combine(seed, p._last);
+        return seed;
+    }
+};
+}
+
+int main()
+{
+    // As we have defined a specialization of std::hash() for Person,
+    // we can now create sparse_hash_set or sparse_hash_map of Persons
+    // ----------------------------------------------------------------
+    spp::sparse_hash_set<Person> persons = 
+        { { "John", "Galt" },
+          { "Jane", "Doe" }
+        };
+
+    for (auto& p: persons)
+        std::cout << p._first << ' ' << p._last << '\n';
+}
diff --git a/resources/3rdparty/sparsepp/examples/makefile b/resources/3rdparty/sparsepp/examples/makefile
new file mode 100755
index 000000000..979a10fb9
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/makefile
@@ -0,0 +1,18 @@
+CXXFLAGS     = -O2 -std=c++11 -I..
+CXXFLAGS    += -Wall -pedantic -Wextra -D_XOPEN_SOURCE=700 
+SPP_DEPS_1   =  spp.h spp_utils.h spp_dlalloc.h spp_traits.h spp_config.h
+SPP_DEPS     = $(addprefix ../sparsepp/,$(SPP_DEPS_1))
+TARGETS      = emplace hash_std serialize_file serialize_stream serialize_large
+
+ifeq ($(OS),Windows_NT)
+    LDFLAGS  = -lpsapi
+endif
+
+all: $(TARGETS)
+
+clean:
+	rm -f $(TARGETS) ages.dmp data.dat vsprojects/x64/* vsprojects/x86/*
+
+%: %.cc $(SPP_DEPS) makefile
+	$(CXX) $(CXXFLAGS) -DNDEBUG $< -o $@ $(LDFLAGS)
+
diff --git a/resources/3rdparty/sparsepp/examples/serialize_file.cc b/resources/3rdparty/sparsepp/examples/serialize_file.cc
new file mode 100755
index 000000000..b682b6f9e
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/serialize_file.cc
@@ -0,0 +1,82 @@
+#include <cstdio>
+#include <sparsepp/spp.h>
+
+using spp::sparse_hash_map;
+using namespace std;
+
+class FileSerializer
+{
+public:
+    // serialize basic types to FILE
+    // -----------------------------
+    template <class T>
+    bool operator()(FILE *fp, const T& value)
+    {
+        return fwrite((const void *)&value, sizeof(value), 1, fp) == 1;
+    }
+
+    template <class T>
+    bool operator()(FILE *fp, T* value)
+    {
+        return fread((void *)value, sizeof(*value), 1, fp) == 1;
+    }
+
+    // serialize std::string to FILE
+    // -----------------------------
+    bool operator()(FILE *fp, const string& value)
+    {
+        const size_t size = value.size();
+        return (*this)(fp, size) && fwrite(value.c_str(), size, 1, fp) == 1;
+    }
+
+    bool operator()(FILE *fp, string* value)
+    {
+        size_t size;
+        if (!(*this)(fp, &size))
+            return false;
+        char* buf = new char[size];
+        if (fread(buf, size, 1, fp) != 1)
+        {
+            delete [] buf;
+            return false;
+        }
+        new (value) string(buf, (size_t)size);
+        delete[] buf;
+        return true;
+    }
+
+    // serialize std::pair<const A, B> to FILE - needed for maps
+    // ---------------------------------------------------------
+    template <class A, class B>
+    bool operator()(FILE *fp, const std::pair<const A, B>& value)
+    {
+        return (*this)(fp, value.first) && (*this)(fp, value.second);
+    }
+
+    template <class A, class B>
+    bool operator()(FILE *fp, std::pair<const A, B> *value)
+    {
+        return (*this)(fp, (A *)&value->first) && (*this)(fp, &value->second);
+    }
+};
+
+int main(int, char* [])
+{
+    sparse_hash_map<string, int> age{ { "John", 12 }, {"Jane", 13 }, { "Fred", 8 } };
+
+    // serialize age hash_map to "ages.dmp" file
+    FILE *out = fopen("ages.dmp", "wb");
+    age.serialize(FileSerializer(), out);
+    fclose(out);
+
+    sparse_hash_map<string, int> age_read;
+
+    // read from "ages.dmp" file into age_read hash_map
+    FILE *input = fopen("ages.dmp", "rb");
+    age_read.unserialize(FileSerializer(), input);
+    fclose(input);
+
+    // print out contents of age_read to verify correct serialization
+    for (auto& v : age_read)
+        printf("age_read: %s -> %d\n", v.first.c_str(), v.second);
+}
diff --git a/resources/3rdparty/sparsepp/examples/serialize_large.cc b/resources/3rdparty/sparsepp/examples/serialize_large.cc
new file mode 100755
index 000000000..574d34b13
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/serialize_large.cc
@@ -0,0 +1,97 @@
+#include <cstdio>
+#include <stdlib.h>
+#include <algorithm>
+#include <vector>
+#include <sparsepp/spp_timer.h>
+#include <sparsepp/spp_memory.h>
+#include <sparsepp/spp.h>
+
+using spp::sparse_hash_map;
+using namespace std;
+
+class FileSerializer
+{
+public:
+    // serialize basic types to FILE
+    // -----------------------------
+    template <class T>
+    bool operator()(FILE *fp, const T& value)
+    {
+        return fwrite((const void *)&value, sizeof(value), 1, fp) == 1;
+    }
+
+    template <class T>
+    bool operator()(FILE *fp, T* value)
+    {
+        return fread((void *)value, sizeof(*value), 1, fp) == 1;
+    }
+
+    // serialize std::string to FILE
+    // -----------------------------
+    bool operator()(FILE *fp, const string& value)
+    {
+        const size_t size = value.size();
+        return (*this)(fp, size) && fwrite(value.c_str(), size, 1, fp) == 1;
+    }
+
+    bool operator()(FILE *fp, string* value)
+    {
+        size_t size;
+        if (!(*this)(fp, &size))
+            return false;
+        char* buf = new char[size];
+        if (fread(buf, size, 1, fp) != 1)
+        {
+            delete [] buf;
+            return false;
+        }
+        new (value) string(buf, (size_t)size);
+        delete[] buf;
+        return true;
+    }
+
+    // serialize std::pair<const A, B> to FILE - needed for maps
+    // ---------------------------------------------------------
+    template <class A, class B>
+    bool operator()(FILE *fp, const std::pair<const A, B>& value)
+    {
+        return (*this)(fp, value.first) && (*this)(fp, value.second);
+    }
+
+    template <class A, class B>
+    bool operator()(FILE *fp, std::pair<const A, B> *value)
+    {
+        return (*this)(fp, (A *)&value->first) && (*this)(fp, &value->second);
+    }
+};
+
+float _to_gb(uint64_t m) { return (float)((double)m / (1024 * 1024 * 1024)); }
+
+int main(int, char* [])
+{
+    sparse_hash_map<string, int> age;
+
+    for (size_t i=0; i<10000000; ++i)
+    {
+        char buff[20];
+        sprintf(buff, "%zu", i);
+        age.insert(std::make_pair(std::string(buff), i));
+    }
+
+    printf("before serialize(): mem_usage %4.1f GB\n",  _to_gb(spp::GetProcessMemoryUsed()));
+    // serialize age hash_map to "ages.dmp" file
+    FILE *out = fopen("ages.dmp", "wb");
+    age.serialize(FileSerializer(), out);
+    fclose(out);
+
+    printf("before clear(): mem_usage %4.1f GB\n",  _to_gb(spp::GetProcessMemoryUsed()));
+    age.clear();
+    printf("after clear(): mem_usage %4.1f GB\n",  _to_gb(spp::GetProcessMemoryUsed()));
+
+
+    // read from "ages.dmp" file into age_read hash_map
+    FILE *input = fopen("ages.dmp", "rb");
+    age.unserialize(FileSerializer(), input);
+    fclose(input);
+    printf("after unserialize(): mem_usage %4.1f GB\n",  _to_gb(spp::GetProcessMemoryUsed()));
+}
diff --git a/resources/3rdparty/sparsepp/examples/serialize_stream.cc b/resources/3rdparty/sparsepp/examples/serialize_stream.cc
new file mode 100755
index 000000000..db65e456e
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/serialize_stream.cc
@@ -0,0 +1,64 @@
+#include <iostream>
+#include <algorithm>
+#include <fstream>
+
+#include <sparsepp/spp.h>
+using spp::sparse_hash_map;
+
+using namespace std;
+
+struct StringToIntSerializer
+{
+    bool operator()(std::ofstream* stream, const std::pair<const std::string, int>& value) const
+    {
+        size_t sizeSecond = sizeof(value.second);
+        size_t sizeFirst = value.first.size();
+        stream->write((char*)&sizeFirst, sizeof(sizeFirst));
+        stream->write(value.first.c_str(), sizeFirst);
+        stream->write((char*)&value.second, sizeSecond);
+        return true;
+    }
+
+    bool operator()(std::ifstream* istream, std::pair<const std::string, int>* value) const
+    {
+        // Read key
+        size_t size = 0;
+        istream->read((char*)&size, sizeof(size));
+        char * first = new char[size];
+        istream->read(first, size);
+        new (const_cast<string *>(&value->first)) string(first, size);
+
+        // Read value
+        istream->read((char *)&value->second, sizeof(value->second));
+        return true;
+    }
+};
+
+int main(int , char* [])
+{
+    sparse_hash_map<string, int> users;
+
+    users["John"] = 12345;
+    users["Bob"] = 553;
+    users["Alice"] = 82200;
+
+    // Write users to file "data.dat"
+    // ------------------------------
+    std::ofstream* stream = new std::ofstream("data.dat", 
+                                              std::ios::out | std::ios::trunc | std::ios::binary);
+    users.serialize(StringToIntSerializer(), stream);
+    stream->close();
+    delete stream;
+
+    // Read from file "data.dat" into users2
+    // -------------------------------------
+    sparse_hash_map<string, int> users2;
+    std::ifstream* istream = new std::ifstream("data.dat");
+    users2.unserialize(StringToIntSerializer(), istream);
+    istream->close();
+    delete istream;
+    
+    for (sparse_hash_map<string, int>::iterator it = users2.begin(); it != users2.end(); ++it)
+        printf("users2: %s -> %d\n", it->first.c_str(), it->second);
+
+}
diff --git a/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj b/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj
new file mode 100755
index 000000000..63b159bcb
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj
@@ -0,0 +1,172 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\serialize_stream.cc" />
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\sparsepp\spp.h" />
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{19BC4240-15ED-4C76-BC57-34BB70FE163B}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+    <ProjectName>serialize_stream</ProjectName>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <CharacterSet>MultiByte</CharacterSet>
+    <PlatformToolset>v140</PlatformToolset>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup>
+    <_ProjectFileVersion>14.0.23107.0</_ProjectFileVersion>
+  </PropertyGroup>
+  <PropertyGroup>
+    <IntDirSharingDetected>None</IntDirSharingDetected>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <OutDir>$(SolutionDir)$(Configuration)\</OutDir>
+    <IntDir>$(Configuration)\</IntDir>
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <OutDir>$(SolutionDir)$(Configuration)\</OutDir>
+    <IntDir>$(Configuration)\</IntDir>
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <ClCompile>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <MinimalRebuild>true</MinimalRebuild>
+      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+      <PrecompiledHeader />
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_alloc_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <ProgramDatabaseFile>$(OutDir)spp_alloc_test.pdb</ProgramDatabaseFile>
+      <SubSystem>Console</SubSystem>
+      <TargetMachine>MachineX86</TargetMachine>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_alloc_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <ProgramDatabaseFile>$(OutDir)spp_alloc_test.pdb</ProgramDatabaseFile>
+      <SubSystem>Console</SubSystem>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <ClCompile>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+      <PrecompiledHeader />
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_alloc_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <SubSystem>Console</SubSystem>
+      <OptimizeReferences>true</OptimizeReferences>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <TargetMachine>MachineX86</TargetMachine>
+      <Profile>true</Profile>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_alloc_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <SubSystem>Console</SubSystem>
+      <OptimizeReferences>true</OptimizeReferences>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <Profile>true</Profile>
+    </Link>
+  </ItemDefinitionGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj.filters b/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj.filters
new file mode 100755
index 000000000..39ecd7689
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj.filters
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{ba5fa1b8-1783-4b3b-9a41-31d363b52841}</UniqueIdentifier>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\sparsepp\spp.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/resources/3rdparty/sparsepp/examples/vsprojects/spp_examples.sln b/resources/3rdparty/sparsepp/examples/vsprojects/spp_examples.sln
new file mode 100755
index 000000000..a37d41277
--- /dev/null
+++ b/resources/3rdparty/sparsepp/examples/vsprojects/spp_examples.sln
@@ -0,0 +1,28 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 14
+VisualStudioVersion = 14.0.25420.1
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "serialize_stream", "serialize_stream.vcxproj", "{19BC4240-15ED-4C76-BC57-34BB70FE163B}"
+EndProject
+Global
+	GlobalSection(SolutionConfigurationPlatforms) = preSolution
+		Debug|x64 = Debug|x64
+		Debug|x86 = Debug|x86
+		Release|x64 = Release|x64
+		Release|x86 = Release|x86
+	EndGlobalSection
+	GlobalSection(ProjectConfigurationPlatforms) = postSolution
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x64.ActiveCfg = Debug|x64
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x64.Build.0 = Debug|x64
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x86.ActiveCfg = Debug|Win32
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x86.Build.0 = Debug|Win32
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x64.ActiveCfg = Release|x64
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x64.Build.0 = Release|x64
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x86.ActiveCfg = Release|Win32
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x86.Build.0 = Release|Win32
+	EndGlobalSection
+	GlobalSection(SolutionProperties) = preSolution
+		HideSolutionNode = FALSE
+	EndGlobalSection
+EndGlobal
diff --git a/resources/3rdparty/sparsepp/makefile b/resources/3rdparty/sparsepp/makefile
deleted file mode 100644
index eed3e5bca..000000000
--- a/resources/3rdparty/sparsepp/makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-all: spp_test 
-
-clean: 
-	/bin/rm spp_test
-
-test:
-	./spp_test
-
-spp_test: spp_test.cc sparsepp.h makefile
-	$(CXX) -O2 -std=c++0x -Wall -pedantic -Wextra -D_XOPEN_SOURCE=700 -D_CRT_SECURE_NO_WARNINGS spp_test.cc -o spp_test
-
-spp_alloc_test: spp_alloc_test.cc spp_alloc.h spp_bitset.h sparsepp.h makefile
-	$(CXX) -O2 -DNDEBUG  -std=c++11  spp_alloc_test.cc -o spp_alloc_test
-
-perftest1: perftest1.cc sparsepp.h makefile
-	$(CXX) -O2 -DNDEBUG  -std=c++11 perftest1.cc -o perftest1
-
diff --git a/resources/3rdparty/sparsepp/sparsepp.h b/resources/3rdparty/sparsepp/sparsepp/spp.h
old mode 100644
new mode 100755
similarity index 69%
rename from resources/3rdparty/sparsepp/sparsepp.h
rename to resources/3rdparty/sparsepp/sparsepp/spp.h
index 8fc36ce47..abd4295e9
--- a/resources/3rdparty/sparsepp/sparsepp.h
+++ b/resources/3rdparty/sparsepp/sparsepp/spp.h
@@ -5,12 +5,12 @@
 // ----------------------------------------------------------------------
 // Copyright (c) 2016, Gregory Popovitch - greg7mdp@gmail.com
 // All rights reserved.
-// 
+//
 // This work is derived from Google's sparsehash library
 //
 // Copyright (c) 2005, Google Inc.
 // All rights reserved.
-// 
+//
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -38,802 +38,11 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // ----------------------------------------------------------------------
 
- 
-// ---------------------------------------------------------------------------
-// Compiler detection code (SPP_ proprocessor macros) derived from Boost 
-// libraries. Therefore Boost software licence reproduced below.
-// ---------------------------------------------------------------------------
-// Boost Software License - Version 1.0 - August 17th, 2003
-// 
-// Permission is hereby granted, free of charge, to any person or organization
-// obtaining a copy of the software and accompanying documentation covered by
-// this license (the "Software") to use, reproduce, display, distribute,
-// execute, and transmit the Software, and to prepare derivative works of the
-// Software, and to permit third-parties to whom the Software is furnished to
-// do so, all subject to the following:
-// 
-// The copyright notices in the Software and this entire statement, including
-// the above license grant, this restriction and the following disclaimer,
-// must be included in all copies of the Software, in whole or in part, and
-// all derivative works of the Software, unless such copies or derivative
-// works are solely in the form of machine-executable object code generated by
-// a source language processor.
-// 
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
-// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
-// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
-// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-// DEALINGS IN THE SOFTWARE.
-// ---------------------------------------------------------------------------
-
 
 // some macros for portability
 // ---------------------------
-#define spp_ spp
-#define SPP_NAMESPACE spp_
-#define SPP_START_NAMESPACE   namespace spp {
-#define SPP_END_NAMESPACE     }
-#define SPP_GROUP_SIZE 32     // must be 32 or 64
-#define SPP_ALLOC_SZ 0        // must be power of 2 (0 = agressive alloc, 1 = smallest memory usage, 2 = good compromise)
-#define SPP_STORE_NUM_ITEMS 1 // little bit more memory, but faster!!
-
-#if (SPP_GROUP_SIZE == 32)
-    #define SPP_SHIFT_ 5
-    #define SPP_MASK_  0x1F    
-#elif (SPP_GROUP_SIZE == 64)
-    #define SPP_SHIFT_ 6
-    #define SPP_MASK_  0x3F
-#else
-    #error "SPP_GROUP_SIZE must be either 32 or 64"
-#endif
-
-// Boost like configuration
-// ------------------------
-#if defined __clang__ 
-
-    #if defined(i386)
-        #include <cpuid.h>
-        inline void spp_cpuid(int info[4], int InfoType) {
-            __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
-        }
-    #endif
-
-    #define SPP_POPCNT   __builtin_popcount
-    #define SPP_POPCNT64 __builtin_popcountll
-    
-    #define SPP_HAS_CSTDINT
-
-    #ifndef __has_extension
-        #define __has_extension __has_feature
-    #endif
-
-    #if !__has_feature(cxx_exceptions) && !defined(SPP_NO_EXCEPTIONS)
-        #define SPP_NO_EXCEPTIONS
-    #endif
-
-    #if !__has_feature(cxx_rtti) && !defined(SPP_NO_RTTI)
-      #define SPP_NO_RTTI
-    #endif
-
-    #if !__has_feature(cxx_rtti) && !defined(SPP_NO_TYPEID)
-        #define SPP_NO_TYPEID
-    #endif
-
-    #if defined(__int64) && !defined(__GNUC__)
-        #define SPP_HAS_MS_INT64
-    #endif
-
-    #define SPP_HAS_NRVO
-
-    // Branch prediction hints
-    #if defined(__has_builtin)
-        #if __has_builtin(__builtin_expect)
-             #define SPP_LIKELY(x) __builtin_expect(x, 1)
-             #define SPP_UNLIKELY(x) __builtin_expect(x, 0)
-        #endif
-    #endif
-
-    // Clang supports "long long" in all compilation modes.
-    #define SPP_HAS_LONG_LONG
-
-    #if !__has_feature(cxx_constexpr)
-        #define SPP_NO_CXX11_CONSTEXPR
-    #endif
-
-    #if !__has_feature(cxx_decltype)
-        #define SPP_NO_CXX11_DECLTYPE
-    #endif
-
-    #if !__has_feature(cxx_decltype_incomplete_return_types)
-        #define SPP_NO_CXX11_DECLTYPE_N3276
-    #endif
-
-    #if !__has_feature(cxx_defaulted_functions)
-        #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS
-    #endif
-
-    #if !__has_feature(cxx_deleted_functions)
-        #define SPP_NO_CXX11_DELETED_FUNCTIONS
-    #endif
-
-    #if !__has_feature(cxx_explicit_conversions)
-        #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS
-    #endif
-
-    #if !__has_feature(cxx_default_function_template_args)
-        #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS
-    #endif
-
-    #if !__has_feature(cxx_generalized_initializers)
-        #define SPP_NO_CXX11_HDR_INITIALIZER_LIST
-    #endif
-
-    #if !__has_feature(cxx_lambdas)
-        #define SPP_NO_CXX11_LAMBDAS
-    #endif
-
-    #if !__has_feature(cxx_local_type_template_args)
-        #define SPP_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS
-    #endif
-
-    #if !__has_feature(cxx_nullptr)
-        #define SPP_NO_CXX11_NULLPTR
-    #endif
-
-    #if !__has_feature(cxx_range_for)
-        #define SPP_NO_CXX11_RANGE_BASED_FOR
-    #endif
-
-    #if !__has_feature(cxx_raw_string_literals)
-        #define SPP_NO_CXX11_RAW_LITERALS
-    #endif
-
-    #if !__has_feature(cxx_reference_qualified_functions)
-        #define SPP_NO_CXX11_REF_QUALIFIERS
-    #endif
-
-    #if !__has_feature(cxx_generalized_initializers)
-        #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX
-    #endif
-
-    #if !__has_feature(cxx_rvalue_references)
-        #define SPP_NO_CXX11_RVALUE_REFERENCES
-    #endif
-
-    #if !__has_feature(cxx_strong_enums)
-        #define SPP_NO_CXX11_SCOPED_ENUMS
-    #endif
-
-    #if !__has_feature(cxx_static_assert)
-        #define SPP_NO_CXX11_STATIC_ASSERT
-    #endif
-
-    #if !__has_feature(cxx_alias_templates)
-        #define SPP_NO_CXX11_TEMPLATE_ALIASES
-    #endif
-
-    #if !__has_feature(cxx_unicode_literals)
-        #define SPP_NO_CXX11_UNICODE_LITERALS
-    #endif
-
-    #if !__has_feature(cxx_variadic_templates)
-        #define SPP_NO_CXX11_VARIADIC_TEMPLATES
-    #endif
-
-    #if !__has_feature(cxx_user_literals)
-        #define SPP_NO_CXX11_USER_DEFINED_LITERALS
-    #endif
-
-    #if !__has_feature(cxx_alignas)
-        #define SPP_NO_CXX11_ALIGNAS
-    #endif
-
-    #if !__has_feature(cxx_trailing_return)
-        #define SPP_NO_CXX11_TRAILING_RESULT_TYPES
-    #endif
-
-    #if !__has_feature(cxx_inline_namespaces)
-        #define SPP_NO_CXX11_INLINE_NAMESPACES
-    #endif
-
-    #if !__has_feature(cxx_override_control)
-        #define SPP_NO_CXX11_FINAL
-    #endif
-
-    #if !(__has_feature(__cxx_binary_literals__) || __has_extension(__cxx_binary_literals__))
-        #define SPP_NO_CXX14_BINARY_LITERALS
-    #endif
-
-    #if !__has_feature(__cxx_decltype_auto__)
-        #define SPP_NO_CXX14_DECLTYPE_AUTO
-    #endif
-
-    #if !__has_feature(__cxx_aggregate_nsdmi__)
-        #define SPP_NO_CXX14_AGGREGATE_NSDMI
-    #endif
-
-    #if !__has_feature(__cxx_init_captures__)
-        #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES
-    #endif
-
-    #if !__has_feature(__cxx_generic_lambdas__)
-        #define SPP_NO_CXX14_GENERIC_LAMBDAS
-    #endif
-
-
-    #if !__has_feature(__cxx_generic_lambdas__) || !__has_feature(__cxx_relaxed_constexpr__)
-        #define SPP_NO_CXX14_CONSTEXPR
-    #endif
-
-    #if !__has_feature(__cxx_return_type_deduction__)
-        #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION
-    #endif
-
-    #if !__has_feature(__cxx_variable_templates__)
-        #define SPP_NO_CXX14_VARIABLE_TEMPLATES
-    #endif
-
-    #if __cplusplus < 201400
-        #define SPP_NO_CXX14_DIGIT_SEPARATORS
-    #endif
-
-    #if defined(__has_builtin) && __has_builtin(__builtin_unreachable)
-      #define SPP_UNREACHABLE_RETURN(x) __builtin_unreachable();
-    #endif
-
-    #define SPP_ATTRIBUTE_UNUSED __attribute__((__unused__))
-
-    #ifndef SPP_COMPILER
-        #define SPP_COMPILER "Clang version " __clang_version__
-    #endif
-
-    #define SPP_CLANG 1
-
-
-#elif defined __GNUC__
-
-    #define SPP_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
-
-    //  definition to expand macro then apply to pragma message
-    // #define VALUE_TO_STRING(x) #x
-    // #define VALUE(x) VALUE_TO_STRING(x)
-    // #define VAR_NAME_VALUE(var) #var "="  VALUE(var)
-    // #pragma message(VAR_NAME_VALUE(SPP_GCC_VERSION))
-
-    #if defined(i386)
-        #include <cpuid.h>
-        inline void spp_cpuid(int info[4], int InfoType) {
-            __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
-        }
-    #endif
-
-    // __POPCNT__ defined when the compiled with popcount support 
-    // (-mpopcnt compiler option is given for example)
-    #ifdef __POPCNT__
-        // slower unless compiled iwith -mpopcnt
-        #define SPP_POPCNT   __builtin_popcount
-        #define SPP_POPCNT64 __builtin_popcountll
-    #endif
-
-    #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L)
-        #define SPP_GCC_CXX11
-    #endif
-
-    #if __GNUC__ == 3
-        #if defined (__PATHSCALE__)
-             #define SPP_NO_TWO_PHASE_NAME_LOOKUP
-             #define SPP_NO_IS_ABSTRACT
-        #endif
-
-        #if __GNUC_MINOR__ < 4
-             #define SPP_NO_IS_ABSTRACT
-        #endif
-
-        #define SPP_NO_CXX11_EXTERN_TEMPLATE
-    #endif
-
-    #if __GNUC__ < 4
-    //
-    // All problems to gcc-3.x and earlier here:
-    //
-    #define SPP_NO_TWO_PHASE_NAME_LOOKUP
-        #ifdef __OPEN64__
-            #define SPP_NO_IS_ABSTRACT
-        #endif
-    #endif
-
-    // GCC prior to 3.4 had     #pragma once too but it didn't work well with filesystem links
-    #if SPP_GCC_VERSION >= 30400
-        #define SPP_HAS_PRAGMA_ONCE
-    #endif
-
-    #if SPP_GCC_VERSION < 40400
-        // Previous versions of GCC did not completely implement value-initialization:
-        // GCC Bug 30111, "Value-initialization of POD base class doesn't initialize
-        // members", reported by Jonathan Wakely in 2006,
-        // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=30111 (fixed for GCC 4.4)
-        // GCC Bug 33916, "Default constructor fails to initialize array members",
-        // reported by Michael Elizabeth Chastain in 2007,
-        // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33916 (fixed for GCC 4.2.4)
-        // See also: http://www.boost.org/libs/utility/value_init.htm    #compiler_issues
-        #define SPP_NO_COMPLETE_VALUE_INITIALIZATION
-    #endif
-
-    #if !defined(__EXCEPTIONS) && !defined(SPP_NO_EXCEPTIONS)
-        #define SPP_NO_EXCEPTIONS
-    #endif
-
-    //
-    // Threading support: Turn this on unconditionally here (except for
-    // those platforms where we can know for sure). It will get turned off again
-    // later if no threading API is detected.
-    //
-    #if !defined(__MINGW32__) && !defined(linux) && !defined(__linux) && !defined(__linux__)
-        #define SPP_HAS_THREADS
-    #endif
-
-    //
-    // gcc has "long long"
-    // Except on Darwin with standard compliance enabled (-pedantic)
-    // Apple gcc helpfully defines this macro we can query
-    //
-    #if !defined(__DARWIN_NO_LONG_LONG)
-        #define SPP_HAS_LONG_LONG
-    #endif
-
-    //
-    // gcc implements the named return value optimization since version 3.1
-    //
-    #define SPP_HAS_NRVO
-
-    // Branch prediction hints
-    #define SPP_LIKELY(x) __builtin_expect(x, 1)
-    #define SPP_UNLIKELY(x) __builtin_expect(x, 0)
-
-    //
-    // Dynamic shared object (DSO) and dynamic-link library (DLL) support
-    //
-    #if __GNUC__ >= 4
-       #if (defined(_WIN32) || defined(__WIN32__) || defined(WIN32)) && !defined(__CYGWIN__)
-            // All Win32 development environments, including 64-bit Windows and MinGW, define
-            // _WIN32 or one of its variant spellings. Note that Cygwin is a POSIX environment,
-            // so does not define _WIN32 or its variants.
-            #define SPP_HAS_DECLSPEC
-            #define SPP_SYMBOL_EXPORT __attribute__((__dllexport__))
-            #define SPP_SYMBOL_IMPORT __attribute__((__dllimport__))
-       #else
-            #define SPP_SYMBOL_EXPORT __attribute__((__visibility__("default")))
-            #define SPP_SYMBOL_IMPORT
-       #endif
-
-       #define SPP_SYMBOL_VISIBLE __attribute__((__visibility__("default")))
-    #else
-       // config/platform/win32.hpp will define SPP_SYMBOL_EXPORT, etc., unless already defined
-       #define SPP_SYMBOL_EXPORT
-    #endif
-
-    //
-    // RTTI and typeinfo detection is possible post gcc-4.3:
-    //
-    #if SPP_GCC_VERSION > 40300
-        #ifndef __GXX_RTTI
-            #ifndef SPP_NO_TYPEID
-                #define SPP_NO_TYPEID
-            #endif
-            #ifndef SPP_NO_RTTI
-                #define SPP_NO_RTTI
-            #endif
-        #endif
-    #endif
-
-    //
-    // Recent GCC versions have __int128 when in 64-bit mode.
-    //
-    // We disable this if the compiler is really nvcc with C++03 as it
-    // doesn't actually support __int128 as of CUDA_VERSION=7500
-    // even though it defines __SIZEOF_INT128__.
-    // See https://svn.boost.org/trac/boost/ticket/8048
-    //     https://svn.boost.org/trac/boost/ticket/11852
-    // Only re-enable this for nvcc if you're absolutely sure
-    // of the circumstances under which it's supported:
-    //
-    #if defined(__CUDACC__)
-        #if defined(SPP_GCC_CXX11)
-            #define SPP_NVCC_CXX11
-        #else
-            #define SPP_NVCC_CXX03
-        #endif
-    #endif
-
-    #if defined(__SIZEOF_INT128__) && !defined(SPP_NVCC_CXX03)
-        #define SPP_HAS_INT128
-    #endif
-    //
-    // Recent GCC versions have a __float128 native type, we need to
-    // include a std lib header to detect this - not ideal, but we'll
-    // be including <cstddef> later anyway when we select the std lib.
-    //
-    // Nevertheless, as of CUDA 7.5, using __float128 with the host
-    // compiler in pre-C++11 mode is still not supported.
-    // See https://svn.boost.org/trac/boost/ticket/11852
-    //
-    #ifdef __cplusplus
-        #include <cstddef>
-    #else
-        #include <stddef.h>
-    #endif
-
-    #if defined(_GLIBCXX_USE_FLOAT128) && !defined(__STRICT_ANSI__) && !defined(SPP_NVCC_CXX03)
-         #define SPP_HAS_FLOAT128
-    #endif
-
-    // C++0x features in 4.3.n and later
-    //
-    #if (SPP_GCC_VERSION >= 40300) && defined(SPP_GCC_CXX11)
-       // C++0x features are only enabled when -std=c++0x or -std=gnu++0x are
-       // passed on the command line, which in turn defines
-       // __GXX_EXPERIMENTAL_CXX0X__.
-       #define SPP_HAS_DECLTYPE
-       #define SPP_HAS_RVALUE_REFS
-       #define SPP_HAS_STATIC_ASSERT
-       #define SPP_HAS_VARIADIC_TMPL
-       #define SPP_HAS_CSTDINT
-    #else
-       #define SPP_NO_CXX11_DECLTYPE
-       #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS
-       #define SPP_NO_CXX11_RVALUE_REFERENCES
-       #define SPP_NO_CXX11_STATIC_ASSERT
-    #endif
-
-    // C++0x features in 4.4.n and later
-    //
-    #if (SPP_GCC_VERSION < 40400) || !defined(SPP_GCC_CXX11)
-       #define SPP_NO_CXX11_AUTO_DECLARATIONS
-       #define SPP_NO_CXX11_AUTO_MULTIDECLARATIONS
-       #define SPP_NO_CXX11_CHAR16_T
-       #define SPP_NO_CXX11_CHAR32_T
-       #define SPP_NO_CXX11_HDR_INITIALIZER_LIST
-       #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS
-       #define SPP_NO_CXX11_DELETED_FUNCTIONS
-       #define SPP_NO_CXX11_TRAILING_RESULT_TYPES
-       #define SPP_NO_CXX11_INLINE_NAMESPACES
-       #define SPP_NO_CXX11_VARIADIC_TEMPLATES
-    #endif
-
-    #if SPP_GCC_VERSION < 40500
-       #define SPP_NO_SFINAE_EXPR
-    #endif
-
-    // GCC 4.5 forbids declaration of defaulted functions in private or protected sections
-    #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 5) || !defined(SPP_GCC_CXX11)
-       #define SPP_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS
-    #endif
-
-    // C++0x features in 4.5.0 and later
-    //
-    #if (SPP_GCC_VERSION < 40500) || !defined(SPP_GCC_CXX11)
-       #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS
-       #define SPP_NO_CXX11_LAMBDAS
-       #define SPP_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS
-       #define SPP_NO_CXX11_RAW_LITERALS
-       #define SPP_NO_CXX11_UNICODE_LITERALS
-    #endif
-
-    // C++0x features in 4.5.1 and later
-    //
-    #if (SPP_GCC_VERSION < 40501) || !defined(SPP_GCC_CXX11)
-       // scoped enums have a serious bug in 4.4.0, so define SPP_NO_CXX11_SCOPED_ENUMS before 4.5.1
-       // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38064
-       #define SPP_NO_CXX11_SCOPED_ENUMS
-    #endif
-
-    // C++0x features in 4.6.n and later
-    //
-    #if (SPP_GCC_VERSION < 40600) || !defined(SPP_GCC_CXX11)
-        #define SPP_NO_CXX11_CONSTEXPR
-        #define SPP_NO_CXX11_NULLPTR
-        #define SPP_NO_CXX11_RANGE_BASED_FOR
-        #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX
-    #endif
-
-    // C++0x features in 4.7.n and later
-    //
-    #if (SPP_GCC_VERSION < 40700) || !defined(SPP_GCC_CXX11)
-        #define SPP_NO_CXX11_FINAL
-        #define SPP_NO_CXX11_TEMPLATE_ALIASES
-        #define SPP_NO_CXX11_USER_DEFINED_LITERALS
-        #define SPP_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS
-    #endif
-
-    // C++0x features in 4.8.n and later
-    //
-    #if (SPP_GCC_VERSION < 40800) || !defined(SPP_GCC_CXX11)
-        #define SPP_NO_CXX11_ALIGNAS
-    #endif
-
-    // C++0x features in 4.8.1 and later
-    //
-    #if (SPP_GCC_VERSION < 40801) || !defined(SPP_GCC_CXX11)
-        #define SPP_NO_CXX11_DECLTYPE_N3276
-        #define SPP_NO_CXX11_REF_QUALIFIERS
-        #define SPP_NO_CXX14_BINARY_LITERALS
-    #endif
-
-    // C++14 features in 4.9.0 and later
-    //
-    #if (SPP_GCC_VERSION < 40900) || (__cplusplus < 201300)
-        #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION
-        #define SPP_NO_CXX14_GENERIC_LAMBDAS
-        #define SPP_NO_CXX14_DIGIT_SEPARATORS
-        #define SPP_NO_CXX14_DECLTYPE_AUTO
-        #if !((SPP_GCC_VERSION >= 40801) && (SPP_GCC_VERSION < 40900) && defined(SPP_GCC_CXX11))
-            #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES
-        #endif
-    #endif
-
-
-    // C++ 14:
-    #if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)
-        #define SPP_NO_CXX14_AGGREGATE_NSDMI
-    #endif
-    #if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)
-        #define SPP_NO_CXX14_CONSTEXPR
-    #endif
-    #if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)
-        #define SPP_NO_CXX14_VARIABLE_TEMPLATES
-    #endif
-
-    //
-    // Unused attribute:
-    #if __GNUC__ >= 4
-        #define SPP_ATTRIBUTE_UNUSED __attribute__((__unused__))
-    #endif
-    //
-    // __builtin_unreachable:
-    #if SPP_GCC_VERSION >= 40800
-        #define SPP_UNREACHABLE_RETURN(x) __builtin_unreachable();
-    #endif
-
-    #ifndef SPP_COMPILER
-        #define SPP_COMPILER "GNU C++ version " __VERSION__
-    #endif
-
-    // ConceptGCC compiler:
-    //   http://www.generic-programming.org/software/ConceptGCC/
-    #ifdef __GXX_CONCEPTS__
-        #define SPP_HAS_CONCEPTS
-        #define SPP_COMPILER "ConceptGCC version " __VERSION__
-    #endif
-
-
-#elif defined _MSC_VER
-
-    #include <intrin.h>                     // for __popcnt()
-
-    #define SPP_POPCNT_CHECK  // slower when defined, but we have to check!
-    #define spp_cpuid(info, x)    __cpuid(info, x)
-
-    #define SPP_POPCNT __popcnt
-    #if (SPP_GROUP_SIZE == 64 && INTPTR_MAX == INT64_MAX)
-        #define SPP_POPCNT64 __popcnt64
-    #endif
-
-    // Attempt to suppress VC6 warnings about the length of decorated names (obsolete):
-    #pragma warning( disable : 4503 ) // warning: decorated name length exceeded
-
-    #define SPP_HAS_PRAGMA_ONCE
-    #define SPP_HAS_CSTDINT
-
-   //
-    // versions check:
-    // we don't support Visual C++ prior to version 7.1:
-    #if _MSC_VER < 1310
-        #error "Antique compiler not supported"
-    #endif
-
-    #if _MSC_FULL_VER < 180020827
-        #define SPP_NO_FENV_H
-    #endif
-
-    #if _MSC_VER < 1400
-        // although a conforming signature for swprint exists in VC7.1
-        // it appears not to actually work:
-        #define SPP_NO_SWPRINTF
-
-        // Our extern template tests also fail for this compiler:
-        #define SPP_NO_CXX11_EXTERN_TEMPLATE
-
-        // Variadic macros do not exist for VC7.1 and lower
-        #define SPP_NO_CXX11_VARIADIC_MACROS
-    #endif
-
-    #if _MSC_VER < 1500  // 140X == VC++ 8.0
-        #undef SPP_HAS_CSTDINT
-        #define SPP_NO_MEMBER_TEMPLATE_FRIENDS
-    #endif
-
-    #if _MSC_VER < 1600  // 150X == VC++ 9.0
-        // A bug in VC9:
-        #define SPP_NO_ADL_BARRIER
-    #endif
-
-
-    // MSVC (including the latest checked version) has not yet completely
-    // implemented value-initialization, as is reported:
-    // "VC++ does not value-initialize members of derived classes without
-    // user-declared constructor", reported in 2009 by Sylvester Hesp:
-    // https:    //connect.microsoft.com/VisualStudio/feedback/details/484295
-    // "Presence of copy constructor breaks member class initialization",
-    // reported in 2009 by Alex Vakulenko:
-    // https:    //connect.microsoft.com/VisualStudio/feedback/details/499606
-    // "Value-initialization in new-expression", reported in 2005 by
-    // Pavel Kuznetsov (MetaCommunications Engineering):
-    // https:    //connect.microsoft.com/VisualStudio/feedback/details/100744
-    // See also: http:    //www.boost.org/libs/utility/value_init.htm    #compiler_issues
-    // (Niels Dekker, LKEB, May 2010)
-    #define SPP_NO_COMPLETE_VALUE_INITIALIZATION
-
-    #ifndef _NATIVE_WCHAR_T_DEFINED
-        #define SPP_NO_INTRINSIC_WCHAR_T
-    #endif
-
-    //
-    // check for exception handling support:
-    #if !defined(_CPPUNWIND) && !defined(SPP_NO_EXCEPTIONS)
-        #define SPP_NO_EXCEPTIONS
-    #endif
-
-    //
-    // __int64 support:
-    //
-    #define SPP_HAS_MS_INT64
-    #if defined(_MSC_EXTENSIONS) || (_MSC_VER >= 1400)
-        #define SPP_HAS_LONG_LONG
-    #else
-        #define SPP_NO_LONG_LONG
-    #endif
-
-    #if (_MSC_VER >= 1400) && !defined(_DEBUG)
-        #define SPP_HAS_NRVO
-    #endif
-
-    #if _MSC_VER >= 1500  // 150X == VC++ 9.0
-        #define SPP_HAS_PRAGMA_DETECT_MISMATCH
-    #endif
-
-    //
-    // disable Win32 API's if compiler extensions are
-    // turned off:
-    //
-    #if !defined(_MSC_EXTENSIONS) && !defined(SPP_DISABLE_WIN32)
-        #define SPP_DISABLE_WIN32
-    #endif
-
-    #if !defined(_CPPRTTI) && !defined(SPP_NO_RTTI)
-        #define SPP_NO_RTTI
-    #endif
-
-    //
-    // TR1 features:
-    //
-    #if _MSC_VER >= 1700
-        //      #define SPP_HAS_TR1_HASH	// don't know if this is true yet.
-        //      #define SPP_HAS_TR1_TYPE_TRAITS	// don't know if this is true yet.
-        #define SPP_HAS_TR1_UNORDERED_MAP
-        #define SPP_HAS_TR1_UNORDERED_SET
-    #endif
-
-    //
-    // C++0x features
-    //
-    //   See above for SPP_NO_LONG_LONG
-
-    // C++ features supported by VC++ 10 (aka 2010)
-    //
-    #if _MSC_VER < 1600
-        #define SPP_NO_CXX11_AUTO_DECLARATIONS
-        #define SPP_NO_CXX11_AUTO_MULTIDECLARATIONS
-        #define SPP_NO_CXX11_LAMBDAS
-        #define SPP_NO_CXX11_RVALUE_REFERENCES
-        #define SPP_NO_CXX11_STATIC_ASSERT
-        #define SPP_NO_CXX11_NULLPTR
-        #define SPP_NO_CXX11_DECLTYPE
-    #endif // _MSC_VER < 1600
-
-    #if _MSC_VER >= 1600
-        #define SPP_HAS_STDINT_H
-    #endif
-
-    // C++11 features supported by VC++ 11 (aka 2012)
-    //
-    #if _MSC_VER < 1700
-        #define SPP_NO_CXX11_FINAL
-        #define SPP_NO_CXX11_RANGE_BASED_FOR
-        #define SPP_NO_CXX11_SCOPED_ENUMS
-    #endif // _MSC_VER < 1700
-
-    // C++11 features supported by VC++ 12 (aka 2013).
-    //
-    #if _MSC_FULL_VER < 180020827
-        #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS
-        #define SPP_NO_CXX11_DELETED_FUNCTIONS
-        #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS
-        #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS
-        #define SPP_NO_CXX11_RAW_LITERALS
-        #define SPP_NO_CXX11_TEMPLATE_ALIASES
-        #define SPP_NO_CXX11_TRAILING_RESULT_TYPES
-        #define SPP_NO_CXX11_VARIADIC_TEMPLATES
-        #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX
-        #define SPP_NO_CXX11_DECLTYPE_N3276
-    #endif
-
-    // C++11 features supported by VC++ 14 (aka 2014) CTP1
-    #if (_MSC_FULL_VER < 190021730)
-        #define SPP_NO_CXX11_REF_QUALIFIERS
-        #define SPP_NO_CXX11_USER_DEFINED_LITERALS
-        #define SPP_NO_CXX11_ALIGNAS
-        #define SPP_NO_CXX11_INLINE_NAMESPACES
-        #define SPP_NO_CXX14_DECLTYPE_AUTO
-        #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES
-        #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION
-        #define SPP_NO_CXX11_HDR_INITIALIZER_LIST
-    #endif
-
-    // C++11 features not supported by any versions
-    #define SPP_NO_CXX11_CHAR16_T
-    #define SPP_NO_CXX11_CHAR32_T
-    #define SPP_NO_CXX11_CONSTEXPR
-    #define SPP_NO_CXX11_UNICODE_LITERALS
-    #define SPP_NO_SFINAE_EXPR
-    #define SPP_NO_TWO_PHASE_NAME_LOOKUP
-
-    // C++ 14:
-    #if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304)
-        #define SPP_NO_CXX14_AGGREGATE_NSDMI
-    #endif
-
-    #if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)
-        #define SPP_NO_CXX14_BINARY_LITERALS
-    #endif
-
-    #if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)
-        #define SPP_NO_CXX14_CONSTEXPR
-    #endif
-
-    #if (__cplusplus < 201304) // There's no SD6 check for this....
-        #define SPP_NO_CXX14_DIGIT_SEPARATORS
-    #endif
-
-    #if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)
-        #define SPP_NO_CXX14_GENERIC_LAMBDAS
-    #endif
-
-    #if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)
-         #define SPP_NO_CXX14_VARIABLE_TEMPLATES
-    #endif
-
-#endif
-
-// from boost/config/suffix.hpp
-// ----------------------------
-#ifndef SPP_ATTRIBUTE_UNUSED
-    #define SPP_ATTRIBUTE_UNUSED
-#endif
-
 // includes
 // --------
-#if defined(SPP_HAS_CSTDINT) && (__cplusplus >= 201103)
-    #include <cstdint>
-#else
-    #if defined(__FreeBSD__) || defined(__IBMCPP__) || defined(_AIX)
-        #include <inttypes.h>
-    #else
-        #include <stdint.h>
-    #endif
-#endif
-
 #include <cassert>
 #include <cstring>
 #include <string>
@@ -851,418 +60,77 @@
 #include <iosfwd>
 #include <ios>
 
+#include <sparsepp/spp_stdint.h>  // includes spp_config.h
+#include <sparsepp/spp_traits.h>
+#include <sparsepp/spp_utils.h>
+
+#ifdef SPP_INCLUDE_SPP_ALLOC
+    #include <sparsepp/spp_dlalloc.h>
+#endif
+
 #if !defined(SPP_NO_CXX11_HDR_INITIALIZER_LIST)
     #include <initializer_list>
 #endif
 
 #if (SPP_GROUP_SIZE == 32)
+    #define SPP_SHIFT_ 5
+    #define SPP_MASK_  0x1F
     typedef uint32_t group_bm_type;
-#else
+#elif (SPP_GROUP_SIZE == 64)
+    #define SPP_SHIFT_ 6
+    #define SPP_MASK_  0x3F
     typedef uint64_t group_bm_type;
-#endif
-
-template<int S, int H> class HashObject; // for Google's benchmark, not in spp namespace!
-
-//  ----------------------------------------------------------------------
-//                  H A S H    F U N C T I O N S
-//                  ----------------------------
-//
-//    Implements spp::spp_hash() and spp::hash_combine()
-//
-//    This is exactly the content of spp_utils.h, except for the copyright 
-//    attributions at the beginning
-//
-//    WARNING: Any change here has to be duplicated in spp_utils.h.
-//  ----------------------------------------------------------------------
-
-#if !defined(spp_utils_h_guard_)
-#define spp_utils_h_guard_
-
-#if defined(_MSC_VER) 
-    #if (_MSC_VER >= 1600 )                      // vs2010 (1900 is vs2015)
-        #include <functional>
-        #define SPP_HASH_CLASS std::hash
-    #else
-        #include  <hash_map>
-        #define SPP_HASH_CLASS stdext::hash_compare
-    #endif
-    #if (_MSC_FULL_VER < 190021730)
-        #define SPP_NO_CXX11_NOEXCEPT
-    #endif
-#elif defined(__GNUC__)
-    #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L)
-        #include <functional>
-        #define SPP_HASH_CLASS std::hash
-
-        #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) < 40600
-            #define SPP_NO_CXX11_NOEXCEPT
-        #endif
-    #else
-        #include <tr1/unordered_map>
-        #define SPP_HASH_CLASS std::tr1::hash
-        #define SPP_NO_CXX11_NOEXCEPT
-    #endif
-#elif defined __clang__
-    #include <functional>
-    #define SPP_HASH_CLASS  std::hash
-
-    #if !__has_feature(cxx_noexcept)
-        #define SPP_NO_CXX11_NOEXCEPT
-    #endif
 #else
-    #include <functional>
-    #define SPP_HASH_CLASS  std::hash
-#endif
-
-#ifdef SPP_NO_CXX11_NOEXCEPT
-    #define SPP_NOEXCEPT
-#else
-    #define SPP_NOEXCEPT noexcept
-#endif
-
-#ifdef SPP_NO_CXX11_CONSTEXPR
-    #define SPP_CONSTEXPR
-#else
-    #define SPP_CONSTEXPR constexpr
-#endif
-
-#define SPP_INLINE
-
-#ifndef SPP_NAMESPACE
-    #define SPP_NAMESPACE spp
+    #error "SPP_GROUP_SIZE must be either 32 or 64"
 #endif
 
-namespace SPP_NAMESPACE
-{
-
-template <class T>
-struct spp_hash
-{
-    SPP_INLINE size_t operator()(const T &__v) const SPP_NOEXCEPT 
-    {
-        SPP_HASH_CLASS<T> hasher;
-        return hasher(__v);
-    }
-};
-
-template <class T>
-struct spp_hash<T *>
-{
-    static size_t spp_log2 (size_t val) SPP_NOEXCEPT 
-    {
-        size_t res = 0;
-        while (val > 1) 
-        {
-            val >>= 1;
-            res++;
-        }
-        return res;
-    }
-
-    SPP_INLINE size_t operator()(const T *__v) const SPP_NOEXCEPT 
-    {
-        static const size_t shift = 3; // spp_log2(1 + sizeof(T)); // T might be incomplete!
-        return static_cast<size_t>((*(reinterpret_cast<const uintptr_t *>(&__v))) >> shift);
-    }
-};
-
-// from http://burtleburtle.net/bob/hash/integer.html
-// fast and efficient for power of two table sizes where we always 
-// consider the last bits.
-// ---------------------------------------------------------------
-inline size_t spp_mix_32(uint32_t a)
-{
-    a = a ^ (a >> 4);
-    a = (a ^ 0xdeadbeef) + (a << 5);
-    a = a ^ (a >> 11);
-    return static_cast<size_t>(a);
-}
-
-// Maybe we should do a more thorough scrambling as described in 
-// https://gist.github.com/badboy/6267743
-// -------------------------------------------------------------
-inline size_t spp_mix_64(uint64_t a)
-{
-    a = a ^ (a >> 4);
-    a = (a ^ 0xdeadbeef) + (a << 5);
-    a = a ^ (a >> 11);
-    return a;
-}
-
-template <>
-struct spp_hash<bool> : public std::unary_function<bool, size_t>
-{
-    SPP_INLINE size_t operator()(bool __v) const SPP_NOEXCEPT 
-    { return static_cast<size_t>(__v); }
-};
-
-template <>
-struct spp_hash<char> : public std::unary_function<char, size_t>
-{
-    SPP_INLINE size_t operator()(char __v) const SPP_NOEXCEPT 
-    { return static_cast<size_t>(__v); }
-};
-
-template <>
-struct spp_hash<signed char> : public std::unary_function<signed char, size_t>
-{
-    SPP_INLINE size_t operator()(signed char __v) const SPP_NOEXCEPT 
-    { return static_cast<size_t>(__v); }
-};
-
-template <>
-struct spp_hash<unsigned char> : public std::unary_function<unsigned char, size_t>
-{
-    SPP_INLINE size_t operator()(unsigned char __v) const SPP_NOEXCEPT 
-    { return static_cast<size_t>(__v); }
-};
-
-template <>
-struct spp_hash<wchar_t> : public std::unary_function<wchar_t, size_t>
-{
-    SPP_INLINE size_t operator()(wchar_t __v) const SPP_NOEXCEPT 
-    { return static_cast<size_t>(__v); }
-};
-
-template <>
-struct spp_hash<int16_t> : public std::unary_function<int16_t, size_t>
-{
-    SPP_INLINE size_t operator()(int16_t __v) const SPP_NOEXCEPT 
-    { return spp_mix_32(static_cast<uint32_t>(__v)); }
-};
-
-template <> 
-struct spp_hash<uint16_t> : public std::unary_function<uint16_t, size_t>
-{
-    SPP_INLINE size_t operator()(uint16_t __v) const SPP_NOEXCEPT 
-    { return spp_mix_32(static_cast<uint32_t>(__v)); }
-};
-
-template <>
-struct spp_hash<int32_t> : public std::unary_function<int32_t, size_t>
-{
-    SPP_INLINE size_t operator()(int32_t __v) const SPP_NOEXCEPT 
-    { return spp_mix_32(static_cast<uint32_t>(__v)); }
-};
-
-template <>
-struct spp_hash<uint32_t> : public std::unary_function<uint32_t, size_t>
-{
-    SPP_INLINE size_t operator()(uint32_t __v) const SPP_NOEXCEPT 
-    { return spp_mix_32(static_cast<uint32_t>(__v)); }
-};
-
-template <>
-struct spp_hash<int64_t> : public std::unary_function<int64_t, size_t>
-{
-    SPP_INLINE size_t operator()(int64_t __v) const SPP_NOEXCEPT 
-    { return spp_mix_64(static_cast<uint64_t>(__v)); }
-};
-
-template <>
-struct spp_hash<uint64_t> : public std::unary_function<uint64_t, size_t>
-{
-    SPP_INLINE size_t operator()(uint64_t __v) const SPP_NOEXCEPT 
-    { return spp_mix_64(static_cast<uint64_t>(__v)); }
-};
-
-template <>
-struct spp_hash<float> : public std::unary_function<float, size_t>
-{
-    SPP_INLINE size_t operator()(float __v) const SPP_NOEXCEPT
-    {
-        // -0.0 and 0.0 should return same hash
-        uint32_t *as_int = reinterpret_cast<uint32_t *>(&__v);
-        return (__v == 0) ? static_cast<size_t>(0) : spp_mix_32(*as_int);
-    }
-};
-
-template <>
-struct spp_hash<double> : public std::unary_function<double, size_t>
-{
-    SPP_INLINE size_t operator()(double __v) const SPP_NOEXCEPT
-    {
-        // -0.0 and 0.0 should return same hash
-        uint64_t *as_int = reinterpret_cast<uint64_t *>(&__v);
-        return (__v == 0) ? static_cast<size_t>(0) : spp_mix_64(*as_int);
-    }
-};
-
-template <class T, int sz> struct Combiner
-{
-    inline void operator()(T& seed, T value);
-};
-
-template <class T> struct Combiner<T, 4>
-{
-    inline void  operator()(T& seed, T value)
-    {
-        seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2);
-    }
-};
-
-template <class T> struct Combiner<T, 8>
-{
-    inline void  operator()(T& seed, T value)
-    {
-        seed ^= value + T(0xc6a4a7935bd1e995) + (seed << 6) + (seed >> 2);
-    }
-};
-
-template <class T>
-inline void hash_combine(std::size_t& seed, T const& v)
-{
-    spp::spp_hash<T> hasher;
-    Combiner<std::size_t, sizeof(std::size_t)> combiner;
-
-    combiner(seed, hasher(v));
-}
-    
-}
-
-#endif // spp_utils_h_guard_
-
-SPP_START_NAMESPACE
+namespace spp_ {
 
 //  ----------------------------------------------------------------------
 //                  U T I L    F U N C T I O N S
 //  ----------------------------------------------------------------------
 template <class E>
 inline void throw_exception(const E& exception)
-{
-#if !defined(SPP_NO_EXCEPTIONS)
-    throw exception;
-#else
-    assert(0);
-    abort();
-#endif
-}
-
-//  ----------------------------------------------------------------------
-//              M U T A B L E     P A I R      H A C K
-// turn mutable std::pair<K, V> into correct value_type std::pair<const K, V>
-//  ----------------------------------------------------------------------
-template <class T>
-struct cvt
-{
-    typedef T type;
-};
-
-template <class K, class V>
-struct cvt<std::pair<K, V> >
-{
-    typedef std::pair<const K, V> type;
-};
-
-template <class K, class V>
-struct cvt<const std::pair<K, V> >
-{
-    typedef const std::pair<const K, V> type;
-};
-
-//  ----------------------------------------------------------------------
-//              M O V E   I T E R A T O R
-//  ----------------------------------------------------------------------
-#ifdef SPP_NO_CXX11_RVALUE_REFERENCES
-    #define MK_MOVE_IT(p) (p)
-#else
-    #define MK_MOVE_IT(p) std::make_move_iterator(p)
-#endif
-
-
-//  ----------------------------------------------------------------------
-//                  A L L O C A T O R     S T U F F 
-//  ----------------------------------------------------------------------
-template<class T>
-class libc_allocator_with_realloc 
-{
-public:
-    typedef T value_type;
-    typedef size_t size_type;
-    typedef ptrdiff_t difference_type;
-
-    typedef T* pointer;
-    typedef const T* const_pointer;
-    typedef T& reference;
-    typedef const T& const_reference;
-
-    libc_allocator_with_realloc() {}
-    libc_allocator_with_realloc(const libc_allocator_with_realloc& /*unused*/) {}
-    ~libc_allocator_with_realloc() {}
-
-    pointer address(reference r) const  { return &r; }
-    const_pointer address(const_reference r) const  { return &r; }
-
-    pointer allocate(size_type n, const_pointer  /*unused*/= 0) 
-    {
-        return static_cast<pointer>(malloc(n * sizeof(value_type)));
-    }
-
-    void deallocate(pointer p, size_type /*unused*/) 
-    {
-        free(p);
-    }
-
-    pointer reallocate(pointer p, size_type n) 
-    {
-        return static_cast<pointer>(realloc(p, n * sizeof(value_type)));
-    }
-
-    size_type max_size() const  
-    {
-        return static_cast<size_type>(-1) / sizeof(value_type);
-    }
-
-    void construct(pointer p, const value_type& val) 
-    {
-        new(p) value_type(val);
-    }
-
-    void destroy(pointer p) { p->~value_type(); }
-
-    template <class U>
-    explicit libc_allocator_with_realloc(const libc_allocator_with_realloc<U>& /*unused*/) {}
-
-    template<class U>
-    struct rebind 
-    {
-        typedef libc_allocator_with_realloc<U> other;
-    };
-};
+{
+#if !defined(SPP_NO_EXCEPTIONS)
+    throw exception;
+#else
+    assert(0);
+    abort();
+#endif
+}
 
 //  ----------------------------------------------------------------------
-// libc_allocator_with_realloc<void> specialization.
+//              M U T A B L E     P A I R      H A C K
+// turn std::pair<const K, V> into mutable std::pair<K, V>
 //  ----------------------------------------------------------------------
-template<>
-class libc_allocator_with_realloc<void> 
+template <class T>
+struct cvt
 {
-public:
-    typedef void value_type;
-    typedef size_t size_type;
-    typedef ptrdiff_t difference_type;
-    typedef void* pointer;
-    typedef const void* const_pointer;
-
-    template<class U>
-    struct rebind 
-    {
-        typedef libc_allocator_with_realloc<U> other;
-    };
+    typedef T type;
 };
 
-template<class T>
-inline bool operator==(const libc_allocator_with_realloc<T>& /*unused*/,
-                       const libc_allocator_with_realloc<T>& /*unused*/)
+template <class K, class V>
+struct cvt<std::pair<const K, V> >
 {
-    return true;
-}
+    typedef std::pair<K, V> type;
+};
 
-template<class T>
-inline bool operator!=(const libc_allocator_with_realloc<T>& /*unused*/,
-                       const libc_allocator_with_realloc<T>& /*unused*/)
+template <class K, class V>
+struct cvt<const std::pair<const K, V> >
 {
-    return false;
-}
+    typedef const std::pair<K, V> type;
+};
+
+//  ----------------------------------------------------------------------
+//              M O V E   I T E R A T O R
+//  ----------------------------------------------------------------------
+#ifdef SPP_NO_CXX11_RVALUE_REFERENCES
+    #define MK_MOVE_IT(p) (p)
+#else
+    #define MK_MOVE_IT(p) std::make_move_iterator(p)
+#endif
+
 
 //  ----------------------------------------------------------------------
 //             I N T E R N A L    S T U F F
@@ -1275,7 +143,7 @@ inline bool operator!=(const libc_allocator_with_realloc<T>& /*unused*/,
     #define SPP_COMPILE_ASSERT static_assert
 #endif
 
-namespace sparsehash_internal 
+namespace sparsehash_internal
 {
 
 // Adaptor methods for reading/writing data from an INPUT or OUTPUT
@@ -1308,14 +176,14 @@ namespace sparsehash_internal
 
     template<typename Ignored>
     inline bool read_data_internal(Ignored* /*unused*/, FILE* fp,
-                                   void* data, size_t length) 
+                                   void* data, size_t length)
     {
         return fread(data, length, 1, fp) == 1;
     }
 
     template<typename Ignored>
     inline bool write_data_internal(Ignored* /*unused*/, FILE* fp,
-                                    const void* data, size_t length) 
+                                    const void* data, size_t length)
     {
         return fwrite(data, length, 1, fp) == 1;
     }
@@ -1328,23 +196,23 @@ namespace sparsehash_internal
     // the istream/ostream is a template type.  So we jump through hoops.
     template<typename ISTREAM>
     inline bool read_data_internal_for_istream(ISTREAM* fp,
-                                               void* data, size_t length) 
+                                               void* data, size_t length)
     {
-        return fp->read(reinterpret_cast<char*>(data), 
+        return fp->read(reinterpret_cast<char*>(data),
                         static_cast<std::streamsize>(length)).good();
     }
     template<typename Ignored>
     inline bool read_data_internal(Ignored* /*unused*/, std::istream* fp,
-                                   void* data, size_t length) 
+                                   void* data, size_t length)
     {
         return read_data_internal_for_istream(fp, data, length);
     }
 
     template<typename OSTREAM>
     inline bool write_data_internal_for_ostream(OSTREAM* fp,
-                                                const void* data, size_t length) 
+                                                const void* data, size_t length)
     {
-        return fp->write(reinterpret_cast<const char*>(data), 
+        return fp->write(reinterpret_cast<const char*>(data),
                          static_cast<std::streamsize>(length)).good();
     }
     template<typename Ignored>
@@ -1360,7 +228,7 @@ namespace sparsehash_internal
     // buffer and a length and returns the number of bytes read.
     template <typename INPUT>
     inline bool read_data_internal(INPUT* fp, void* /*unused*/,
-                                   void* data, size_t length) 
+                                   void* data, size_t length)
     {
         return static_cast<size_t>(fp->Read(data, length)) == length;
     }
@@ -1369,7 +237,7 @@ namespace sparsehash_internal
     // a buffer and a length and returns the number of bytes written.
     template <typename OUTPUT>
     inline bool write_data_internal(OUTPUT* fp, void* /*unused*/,
-                                    const void* data, size_t length) 
+                                    const void* data, size_t length)
     {
         return static_cast<size_t>(fp->Write(data, length)) == length;
     }
@@ -1377,13 +245,13 @@ namespace sparsehash_internal
     // ----- low-level I/O: the public API ----
 
     template <typename INPUT>
-    inline bool read_data(INPUT* fp, void* data, size_t length) 
+    inline bool read_data(INPUT* fp, void* data, size_t length)
     {
         return read_data_internal(fp, fp, data, length);
     }
 
     template <typename OUTPUT>
-    inline bool write_data(OUTPUT* fp, const void* data, size_t length) 
+    inline bool write_data(OUTPUT* fp, const void* data, size_t length)
     {
         return write_data_internal(fp, fp, data, length);
     }
@@ -1395,15 +263,15 @@ namespace sparsehash_internal
     // INPUT and OUTPUT must match legal inputs to read/write_data (above).
     // --------------------------------------------------------------------
     template <typename INPUT, typename IntType>
-    bool read_bigendian_number(INPUT* fp, IntType* value, size_t length) 
+    bool read_bigendian_number(INPUT* fp, IntType* value, size_t length)
     {
         *value = 0;
         unsigned char byte;
         // We require IntType to be unsigned or else the shifting gets all screwy.
         SPP_COMPILE_ASSERT(static_cast<IntType>(-1) > static_cast<IntType>(0), "serializing_int_requires_an_unsigned_type");
-        for (size_t i = 0; i < length; ++i) 
+        for (size_t i = 0; i < length; ++i)
         {
-            if (!read_data(fp, &byte, sizeof(byte))) 
+            if (!read_data(fp, &byte, sizeof(byte)))
                 return false;
             *value |= static_cast<IntType>(byte) << ((length - 1 - i) * 8);
         }
@@ -1411,12 +279,12 @@ namespace sparsehash_internal
     }
 
     template <typename OUTPUT, typename IntType>
-    bool write_bigendian_number(OUTPUT* fp, IntType value, size_t length) 
+    bool write_bigendian_number(OUTPUT* fp, IntType value, size_t length)
     {
         unsigned char byte;
         // We require IntType to be unsigned or else the shifting gets all screwy.
         SPP_COMPILE_ASSERT(static_cast<IntType>(-1) > static_cast<IntType>(0), "serializing_int_requires_an_unsigned_type");
-        for (size_t i = 0; i < length; ++i) 
+        for (size_t i = 0; i < length; ++i)
         {
             byte = (sizeof(value) <= length-1 - i)
                 ? static_cast<unsigned char>(0) : static_cast<unsigned char>((value >> ((length-1 - i) * 8)) & 255);
@@ -1431,16 +299,16 @@ namespace sparsehash_internal
     // however, we don't try to normalize endianness.
     // This is the type used for NopointerSerializer.
     // ---------------------------------------------------------------
-    template <typename value_type> struct pod_serializer 
+    template <typename value_type> struct pod_serializer
     {
         template <typename INPUT>
-        bool operator()(INPUT* fp, value_type* value) const 
+        bool operator()(INPUT* fp, value_type* value) const
         {
             return read_data(fp, value, sizeof(*value));
         }
 
         template <typename OUTPUT>
-        bool operator()(OUTPUT* fp, const value_type& value) const 
+        bool operator()(OUTPUT* fp, const value_type& value) const
         {
             return write_data(fp, &value, sizeof(value));
         }
@@ -1450,11 +318,11 @@ namespace sparsehash_internal
     // Settings contains parameters for growing and shrinking the table.
     // It also packages zero-size functor (ie. hasher).
     //
-    // It does some munging of the hash value for the cases where 
+    // It does some munging of the hash value for the cases where
     // the original hash function is not very good.
     // ---------------------------------------------------------------
     template<typename Key, typename HashFunc, typename SizeType, int HT_MIN_BUCKETS>
-    class sh_hashtable_settings : public HashFunc 
+    class sh_hashtable_settings : public HashFunc
     {
     private:
 #ifndef SPP_MIX_HASH
@@ -1514,13 +382,13 @@ namespace sparsehash_internal
               enlarge_threshold_(0),
               shrink_threshold_(0),
               consider_shrink_(false),
-              num_ht_copies_(0) 
+              num_ht_copies_(0)
         {
             set_enlarge_factor(ht_occupancy_flt);
             set_shrink_factor(ht_empty_flt);
         }
 
-        size_t hash(const key_type& v) const 
+        size_t hash(const key_type& v) const
         {
             size_t h = hasher::operator()(v);
             Mixer<size_t, sizeof(size_t)> mixer;
@@ -1548,7 +416,7 @@ namespace sparsehash_internal
         void inc_num_ht_copies()                { ++num_ht_copies_; }
 
         // Reset the enlarge and shrink thresholds
-        void reset_thresholds(size_type num_buckets) 
+        void reset_thresholds(size_type num_buckets)
         {
             set_enlarge_threshold(enlarge_size(num_buckets));
             set_shrink_threshold(shrink_size(num_buckets));
@@ -1559,10 +427,10 @@ namespace sparsehash_internal
         // Caller is responsible for calling reset_threshold right after
         // set_resizing_parameters.
         // ------------------------------------------------------------
-        void set_resizing_parameters(float shrink, float grow) 
+        void set_resizing_parameters(float shrink, float grow)
         {
-            assert(shrink >= 0.0f);
-            assert(grow <= 1.0f);
+            assert(shrink >= 0);
+            assert(grow <= 1);
             if (shrink > grow/2.0f)
                 shrink = grow / 2.0f;     // otherwise we thrash hashtable size
             set_shrink_factor(shrink);
@@ -1572,7 +440,7 @@ namespace sparsehash_internal
         // This is the smallest size a hashtable can be without being too crowded
         // If you like, you can give a min #buckets as well as a min #elts
         // ----------------------------------------------------------------------
-        size_type min_buckets(size_type num_elts, size_type min_buckets_wanted) 
+        size_type min_buckets(size_type num_elts, size_type min_buckets_wanted)
         {
             float enlarge = enlarge_factor();
             size_type sz = HT_MIN_BUCKETS;             // min buckets allowed
@@ -1595,7 +463,7 @@ namespace sparsehash_internal
         float enlarge_factor_;         // how full before resize
         float shrink_factor_;          // how empty before resize
         bool consider_shrink_;         // if we should try to shrink before next insert
-                                       
+
         unsigned int num_ht_copies_;   // num_ht_copies is a counter incremented every Copy/Move
     };
 
@@ -1636,7 +504,7 @@ namespace sparsehash_internal
 // T           The value of the array: the type of   --
 //             object that is stored in the array.
 //
-// Alloc:      Allocator to use to allocate memory.  libc_allocator_with_realloc
+// Alloc:      Allocator to use to allocate memory.
 //
 // --- Model of
 // Random Access Container
@@ -1669,94 +537,6 @@ namespace sparsehash_internal
 // the table changes (ie resize() or clear() is used).
 
 
-// ---------------------------------------------------------------------------
-//                       type_traits we need
-// ---------------------------------------------------------------------------
-template<class T, T v>
-struct integral_constant { static const T value = v; };
-
-template <class T, T v> const T integral_constant<T, v>::value;
-
-typedef integral_constant<bool, true>  true_type;
-typedef integral_constant<bool, false> false_type;
-
-template<typename T, typename U> struct is_same : public false_type { };
-template<typename T> struct is_same<T, T> : public true_type { };
-
-template<typename T> struct remove_const { typedef T type; };
-template<typename T> struct remove_const<T const> { typedef T type; };
-
-template<typename T> struct remove_volatile { typedef T type; };
-template<typename T> struct remove_volatile<T volatile> { typedef T type; };
-
-template<typename T> struct remove_cv {
-    typedef typename remove_const<typename remove_volatile<T>::type>::type type;
-};
-
-// ---------------- is_integral ----------------------------------------
-template <class T> struct is_integral;
-template <class T> struct is_integral         : false_type { };
-template<> struct is_integral<bool>           : true_type { };
-template<> struct is_integral<char>           : true_type { };
-template<> struct is_integral<unsigned char>  : true_type { };
-template<> struct is_integral<signed char>    : true_type { };
-template<> struct is_integral<short>          : true_type { };
-template<> struct is_integral<unsigned short> : true_type { };
-template<> struct is_integral<int>            : true_type { };
-template<> struct is_integral<unsigned int>   : true_type { };
-template<> struct is_integral<long>           : true_type { };
-template<> struct is_integral<unsigned long>  : true_type { };
-#ifdef SPP_HAS_LONG_LONG
-    template<> struct is_integral<long long>  : true_type { };
-    template<> struct is_integral<unsigned long long> : true_type { };
-#endif
-template <class T> struct is_integral<const T>          : is_integral<T> { };
-template <class T> struct is_integral<volatile T>       : is_integral<T> { };
-template <class T> struct is_integral<const volatile T> : is_integral<T> { };
-
-// ---------------- is_floating_point ----------------------------------------
-template <class T> struct is_floating_point;
-template <class T> struct is_floating_point      : false_type { };
-template<> struct is_floating_point<float>       : true_type { };
-template<> struct is_floating_point<double>      : true_type { };
-template<> struct is_floating_point<long double> : true_type { };
-template <class T> struct is_floating_point<const T> :        is_floating_point<T> { };
-template <class T> struct is_floating_point<volatile T>       : is_floating_point<T> { };
-template <class T> struct is_floating_point<const volatile T> : is_floating_point<T> { };
-
-//  ---------------- is_pointer ----------------------------------------
-template <class T> struct is_pointer;
-template <class T> struct is_pointer     : false_type { };
-template <class T> struct is_pointer<T*> : true_type { };
-template <class T> struct is_pointer<const T>          : is_pointer<T> { };
-template <class T> struct is_pointer<volatile T>       : is_pointer<T> { };
-template <class T> struct is_pointer<const volatile T> : is_pointer<T> { };
-
-//  ---------------- is_reference ----------------------------------------
-template <class T> struct is_reference;
-template<typename T> struct is_reference     : false_type {};
-template<typename T> struct is_reference<T&> : true_type {};
-
-//  ---------------- is_relocatable ----------------------------------------
-// relocatable values can be moved around in memory using memcpy and remain 
-// correct. Most types are relocatable, an example of a type who is not would 
-// be a struct which contains a pointer to a buffer inside itself - this is the 
-// case for std::string in gcc 5.
-// ------------------------------------------------------------------------
-template <class T> struct is_relocatable;
-template <class T> struct is_relocatable : 
-     integral_constant<bool, (is_integral<T>::value || is_floating_point<T>::value)> 
-{ };
-
-template<int S, int H> struct is_relocatable<HashObject<S, H> > : true_type { };
-
-template <class T> struct is_relocatable<const T>          : is_relocatable<T> { };
-template <class T> struct is_relocatable<volatile T>       : is_relocatable<T> { };
-template <class T> struct is_relocatable<const volatile T> : is_relocatable<T> { };
-template <class A, int N> struct is_relocatable<A[N]>      : is_relocatable<A> { };
-template <class T, class U> struct is_relocatable<std::pair<T, U> > : 
-     integral_constant<bool, (is_relocatable<T>::value && is_relocatable<U>::value)> 
-{ };
 
 // ---------------------------------------------------------------------------
 // Our iterator is as simple as iterators can be: basically it's just
@@ -1791,7 +571,7 @@ template <class T, class U> struct is_relocatable<std::pair<T, U> > :
 // ---------------------------------------------------------------------------
 // ---------------------------------------------------------------------------
 template <class tabletype>
-class table_iterator 
+class table_iterator
 {
 public:
     typedef table_iterator iterator;
@@ -1801,12 +581,12 @@ public:
     typedef typename tabletype::difference_type  difference_type;
     typedef typename tabletype::size_type        size_type;
 
-    explicit table_iterator(tabletype *tbl = 0, size_type p = 0) : 
-        table(tbl), pos(p) 
+    explicit table_iterator(tabletype *tbl = 0, size_type p = 0) :
+        table(tbl), pos(p)
     { }
 
     // Helper function to assert things are ok; eg pos is still in range
-    void check() const 
+    void check() const
     {
         assert(table);
         assert(pos <= table->size());
@@ -1818,38 +598,39 @@ public:
     iterator& operator-=(size_type t) { pos -= t; check(); return *this; }
     iterator& operator++()            { ++pos; check(); return *this; }
     iterator& operator--()            { --pos; check(); return *this; }
-    iterator operator++(int)         
+    iterator operator++(int)
     {
         iterator tmp(*this);     // for x++
         ++pos; check(); return tmp;
     }
 
-    iterator operator--(int)          
+    iterator operator--(int)
     {
         iterator tmp(*this);     // for x--
         --pos; check(); return tmp;
     }
 
-    iterator operator+(difference_type i) const  
+    iterator operator+(difference_type i) const
     {
         iterator tmp(*this);
-        tmp += i; return tmp; 
+        tmp += i; return tmp;
     }
 
-    iterator operator-(difference_type i) const  
+    iterator operator-(difference_type i) const
     {
         iterator tmp(*this);
         tmp -= i; return tmp;
     }
 
-    difference_type operator-(iterator it) const 
-    {      // for "x = it2 - it"
+    difference_type operator-(iterator it) const
+    {
+        // for "x = it2 - it"
         assert(table == it.table);
         return pos - it.pos;
     }
 
     // Comparisons.
-    bool operator==(const iterator& it) const 
+    bool operator==(const iterator& it) const
     {
         return table == it.table && pos == it.pos;
     }
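
Because the iterator above is nothing more than a table pointer plus an index, every arithmetic operator reduces to integer arithmetic on pos. A minimal standalone sketch of the same pattern over a plain array (hypothetical IndexIter, not a sparsepp type):

#include <cassert>
#include <cstddef>

struct IndexIter                              // analogous to table_iterator: container + position
{
    const int   *base;
    std::size_t  pos;

    const int &operator*() const              { return base[pos]; }
    IndexIter &operator+=(std::ptrdiff_t n)   { pos += n; return *this; }
    std::ptrdiff_t operator-(const IndexIter &o) const
    {
        assert(base == o.base);               // only meaningful within one container
        return (std::ptrdiff_t)pos - (std::ptrdiff_t)o.pos;
    }
};

int main()
{
    int data[4] = { 5, 6, 7, 8 };
    IndexIter a = { data, 0 }, b = { data, 0 };
    b += 3;
    assert(*b == 8 && (b - a) == 3);
    return 0;
}
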
@@ -1873,7 +654,7 @@ public:
 // ---------------------------------------------------------------------------
 // ---------------------------------------------------------------------------
 template <class tabletype>
-class const_table_iterator 
+class const_table_iterator
 {
 public:
     typedef table_iterator<tabletype> iterator;
@@ -1907,7 +688,7 @@ public:
     pointer operator->() const        { return &(operator*()); }
 
     // Helper function to assert things are ok; eg pos is still in range
-    void check() const 
+    void check() const
     {
         assert(table);
         assert(pos <= table->size());
@@ -1919,24 +700,33 @@ public:
     const_iterator& operator-=(size_type t) { pos -= t; check(); return *this; }
     const_iterator& operator++()            { ++pos; check(); return *this; }
     const_iterator& operator--()            { --pos; check(); return *this; }
-    const_iterator operator++(int)          { const_iterator tmp(*this); // for x++
-        ++pos; check(); return tmp; }
-    const_iterator operator--(int)          { const_iterator tmp(*this); // for x--
-        --pos; check(); return tmp; }
-    const_iterator operator+(difference_type i) const  
+    const_iterator operator++(int)
+    {
+        const_iterator tmp(*this); // for x++
+        ++pos; check();
+        return tmp;
+    }
+    const_iterator operator--(int)
+    {
+        const_iterator tmp(*this); // for x--
+        --pos; check();
+        return tmp;
+    }
+    const_iterator operator+(difference_type i) const
     {
         const_iterator tmp(*this);
         tmp += i;
-        return tmp; 
+        return tmp;
     }
-    const_iterator operator-(difference_type i) const 
+    const_iterator operator-(difference_type i) const
     {
         const_iterator tmp(*this);
-        tmp -= i; 
-        return tmp; 
+        tmp -= i;
+        return tmp;
     }
     difference_type operator-(const_iterator it) const
-    {   // for "x = it2 - it"
+    {
+        // for "x = it2 - it"
         assert(table == it.table);
         return pos - it.pos;
     }
@@ -1951,7 +741,7 @@ public:
         return table == it.table && pos == it.pos;
     }
 
-    bool operator<(const const_iterator& it) const 
+    bool operator<(const const_iterator& it) const
     {
         assert(table == it.table);              // life is bad bad bad otherwise
         return pos < it.pos;
@@ -1992,30 +782,25 @@ class Two_d_iterator : public std::iterator<iter_type, T>
 {
 public:
     typedef Two_d_iterator iterator;
-
-    // T can be std::pair<K, V>, but we need to return std::pair<const K, V>
-    // ---------------------------------------------------------------------
-    typedef typename spp_::cvt<T>::type value_type;
-    typedef value_type&                reference;
-    typedef value_type*                pointer;
+    typedef T              value_type;
 
     explicit Two_d_iterator(row_it curr) : row_current(curr), col_current(0)
     {
-        if (row_current && !row_current->is_marked()) 
+        if (row_current && !row_current->is_marked())
         {
             col_current = row_current->ne_begin();
             advance_past_end();                 // in case cur->begin() == cur->end()
         }
     }
 
-    explicit Two_d_iterator(row_it curr, col_it col) : row_current(curr), col_current(col) 
+    explicit Two_d_iterator(row_it curr, col_it col) : row_current(curr), col_current(col)
     {
         assert(col);
     }
 
     // The default constructor
     Two_d_iterator() :  row_current(0), col_current(0) { }
-    
+
     // Need this explicitly so we can convert normal iterators <=> const iterators
     // not explicit on purpose
     // ---------------------------------------------------------------------------
@@ -2028,18 +813,18 @@ public:
     // The default destructor is fine; we don't define one
     // The default operator= is fine; we don't define one
 
-    reference operator*() const    { return *(col_current); }
-    pointer operator->() const     { return &(operator*()); }
+    value_type& operator*() const  { return *(col_current); }
+    value_type* operator->() const { return &(operator*()); }
 
     // Arithmetic: we just do arithmetic on pos.  We don't even need to
     // do bounds checking, since STL doesn't consider that its job.  :-)
     // NOTE: this is not amortized constant time!  What do we do about it?
     // ------------------------------------------------------------------
-    void advance_past_end() 
-    {   
+    void advance_past_end()
+    {
         // used when col_current points to end()
-        while (col_current == row_current->ne_end()) 
-        { 
+        while (col_current == row_current->ne_end())
+        {
             // end of current row
             // ------------------
             ++row_current;                                // go to beginning of next
@@ -2063,8 +848,8 @@ public:
         }
         return diff;
     }
-        
-    iterator& operator++() 
+
+    iterator& operator++()
     {
         // assert(!row_current->is_marked());               // how to ++ from there?
         ++col_current;
@@ -2072,10 +857,10 @@ public:
         return *this;
     }
 
-    iterator& operator--() 
+    iterator& operator--()
     {
         while (row_current->is_marked() ||
-               col_current == row_current->ne_begin()) 
+               col_current == row_current->ne_begin())
         {
             --row_current;
             col_current = row_current->ne_end();             // this is 1 too far
@@ -2088,7 +873,7 @@ public:
 
 
     // Comparisons.
-    bool operator==(const iterator& it) const 
+    bool operator==(const iterator& it) const
     {
         return (row_current == it.row_current &&
                 (!row_current || row_current->is_marked() || col_current == it.col_current));
@@ -2112,12 +897,12 @@ class Two_d_destructive_iterator : public Two_d_iterator<T, row_it, col_it, iter
 public:
     typedef Two_d_destructive_iterator iterator;
 
-    Two_d_destructive_iterator(Alloc &alloc, row_it curr) : 
+    Two_d_destructive_iterator(Alloc &alloc, row_it curr) :
         _alloc(alloc)
     {
         this->row_current = curr;
         this->col_current = 0;
-        if (this->row_current && !this->row_current->is_marked()) 
+        if (this->row_current && !this->row_current->is_marked())
         {
             this->col_current = this->row_current->ne_begin();
             advance_past_end();                 // in case cur->begin() == cur->end()
@@ -2128,11 +913,11 @@ public:
     // do bounds checking, since STL doesn't consider that its job.  :-)
     // NOTE: this is not amortized constant time!  What do we do about it?
     // ------------------------------------------------------------------
-    void advance_past_end() 
-    {   
+    void advance_past_end()
+    {
         // used when col_current points to end()
-        while (this->col_current == this->row_current->ne_end()) 
-        { 
+        while (this->col_current == this->row_current->ne_end())
+        {
             this->row_current->clear(_alloc, true);  // This is what differs from non-destructive iterators above
 
             // end of current row
@@ -2145,7 +930,7 @@ public:
         }
     }
 
-    iterator& operator++() 
+    iterator& operator++()
     {
         // assert(!this->row_current->is_marked());         // how to ++ from there?
         ++this->col_current;
@@ -2162,71 +947,6 @@ private:
 
 // ---------------------------------------------------------------------------
 // ---------------------------------------------------------------------------
-static const char spp_bits_in[256] = {
-    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
-    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
-};
-
-static inline uint32_t s_spp_popcount_default_lut(uint32_t i)
-{
-    uint32_t res = static_cast<uint32_t>(spp_bits_in[i & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[(i >> 8)  & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[(i >> 16) & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[i >> 24]);
-    return res;
-}
-
-static inline uint32_t s_spp_popcount_default_lut(uint64_t i)
-{
-    uint32_t res = static_cast<uint32_t>(spp_bits_in[i & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[(i >>  8)  & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[(i >> 16)  & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[(i >> 24)  & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[(i >> 32)  & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[(i >> 40)  & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[(i >> 48)  & 0xFF]);
-    res += static_cast<uint32_t>(spp_bits_in[i >> 56]);
-    return res;
-}
-
-// faster than the lookup table (LUT)
-// ----------------------------------
-static inline uint32_t s_spp_popcount_default(uint32_t i)
-{
-    i = i - ((i >> 1) & 0x55555555);
-    i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
-    return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
-}
-
-// faster than the lookup table (LUT)
-// ----------------------------------
-static inline uint32_t s_spp_popcount_default(uint64_t x)
-{
-    const uint64_t m1  = uint64_t(0x5555555555555555); // binary: 0101...
-    const uint64_t m2  = uint64_t(0x3333333333333333); // binary: 00110011..
-    const uint64_t m4  = uint64_t(0x0f0f0f0f0f0f0f0f); // binary:  4 zeros,  4 ones ...
-    const uint64_t h01 = uint64_t(0x0101010101010101); // the sum of 256 to the power of 0,1,2,3...
-
-    x -= (x >> 1) & m1;             // put count of each 2 bits into those 2 bits
-    x = (x & m2) + ((x >> 2) & m2); // put count of each 4 bits into those 4 bits 
-    x = (x + (x >> 4)) & m4;        // put count of each 8 bits into those 8 bits 
-    return (x * h01)>>56;           // returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24)+...
-}
-
 #if defined(SPP_POPCNT_CHECK)
 static inline bool spp_popcount_check()
 {
@@ -2242,7 +962,7 @@ static inline bool spp_popcount_check()
 
 static inline uint32_t spp_popcount(uint32_t i)
 {
-    static const bool s_ok = spp_popcount_check(); 
+    static const bool s_ok = spp_popcount_check();
     return s_ok ? SPP_POPCNT(i) : s_spp_popcount_default(i);
 }
 
@@ -2263,7 +983,7 @@ static inline uint32_t spp_popcount(uint32_t i)
 
 static inline uint32_t spp_popcount(uint64_t i)
 {
-    static const bool s_ok = spp_popcount_check(); 
+    static const bool s_ok = spp_popcount_check();
     return s_ok ? (uint32_t)SPP_POPCNT64(i) : s_spp_popcount_default(i);
 }
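
When the SPP_POPCNT intrinsic is unavailable (or the runtime check fails), spp_popcount falls back to s_spp_popcount_default, the classic SWAR bit count whose 32-bit formula appears in the lines removed here. A minimal standalone check of that formula against a naive bit loop (hypothetical helper names):

#include <cassert>
#include <cstdint>

// Same SWAR steps as the 32-bit s_spp_popcount_default fallback.
static uint32_t swar_popcount32(uint32_t i)
{
    i = i - ((i >> 1) & 0x55555555);                              // 2-bit partial counts
    i = (i & 0x33333333) + ((i >> 2) & 0x33333333);               // 4-bit partial counts
    return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;    // sum the bytes into the top byte
}

static uint32_t naive_popcount32(uint32_t i)
{
    uint32_t n = 0;
    for (; i; i >>= 1)
        n += i & 1u;
    return n;
}

int main()
{
    for (uint32_t v = 0; v < 100000; ++v)
        assert(swar_popcount32(v) == naive_popcount32(v));
    return 0;
}
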
 
@@ -2304,11 +1024,11 @@ static inline uint32_t spp_popcount(uint64_t i)
 // ---------------------------------------------------------------------------
 
 template <class T, class Alloc>
-class sparsegroup 
+class sparsegroup
 {
 public:
     // Basic types
-    typedef typename spp::cvt<T>::type                     value_type;
+    typedef T                                              value_type;
     typedef Alloc                                          allocator_type;
     typedef value_type&                                    reference;
     typedef const value_type&                              const_reference;
@@ -2341,18 +1061,11 @@ public:
     const_reverse_ne_iterator ne_crend() const   { return const_reverse_ne_iterator(ne_cbegin());  }
 
 private:
-    // T can be std::pair<K, V>, but we need to return std::pair<const K, V>
-    // ---------------------------------------------------------------------
-    typedef T                                              mutable_value_type;
-    typedef mutable_value_type&                            mutable_reference;
-    typedef const mutable_value_type&                      const_mutable_reference;
-    typedef mutable_value_type*                            mutable_pointer;
-    typedef const mutable_value_type*                      const_mutable_pointer;
-
-#define spp_mutable_ref(x) (*(reinterpret_cast<mutable_pointer>(&(x))))
-#define spp_const_mutable_ref(x) (*(reinterpret_cast<const_mutable_pointer>(&(x))))
-
-    typedef typename Alloc::template rebind<T>::other      value_alloc_type;
+    // T can be std::pair<const K, V>, but sometime we need to cast to a mutable type
+    // ------------------------------------------------------------------------------
+    typedef typename spp_::cvt<T>::type                    mutable_value_type;
+    typedef mutable_value_type *                           mutable_pointer;
+    typedef const mutable_value_type *                     const_mutable_pointer;
 
     bool _bmtest(size_type i) const   { return !!(_bitmap & (static_cast<group_bm_type>(1) << i)); }
     void _bmset(size_type i)          { _bitmap |= static_cast<group_bm_type>(1) << i; }
@@ -2362,11 +1075,11 @@ private:
     void _bme_set(size_type i)        { _bm_erased |= static_cast<group_bm_type>(1) << i; }
     void _bme_clear(size_type i)      { _bm_erased &= ~(static_cast<group_bm_type>(1) << i); }
 
-    bool _bmtest_strict(size_type i) const   
+    bool _bmtest_strict(size_type i) const
     { return !!((_bitmap | _bm_erased) & (static_cast<group_bm_type>(1) << i)); }
 
-    
-    static uint32_t _sizing(uint32_t n) 
+
+    static uint32_t _sizing(uint32_t n)
     {
 #if !defined(SPP_ALLOC_SZ) || (SPP_ALLOC_SZ == 0)
         // aggressive allocation first, then decreasing as sparsegroups fill up
@@ -2409,15 +1122,15 @@ private:
 #endif
     }
 
-    mutable_pointer _allocate_group(Alloc &alloc, uint32_t n /* , bool tight = false */) 
+    pointer _allocate_group(allocator_type &alloc, uint32_t n /* , bool tight = false */)
     {
         // ignore tight since we don't store num_alloc
         // num_alloc = (uint8_t)(tight ? n : _sizing(n));
 
         uint32_t num_alloc = (uint8_t)_sizing(n);
         _set_num_alloc(num_alloc);
-        mutable_pointer retval = alloc.allocate(static_cast<size_type>(num_alloc));
-        if (retval == NULL) 
+        pointer retval = alloc.allocate(static_cast<size_type>(num_alloc));
+        if (retval == NULL)
         {
             // the allocator is supposed to throw an exception if the allocation fails.
             fprintf(stderr, "sparsehash FATAL ERROR: failed to allocate %d groups\n", num_alloc);
@@ -2426,15 +1139,15 @@ private:
         return retval;
     }
 
-    void _free_group(Alloc &alloc, uint32_t num_alloc)
+    void _free_group(allocator_type &alloc, uint32_t num_alloc)
     {
-        if (_group)  
+        if (_group)
         {
             uint32_t num_buckets = _num_items();
             if (num_buckets)
             {
-                mutable_pointer end_it = _group + num_buckets;
-                for (mutable_pointer p = _group; p != end_it; ++p)
+                mutable_pointer end_it = (mutable_pointer)(_group + num_buckets);
+                for (mutable_pointer p = (mutable_pointer)_group; p != end_it; ++p)
                     p->~mutable_value_type();
             }
             alloc.deallocate(_group, (typename allocator_type::size_type)num_alloc);
@@ -2446,21 +1159,21 @@ private:
     sparsegroup &operator=(const sparsegroup& x);
 
     static size_type _pos_to_offset(group_bm_type bm, size_type pos)
-    {  
+    {
         //return (size_type)((uint32_t)~((int32_t(-1) + pos) >> 31) & spp_popcount(bm << (SPP_GROUP_SIZE - pos)));
         //return (size_type)(pos ? spp_popcount(bm << (SPP_GROUP_SIZE - pos)) : 0);
         return static_cast<size_type>(spp_popcount(bm & ((static_cast<group_bm_type>(1) << pos) - 1)));
     }
 
 public:
-    
+
     // get_iter() in sparsetable needs it
     size_type pos_to_offset(size_type pos) const
-    {  
-        return _pos_to_offset(_bitmap, pos); 
+    {
+        return _pos_to_offset(_bitmap, pos);
     }
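
pos_to_offset maps a logical position inside a group to the index of its value in the packed _group array: it is just the popcount of the bitmap bits strictly below pos. A small standalone sketch with concrete values (hypothetical numbers, counting bits with a plain loop instead of spp_popcount):

#include <cassert>
#include <cstdint>

int main()
{
    uint32_t bm  = 0x96;                        // occupied positions 1, 2, 4 and 7 (0b10010110)
    uint32_t pos = 4;                           // logical position 4 inside the group
    uint32_t below = bm & ((1u << pos) - 1u);   // keep bits 0..3 only -> 0b0110
    uint32_t offset = 0;
    for (uint32_t b = below; b; b >>= 1)        // portable popcount of the masked bits
        offset += b & 1u;
    assert(offset == 2);                        // two occupied slots precede position 4,
                                                // so its value is stored at _group[2]
    return 0;
}
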
 
-#ifdef _MSC_VER 
+#ifdef _MSC_VER
 #pragma warning(push)
 #pragma warning(disable : 4146)
 #endif
@@ -2471,9 +1184,9 @@ public:
     // of an ne_iterator in the table.  Bit-twiddling from
     // http://hackersdelight.org/basics.pdf
     // -----------------------------------------------------------------
-    static size_type offset_to_pos(group_bm_type bm, size_type offset) 
+    static size_type offset_to_pos(group_bm_type bm, size_type offset)
     {
-        for (; offset > 0; offset--) 
+        for (; offset > 0; offset--)
             bm &= (bm-1);  // remove right-most set bit
 
         // Clear all bits to the left of the rightmost bit (the &),
@@ -2484,11 +1197,11 @@ public:
         return  static_cast<size_type>(spp_popcount(bm));
     }
 
-#ifdef _MSC_VER 
+#ifdef _MSC_VER
 #pragma warning(pop)
 #endif
 
-    size_type offset_to_pos(size_type offset) const 
+    size_type offset_to_pos(size_type offset) const
     {
         return offset_to_pos(_bitmap, offset);
     }
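
offset_to_pos is the inverse mapping: drop offset low-order set bits, isolate the next remaining set bit, and its position falls out as a popcount of the ones below it. A small standalone sketch with concrete values (hypothetical numbers, again counting bits with a plain loop):

#include <cassert>
#include <cstdint>

int main()
{
    uint32_t bm     = 0x96;              // occupied positions 1, 2, 4 and 7
    uint32_t offset = 2;                 // ask for the position of the 3rd occupied slot
    for (; offset > 0; offset--)
        bm &= (bm - 1);                  // drop the right-most set bit -> 0b10010000
    bm = (bm & (0u - bm)) - 1;           // ones strictly below the remaining right-most bit -> 0b1111
    uint32_t pos = 0;
    for (uint32_t b = bm; b; b >>= 1)    // popcount of that mask is the position
        pos += b & 1u;
    assert(pos == 4);                    // the 3rd occupied slot lives at position 4
    return 0;
}
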
@@ -2499,22 +1212,22 @@ public:
         _group(0), _bitmap(0), _bm_erased(0)
     {
         _set_num_items(0);
-        _set_num_alloc(0);        
+        _set_num_alloc(0);
     }
 
-    sparsegroup(const sparsegroup& x) : 
+    sparsegroup(const sparsegroup& x) :
         _group(0), _bitmap(x._bitmap), _bm_erased(x._bm_erased)
     {
         _set_num_items(0);
-        _set_num_alloc(0);  
+        _set_num_alloc(0);
          assert(_group == 0); if (_group) exit(1);
     }
 
-    sparsegroup(const sparsegroup& x, allocator_type& a) : 
+    sparsegroup(const sparsegroup& x, allocator_type& a) :
         _group(0), _bitmap(x._bitmap), _bm_erased(x._bm_erased)
     {
         _set_num_items(0);
-        _set_num_alloc(0);  
+        _set_num_alloc(0);
 
         uint32_t num_items = x._num_items();
         if (num_items)
@@ -2530,7 +1243,7 @@ public:
     void destruct(allocator_type& a) { _free_group(a, _num_alloc()); }
 
     // Many STL algorithms use swap instead of copy constructors
-    void swap(sparsegroup& x) 
+    void swap(sparsegroup& x)
     {
         using std::swap;
 
@@ -2539,12 +1252,12 @@ public:
         swap(_bm_erased, x._bm_erased);
 #ifdef SPP_STORE_NUM_ITEMS
         swap(_num_buckets,   x._num_buckets);
-        swap(_num_allocated, x._num_allocated);        
+        swap(_num_allocated, x._num_allocated);
 #endif
     }
 
     // It's always nice to be able to clear a table without deallocating it
-    void clear(Alloc &alloc, bool erased) 
+    void clear(allocator_type &alloc, bool erased)
     {
         _free_group(alloc, _num_alloc());
         _bitmap = 0;
@@ -2573,57 +1286,85 @@ public:
         return (reference)_group[pos_to_offset(i)];
     }
 
-    typedef std::pair<mutable_pointer, bool> SetResult;
+    typedef std::pair<pointer, bool> SetResult;
 
 private:
-    typedef spp_::integral_constant<bool,
-                                    (spp_::is_relocatable<value_type>::value &&
-                                     spp_::is_same<allocator_type,
-                                                   spp_::libc_allocator_with_realloc<mutable_value_type> >::value)>
-            realloc_and_memmove_ok; 
+    //typedef spp_::integral_constant<bool, spp_::is_relocatable<value_type>::value> check_relocatable;
+    typedef spp_::true_type  realloc_ok_type;
+    typedef spp_::false_type realloc_not_ok_type;
+
+    //typedef spp_::zero_type  libc_reloc_type;
+    //typedef spp_::one_type   spp_reloc_type;
+    //typedef spp_::two_type   spp_not_reloc_type;
+    //typedef spp_::three_type generic_alloc_type;
+
+#if 1
+    typedef typename if_<((spp_::is_same<allocator_type, libc_allocator<value_type> >::value ||
+                           spp_::is_same<allocator_type,  spp_allocator<value_type> >::value) &&
+                          spp_::is_relocatable<value_type>::value), realloc_ok_type, realloc_not_ok_type>::type
+             check_alloc_type;
+#else
+    typedef typename if_<spp_::is_same<allocator_type, spp_allocator<value_type> >::value,
+                         typename if_<spp_::is_relocatable<value_type>::value, spp_reloc_type, spp_not_reloc_type>::type,
+                         typename if_<(spp_::is_same<allocator_type, libc_allocator<value_type> >::value &&
+                                       spp_::is_relocatable<value_type>::value), libc_reloc_type, generic_alloc_type>::type >::type 
+        check_alloc_type;
+#endif
+
+
+    //typedef if_<spp_::is_same<allocator_type, libc_allocator<value_type> >::value,
+    //            libc_alloc_type,
+    //            if_<spp_::is_same<allocator_type, spp_allocator<value_type> >::value,
+    //                spp_alloc_type, user_alloc_type> > check_alloc_type;
+
+    //typedef spp_::integral_constant<bool,
+    //            (spp_::is_relocatable<value_type>::value &&
+    //             (spp_::is_same<allocator_type, spp_allocator<value_type> >::value ||
+    //              spp_::is_same<allocator_type, libc_allocator<value_type> >::value)) >
+    //        realloc_and_memmove_ok;
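
check_alloc_type is a compile-time tag: it resolves to realloc_ok_type only when value_type is relocatable and the allocator is one of the two sparsepp allocators, and the tag then selects which _set_aux / _group_erase_aux overload runs. A minimal standalone sketch of this tag-dispatch pattern, with self-contained stand-ins for if_, true_type and false_type (hypothetical names and simplified logic, not the sparsepp definitions):

#include <cstdio>
#include <cstring>

struct true_type  { };
struct false_type { };

template <bool B, class T, class F> struct if_              { typedef T type; };
template <class T, class F>         struct if_<false, T, F> { typedef F type; };

void shift_open_slot(int *buf, unsigned n, unsigned offset, true_type)
{
    // relocatable case: a raw byte move is enough to open the slot
    std::memmove(buf + offset + 1, buf + offset, (n - offset) * sizeof(*buf));
}

void shift_open_slot(int *buf, unsigned n, unsigned offset, false_type)
{
    // non-relocatable case: shift element by element instead
    for (unsigned i = n; i > offset; --i)
        buf[i] = buf[i - 1];
}

int main()
{
    int buf[5] = { 1, 2, 4, 5, 0 };

    // compile-time choice, in the spirit of check_alloc_type above
    typedef if_<true, true_type, false_type>::type chosen;

    shift_open_slot(buf, 4, 2, chosen());   // open a slot at index 2
    buf[2] = 3;
    for (int i = 0; i < 5; ++i)
        std::printf("%d ", buf[i]);         // prints: 1 2 3 4 5
    std::printf("\n");
    return 0;
}
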
 
     // ------------------------- memory at *p is uninitialized => need to construct
     void _init_val(mutable_value_type *p, reference val)
     {
 #if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
-        ::new (p) mutable_value_type(std::move(val));
+        ::new (p) value_type(std::move(val));
 #else
-        ::new (p) mutable_value_type(val);
+        ::new (p) value_type(val);
 #endif
     }
 
     // ------------------------- memory at *p is uninitialized => need to construct
     void _init_val(mutable_value_type *p, const_reference val)
     {
-        ::new (p) mutable_value_type(val);
+        ::new (p) value_type(val);
     }
 
     // ------------------------------------------------ memory at *p is initialized
-    void _set_val(mutable_value_type *p, reference val)
+    void _set_val(value_type *p, reference val)
     {
 #if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
-        *p = std::move(val);
+        *(mutable_pointer)p = std::move(val);
 #else
         using std::swap;
-        swap(*p, spp_mutable_ref(val)); 
+        swap(*(mutable_pointer)p, *(mutable_pointer)&val);
 #endif
     }
 
     // ------------------------------------------------ memory at *p is initialized
-    void _set_val(mutable_value_type *p, const_reference val)
+    void _set_val(value_type *p, const_reference val)
     {
-        *p = spp_const_mutable_ref(val);
+        *(mutable_pointer)p = *(const_mutable_pointer)&val;
     }
 
-    // Our default allocator - try to merge memory buffers
-    // right now it uses Google's traits, but we should use something like folly::IsRelocatable
-    // return true if the slot was constructed (i.e. contains a valid mutable_value_type
+    // Create space at _group[offset], assuming value_type is relocatable, and the
+    // allocator_type is the spp allocator.
+    // return true if the slot was constructed (i.e. contains a valid value_type)
     // ---------------------------------------------------------------------------------
     template <class Val>
-    void _set_aux(Alloc &alloc, size_type offset, Val &val, spp_::true_type) 
+    void _set_aux(allocator_type &alloc, size_type offset, Val &val, realloc_ok_type)
     {
         //static int x=0;  if (++x < 10) printf("x\n"); // check we are getting here
-        
+
         uint32_t  num_items = _num_items();
         uint32_t  num_alloc = _sizing(num_items);
 
@@ -2637,15 +1378,15 @@ private:
         for (uint32_t i = num_items; i > offset; --i)
             memcpy(_group + i, _group + i-1, sizeof(*_group));
 
-        _init_val(_group + offset, val);
+        _init_val((mutable_pointer)(_group + offset), val);
     }
 
-    // Create space at _group[offset], without special assumptions about value_type
-    // and allocator_type, with a default value
-    // return true if the slot was constructed (i.e. contains a valid mutable_value_type
+    // Create space at _group[offset], assuming value_type is *not* relocatable, and the
+    // allocator_type is the spp allocator.
+    // return true if the slot was constructed (i.e. contains a valid value_type)
     // ---------------------------------------------------------------------------------
     template <class Val>
-    void _set_aux(Alloc &alloc, size_type offset, Val &val, spp_::false_type) 
+    void _set_aux(allocator_type &alloc, size_type offset, Val &val, realloc_not_ok_type)
     {
         uint32_t  num_items = _num_items();
         uint32_t  num_alloc = _sizing(num_items);
@@ -2654,33 +1395,35 @@ private:
         if (num_items < num_alloc)
         {
             // create new object at end and rotate it to position
-            _init_val(&_group[num_items], val);
-            std::rotate(_group + offset, _group + num_items, _group + num_items + 1);
+            _init_val((mutable_pointer)&_group[num_items], val);
+            std::rotate((mutable_pointer)(_group + offset),
+                        (mutable_pointer)(_group + num_items),
+                        (mutable_pointer)(_group + num_items + 1));
             return;
         }
 
         // This is valid because 0 <= offset <= num_items
-        mutable_pointer p = _allocate_group(alloc, _sizing(num_items + 1));
+        pointer p = _allocate_group(alloc, _sizing(num_items + 1));
         if (offset)
-            std::uninitialized_copy(MK_MOVE_IT(_group), 
-                                    MK_MOVE_IT(_group + offset),
-                                    p);
+            std::uninitialized_copy(MK_MOVE_IT((mutable_pointer)_group),
+                                    MK_MOVE_IT((mutable_pointer)(_group + offset)),
+                                    (mutable_pointer)p);
         if (num_items > offset)
-            std::uninitialized_copy(MK_MOVE_IT(_group + offset),
-                                    MK_MOVE_IT(_group + num_items),
-                                    p + offset + 1);
-        _init_val(p + offset, val);
+            std::uninitialized_copy(MK_MOVE_IT((mutable_pointer)(_group + offset)),
+                                    MK_MOVE_IT((mutable_pointer)(_group + num_items)),
+                                    (mutable_pointer)(p + offset + 1));
+        _init_val((mutable_pointer)(p + offset), val);
         _free_group(alloc, num_alloc);
         _group = p;
     }
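
When the group still has spare capacity, the branch above constructs the new value in the first free slot past the occupied range and then rotates it down to its offset, avoiding a reallocation. A minimal standalone sketch of the same construct-at-end-then-rotate idea on a std::vector (hypothetical values):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
    std::vector<int> v;
    v.reserve(8);                    // spare capacity, like num_items < num_alloc
    v.push_back(10);
    v.push_back(20);
    v.push_back(40);
    std::ptrdiff_t offset = 2;       // the new element belongs at index 2
    v.push_back(30);                 // construct it at the end of the occupied range
    std::rotate(v.begin() + offset, v.end() - 1, v.end());  // rotate it into place
    assert(v[2] == 30 && v[3] == 40);
    return 0;
}
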
 
     // ----------------------------------------------------------------------------------
     template <class Val>
-    void _set(Alloc &alloc, size_type i, size_type offset, Val &val)
+    void _set(allocator_type &alloc, size_type i, size_type offset, Val &val)
     {
-        if (!_bmtest(i)) 
+        if (!_bmtest(i))
         {
-            _set_aux(alloc, offset, val, realloc_and_memmove_ok());
+            _set_aux(alloc, offset, val, check_alloc_type());
             _incr_num_items();
             _bmset(i);
         }
@@ -2693,15 +1436,15 @@ public:
     // This returns the pointer to the inserted item
     // ---------------------------------------------
     template <class Val>
-    pointer set(Alloc &alloc, size_type i, Val &val)
+    pointer set(allocator_type &alloc, size_type i, Val &val)
     {
         _bme_clear(i); // in case this was an "erased" location
 
-        size_type offset = pos_to_offset(i);  
+        size_type offset = pos_to_offset(i);
         _set(alloc, i, offset, val);            // may change _group pointer
         return (pointer)(_group + offset);
     }
-    
+
     // We let you see if a bucket is non-empty without retrieving it
     // -------------------------------------------------------------
     bool test(size_type i) const
@@ -2711,17 +1454,16 @@ public:
 
     // also tests for erased values
     // ----------------------------
-    bool test_strict(size_type i) const 
+    bool test_strict(size_type i) const
     {
         return _bmtest_strict(i);
     }
 
 private:
-    // Shrink the array, assuming value_type has trivial copy
-    // constructor and destructor, and the allocator_type is the default
-    // libc_allocator_with_alloc. 
-    // -----------------------------------------------------------------------
-    void _group_erase_aux(Alloc &alloc, size_type offset, spp_::true_type) 
+    // Shrink the array, assuming value_type is relocatable, and the 
+    // allocator_type is the libc allocator (supporting reallocate).
+    // -------------------------------------------------------------
+    void _group_erase_aux(allocator_type &alloc, size_type offset, realloc_ok_type)
     {
         // static int x=0;  if (++x < 10) printf("Y\n"); // check we are getting here
         uint32_t  num_items = _num_items();
@@ -2735,11 +1477,11 @@ private:
             return;
         }
 
-        _group[offset].~mutable_value_type();
+        _group[offset].~value_type();
 
         for (size_type i = offset; i < num_items - 1; ++i)
             memcpy(_group + i, _group + i + 1, sizeof(*_group));
-        
+
         if (_sizing(num_items - 1) != num_alloc)
         {
             num_alloc = _sizing(num_items - 1);
@@ -2752,25 +1494,25 @@ private:
     // Shrink the array, without any special assumptions about value_type and
     // allocator_type.
     // --------------------------------------------------------------------------
-    void _group_erase_aux(Alloc &alloc, size_type offset, spp_::false_type) 
+    void _group_erase_aux(allocator_type &alloc, size_type offset, realloc_not_ok_type)
     {
         uint32_t  num_items = _num_items();
         uint32_t  num_alloc   = _sizing(num_items);
 
         if (_sizing(num_items - 1) != num_alloc)
         {
-            mutable_pointer p = 0;
+            pointer p = 0;
             if (num_items > 1)
             {
                 p = _allocate_group(alloc, num_items - 1);
                 if (offset)
-                    std::uninitialized_copy(MK_MOVE_IT(_group), 
-                                            MK_MOVE_IT(_group + offset), 
-                                            p);
+                    std::uninitialized_copy(MK_MOVE_IT((mutable_pointer)(_group)),
+                                            MK_MOVE_IT((mutable_pointer)(_group + offset)),
+                                            (mutable_pointer)(p));
                 if (static_cast<uint32_t>(offset + 1) < num_items)
-                    std::uninitialized_copy(MK_MOVE_IT(_group + offset + 1), 
-                                            MK_MOVE_IT(_group + num_items),
-                                            p + offset);
+                    std::uninitialized_copy(MK_MOVE_IT((mutable_pointer)(_group + offset + 1)),
+                                            MK_MOVE_IT((mutable_pointer)(_group + num_items)),
+                                            (mutable_pointer)(p + offset));
             }
             else
             {
@@ -2782,19 +1524,21 @@ private:
         }
         else
         {
-            std::rotate(_group + offset, _group + offset + 1, _group + num_items);
-            _group[num_items - 1].~mutable_value_type();
+            std::rotate((mutable_pointer)(_group + offset),
+                        (mutable_pointer)(_group + offset + 1),
+                        (mutable_pointer)(_group + num_items));
+            ((mutable_pointer)(_group + num_items - 1))->~mutable_value_type();
         }
     }
 
-    void _group_erase(Alloc &alloc, size_type offset)
+    void _group_erase(allocator_type &alloc, size_type offset)
     {
-        _group_erase_aux(alloc, offset, realloc_and_memmove_ok());
+        _group_erase_aux(alloc, offset, check_alloc_type());
     }
 
 public:
     template <class twod_iter>
-    bool erase_ne(Alloc &alloc, twod_iter &it)
+    bool erase_ne(allocator_type &alloc, twod_iter &it)
     {
         assert(_group && it.col_current != ne_end());
         size_type offset = (size_type)(it.col_current - ne_begin());
@@ -2812,7 +1556,7 @@ public:
             _bmclear(pos);
 
             // in case _group_erase reallocated the buffer
-            it.col_current = reinterpret_cast<pointer>(_group) + offset; 
+            it.col_current = reinterpret_cast<pointer>(_group) + offset;
         }
         _bme_set(pos);  // remember that this position has been erased
         it.advance_past_end();
@@ -2825,16 +1569,16 @@ public:
     // TODO(austern): Make this exception safe: handle exceptions from
     // value_type's copy constructor.
     // ---------------------------------------------------------------
-    void erase(Alloc &alloc, size_type i)
+    void erase(allocator_type &alloc, size_type i)
     {
         if (_bmtest(i))
-        { 
+        {
             // trivial to erase empty bucket
             if (_num_items() == 1)
                 clear(alloc, false);
-            else 
+            else
             {
-                _group_erase(alloc, pos_to_offset(i)); 
+                _group_erase(alloc, pos_to_offset(i));
                 _decr_num_items();
                 _bmclear(i);
             }
@@ -2847,9 +1591,9 @@ public:
     // the actual array contents (which we don't know how to store),
     // just the bitmap and size.  Meant to be used with table I/O.
     // --------------------------------------------------------------
-    template <typename OUTPUT> bool write_metadata(OUTPUT *fp) const 
+    template <typename OUTPUT> bool write_metadata(OUTPUT *fp) const
     {
-        // warning: we write 4 or 8 bytes for the bitmap, instead of 6 in the 
+        // warning: we write 4 or 8 bytes for the bitmap, instead of 6 in the
         //          original google sparsehash
         // ------------------------------------------------------------------
         if (!sparsehash_internal::write_data(fp, &_bitmap, sizeof(_bitmap)))
@@ -2859,7 +1603,7 @@ public:
     }
 
     // Reading destroys the old group contents!  Returns true if all was ok.
-    template <typename INPUT> bool read_metadata(Alloc &alloc, INPUT *fp) 
+    template <typename INPUT> bool read_metadata(allocator_type &alloc, INPUT *fp)
     {
         clear(alloc, true);
 
@@ -2877,7 +1621,7 @@ public:
     // Again, only meaningful if value_type is a POD.
     template <typename INPUT> bool read_nopointer_data(INPUT *fp)
     {
-        for (ne_iterator it = ne_begin(); it != ne_end(); ++it) 
+        for (ne_iterator it = ne_begin(); it != ne_end(); ++it)
             if (!sparsehash_internal::read_data(fp, &(*it), sizeof(*it)))
                 return false;
         return true;
@@ -2889,7 +1633,7 @@ public:
     // ------------------------------------------------------------
     template <typename OUTPUT> bool write_nopointer_data(OUTPUT *fp) const
     {
-        for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it) 
+        for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it)
             if (!sparsehash_internal::write_data(fp, &(*it), sizeof(*it)))
                 return false;
         return true;
@@ -2905,14 +1649,14 @@ public:
     bool operator==(const sparsegroup& x) const
     {
         return (_bitmap == x._bitmap &&
-                _bm_erased == x._bm_erased && 
-                std::equal(_group, _group + _num_items(), x._group));  
+                _bm_erased == x._bm_erased &&
+                std::equal(_group, _group + _num_items(), x._group));
     }
 
-    bool operator<(const sparsegroup& x) const 
+    bool operator<(const sparsegroup& x) const
     {
         // also from <algorithm>
-        return std::lexicographical_compare(_group, _group + _num_items(), 
+        return std::lexicographical_compare(_group, _group + _num_items(),
                                             x._group, x._group + x._num_items());
     }
 
@@ -2921,13 +1665,13 @@ public:
     bool operator> (const sparsegroup& x) const { return x < *this; }
     bool operator>=(const sparsegroup& x) const { return !(*this < x); }
 
-    void mark()            { _group = (mutable_value_type *)static_cast<uintptr_t>(-1); }
-    bool is_marked() const { return _group == (mutable_value_type *)static_cast<uintptr_t>(-1); }
+    void mark()            { _group = (value_type *)static_cast<uintptr_t>(-1); }
+    bool is_marked() const { return _group == (value_type *)static_cast<uintptr_t>(-1); }
 
 private:
     // ---------------------------------------------------------------------------
     template <class A>
-    class alloc_impl : public A 
+    class alloc_impl : public A
     {
     public:
         typedef typename A::pointer pointer;
@@ -2937,35 +1681,62 @@ private:
         explicit alloc_impl(const A& a) : A(a) { }
 
         // realloc_or_die should only be used when using the default
-        // allocator (libc_allocator_with_realloc).
-        pointer realloc_or_die(pointer /*ptr*/, size_type /*n*/) 
+        // allocator (spp::spp_allocator).
+        pointer realloc_or_die(pointer /*ptr*/, size_type /*n*/)
         {
             fprintf(stderr, "realloc_or_die is only supported for "
-                    "libc_allocator_with_realloc\n");
+                    "spp::spp_allocator\n");
             exit(1);
             return NULL;
         }
     };
 
     // A template specialization of alloc_impl for
-    // libc_allocator_with_realloc that can handle realloc_or_die.
+    // spp::libc_allocator that can handle realloc_or_die.
+    // -----------------------------------------------------------
+    template <class A>
+    class alloc_impl<spp_::libc_allocator<A> > : public spp_::libc_allocator<A>
+    {
+    public:
+        typedef typename spp_::libc_allocator<A>::pointer pointer;
+        typedef typename spp_::libc_allocator<A>::size_type size_type;
+
+        explicit alloc_impl(const spp_::libc_allocator<A>& a)
+            : spp_::libc_allocator<A>(a)
+        { }
+
+        pointer realloc_or_die(pointer ptr, size_type n)
+        {
+            pointer retval = this->reallocate(ptr, n);
+            if (retval == NULL) 
+            {
+                fprintf(stderr, "sparsehash: FATAL ERROR: failed to reallocate "
+                        "%lu elements for ptr %p", static_cast<unsigned long>(n), ptr);
+                exit(1);
+            }
+            return retval;
+        }
+    };
+
+    // A template specialization of alloc_impl for
+    // spp::spp_allocator that can handle realloc_or_die.
     // -----------------------------------------------------------
     template <class A>
-    class alloc_impl<libc_allocator_with_realloc<A> >
-        : public libc_allocator_with_realloc<A>    
+    class alloc_impl<spp_::spp_allocator<A> > : public spp_::spp_allocator<A>
     {
     public:
-        typedef typename libc_allocator_with_realloc<A>::pointer pointer;
-        typedef typename libc_allocator_with_realloc<A>::size_type size_type;
+        typedef typename spp_::spp_allocator<A>::pointer pointer;
+        typedef typename spp_::spp_allocator<A>::size_type size_type;
 
-        explicit alloc_impl(const libc_allocator_with_realloc<A>& a)
-            : libc_allocator_with_realloc<A>(a) 
+        explicit alloc_impl(const spp_::spp_allocator<A>& a)
+            : spp_::spp_allocator<A>(a)
         { }
 
         pointer realloc_or_die(pointer ptr, size_type n)
         {
             pointer retval = this->reallocate(ptr, n);
-            if (retval == NULL) {
+            if (retval == NULL) 
+            {
                 fprintf(stderr, "sparsehash: FATAL ERROR: failed to reallocate "
                         "%lu elements for ptr %p", static_cast<unsigned long>(n), ptr);
                 exit(1);
@@ -2974,6 +1745,7 @@ private:
         }
     };
 
+
 #ifdef SPP_STORE_NUM_ITEMS
     uint32_t _num_items() const           { return (uint32_t)_num_buckets; }
     void     _set_num_items(uint32_t val) { _num_buckets = static_cast<size_type>(val); }
@@ -2992,7 +1764,7 @@ private:
 
     // The actual data
     // ---------------
-    mutable_value_type * _group;                             // (small) array of T's
+    value_type *         _group;                             // (small) array of T's
     group_bm_type        _bitmap;
     group_bm_type        _bm_erased;                         // ones where items have been erased
 
@@ -3003,76 +1775,61 @@ private:
 };
 
 // ---------------------------------------------------------------------------
-// We need a global swap as well
 // ---------------------------------------------------------------------------
 template <class T, class Alloc>
-inline void swap(sparsegroup<T,Alloc> &x, sparsegroup<T,Alloc> &y) 
+class sparsetable
 {
-    x.swap(y);
-}
+public:
+    typedef T                                             value_type;
+    typedef Alloc                                         allocator_type;
+    typedef sparsegroup<value_type, allocator_type>       group_type;
 
-// ---------------------------------------------------------------------------
-// ---------------------------------------------------------------------------
-template <class T, class Alloc = libc_allocator_with_realloc<T> >
-class sparsetable 
-{
 private:
-    typedef typename Alloc::template rebind<T>::other     value_alloc_type;
-
-    typedef typename Alloc::template rebind<
-        sparsegroup<T, value_alloc_type> >::other group_alloc_type;
+    typedef typename Alloc::template rebind<group_type>::other group_alloc_type;
     typedef typename group_alloc_type::size_type          group_size_type;
 
-    typedef T                                             mutable_value_type;
-    typedef mutable_value_type*                           mutable_pointer;
-    typedef const mutable_value_type*                     const_mutable_pointer;
-
 public:
     // Basic types
     // -----------
-    typedef typename spp::cvt<T>::type                    value_type;
-    typedef Alloc                                         allocator_type;
-    typedef typename value_alloc_type::size_type          size_type;
-    typedef typename value_alloc_type::difference_type    difference_type;
+    typedef typename allocator_type::size_type            size_type;
+    typedef typename allocator_type::difference_type      difference_type;
     typedef value_type&                                   reference;
     typedef const value_type&                             const_reference;
     typedef value_type*                                   pointer;
     typedef const value_type*                             const_pointer;
 
-    typedef sparsegroup<T, value_alloc_type>              group_type;
-
     typedef group_type&                                   GroupsReference;
     typedef const group_type&                             GroupsConstReference;
 
     typedef typename group_type::ne_iterator              ColIterator;
     typedef typename group_type::const_ne_iterator        ColConstIterator;
 
-    typedef table_iterator<sparsetable<T, Alloc> >        iterator;       // defined with index
-    typedef const_table_iterator<sparsetable<T, Alloc> >  const_iterator; // defined with index
+    typedef table_iterator<sparsetable<T, allocator_type> >        iterator;       // defined with index
+    typedef const_table_iterator<sparsetable<T, allocator_type> >  const_iterator; // defined with index
     typedef std::reverse_iterator<const_iterator>         const_reverse_iterator;
     typedef std::reverse_iterator<iterator>               reverse_iterator;
 
     // These are our special iterators, that go over non-empty buckets in a
     // table.  These aren't const only because you can change non-empty bcks.
     // ----------------------------------------------------------------------
-    typedef Two_d_iterator<T, 
-                           group_type *, 
+    typedef Two_d_iterator<T,
+                           group_type *,
                            ColIterator,
                            std::bidirectional_iterator_tag> ne_iterator;
 
-    typedef Two_d_iterator<const T, 
-                           const group_type *, 
+    typedef Two_d_iterator<const T,
+                           const group_type *,
                            ColConstIterator,
                            std::bidirectional_iterator_tag> const_ne_iterator;
 
     // Another special iterator: it frees memory as it iterates (used to resize).
     // Obviously, you can only iterate over it once, which is why it's an input iterator
     // ---------------------------------------------------------------------------------
-    typedef Two_d_destructive_iterator<T, 
-                                       group_type *, 
+    typedef Two_d_destructive_iterator<T,
+                                       group_type *,
                                        ColIterator,
-                                       std::input_iterator_tag, 
-                                       allocator_type>       destructive_iterator;
+                                       std::input_iterator_tag,
+                                       allocator_type>     destructive_iterator;
 
     typedef std::reverse_iterator<ne_iterator>               reverse_ne_iterator;
     typedef std::reverse_iterator<const_ne_iterator>         const_reverse_ne_iterator;
@@ -3109,35 +1866,35 @@ public:
     const_reverse_ne_iterator ne_rend() const    { return const_reverse_ne_iterator(ne_begin()); }
     const_reverse_ne_iterator ne_crend() const   { return const_reverse_ne_iterator(ne_begin()); }
 
-    destructive_iterator destructive_begin()  
-    { 
+    destructive_iterator destructive_begin()
+    {
         return destructive_iterator(_alloc, _first_group);
     }
 
-    destructive_iterator destructive_end() 
-    { 
-        return destructive_iterator(_alloc, _last_group); 
+    destructive_iterator destructive_end()
+    {
+        return destructive_iterator(_alloc, _last_group);
     }
 
     // How to deal with the proper group
-    static group_size_type num_groups(group_size_type num)
-    {   
+    static group_size_type num_groups(size_type num)
+    {
         // how many to hold num buckets
-        return num == 0 ? (group_size_type)0 : 
+        return num == 0 ? (group_size_type)0 :
             (group_size_type)(((num-1) / SPP_GROUP_SIZE) + 1);
     }
 
-    typename group_type::size_type pos_in_group(size_type i) const 
+    typename group_type::size_type pos_in_group(size_type i) const
     {
         return static_cast<typename group_type::size_type>(i & SPP_MASK_);
     }
-    
+
     size_type group_num(size_type i) const
     {
         return (size_type)(i >> SPP_SHIFT_);
     }
 
-    GroupsReference which_group(size_type i) 
+    GroupsReference which_group(size_type i)
     {
         return _first_group[group_num(i)];
     }
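
A bucket index therefore splits into a group number (the high bits, via SPP_SHIFT_) and a position inside that group (the low bits, via SPP_MASK_), while num_groups is a ceiling division by the group size. A small standalone sketch with concrete numbers (assuming 64 buckets per group; the actual SPP_GROUP_SIZE, SPP_SHIFT_ and SPP_MASK_ depend on the build configuration):

#include <cassert>
#include <cstddef>

int main()
{
    const std::size_t GROUP_SIZE = 64;              // assumed group size for this sketch
    const std::size_t SHIFT = 6, MASK = 63;         // since 64 == 1 << 6

    std::size_t bucket = 203;
    std::size_t group  = bucket >> SHIFT;           // 203 / 64  -> group 3
    std::size_t pos    = bucket &  MASK;            // 203 % 64  -> position 11
    assert(group == 3 && pos == 11);

    // num_groups(): ceiling division, as in the function above
    std::size_t table_size = 203;
    std::size_t ngroups = table_size == 0 ? 0 : ((table_size - 1) / GROUP_SIZE) + 1;
    assert(ngroups == 4);                           // buckets 0..202 need 4 groups
    return 0;
}
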
@@ -3156,7 +1913,7 @@ public:
             last = first + sz;
         }
     }
-    
+
     void _free_group_array(group_type *&first, group_type *&last)
     {
         if (first)
@@ -3218,12 +1975,13 @@ public:
 
 public:
     // Constructors -- default, normal (when you specify size), and copy
-    explicit sparsetable(size_type sz = 0, const Alloc &alloc = Alloc()) : 
-        _first_group(0), 
+    explicit sparsetable(size_type sz = 0, const allocator_type &alloc = allocator_type()) :
+        _first_group(0),
         _last_group(0),
         _table_size(sz),
         _num_buckets(0),
-        _alloc(alloc)  // todo - copy or move allocator according to 
+        _alloc(alloc)  
+                       // todo - copy or move allocator according to
                        // http://en.cppreference.com/w/cpp/container/unordered_map/unordered_map
     {
         _allocate_groups(num_groups(sz));
@@ -3234,7 +1992,7 @@ public:
         _free_groups();
     }
 
-    sparsetable(const sparsetable &o) 
+    sparsetable(const sparsetable &o)
     {
         _init();
         _copy(o);
@@ -3255,7 +2013,7 @@ public:
         this->swap(o);
     }
 
-    sparsetable(sparsetable&& o, const Alloc &alloc)
+    sparsetable(sparsetable&& o, const allocator_type &alloc)
     {
         _init();
         this->swap(o);
@@ -3268,10 +2026,10 @@ public:
         this->swap(o);
         return *this;
     }
-#endif    
+#endif
 
     // Many STL algorithms use swap instead of copy constructors
-    void swap(sparsetable& o) 
+    void swap(sparsetable& o)
     {
         using std::swap;
 
@@ -3286,14 +2044,14 @@ public:
     }
 
     // It's always nice to be able to clear a table without deallocating it
-    void clear() 
+    void clear()
     {
         _free_groups();
         _num_buckets = 0;
         _table_size = 0;
     }
 
-    inline allocator_type get_allocator() const 
+    inline allocator_type get_allocator() const
     {
         return _alloc;
     }
@@ -3310,7 +2068,7 @@ public:
     size_type num_nonempty() const   { return _num_buckets; }
 
     // OK, we'll let you resize one of these puppies
-    void resize(size_type new_size) 
+    void resize(size_type new_size)
     {
         group_size_type sz = num_groups(new_size);
         group_size_type old_sz = (group_size_type)(_last_group - _first_group);
@@ -3333,7 +2091,7 @@ public:
             }
             else
                 std::uninitialized_fill(first + old_sz, last, group_type());
-        
+
             _free_group_array(_first_group, _last_group);
             _first_group = first;
             _last_group  = last;
@@ -3342,7 +2100,7 @@ public:
         // used only in test program
         // todo: fix if sparsetable to be used directly
         // --------------------------------------------
-        if (new_size < _table_size) 
+        if (new_size < _table_size)
         {
             // lower num_buckets, clear last group
             if (pos_in_group(new_size) > 0)     // need to clear inside last group
@@ -3358,7 +2116,7 @@ public:
 
     // We let you see if a bucket is non-empty without retrieving it
     // -------------------------------------------------------------
-    bool test(size_type i) const 
+    bool test(size_type i) const
     {
         // assert(i < _table_size);
         return which_group(i).test(pos_in_group(i));
@@ -3366,7 +2124,7 @@ public:
 
     // also tests for erased values
     // ----------------------------
-    bool test_strict(size_type i) const 
+    bool test_strict(size_type i) const
     {
         // assert(i < _table_size);
         return which_group(i).test_strict(pos_in_group(i));
@@ -3374,8 +2132,8 @@ public:
 
     friend struct GrpPos;
 
-    struct GrpPos 
-    { 
+    struct GrpPos
+    {
         typedef typename sparsetable::ne_iterator ne_iter;
         GrpPos(const sparsetable &table, size_type i) :
             grp(table.which_group(i)), pos(table.pos_in_group(i)) {}
@@ -3394,20 +2152,20 @@ public:
             ((group_type &)grp).erase(table._alloc, pos);
             --table._num_buckets;
         }
-        
+
     private:
         GrpPos* operator=(const GrpPos&);
 
-        const group_type &grp; 
-        typename group_type::size_type pos; 
+        const group_type &grp;
+        typename group_type::size_type pos;
     };
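+
+    // Illustrative note, not part of the upstream sparsepp sources: GrpPos caches
+    // the (group, position-in-group) pair of a single bucket so that the probing
+    // loops in sparse_hashtable can test and access that bucket repeatedly without
+    // recomputing which_group()/pos_in_group(), roughly:
+    //
+    //     typename Table::GrpPos gp(table, bucknum);
+    //     if (gp.test())                // is the bucket occupied?
+    //         use(gp.unsafe_get());     // use() is just a placeholder here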
 
-    bool test(iterator pos) const 
+    bool test(iterator pos) const
     {
         return which_group(pos.pos).test(pos_in_group(pos.pos));
     }
 
-    bool test(const_iterator pos) const 
+    bool test(const_iterator pos) const
     {
         return which_group(pos.pos).test(pos_in_group(pos.pos));
     }
@@ -3416,7 +2174,7 @@ public:
     // This is used by sparse_hashtable to get an element from the table
     // when we know it exists (because the caller has called test(i)).
     // -----------------------------------------------------------------
-    reference unsafe_get(size_type i) const 
+    reference unsafe_get(size_type i) const
     {
         assert(i < _table_size);
         // assert(test(i));
@@ -3424,13 +2182,13 @@ public:
     }
 
     // Needed for hashtables, gets as a ne_iterator.  Crashes for empty buckets
-    const_ne_iterator get_iter(size_type i) const 
+    const_ne_iterator get_iter(size_type i) const
     {
         //assert(test(i));    // how can a ne_iterator point to an empty bucket?
 
         size_type grp_idx = group_num(i);
 
-        return const_ne_iterator(_first_group + grp_idx, 
+        return const_ne_iterator(_first_group + grp_idx,
                                  (_first_group[grp_idx].ne_begin() +
                                   _first_group[grp_idx].pos_to_offset(pos_in_group(i))));
     }
@@ -3441,18 +2199,18 @@ public:
     }
 
     // For nonempty we can return a non-const version
-    ne_iterator get_iter(size_type i) 
+    ne_iterator get_iter(size_type i)
     {
         //assert(test(i));    // how can a nonempty_iterator point to an empty bucket?
-        
+
         size_type grp_idx = group_num(i);
 
-        return ne_iterator(_first_group + grp_idx,  
+        return ne_iterator(_first_group + grp_idx,
                            (_first_group[grp_idx].ne_begin() +
                             _first_group[grp_idx].pos_to_offset(pos_in_group(i))));
     }
 
-    ne_iterator get_iter(size_type i, ColIterator col_it) 
+    ne_iterator get_iter(size_type i, ColIterator col_it)
     {
         return ne_iterator(_first_group + group_num(i), col_it);
     }
@@ -3469,7 +2227,7 @@ public:
     // Val can be reference or const_reference
     // ---------------------------------------
     template <class Val>
-    reference set(size_type i, Val &val) 
+    reference set(size_type i, Val &val)
     {
         assert(i < _table_size);
         group_type &group = which_group(i);
@@ -3480,26 +2238,26 @@ public:
     }
 
     // used in _move_from (where we can move the old value instead of copying it)
-    void move(size_type i, reference val) 
+    void move(size_type i, reference val)
     {
         assert(i < _table_size);
         which_group(i).set(_alloc, pos_in_group(i), val);
         ++_num_buckets;
     }
 
-    // This takes the specified elements out of the table. 
+    // This takes the specified elements out of the table.
     // --------------------------------------------------
-    void erase(size_type i) 
+    void erase(size_type i)
     {
         assert(i < _table_size);
-        
+
         GroupsReference grp(which_group(i));
         typename group_type::size_type old_numbuckets = grp.num_nonempty();
         grp.erase(_alloc, pos_in_group(i));
         _num_buckets += grp.num_nonempty() - old_numbuckets;
     }
 
-    void erase(iterator pos) 
+    void erase(iterator pos)
     {
         erase(pos.pos);
     }
@@ -3548,10 +2306,11 @@ private:
     template <typename OUTPUT, typename IntType>
     static bool write_32_or_64(OUTPUT* fp, IntType value)
     {
-        if (value < 0xFFFFFFFFULL) {        // fits in 4 bytes
+        if (value < 0xFFFFFFFFULL)        // fits in 4 bytes
+        {
             if (!sparsehash_internal::write_bigendian_number(fp, value, 4))
                 return false;
-        } 
+        }
         else
         {
             if (!sparsehash_internal::write_bigendian_number(fp, 0xFFFFFFFFUL, 4))
@@ -3563,16 +2322,17 @@ private:
     }
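+
+    // Illustrative summary, not part of the upstream sparsepp sources:
+    // write_32_or_64() above and read_32_or_64() below encode a count either as a
+    // single 4-byte big-endian value (when it is smaller than 0xFFFFFFFF) or as
+    // the 4-byte marker 0xFFFFFFFF followed by the full 8-byte value, e.g.
+    //
+    //     uint64_t v = 0;
+    //     read_32_or_64(fp, &v);    // consumes 4 bytes, or 12 if the marker is hit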
 
     template <typename INPUT, typename IntType>
-    static bool read_32_or_64(INPUT* fp, IntType *value) 
-    {   // reads into value
+    static bool read_32_or_64(INPUT* fp, IntType *value)
+    {
+        // reads into value
         MagicNumberType first4 = 0;   // a convenient 32-bit unsigned type
         if (!sparsehash_internal::read_bigendian_number(fp, &first4, 4))
             return false;
 
-        if (first4 < 0xFFFFFFFFULL) 
+        if (first4 < 0xFFFFFFFFULL)
         {
             *value = first4;
-        } 
+        }
         else
         {
             if (!sparsehash_internal::read_bigendian_number(fp, value, 8))
@@ -3585,26 +2345,26 @@ public:
     // read/write_metadata() and read_write/nopointer_data() are DEPRECATED.
     // Use serialize() and unserialize(), below, for new code.
 
-    template <typename OUTPUT> 
-    bool write_metadata(OUTPUT *fp) const 
+    template <typename OUTPUT>
+    bool write_metadata(OUTPUT *fp) const
     {
         if (!write_32_or_64(fp, MAGIC_NUMBER))  return false;
         if (!write_32_or_64(fp, _table_size))  return false;
         if (!write_32_or_64(fp, _num_buckets))  return false;
 
         for (const group_type *group = _first_group; group != _last_group; ++group)
-            if (group->write_metadata(fp) == false)  
+            if (group->write_metadata(fp) == false)
                 return false;
         return true;
     }
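+
+    // Illustrative summary, not part of the upstream sparsepp sources: the
+    // metadata stream produced above is simply
+    //
+    //     MAGIC_NUMBER, _table_size, _num_buckets   // each via write_32_or_64()
+    //     per-group metadata                        // group_type::write_metadata()
+    //
+    // and read_metadata() below consumes it in exactly the same order.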
 
     // Reading destroys the old table contents!  Returns true if read ok.
-    template <typename INPUT> 
+    template <typename INPUT>
     bool read_metadata(INPUT *fp)
     {
         size_type magic_read = 0;
         if (!read_32_or_64(fp, &magic_read))  return false;
-        if (magic_read != MAGIC_NUMBER) 
+        if (magic_read != MAGIC_NUMBER)
         {
             clear();                        // just to be consistent
             return false;
@@ -3615,7 +2375,7 @@ public:
 
         resize(_table_size);                    // so the vector's sized ok
         for (group_type *group = _first_group; group != _last_group; ++group)
-            if (group->read_metadata(_alloc, fp) == false)  
+            if (group->read_metadata(_alloc, fp) == false)
                 return false;
         return true;
     }
@@ -3624,18 +2384,18 @@ public:
     // If your keys and values are simple enough, we can write them
     // to disk for you.  "simple enough" means no pointers.
     // However, we don't try to normalize endianness
-    bool write_nopointer_data(FILE *fp) const 
+    bool write_nopointer_data(FILE *fp) const
     {
-        for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it) 
-            if (!fwrite(&*it, sizeof(*it), 1, fp))  
+        for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it)
+            if (!fwrite(&*it, sizeof(*it), 1, fp))
                 return false;
         return true;
     }
 
     // When reading, we have to override the potential const-ness of *it
-    bool read_nopointer_data(FILE *fp) 
+    bool read_nopointer_data(FILE *fp)
     {
-        for (ne_iterator it = ne_begin(); it != ne_end(); ++it) 
+        for (ne_iterator it = ne_begin(); it != ne_end(); ++it)
             if (!fread(reinterpret_cast<void*>(&(*it)), sizeof(*it), 1, fp))
                 return false;
         return true;
@@ -3651,25 +2411,25 @@ public:
 
     // ValueSerializer: a functor.  operator()(OUTPUT*, const value_type&)
     template <typename ValueSerializer, typename OUTPUT>
-    bool serialize(ValueSerializer serializer, OUTPUT *fp) 
+    bool serialize(ValueSerializer serializer, OUTPUT *fp)
     {
         if (!write_metadata(fp))
             return false;
-        for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it) 
-            if (!serializer(fp, *it))  
+        for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it)
+            if (!serializer(fp, *it))
                 return false;
         return true;
     }
 
     // ValueSerializer: a functor.  operator()(INPUT*, value_type*)
     template <typename ValueSerializer, typename INPUT>
-    bool unserialize(ValueSerializer serializer, INPUT *fp) 
+    bool unserialize(ValueSerializer serializer, INPUT *fp)
     {
         clear();
         if (!read_metadata(fp))
             return false;
-        for (ne_iterator it = ne_begin(); it != ne_end(); ++it) 
-            if (!serializer(fp, &*it))  
+        for (ne_iterator it = ne_begin(); it != ne_end(); ++it)
+            if (!serializer(fp, &*it))
                 return false;
         return true;
     }
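+
+    // Illustrative sketch, not part of the upstream sparsepp sources: a minimal
+    // ValueSerializer for plain int values, matching the functor signatures
+    // documented above for serialize()/unserialize():
+    //
+    //     struct IntSerializer
+    //     {
+    //         bool operator()(FILE *fp, const int &v) const
+    //             { return fwrite(&v, sizeof(v), 1, fp) == 1; }
+    //         bool operator()(FILE *fp, int *v) const
+    //             { return fread(v, sizeof(*v), 1, fp) == 1; }
+    //     };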
@@ -3684,36 +2444,27 @@ public:
                 _first_group == x._first_group);
     }
 
-    bool operator<(const sparsetable& x) const 
+    bool operator<(const sparsetable& x) const
     {
         return std::lexicographical_compare(begin(), end(), x.begin(), x.end());
     }
     bool operator!=(const sparsetable& x) const { return !(*this == x); }
     bool operator<=(const sparsetable& x) const { return !(x < *this); }
-    bool operator>(const sparsetable& x) const { return x < *this; }
+    bool operator>(const sparsetable& x)  const { return x < *this; }
     bool operator>=(const sparsetable& x) const { return !(*this < x); }
 
 
 private:
     // The actual data
     // ---------------
-    group_type *     _first_group;        
+    group_type *     _first_group;
     group_type *     _last_group;
     size_type        _table_size;          // how many buckets they want
     size_type        _num_buckets;         // number of non-empty buckets
     group_alloc_type _group_alloc;
-    value_alloc_type _alloc;
+    allocator_type   _alloc;
 };
 
-// We need a global swap as well
-// ---------------------------------------------------------------------------
-template <class T, class Alloc>
-inline void swap(sparsetable<T,Alloc> &x, sparsetable<T,Alloc> &y) 
-{
-    x.swap(y);
-}
-
-
 //  ----------------------------------------------------------------------
 //                  S P A R S E _ H A S H T A B L E
 //  ----------------------------------------------------------------------
@@ -3745,28 +2496,24 @@ inline void swap(sparsetable<T,Alloc> &x, sparsetable<T,Alloc> &y)
 // -------------------------------------------------------------------
 template <class Value, class Key, class HashFcn,
           class ExtractKey, class SetKey, class EqualKey, class Alloc>
-class sparse_hashtable 
+class sparse_hashtable
 {
-private:
-    typedef Value                                      mutable_value_type;
-    typedef typename Alloc::template rebind<Value>::other value_alloc_type;
-
 public:
     typedef Key                                        key_type;
-    typedef typename spp::cvt<Value>::type             value_type;
+    typedef Value                                      value_type;
     typedef HashFcn                                    hasher; // user provided or spp_hash<Key>
     typedef EqualKey                                   key_equal;
     typedef Alloc                                      allocator_type;
 
-    typedef typename value_alloc_type::size_type       size_type;
-    typedef typename value_alloc_type::difference_type difference_type;
+    typedef typename allocator_type::size_type         size_type;
+    typedef typename allocator_type::difference_type   difference_type;
     typedef value_type&                                reference;
     typedef const value_type&                          const_reference;
     typedef value_type*                                pointer;
     typedef const value_type*                          const_pointer;
-    
+
     // Table is the main storage class.
-    typedef sparsetable<mutable_value_type, value_alloc_type> Table;
+    typedef sparsetable<value_type, allocator_type>   Table;
     typedef typename Table::ne_iterator               ne_it;
     typedef typename Table::const_ne_iterator         cne_it;
     typedef typename Table::destructive_iterator      dest_it;
@@ -3819,12 +2566,12 @@ public:
     // bucket n to be the n-th element of the sparsetable, if it's occupied,
     // or some empty element, otherwise.
     // ---------------------------------------------------------------------
-    local_iterator begin(size_type i) 
+    local_iterator begin(size_type i)
     {
         return _mk_iterator(table.test(i) ? table.get_iter(i) : table.ne_end());
     }
 
-    local_iterator end(size_type i) 
+    local_iterator end(size_type i)
     {
         local_iterator it = begin(i);
         if (table.test(i))
@@ -3832,12 +2579,12 @@ public:
         return _mk_iterator(it);
     }
 
-    const_local_iterator begin(size_type i) const 
+    const_local_iterator begin(size_type i) const
     {
         return _mk_const_iterator(table.test(i) ? table.get_iter(i) : table.ne_cend());
     }
 
-    const_local_iterator end(size_type i) const 
+    const_local_iterator end(size_type i) const
     {
         const_local_iterator it = begin(i);
         if (table.test(i))
@@ -3871,11 +2618,11 @@ private:
     // -----------------------------------------------------------------------
     enum MoveDontCopyT {MoveDontCopy, MoveDontGrow};
 
-    void _squash_deleted() 
+    void _squash_deleted()
     {
         // gets rid of any deleted entries we have
         // ---------------------------------------
-        if (num_deleted) 
+        if (num_deleted)
         {
             // get rid of deleted before writing
             sparse_hashtable tmp(MoveDontGrow, *this);
@@ -3898,7 +2645,7 @@ public:
     size_type max_bucket_count() const  { return max_size(); }
     // These are tr1 methods.  Their idea of 'bucket' doesn't map well to
     // what we do.  We just say every bucket has 0 or 1 items in it.
-    size_type bucket_size(size_type i) const 
+    size_type bucket_size(size_type i) const
     {
         return (size_type)(begin(i) == end(i) ? 0 : 1);
     }
@@ -3912,7 +2659,7 @@ private:
     // TODO(csilvers): take a delta so we can take into account inserts
     // done after shrinking.  Maybe make part of the Settings class?
     // --------------------------------------------------------------------
-    bool _maybe_shrink() 
+    bool _maybe_shrink()
     {
         assert((bucket_count() & (bucket_count()-1)) == 0); // is a power of two
         assert(bucket_count() >= HT_MIN_BUCKETS);
@@ -3927,12 +2674,12 @@ private:
         const size_type num_remain = table.num_nonempty();
         const size_type shrink_threshold = settings.shrink_threshold();
         if (shrink_threshold > 0 && num_remain < shrink_threshold &&
-            bucket_count() > HT_DEFAULT_STARTING_BUCKETS) 
+            bucket_count() > HT_DEFAULT_STARTING_BUCKETS)
         {
             const float shrink_factor = settings.shrink_factor();
             size_type sz = (size_type)(bucket_count() / 2);    // find how much we should shrink
             while (sz > HT_DEFAULT_STARTING_BUCKETS &&
-                   num_remain < static_cast<size_type>(sz * shrink_factor)) 
+                   num_remain < static_cast<size_type>(sz * shrink_factor))
             {
                 sz /= 2;                            // stay a power of 2
             }
@@ -3951,7 +2698,7 @@ private:
     bool _resize_delta(size_type delta)
     {
         bool did_resize = false;
-        if (settings.consider_shrink()) 
+        if (settings.consider_shrink())
         {
             // see if lots of deletes happened
             if (_maybe_shrink())
@@ -3972,9 +2719,9 @@ private:
         // Sometimes, we need to resize just to get rid of all the
         // "deleted" buckets that are clogging up the hashtable.  So when
         // deciding whether to resize, count the deleted buckets (which
-        // are currently taking up room).  
+        // are currently taking up room).
         // -------------------------------------------------------------
-        const size_type needed_size = 
+        const size_type needed_size =
                   settings.min_buckets((size_type)(num_occupied + delta), (size_type)0);
 
         if (needed_size <= bucket_count())      // we have enough buckets
@@ -3983,7 +2730,7 @@ private:
         size_type resize_to = settings.min_buckets((size_type)(num_occupied + delta), bucket_count());
 
         if (resize_to < needed_size &&    // may double resize_to
-            resize_to < (std::numeric_limits<size_type>::max)() / 2) 
+            resize_to < (std::numeric_limits<size_type>::max)() / 2)
         {
             // This situation means that we have enough deleted elements,
             // that once we purge them, we won't actually have needed to
@@ -3994,7 +2741,7 @@ private:
             // deleted elements).
             const size_type target =
                 static_cast<size_type>(settings.shrink_size((size_type)(resize_to*2)));
-            if (table.num_nonempty() + delta >= target) 
+            if (table.num_nonempty() + delta >= target)
             {
                 // Good, we won't be below the shrink threshold even if we double.
                 resize_to *= 2;
@@ -4015,7 +2762,7 @@ private:
         // If we need to change the size of our table, do it now
         const size_type resize_to = settings.min_buckets(ht.size(), min_buckets_wanted);
 
-        if (resize_to > bucket_count()) 
+        if (resize_to > bucket_count())
         {
             // we don't have enough buckets
             table.resize(resize_to);               // sets the number of buckets
@@ -4026,14 +2773,14 @@ private:
         // We could use insert() here, but since we know there are
         // no duplicates, we can be more efficient
         assert((bucket_count() & (bucket_count()-1)) == 0);      // a power of two
-        for (const_iterator it = ht.begin(); it != ht.end(); ++it) 
+        for (const_iterator it = ht.begin(); it != ht.end(); ++it)
         {
             size_type num_probes = 0;              // how many times we've probed
             size_type bucknum;
             const size_type bucket_count_minus_one = bucket_count() - 1;
             for (bucknum = hash(get_key(*it)) & bucket_count_minus_one;
                  table.test(bucknum);                                   // table.test() OK since no erase()
-                 bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one) 
+                 bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one)
             {
                 ++num_probes;
                 assert(num_probes < bucket_count()
@@ -4051,7 +2798,7 @@ private:
     void _move_from(MoveDontCopyT mover, sparse_hashtable &ht,
                    size_type min_buckets_wanted)
     {
-        clear(); 
+        clear();
 
         // If we need to change the size of our table, do it now
         size_type resize_to;
@@ -4059,7 +2806,7 @@ private:
             resize_to = ht.bucket_count();       // keep same size as old ht
         else                                     // MoveDontCopy
             resize_to = settings.min_buckets(ht.size(), min_buckets_wanted);
-        if (resize_to > bucket_count()) 
+        if (resize_to > bucket_count())
         {
             // we don't have enough buckets
             table.resize(resize_to);               // sets the number of buckets
@@ -4078,7 +2825,7 @@ private:
         {
             size_type num_probes = 0;
             size_type bucknum;
-            for (bucknum = hash(get_key(*it)) & bucket_count_minus_one; 
+            for (bucknum = hash(get_key(*it)) & bucket_count_minus_one;
                  table.test(bucknum);                          // table.test() OK since no erase()
                  bucknum = (size_type)((bucknum + JUMP_(key, num_probes)) & (bucket_count()-1)))
             {
@@ -4098,7 +2845,7 @@ public:
     // more useful as num_elements.  As a special feature, calling with
     // req_elements==0 will cause us to shrink if we can, saving space.
     // -----------------------------------------------------------------
-    void resize(size_type req_elements) 
+    void resize(size_type req_elements)
     {
         // resize to this or larger
         if (settings.consider_shrink() || req_elements == 0)
@@ -4112,7 +2859,7 @@ public:
     // the values.  Setting the shrink parameter to 0.0 ensures that the
     // table never shrinks.
     // ------------------------------------------------------------------
-    void get_resizing_parameters(float* shrink, float* grow) const 
+    void get_resizing_parameters(float* shrink, float* grow) const
     {
         *shrink = settings.shrink_factor();
         *grow = settings.enlarge_factor();
@@ -4121,13 +2868,14 @@ public:
     float get_shrink_factor() const  { return settings.shrink_factor(); }
     float get_enlarge_factor() const { return settings.enlarge_factor(); }
 
-    void set_resizing_parameters(float shrink, float grow) {
+    void set_resizing_parameters(float shrink, float grow)
+    {
         settings.set_resizing_parameters(shrink, grow);
         settings.reset_thresholds(bucket_count());
     }
 
     void set_shrink_factor(float shrink)
-    {                                           
+    {
         set_resizing_parameters(shrink, get_enlarge_factor());
     }
 
@@ -4146,14 +2894,14 @@ public:
                               const EqualKey& eql = EqualKey(),
                               const ExtractKey& ext = ExtractKey(),
                               const SetKey& set = SetKey(),
-                              const Alloc& alloc = Alloc())
+                              const allocator_type& alloc = allocator_type())
         : settings(hf),
           key_info(ext, set, eql),
           num_deleted(0),
           table((expected_max_items_in_table == 0
                  ? HT_DEFAULT_STARTING_BUCKETS
                  : settings.min_buckets(expected_max_items_in_table, 0)),
-                value_alloc_type(alloc)) 
+                alloc)
     {
         settings.reset_thresholds(bucket_count());
     }
@@ -4171,7 +2919,7 @@ public:
           table(0)
     {
         settings.reset_thresholds(bucket_count());
-        _copy_from(ht, min_buckets_wanted); 
+        _copy_from(ht, min_buckets_wanted);
     }
 
 #if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
@@ -4184,7 +2932,7 @@ public:
     {
     }
 
-    sparse_hashtable(sparse_hashtable&& o, const Alloc& alloc) :
+    sparse_hashtable(sparse_hashtable&& o, const allocator_type& alloc) :
         settings(std::move(o.settings)),
         key_info(std::move(o.key_info)),
         num_deleted(o.num_deleted),
@@ -4200,23 +2948,24 @@ public:
         swap(tmp, *this);
         return *this;
     }
-#endif    
+#endif
 
-    sparse_hashtable(MoveDontCopyT mover, 
+    sparse_hashtable(MoveDontCopyT mover,
                      sparse_hashtable& ht,
                      size_type min_buckets_wanted = HT_DEFAULT_STARTING_BUCKETS)
         : settings(ht.settings),
           key_info(ht.key_info),
           num_deleted(0),
           table(min_buckets_wanted, ht.table.get_allocator())
+          //table(min_buckets_wanted)
     {
         settings.reset_thresholds(bucket_count());
-        _move_from(mover, ht, min_buckets_wanted); 
+        _move_from(mover, ht, min_buckets_wanted);
     }
 
     sparse_hashtable& operator=(const sparse_hashtable& ht)
     {
-        if (&ht == this) 
+        if (&ht == this)
             return *this;        // don't copy onto ourselves
         settings = ht.settings;
         key_info = ht.key_info;
@@ -4230,7 +2979,7 @@ public:
     }
 
     // Many STL algorithms use swap instead of copy constructors
-    void swap(sparse_hashtable& ht) 
+    void swap(sparse_hashtable& ht)
     {
         using std::swap;
 
@@ -4244,9 +2993,9 @@ public:
     }
 
     // It's always nice to be able to clear a table without deallocating it
-    void clear() 
+    void clear()
     {
-        if (!empty() || num_deleted != 0) 
+        if (!empty() || num_deleted != 0)
         {
             table.clear();
             table = Table(HT_DEFAULT_STARTING_BUCKETS);
@@ -4257,7 +3006,7 @@ public:
 
     // LOOKUP ROUTINES
 private:
-    
+
     enum pos_type { pt_empty = 0, pt_erased, pt_full };
     // -------------------------------------------------------------------
     class Position
@@ -4266,12 +3015,12 @@ private:
 
         Position() : _t(pt_empty) {}
         Position(pos_type t, size_type idx) : _t(t), _idx(idx) {}
-        
+
         pos_type  _t;
         size_type _idx;
     };
 
-    // Returns a pair: 
+    // Returns a pair:
     //   - 'first' is a code, 2 if key already present, 0 or 1 otherwise.
     //   - 'second' is a position, where the key should go
     // Note: because of deletions where-to-insert is not trivial: it's the
@@ -4281,11 +3030,11 @@ private:
     {
         size_type num_probes = 0;                    // how many times we've probed
         const size_type bucket_count_minus_one = (const size_type)(bucket_count() - 1);
-        size_type bucknum = hash(key) & bucket_count_minus_one; 
+        size_type bucknum = hash(key) & bucket_count_minus_one;
         Position pos;
 
         while (1)
-        {    
+        {
             // probe until something happens
             // -----------------------------
             typename Table::GrpPos grp_pos(table, bucknum);
@@ -4294,7 +3043,7 @@ private:
             {
                 // bucket is empty => key not present
                 return pos._t ? pos : Position(pt_empty, bucknum);
-            } 
+            }
             else if (grp_pos.test())
             {
                 reference ref(grp_pos.unsafe_get());
@@ -4308,7 +3057,7 @@ private:
                 pos._t   = pt_erased;
                 pos._idx = bucknum;
             }
-            
+
             ++num_probes;                        // we're doing another probe
             bucknum = (size_type)((bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one);
             assert(num_probes < bucket_count()
@@ -4317,7 +3066,7 @@ private:
     }
 
 public:
-    // I hate to duplicate find() like that, but it is 
+    // I hate to duplicate find() like that, but it is
     // significantly faster to not have the intermediate pair
     // ------------------------------------------------------------------
     iterator find(const key_type& key)
@@ -4325,9 +3074,9 @@ public:
         size_type num_probes = 0;              // how many times we've probed
         const size_type bucket_count_minus_one = bucket_count() - 1;
         size_type bucknum = hash(key) & bucket_count_minus_one;
-        
+
         while (1)                        // probe until something happens
-        {            
+        {
             typename Table::GrpPos grp_pos(table, bucknum);
 
             if (!grp_pos.test_strict())
@@ -4355,7 +3104,7 @@ public:
         size_type bucknum = hash(key) & bucket_count_minus_one;
 
         while (1)                        // probe until something happens
-        {         
+        {
             typename Table::GrpPos grp_pos(table, bucknum);
 
             if (!grp_pos.test_strict())
@@ -4377,7 +3126,7 @@ public:
     // This is a tr1 method: the bucket a given key is in, or what bucket
     // it would be put in, if it were to be inserted.  Shrug.
     // ------------------------------------------------------------------
-    size_type bucket(const key_type& key) const 
+    size_type bucket(const key_type& key) const
     {
         Position pos = _find_position(key);
         return pos._idx;
@@ -4393,22 +3142,22 @@ public:
 
     // Likewise, equal_range doesn't really make sense for us.  Oh well.
     // -----------------------------------------------------------------
-    std::pair<iterator,iterator> equal_range(const key_type& key) 
+    std::pair<iterator,iterator> equal_range(const key_type& key)
     {
         iterator pos = find(key);      // either an iterator or end
-        if (pos == end()) 
+        if (pos == end())
             return std::pair<iterator,iterator>(pos, pos);
-        else 
+        else
         {
             const iterator startpos = pos++;
             return std::pair<iterator,iterator>(startpos, pos);
         }
     }
 
-    std::pair<const_iterator,const_iterator> equal_range(const key_type& key) const 
+    std::pair<const_iterator,const_iterator> equal_range(const key_type& key) const
     {
         const_iterator pos = find(key);      // either an iterator or end
-        if (pos == end()) 
+        if (pos == end())
             return std::pair<const_iterator,const_iterator>(pos, pos);
         else
         {
@@ -4422,9 +3171,9 @@ public:
 private:
     // Private method used by insert_noresize and find_or_insert.
     template <class T>
-    reference _insert_at(T& obj, size_type pos, bool erased) 
+    reference _insert_at(T& obj, size_type pos, bool erased)
     {
-        if (size() >= max_size()) 
+        if (size() >= max_size())
         {
             throw_exception(std::length_error("insert overflow"));
         }
@@ -4438,7 +3187,7 @@ private:
 
     // If you know *this is big enough to hold obj, use this routine
     template <class T>
-    std::pair<iterator, bool> _insert_noresize(T& obj) 
+    std::pair<iterator, bool> _insert_noresize(T& obj)
     {
         Position pos = _find_position(get_key(obj));
         bool already_there = (pos._t == pt_full);
@@ -4457,7 +3206,7 @@ private:
     void _insert(ForwardIterator f, ForwardIterator l, std::forward_iterator_tag /*unused*/)
     {
         int64_t dist = std::distance(f, l);
-        if (dist < 0 ||  static_cast<size_t>(dist) >= (std::numeric_limits<size_type>::max)()) 
+        if (dist < 0 ||  static_cast<size_t>(dist) >= (std::numeric_limits<size_type>::max)())
             throw_exception(std::length_error("insert-range overflow"));
 
         _resize_delta(static_cast<size_type>(dist));
@@ -4468,7 +3217,7 @@ private:
 
     // (2) Arbitrary iterator, can't tell how much to resize
     template <class InputIterator>
-    void _insert(InputIterator f, InputIterator l, std::input_iterator_tag /*unused*/) 
+    void _insert(InputIterator f, InputIterator l, std::input_iterator_tag /*unused*/)
     {
         for (; f != l; ++f)
             _insert(*f);
@@ -4478,9 +3227,9 @@ public:
 
 #if !defined(SPP_NO_CXX11_VARIADIC_TEMPLATES)
     template <class... Args>
-    std::pair<iterator, bool> emplace(Args&&... args) 
+    std::pair<iterator, bool> emplace(Args&&... args)
     {
-        _resize_delta(1);  
+        _resize_delta(1);
         value_type obj(std::forward<Args>(args)...);
         return _insert_noresize(obj);
     }
@@ -4493,9 +3242,19 @@ public:
         return _insert_noresize(obj);
     }
 
+#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
+    template< class P >
+    std::pair<iterator, bool> insert(P &&obj)
+    {
+        _resize_delta(1);                      // adding an object, grow if need be
+        value_type val(std::forward<value_type>(obj));
+        return _insert_noresize(val);
+    }
+#endif
+
     // When inserting a lot at a time, we specialize on the type of iterator
     template <class InputIterator>
-    void insert(InputIterator f, InputIterator l) 
+    void insert(InputIterator f, InputIterator l)
     {
         // specializes on iterator type
         _insert(f, l,
@@ -4515,7 +3274,7 @@ public:
         bool erased = false;
 
         while (1)                        // probe until something happens
-        {            
+        {
             typename Table::GrpPos grp_pos(table, bucknum);
 
             if (!grp_pos.test_strict())
@@ -4527,8 +3286,8 @@ public:
                     // Since we resized, we can't use pos, so recalculate where to insert.
                     value_type def(default_value(key));
                     return *(_insert_noresize(def).first);
-                } 
-                else 
+                }
+                else
                 {
                     // no need to rehash, insert right here
                     value_type def(default_value(key));
@@ -4556,14 +3315,14 @@ public:
         }
     }
 
-    size_type erase(const key_type& key) 
+    size_type erase(const key_type& key)
     {
         size_type num_probes = 0;              // how many times we've probed
         const size_type bucket_count_minus_one = bucket_count() - 1;
         size_type bucknum = hash(key) & bucket_count_minus_one;
-        
+
         while (1)                        // probe until something happens
-        {            
+        {
             typename Table::GrpPos grp_pos(table, bucknum);
 
             if (!grp_pos.test_strict())
@@ -4589,18 +3348,18 @@ public:
 
     const_iterator erase(const_iterator pos)
     {
-        if (pos == cend()) 
+        if (pos == cend())
             return cend();                 // sanity check
-        
+
         const_iterator nextpos = table.erase(pos);
         ++num_deleted;
         settings.set_consider_shrink(true);
         return nextpos;
     }
 
-    const_iterator erase(const_iterator f, const_iterator l) 
+    const_iterator erase(const_iterator f, const_iterator l)
     {
-        if (f == cend()) 
+        if (f == cend())
             return cend();                // sanity check
 
         size_type num_before = table.num_nonempty();
@@ -4613,35 +3372,26 @@ public:
     // Deleted key routines - just to keep google test framework happy
     // we don't actually use the deleted key
     // ---------------------------------------------------------------
-    void set_deleted_key(const key_type& key)   
+    void set_deleted_key(const key_type&)
     {
-        _squash_deleted();
-        key_info.delkey = key;
     }
 
     void clear_deleted_key()
     {
-        _squash_deleted();
-    }
-
-    key_type deleted_key() const 
-    {
-         return key_info.delkey;
     }
 
-
-    bool operator==(const sparse_hashtable& ht) const 
+    bool operator==(const sparse_hashtable& ht) const
     {
-        if (this == &ht) 
+        if (this == &ht)
             return true;
 
-        if (size() != ht.size()) 
+        if (size() != ht.size())
             return false;
 
-        for (const_iterator it = begin(); it != end(); ++it) 
+        for (const_iterator it = begin(); it != end(); ++it)
         {
             const_iterator it2 = ht.find(get_key(*it));
-            if ((it2 == ht.end()) || (*it != *it2)) 
+            if ((it2 == ht.end()) || (*it != *it2))
                 return false;
         }
 
@@ -4668,14 +3418,13 @@ public:
     // InputBuffer are appropriate types to pass in.
     // -------------------------------------------------------------
     template <typename OUTPUT>
-    bool write_metadata(OUTPUT *fp) 
+    bool write_metadata(OUTPUT *fp)
     {
-        _squash_deleted();           // so we don't have to worry about delkey
         return table.write_metadata(fp);
     }
 
     template <typename INPUT>
-    bool read_metadata(INPUT *fp) 
+    bool read_metadata(INPUT *fp)
     {
         num_deleted = 0;            // since we got rid before writing
         const bool result = table.read_metadata(fp);
@@ -4709,7 +3458,6 @@ public:
     template <typename ValueSerializer, typename OUTPUT>
     bool serialize(ValueSerializer serializer, OUTPUT *fp)
     {
-        _squash_deleted();           // so we don't have to worry about delkey
         return table.serialize(serializer, fp);
     }
 
@@ -4735,7 +3483,7 @@ private:
                                                    size_type, HT_MIN_BUCKETS>
     {
         explicit Settings(const hasher& hf)
-            : sparsehash_internal::sh_hashtable_settings<key_type, hasher, size_type, 
+            : sparsehash_internal::sh_hashtable_settings<key_type, hasher, size_type,
               HT_MIN_BUCKETS>
               (hf, HT_OCCUPANCY_PCT / 100.0f, HT_EMPTY_PCT / 100.0f) {}
     };
@@ -4747,7 +3495,7 @@ private:
     {
     public:
         KeyInfo(const ExtractKey& ek, const SetKey& sk, const EqualKey& eq)
-            : ExtractKey(ek), SetKey(sk), EqualKey(eq) 
+            : ExtractKey(ek), SetKey(sk), EqualKey(eq)
         {
         }
 
@@ -4757,12 +3505,10 @@ private:
             return ExtractKey::operator()(v);
         }
 
-        bool equals(const key_type& a, const key_type& b) const 
+        bool equals(const key_type& a, const key_type& b) const
         {
             return EqualKey::operator()(a, b);
         }
-
-        typename spp_::remove_const<key_type>::type delkey;
     };
 
     // Utility functions to access the templated operators
@@ -4771,35 +3517,25 @@ private:
         return settings.hash(v);
     }
 
-    bool equals(const key_type& a, const key_type& b) const 
+    bool equals(const key_type& a, const key_type& b) const
     {
         return key_info.equals(a, b);
     }
 
-    typename ExtractKey::result_type get_key(const_reference v) const 
+    typename ExtractKey::result_type get_key(const_reference v) const
     {
         return key_info.get_key(v);
     }
-    
+
 private:
     // Actual data
     // -----------
     Settings  settings;
     KeyInfo   key_info;
-    size_type num_deleted; 
+    size_type num_deleted;
     Table     table;         // holds num_buckets and num_elements too
 };
 
-
-// We need a global swap as well
-// -----------------------------
-template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
-inline void swap(sparse_hashtable<V,K,HF,ExK,SetK,EqK,A> &x,
-                 sparse_hashtable<V,K,HF,ExK,SetK,EqK,A> &y) 
-{
-    x.swap(y);
-}
-
 #undef JUMP_
 
 // -----------------------------------------------------------------------------
@@ -4822,55 +3558,55 @@ const int sparse_hashtable<V,K,HF,ExK,SetK,EqK,A>::HT_EMPTY_PCT
                    sparse_hashtable<V,K,HF,ExK,SetK,EqK,A>::HT_OCCUPANCY_PCT);
 
 
-
-
 //  ----------------------------------------------------------------------
 //                   S P A R S E _ H A S H _ M A P
 //  ----------------------------------------------------------------------
 template <class Key, class T,
-          class HashFcn = spp_hash<Key>,  
+          class HashFcn  = spp_hash<Key>,
           class EqualKey = std::equal_to<Key>,
-          class Alloc = libc_allocator_with_realloc<std::pair<const Key, T> > >
-class sparse_hash_map 
+          class Alloc    = SPP_DEFAULT_ALLOCATOR<std::pair<const Key, T> > >
+class sparse_hash_map
 {
+public:
+    typedef typename std::pair<const Key, T> value_type;
+
 private:
     // Apparently select1st is not stl-standard, so we define our own
-    struct SelectKey 
+    struct SelectKey
     {
         typedef const Key& result_type;
 
-        inline const Key& operator()(const std::pair<const Key, T>& p) const 
+        inline const Key& operator()(const value_type& p) const
         {
             return p.first;
         }
     };
 
-    struct SetKey 
+    struct SetKey
     {
-        inline void operator()(std::pair<const Key, T>* value, const Key& new_key) const
+        inline void operator()(value_type* value, const Key& new_key) const
         {
             *const_cast<Key*>(&value->first) = new_key;
         }
     };
 
     // For operator[].
-    struct DefaultValue 
+    struct DefaultValue
     {
-        inline std::pair<const Key, T> operator()(const Key& key)  const
+        inline value_type operator()(const Key& key)  const
         {
             return std::make_pair(key, T());
         }
     };
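+
+    // Illustrative note, not part of the upstream sparsepp sources: operator[]
+    // (further down) forwards to rep.find_or_insert<DefaultValue>(key), so a
+    // missing key is first inserted with a value-initialized mapped value:
+    //
+    //     spp::sparse_hash_map<int, int> m;
+    //     m[42] += 1;        // inserts {42, 0}, then increments it to 1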
 
     // The actual data
-    typedef sparse_hashtable<std::pair<typename spp_::remove_const<Key>::type, T>, Key, HashFcn, SelectKey,
+    typedef sparse_hashtable<value_type, Key, HashFcn, SelectKey,
                              SetKey, EqualKey, Alloc> ht;
 
 public:
     typedef typename ht::key_type             key_type;
     typedef T                                 data_type;
     typedef T                                 mapped_type;
-    typedef typename std::pair<const Key, T>  value_type;
     typedef typename ht::hasher               hasher;
     typedef typename ht::key_equal            key_equal;
     typedef Alloc                             allocator_type;
@@ -4917,7 +3653,7 @@ public:
                              const hasher& hf = hasher(),
                              const key_equal& eql = key_equal(),
                              const allocator_type& alloc = allocator_type())
-        : rep(n, hf, eql, SelectKey(), SetKey(), alloc) 
+        : rep(n, hf, eql, SelectKey(), SetKey(), alloc)
     {
     }
 
@@ -4942,7 +3678,7 @@ public:
                     const hasher& hf = hasher(),
                     const key_equal& eql = key_equal(),
                     const allocator_type& alloc = allocator_type())
-        : rep(n, hf, eql, SelectKey(), SetKey(), alloc) 
+        : rep(n, hf, eql, SelectKey(), SetKey(), alloc)
     {
         rep.insert(f, l);
     }
@@ -4950,7 +3686,7 @@ public:
     template <class InputIterator>
     sparse_hash_map(InputIterator f, InputIterator l,
                     size_type n, const allocator_type& alloc)
-        : rep(n, hasher(), key_equal(), SelectKey(), SetKey(), alloc) 
+        : rep(n, hasher(), key_equal(), SelectKey(), SetKey(), alloc)
     {
         rep.insert(f, l);
     }
@@ -4958,28 +3694,28 @@ public:
     template <class InputIterator>
     sparse_hash_map(InputIterator f, InputIterator l,
                     size_type n, const hasher& hf, const allocator_type& alloc)
-        : rep(n, hf, key_equal(), SelectKey(), SetKey(), alloc) 
+        : rep(n, hf, key_equal(), SelectKey(), SetKey(), alloc)
     {
         rep.insert(f, l);
     }
 
-    sparse_hash_map(const sparse_hash_map &o) : 
-        rep(o.rep) 
+    sparse_hash_map(const sparse_hash_map &o) :
+        rep(o.rep)
     {}
 
     sparse_hash_map(const sparse_hash_map &o,
-                    const allocator_type& alloc) : 
-        rep(o.rep, alloc) 
+                    const allocator_type& alloc) :
+        rep(o.rep, alloc)
     {}
 
 #if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
-    sparse_hash_map(const sparse_hash_map &&o) : 
+    sparse_hash_map(sparse_hash_map &&o) :
         rep(std::move(o.rep))
     {}
 
-    sparse_hash_map(const sparse_hash_map &&o,
+    sparse_hash_map(sparse_hash_map &&o,
                     const allocator_type& alloc) :
-        rep(std::move(o.rep), alloc) 
+        rep(std::move(o.rep), alloc)
     {}
 #endif
 
@@ -4989,7 +3725,7 @@ public:
                     const hasher& hf = hasher(),
                     const key_equal& eql = key_equal(),
                     const allocator_type& alloc = allocator_type())
-        : rep(n, hf, eql, SelectKey(), SetKey(), alloc) 
+        : rep(n, hf, eql, SelectKey(), SetKey(), alloc)
     {
         rep.insert(init.begin(), init.end());
     }
@@ -5048,34 +3784,35 @@ public:
     float min_load_factor() const      { return rep.get_shrink_factor(); }
     void  min_load_factor(float shrink){ rep.set_shrink_factor(shrink); }
 
-    void set_resizing_parameters(float shrink, float grow) 
+    void set_resizing_parameters(float shrink, float grow)
     {
         rep.set_resizing_parameters(shrink, grow);
     }
 
     void resize(size_type cnt)        { rep.resize(cnt); }
     void rehash(size_type cnt)        { resize(cnt); } // c++11 name
-    void reserve(size_type cnt)       { resize(cnt); } // c++11 
+    void reserve(size_type cnt)       { resize(cnt); } // c++11
 
     // Lookup
     // ------
     iterator find(const key_type& key)                 { return rep.find(key); }
     const_iterator find(const key_type& key) const     { return rep.find(key); }
+    bool contains(const key_type& key) const           { return rep.find(key) != rep.end(); }
 
-    mapped_type& operator[](const key_type& key) 
+    mapped_type& operator[](const key_type& key)
     {
         return rep.template find_or_insert<DefaultValue>(key).second;
     }
 
     size_type count(const key_type& key) const         { return rep.count(key); }
 
-    std::pair<iterator, iterator> 
+    std::pair<iterator, iterator>
     equal_range(const key_type& key)             { return rep.equal_range(key); }
 
-    std::pair<const_iterator, const_iterator> 
+    std::pair<const_iterator, const_iterator>
     equal_range(const key_type& key) const       { return rep.equal_range(key); }
 
-    mapped_type& at(const key_type& key) 
+    mapped_type& at(const key_type& key)
     {
         iterator it = rep.find(key);
         if (it == rep.end())
@@ -5093,7 +3830,7 @@ public:
 
 #if !defined(SPP_NO_CXX11_VARIADIC_TEMPLATES)
     template <class... Args>
-    std::pair<iterator, bool> emplace(Args&&... args) 
+    std::pair<iterator, bool> emplace(Args&&... args)
     {
         return rep.emplace(std::forward<Args>(args)...);
     }
@@ -5107,14 +3844,19 @@ public:
 
     // Insert
     // ------
-    std::pair<iterator, bool> 
+    std::pair<iterator, bool>
     insert(const value_type& obj)                    { return rep.insert(obj); }
 
-    template <class InputIterator> 
+#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
+    template< class P >
+    std::pair<iterator, bool> insert(P&& obj)        { return rep.insert(std::forward<P>(obj)); }
+#endif
+
+    template <class InputIterator>
     void insert(InputIterator f, InputIterator l)    { rep.insert(f, l); }
 
     void insert(const_iterator f, const_iterator l)  { rep.insert(f, l); }
-    
+
     iterator insert(iterator /*unused*/, const value_type& obj) { return insert(obj).first; }
     iterator insert(const_iterator /*unused*/, const value_type& obj) { return insert(obj).first; }
 
@@ -5128,7 +3870,7 @@ public:
     // Erase
     // -----
     size_type erase(const key_type& key)               { return rep.erase(key); }
-    iterator  erase(iterator it)                       { return rep.erase(it); } 
+    iterator  erase(iterator it)                       { return rep.erase(it); }
     iterator  erase(iterator f, iterator l)            { return rep.erase(f, l); }
     iterator  erase(const_iterator it)                 { return rep.erase(it); }
     iterator  erase(const_iterator f, const_iterator l){ return rep.erase(f, l); }
@@ -5162,7 +3904,7 @@ public:
     //    Note basic_ostream<not_char> is not currently supported.
     // ---------------------------------------------------------------
     template <typename ValueSerializer, typename OUTPUT>
-    bool serialize(ValueSerializer serializer, OUTPUT* fp) 
+    bool serialize(ValueSerializer serializer, OUTPUT* fp)
     {
         return rep.serialize(serializer, fp);
     }
@@ -5210,35 +3952,27 @@ private:
     ht rep;
 };
 
-// We need a global swap as well
-template <class Key, class T, class HashFcn, class EqualKey, class Alloc>
-inline void swap(sparse_hash_map<Key, T, HashFcn, EqualKey, Alloc>& hm1,
-                 sparse_hash_map<Key, T, HashFcn, EqualKey, Alloc>& hm2) 
-{
-    hm1.swap(hm2);
-}
-
 //  ----------------------------------------------------------------------
 //                   S P A R S E _ H A S H _ S E T
 //  ----------------------------------------------------------------------
 
 template <class Value,
-          class HashFcn = spp_hash<Value>,
+          class HashFcn  = spp_hash<Value>,
           class EqualKey = std::equal_to<Value>,
-          class Alloc = libc_allocator_with_realloc<Value> >
-class sparse_hash_set 
+          class Alloc    = SPP_DEFAULT_ALLOCATOR<Value> >
+class sparse_hash_set
 {
 private:
     // Apparently identity is not stl-standard, so we define our own
-    struct Identity 
+    struct Identity
     {
         typedef const Value& result_type;
-        const Value& operator()(const Value& v) const { return v; }
+        inline const Value& operator()(const Value& v) const { return v; }
     };
 
-    struct SetKey 
+    struct SetKey
     {
-        void operator()(Value* value, const Value& new_key) const 
+        inline void operator()(Value* value, const Value& new_key) const
         {
             *value = new_key;
         }
@@ -5308,7 +4042,7 @@ public:
     {
     }
 
-    sparse_hash_set(size_type n, const hasher& hf, 
+    sparse_hash_set(size_type n, const hasher& hf,
                     const allocator_type& alloc) :
         rep(n, hf, key_equal(), Identity(), SetKey(), alloc)
     {
@@ -5323,7 +4057,7 @@ public:
         : rep(n, hf, eql, Identity(), SetKey(), alloc)
     {
         rep.insert(f, l);
-    } 
+    }
 
     template <class InputIterator>
     sparse_hash_set(InputIterator f, InputIterator l,
@@ -5331,7 +4065,7 @@ public:
         : rep(n, hasher(), key_equal(), Identity(), SetKey(), alloc)
     {
         rep.insert(f, l);
-    } 
+    }
 
     template <class InputIterator>
     sparse_hash_set(InputIterator f, InputIterator l,
@@ -5339,31 +4073,31 @@ public:
         : rep(n, hf, key_equal(), Identity(), SetKey(), alloc)
     {
         rep.insert(f, l);
-    } 
+    }
 
-    sparse_hash_set(const sparse_hash_set &o) : 
+    sparse_hash_set(const sparse_hash_set &o) :
         rep(o.rep)
     {}
 
     sparse_hash_set(const sparse_hash_set &o,
                     const allocator_type& alloc) :
-        rep(o.rep, alloc) 
+        rep(o.rep, alloc)
     {}
 
 #if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
-    sparse_hash_set(const sparse_hash_set &&o) : 
+    sparse_hash_set(sparse_hash_set &&o) :
         rep(std::move(o.rep))
     {}
 
-    sparse_hash_set(const sparse_hash_set &&o,
+    sparse_hash_set(sparse_hash_set &&o,
                     const allocator_type& alloc) :
-        rep(std::move(o.rep), alloc) 
+        rep(std::move(o.rep), alloc)
     {}
 #endif
 
 #if !defined(SPP_NO_CXX11_HDR_INITIALIZER_LIST)
     sparse_hash_set(std::initializer_list<value_type> init,
-                    size_type n = 0, 
+                    size_type n = 0,
                     const hasher& hf = hasher(),
                     const key_equal& eql = key_equal(),
                     const allocator_type& alloc = allocator_type()) :
@@ -5380,7 +4114,7 @@ public:
     }
 
     sparse_hash_set(std::initializer_list<value_type> init,
-                    size_type n, const hasher& hf, 
+                    size_type n, const hasher& hf,
                     const allocator_type& alloc) :
         rep(n, hf, key_equal(), Identity(), SetKey(), alloc)
     {
@@ -5400,7 +4134,7 @@ public:
     }
 
 #endif
-    
+
     sparse_hash_set& operator=(const sparse_hash_set &o)
     {
         rep = o.rep;
@@ -5430,27 +4164,28 @@ public:
     float min_load_factor() const      { return rep.get_shrink_factor(); }
     void  min_load_factor(float shrink){ rep.set_shrink_factor(shrink); }
 
-    void set_resizing_parameters(float shrink, float grow) 
+    void set_resizing_parameters(float shrink, float grow)
     {
         rep.set_resizing_parameters(shrink, grow);
     }
 
     void resize(size_type cnt)        { rep.resize(cnt); }
     void rehash(size_type cnt)        { resize(cnt); } // c++11 name
-    void reserve(size_type cnt)       { resize(cnt); } // c++11 
+    void reserve(size_type cnt)       { resize(cnt); } // c++11
 
     // Lookup
     // ------
     iterator find(const key_type& key) const     { return rep.find(key); }
+    bool contains(const key_type& key) const     { return rep.find(key) != rep.end(); }
 
     size_type count(const key_type& key) const   { return rep.count(key); }
 
-    std::pair<iterator, iterator> 
+    std::pair<iterator, iterator>
     equal_range(const key_type& key) const       { return rep.equal_range(key); }
 
 #if !defined(SPP_NO_CXX11_VARIADIC_TEMPLATES)
     template <class... Args>
-    std::pair<iterator, bool> emplace(Args&&... args) 
+    std::pair<iterator, bool> emplace(Args&&... args)
     {
         return rep.emplace(std::forward<Args>(args)...);
     }
@@ -5464,12 +4199,17 @@ public:
 
     // Insert
     // ------
-    std::pair<iterator, bool> insert(const value_type& obj) 
+    std::pair<iterator, bool> insert(const value_type& obj)
     {
         std::pair<typename ht::iterator, bool> p = rep.insert(obj);
         return std::pair<iterator, bool>(p.first, p.second);   // const to non-const
     }
 
+#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
+    template<class P>
+    std::pair<iterator, bool> insert(P&& obj)        { return rep.insert(std::forward<P>(obj)); }
+#endif
+
     template <class InputIterator>
     void insert(InputIterator f, InputIterator l)    { rep.insert(f, l); }
 
@@ -5565,14 +4305,43 @@ private:
     ht rep;
 };
 
-template <class Val, class HashFcn, class EqualKey, class Alloc>
-inline void swap(sparse_hash_set<Val, HashFcn, EqualKey, Alloc>& hs1,
-                 sparse_hash_set<Val, HashFcn, EqualKey, Alloc>& hs2) 
+} // spp_ namespace
+
+
+// We need a global swap for all our classes as well
+// -------------------------------------------------
+
+template <class T, class Alloc>
+inline void swap(spp_::sparsegroup<T,Alloc> &x, spp_::sparsegroup<T,Alloc> &y)
 {
-    hs1.swap(hs2);
+    x.swap(y);
+}
+
+template <class T, class Alloc>
+inline void swap(spp_::sparsetable<T,Alloc> &x, spp_::sparsetable<T,Alloc> &y)
+{
+    x.swap(y);
+}
+
+template <class V, class K, class HF, class ExK, class SetK, class EqK, class A>
+inline void swap(spp_::sparse_hashtable<V,K,HF,ExK,SetK,EqK,A> &x,
+                 spp_::sparse_hashtable<V,K,HF,ExK,SetK,EqK,A> &y)
+{
+    x.swap(y);
 }
 
+template <class Key, class T, class HashFcn, class EqualKey, class Alloc>
+inline void swap(spp_::sparse_hash_map<Key, T, HashFcn, EqualKey, Alloc>& hm1,
+                 spp_::sparse_hash_map<Key, T, HashFcn, EqualKey, Alloc>& hm2)
+{
+    hm1.swap(hm2);
+}
 
-SPP_END_NAMESPACE
+template <class Val, class HashFcn, class EqualKey, class Alloc>
+inline void swap(spp_::sparse_hash_set<Val, HashFcn, EqualKey, Alloc>& hs1,
+                 spp_::sparse_hash_set<Val, HashFcn, EqualKey, Alloc>& hs2)
+{
+    hs1.swap(hs2);
+}
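+
+// Illustrative usage, not part of the upstream sparsepp sources: with the
+// overloads above, an unqualified swap() of two sparsepp containers resolves to
+// the member-wise swap() instead of a deep copy, e.g.
+//
+//     spp::sparse_hash_map<int, int> a, b;
+//     a[1] = 2;
+//     swap(a, b);        // now b holds the element and a is empty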
 
 #endif // sparsepp_h_guard_
diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_config.h b/resources/3rdparty/sparsepp/sparsepp/spp_config.h
new file mode 100755
index 000000000..46eeee5c2
--- /dev/null
+++ b/resources/3rdparty/sparsepp/sparsepp/spp_config.h
@@ -0,0 +1,781 @@
+#if !defined(spp_config_h_guard)
+#define spp_config_h_guard
+
+// --------------------------------------------------
+// Sparsepp config macros
+// some can be overridden on the command line
+// --------------------------------------------------
+#ifndef SPP_NAMESPACE
+     #define SPP_NAMESPACE spp
+#endif
+
+#ifndef spp_
+    #define spp_ SPP_NAMESPACE
+#endif
+
+#ifndef SPP_DEFAULT_ALLOCATOR
+    #if (defined(SPP_USE_SPP_ALLOC) && SPP_USE_SPP_ALLOC) && defined(_MSC_VER)
+        // -----------------------------------------------------------------------------
+        // When building with the Microsoft compiler, we use a custom allocator because
+        // the default one fragments memory when reallocating. This is desirable only
+        // when creating large sparsepp hash maps. If you create lots of small hash_maps,
+        // define the following before including spp.h:
+        //     #define SPP_DEFAULT_ALLOCATOR spp::libc_allocator
+        // -----------------------------------------------------------------------------
+        #define SPP_DEFAULT_ALLOCATOR spp_::spp_allocator
+        #define SPP_INCLUDE_SPP_ALLOC
+    #else
+        #define SPP_DEFAULT_ALLOCATOR spp_::libc_allocator
+    #endif
+#endif
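+
+// For illustration, a minimal sketch of forcing the libc allocator (named in the
+// comment above) for a translation unit; the include path assumes the usual
+// <sparsepp/...> layout:
+//
+//     #define SPP_DEFAULT_ALLOCATOR spp::libc_allocator
+//     #include <sparsepp/spp.h>
+//
+//     spp::sparse_hash_map<int, int> small_map;   // storage now comes from libc_allocator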
+
+#ifndef SPP_GROUP_SIZE
+    // must be 32 or 64
+    #define SPP_GROUP_SIZE 32
+#endif
+
+#ifndef SPP_ALLOC_SZ
+    // must be a power of 2 (0 = aggressive alloc, 1 = smallest memory usage, 2 = good compromise)
+    #define SPP_ALLOC_SZ 0
+#endif
+
+#ifndef SPP_STORE_NUM_ITEMS
+    // 1 uses a little more memory, but is faster
+    #define SPP_STORE_NUM_ITEMS 1 
+#endif
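+
+// The knobs above can also be overridden from the build system, e.g. via the
+// compiler command line (a sketch; adapt to your compiler/driver):
+//
+//     g++ -std=c++11 -DSPP_GROUP_SIZE=64 -DSPP_ALLOC_SZ=1 -c my_file.cpp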
+
+
+// ---------------------------------------------------------------------------
+// Compiler detection code (SPP_ preprocessor macros) derived from the Boost
+// libraries. The Boost Software License is therefore reproduced below.
+// ---------------------------------------------------------------------------
+// Boost Software License - Version 1.0 - August 17th, 2003
+//
+// Permission is hereby granted, free of charge, to any person or organization
+// obtaining a copy of the software and accompanying documentation covered by
+// this license (the "Software") to use, reproduce, display, distribute,
+// execute, and transmit the Software, and to prepare derivative works of the
+// Software, and to permit third-parties to whom the Software is furnished to
+// do so, all subject to the following:
+//
+// The copyright notices in the Software and this entire statement, including
+// the above license grant, this restriction and the following disclaimer,
+// must be included in all copies of the Software, in whole or in part, and
+// all derivative works of the Software, unless such copies or derivative
+// works are solely in the form of machine-executable object code generated by
+// a source language processor.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+// DEALINGS IN THE SOFTWARE.
+// ---------------------------------------------------------------------------
+
+// Boost like configuration
+// ------------------------
+#if defined __clang__
+
+    #if defined(i386)
+        #include <cpuid.h>
+        inline void spp_cpuid(int info[4], int InfoType) {
+            __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
+        }
+    #endif
+
+    #define SPP_POPCNT   __builtin_popcount
+    #define SPP_POPCNT64 __builtin_popcountll
+
+    #define SPP_HAS_CSTDINT
+
+    #ifndef __has_extension
+        #define __has_extension __has_feature
+    #endif
+
+    #if !__has_feature(cxx_exceptions) && !defined(SPP_NO_EXCEPTIONS)
+        #define SPP_NO_EXCEPTIONS
+    #endif
+
+    #if !__has_feature(cxx_rtti) && !defined(SPP_NO_RTTI)
+      #define SPP_NO_RTTI
+    #endif
+
+    #if !__has_feature(cxx_rtti) && !defined(SPP_NO_TYPEID)
+        #define SPP_NO_TYPEID
+    #endif
+
+    #if defined(__int64) && !defined(__GNUC__)
+        #define SPP_HAS_MS_INT64
+    #endif
+
+    #define SPP_HAS_NRVO
+
+    // Branch prediction hints
+    #if defined(__has_builtin)
+        #if __has_builtin(__builtin_expect)
+             #define SPP_LIKELY(x) __builtin_expect(x, 1)
+             #define SPP_UNLIKELY(x) __builtin_expect(x, 0)
+        #endif
+    #endif
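+
+    // Usage sketch (hypothetical caller, valid wherever the hints expand to
+    // __builtin_expect): the hints only steer code layout, never behavior.
+    //
+    //     if (SPP_UNLIKELY(size == capacity))
+    //         grow();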
+
+    // Clang supports "long long" in all compilation modes.
+    #define SPP_HAS_LONG_LONG
+
+    #if !__has_feature(cxx_constexpr)
+        #define SPP_NO_CXX11_CONSTEXPR
+    #endif
+
+    #if !__has_feature(cxx_decltype)
+        #define SPP_NO_CXX11_DECLTYPE
+    #endif
+
+    #if !__has_feature(cxx_decltype_incomplete_return_types)
+        #define SPP_NO_CXX11_DECLTYPE_N3276
+    #endif
+
+    #if !__has_feature(cxx_defaulted_functions)
+        #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS
+    #endif
+
+    #if !__has_feature(cxx_deleted_functions)
+        #define SPP_NO_CXX11_DELETED_FUNCTIONS
+    #endif
+
+    #if !__has_feature(cxx_explicit_conversions)
+        #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS
+    #endif
+
+    #if !__has_feature(cxx_default_function_template_args)
+        #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS
+    #endif
+
+    #if !__has_feature(cxx_generalized_initializers)
+        #define SPP_NO_CXX11_HDR_INITIALIZER_LIST
+    #endif
+
+    #if !__has_feature(cxx_lambdas)
+        #define SPP_NO_CXX11_LAMBDAS
+    #endif
+
+    #if !__has_feature(cxx_local_type_template_args)
+        #define SPP_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS
+    #endif
+
+    #if !__has_feature(cxx_raw_string_literals)
+        #define SPP_NO_CXX11_RAW_LITERALS
+    #endif
+
+    #if !__has_feature(cxx_reference_qualified_functions)
+        #define SPP_NO_CXX11_REF_QUALIFIERS
+    #endif
+
+    #if !__has_feature(cxx_generalized_initializers)
+        #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX
+    #endif
+
+    #if !__has_feature(cxx_rvalue_references)
+        #define SPP_NO_CXX11_RVALUE_REFERENCES
+    #endif
+
+    #if !__has_feature(cxx_static_assert)
+        #define SPP_NO_CXX11_STATIC_ASSERT
+    #endif
+
+    #if !__has_feature(cxx_alias_templates)
+        #define SPP_NO_CXX11_TEMPLATE_ALIASES
+    #endif
+
+    #if !__has_feature(cxx_variadic_templates)
+        #define SPP_NO_CXX11_VARIADIC_TEMPLATES
+    #endif
+
+    #if !__has_feature(cxx_user_literals)
+        #define SPP_NO_CXX11_USER_DEFINED_LITERALS
+    #endif
+
+    #if !__has_feature(cxx_alignas)
+        #define SPP_NO_CXX11_ALIGNAS
+    #endif
+
+    #if !__has_feature(cxx_trailing_return)
+        #define SPP_NO_CXX11_TRAILING_RESULT_TYPES
+    #endif
+
+    #if !__has_feature(cxx_inline_namespaces)
+        #define SPP_NO_CXX11_INLINE_NAMESPACES
+    #endif
+
+    #if !__has_feature(cxx_override_control)
+        #define SPP_NO_CXX11_FINAL
+    #endif
+
+    #if !(__has_feature(__cxx_binary_literals__) || __has_extension(__cxx_binary_literals__))
+        #define SPP_NO_CXX14_BINARY_LITERALS
+    #endif
+
+    #if !__has_feature(__cxx_decltype_auto__)
+        #define SPP_NO_CXX14_DECLTYPE_AUTO
+    #endif
+
+    #if !__has_feature(__cxx_init_captures__)
+        #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES
+    #endif
+
+    #if !__has_feature(__cxx_generic_lambdas__)
+        #define SPP_NO_CXX14_GENERIC_LAMBDAS
+    #endif
+
+
+    #if !__has_feature(__cxx_generic_lambdas__) || !__has_feature(__cxx_relaxed_constexpr__)
+        #define SPP_NO_CXX14_CONSTEXPR
+    #endif
+
+    #if !__has_feature(__cxx_return_type_deduction__)
+        #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION
+    #endif
+
+    #if !__has_feature(__cxx_variable_templates__)
+        #define SPP_NO_CXX14_VARIABLE_TEMPLATES
+    #endif
+
+    #if __cplusplus < 201400
+        #define SPP_NO_CXX14_DIGIT_SEPARATORS
+    #endif
+
+    #if defined(__has_builtin) && __has_builtin(__builtin_unreachable)
+      #define SPP_UNREACHABLE_RETURN(x) __builtin_unreachable();
+    #endif
+
+    #define SPP_ATTRIBUTE_UNUSED __attribute__((__unused__))
+
+    #ifndef SPP_COMPILER
+        #define SPP_COMPILER "Clang version " __clang_version__
+    #endif
+
+    #define SPP_CLANG 1
+
+
+#elif defined __GNUC__
+
+    #define SPP_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+
+    //  definition to expand macro then apply to pragma message
+    // #define VALUE_TO_STRING(x) #x
+    // #define VALUE(x) VALUE_TO_STRING(x)
+    // #define VAR_NAME_VALUE(var) #var "="  VALUE(var)
+    // #pragma message(VAR_NAME_VALUE(SPP_GCC_VERSION))
+
+    #if defined(i386)
+        #include <cpuid.h>
+        inline void spp_cpuid(int info[4], int InfoType) {
+            __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
+        }
+    #endif
+
+    // __POPCNT__ is defined when compiling with popcount support
+    // (e.g. when the -mpopcnt compiler option is given)
+    #ifdef __POPCNT__
+        // slower unless compiled with -mpopcnt
+        #define SPP_POPCNT   __builtin_popcount
+        #define SPP_POPCNT64 __builtin_popcountll
+    #endif
+
+    #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L)
+        #define SPP_GCC_CXX11
+    #endif
+
+    #if __GNUC__ == 3
+        #if defined (__PATHSCALE__)
+             #define SPP_NO_TWO_PHASE_NAME_LOOKUP
+             #define SPP_NO_IS_ABSTRACT
+        #endif
+
+        #if __GNUC_MINOR__ < 4
+             #define SPP_NO_IS_ABSTRACT
+        #endif
+
+        #define SPP_NO_CXX11_EXTERN_TEMPLATE
+    #endif
+
+    #if __GNUC__ < 4
+    //
+    // All problems to gcc-3.x and earlier here:
+    //
+    #define SPP_NO_TWO_PHASE_NAME_LOOKUP
+        #ifdef __OPEN64__
+            #define SPP_NO_IS_ABSTRACT
+        #endif
+    #endif
+
+    // GCC prior to 3.4 had #pragma once too but it didn't work well with filesystem links
+    #if SPP_GCC_VERSION >= 30400
+        #define SPP_HAS_PRAGMA_ONCE
+    #endif
+
+    #if SPP_GCC_VERSION < 40400
+        // Previous versions of GCC did not completely implement value-initialization:
+        // GCC Bug 30111, "Value-initialization of POD base class doesn't initialize
+        // members", reported by Jonathan Wakely in 2006,
+        // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=30111 (fixed for GCC 4.4)
+        // GCC Bug 33916, "Default constructor fails to initialize array members",
+        // reported by Michael Elizabeth Chastain in 2007,
+        // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33916 (fixed for GCC 4.2.4)
+        // See also: http://www.boost.org/libs/utility/value_init.htm#compiler_issues
+        #define SPP_NO_COMPLETE_VALUE_INITIALIZATION
+    #endif
+
+    #if !defined(__EXCEPTIONS) && !defined(SPP_NO_EXCEPTIONS)
+        #define SPP_NO_EXCEPTIONS
+    #endif
+
+    //
+    // Threading support: Turn this on unconditionally here (except for
+    // those platforms where we can know for sure). It will get turned off again
+    // later if no threading API is detected.
+    //
+    #if !defined(__MINGW32__) && !defined(linux) && !defined(__linux) && !defined(__linux__)
+        #define SPP_HAS_THREADS
+    #endif
+
+    //
+    // gcc has "long long"
+    // Except on Darwin with standard compliance enabled (-pedantic)
+    // Apple gcc helpfully defines this macro we can query
+    //
+    #if !defined(__DARWIN_NO_LONG_LONG)
+        #define SPP_HAS_LONG_LONG
+    #endif
+
+    //
+    // gcc implements the named return value optimization since version 3.1
+    //
+    #define SPP_HAS_NRVO
+
+    // Branch prediction hints
+    #define SPP_LIKELY(x) __builtin_expect(x, 1)
+    #define SPP_UNLIKELY(x) __builtin_expect(x, 0)
+
+    //
+    // Dynamic shared object (DSO) and dynamic-link library (DLL) support
+    //
+    #if __GNUC__ >= 4
+       #if (defined(_WIN32) || defined(__WIN32__) || defined(WIN32)) && !defined(__CYGWIN__)
+            // All Win32 development environments, including 64-bit Windows and MinGW, define
+            // _WIN32 or one of its variant spellings. Note that Cygwin is a POSIX environment,
+            // so does not define _WIN32 or its variants.
+            #define SPP_HAS_DECLSPEC
+            #define SPP_SYMBOL_EXPORT __attribute__((__dllexport__))
+            #define SPP_SYMBOL_IMPORT __attribute__((__dllimport__))
+       #else
+            #define SPP_SYMBOL_EXPORT __attribute__((__visibility__("default")))
+            #define SPP_SYMBOL_IMPORT
+       #endif
+
+       #define SPP_SYMBOL_VISIBLE __attribute__((__visibility__("default")))
+    #else
+       // config/platform/win32.hpp will define SPP_SYMBOL_EXPORT, etc., unless already defined
+       #define SPP_SYMBOL_EXPORT
+    #endif
+
+    //
+    // RTTI and typeinfo detection is possible post gcc-4.3:
+    //
+    #if SPP_GCC_VERSION > 40300
+        #ifndef __GXX_RTTI
+            #ifndef SPP_NO_TYPEID
+                #define SPP_NO_TYPEID
+            #endif
+            #ifndef SPP_NO_RTTI
+                #define SPP_NO_RTTI
+            #endif
+        #endif
+    #endif
+
+    //
+    // Recent GCC versions have __int128 when in 64-bit mode.
+    //
+    // We disable this if the compiler is really nvcc with C++03 as it
+    // doesn't actually support __int128 as of CUDA_VERSION=7500
+    // even though it defines __SIZEOF_INT128__.
+    // See https://svn.boost.org/trac/boost/ticket/8048
+    //     https://svn.boost.org/trac/boost/ticket/11852
+    // Only re-enable this for nvcc if you're absolutely sure
+    // of the circumstances under which it's supported:
+    //
+    #if defined(__CUDACC__)
+        #if defined(SPP_GCC_CXX11)
+            #define SPP_NVCC_CXX11
+        #else
+            #define SPP_NVCC_CXX03
+        #endif
+    #endif
+
+    #if defined(__SIZEOF_INT128__) && !defined(SPP_NVCC_CXX03)
+        #define SPP_HAS_INT128
+    #endif
+    //
+    // Recent GCC versions have a __float128 native type, we need to
+    // include a std lib header to detect this - not ideal, but we'll
+    // be including <cstddef> later anyway when we select the std lib.
+    //
+    // Nevertheless, as of CUDA 7.5, using __float128 with the host
+    // compiler in pre-C++11 mode is still not supported.
+    // See https://svn.boost.org/trac/boost/ticket/11852
+    //
+    #ifdef __cplusplus
+        #include <cstddef>
+    #else
+        #include <stddef.h>
+    #endif
+
+    #if defined(_GLIBCXX_USE_FLOAT128) && !defined(__STRICT_ANSI__) && !defined(SPP_NVCC_CXX03)
+         #define SPP_HAS_FLOAT128
+    #endif
+
+    // C++0x features in 4.3.n and later
+    //
+    #if (SPP_GCC_VERSION >= 40300) && defined(SPP_GCC_CXX11)
+       // C++0x features are only enabled when -std=c++0x or -std=gnu++0x are
+       // passed on the command line, which in turn defines
+       // __GXX_EXPERIMENTAL_CXX0X__.
+       #define SPP_HAS_DECLTYPE
+       #define SPP_HAS_RVALUE_REFS
+       #define SPP_HAS_STATIC_ASSERT
+       #define SPP_HAS_VARIADIC_TMPL
+       #define SPP_HAS_CSTDINT
+    #else
+       #define SPP_NO_CXX11_DECLTYPE
+       #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS
+       #define SPP_NO_CXX11_RVALUE_REFERENCES
+       #define SPP_NO_CXX11_STATIC_ASSERT
+    #endif
+
+    // C++0x features in 4.4.n and later
+    //
+    #if (SPP_GCC_VERSION < 40400) || !defined(SPP_GCC_CXX11)
+       #define SPP_NO_CXX11_AUTO_DECLARATIONS
+       #define SPP_NO_CXX11_AUTO_MULTIDECLARATIONS
+       #define SPP_NO_CXX11_CHAR16_T
+       #define SPP_NO_CXX11_CHAR32_T
+       #define SPP_NO_CXX11_HDR_INITIALIZER_LIST
+       #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS
+       #define SPP_NO_CXX11_DELETED_FUNCTIONS
+       #define SPP_NO_CXX11_TRAILING_RESULT_TYPES
+       #define SPP_NO_CXX11_INLINE_NAMESPACES
+       #define SPP_NO_CXX11_VARIADIC_TEMPLATES
+    #endif
+
+    #if SPP_GCC_VERSION < 40500
+       #define SPP_NO_SFINAE_EXPR
+    #endif
+
+    // GCC 4.5 forbids declaration of defaulted functions in private or protected sections
+    #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 5) || !defined(SPP_GCC_CXX11)
+       #define SPP_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS
+    #endif
+
+    // C++0x features in 4.5.0 and later
+    //
+    #if (SPP_GCC_VERSION < 40500) || !defined(SPP_GCC_CXX11)
+       #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS
+       #define SPP_NO_CXX11_LAMBDAS
+       #define SPP_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS
+       #define SPP_NO_CXX11_RAW_LITERALS
+    #endif
+
+    // C++0x features in 4.6.n and later
+    //
+    #if (SPP_GCC_VERSION < 40600) || !defined(SPP_GCC_CXX11)
+        #define SPP_NO_CXX11_CONSTEXPR
+        #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX
+    #endif
+
+    // C++0x features in 4.7.n and later
+    //
+    #if (SPP_GCC_VERSION < 40700) || !defined(SPP_GCC_CXX11)
+        #define SPP_NO_CXX11_FINAL
+        #define SPP_NO_CXX11_TEMPLATE_ALIASES
+        #define SPP_NO_CXX11_USER_DEFINED_LITERALS
+        #define SPP_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS
+    #endif
+
+    // C++0x features in 4.8.n and later
+    //
+    #if (SPP_GCC_VERSION < 40800) || !defined(SPP_GCC_CXX11)
+        #define SPP_NO_CXX11_ALIGNAS
+    #endif
+
+    // C++0x features in 4.8.1 and later
+    //
+    #if (SPP_GCC_VERSION < 40801) || !defined(SPP_GCC_CXX11)
+        #define SPP_NO_CXX11_DECLTYPE_N3276
+        #define SPP_NO_CXX11_REF_QUALIFIERS
+        #define SPP_NO_CXX14_BINARY_LITERALS
+    #endif
+
+    // C++14 features in 4.9.0 and later
+    //
+    #if (SPP_GCC_VERSION < 40900) || (__cplusplus < 201300)
+        #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION
+        #define SPP_NO_CXX14_GENERIC_LAMBDAS
+        #define SPP_NO_CXX14_DIGIT_SEPARATORS
+        #define SPP_NO_CXX14_DECLTYPE_AUTO
+        #if !((SPP_GCC_VERSION >= 40801) && (SPP_GCC_VERSION < 40900) && defined(SPP_GCC_CXX11))
+            #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES
+        #endif
+    #endif
+
+
+    // C++ 14:
+    #if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)
+        #define SPP_NO_CXX14_CONSTEXPR
+    #endif
+    #if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)
+        #define SPP_NO_CXX14_VARIABLE_TEMPLATES
+    #endif
+
+    //
+    // Unused attribute:
+    #if __GNUC__ >= 4
+        #define SPP_ATTRIBUTE_UNUSED __attribute__((__unused__))
+    #endif
+    //
+    // __builtin_unreachable:
+    #if SPP_GCC_VERSION >= 40800
+        #define SPP_UNREACHABLE_RETURN(x) __builtin_unreachable();
+    #endif
+
+    #ifndef SPP_COMPILER
+        #define SPP_COMPILER "GNU C++ version " __VERSION__
+    #endif
+
+    // ConceptGCC compiler:
+    //   http://www.generic-programming.org/software/ConceptGCC/
+    #ifdef __GXX_CONCEPTS__
+        #define SPP_HAS_CONCEPTS
+        #define SPP_COMPILER "ConceptGCC version " __VERSION__
+    #endif
+
+#elif defined _MSC_VER
+
+    #include <intrin.h>                     // for __popcnt()
+
+    #define SPP_POPCNT_CHECK  // slower when defined, but we have to check!
+    #define spp_cpuid(info, x)    __cpuid(info, x)
+
+    #define SPP_POPCNT __popcnt
+    #if (SPP_GROUP_SIZE == 64 && INTPTR_MAX == INT64_MAX)
+        #define SPP_POPCNT64 __popcnt64
+    #endif
+
+    // Attempt to suppress VC6 warnings about the length of decorated names (obsolete):
+    #pragma warning( disable : 4503 ) // warning: decorated name length exceeded
+
+    #define SPP_HAS_PRAGMA_ONCE
+    #define SPP_HAS_CSTDINT
+
+    //
+    // version check:
+    // we don't support Visual C++ prior to version 7.1:
+    #if _MSC_VER < 1310
+        #error "Antique compiler not supported"
+    #endif
+
+    #if _MSC_FULL_VER < 180020827
+        #define SPP_NO_FENV_H
+    #endif
+
+    #if _MSC_VER < 1400
+        // although a conforming signature for swprintf exists in VC7.1
+        // it appears not to actually work:
+        #define SPP_NO_SWPRINTF
+
+        // Our extern template tests also fail for this compiler:
+        #define SPP_NO_CXX11_EXTERN_TEMPLATE
+
+        // Variadic macros do not exist for VC7.1 and lower
+        #define SPP_NO_CXX11_VARIADIC_MACROS
+    #endif
+
+    #if _MSC_VER < 1500  // 140X == VC++ 8.0
+        #undef SPP_HAS_CSTDINT
+        #define SPP_NO_MEMBER_TEMPLATE_FRIENDS
+    #endif
+
+    #if _MSC_VER < 1600  // 150X == VC++ 9.0
+        // A bug in VC9:
+        #define SPP_NO_ADL_BARRIER
+    #endif
+
+
+    // MSVC (including the latest checked version) has not yet completely
+    // implemented value-initialization, as is reported:
+    // "VC++ does not value-initialize members of derived classes without
+    // user-declared constructor", reported in 2009 by Sylvester Hesp:
+    // https://connect.microsoft.com/VisualStudio/feedback/details/484295
+    // "Presence of copy constructor breaks member class initialization",
+    // reported in 2009 by Alex Vakulenko:
+    // https://connect.microsoft.com/VisualStudio/feedback/details/499606
+    // "Value-initialization in new-expression", reported in 2005 by
+    // Pavel Kuznetsov (MetaCommunications Engineering):
+    // https://connect.microsoft.com/VisualStudio/feedback/details/100744
+    // See also: http://www.boost.org/libs/utility/value_init.htm#compiler_issues
+    // (Niels Dekker, LKEB, May 2010)
+    #define SPP_NO_COMPLETE_VALUE_INITIALIZATION
+
+    #ifndef _NATIVE_WCHAR_T_DEFINED
+        #define SPP_NO_INTRINSIC_WCHAR_T
+    #endif
+
+    //
+    // check for exception handling support:
+    #if !defined(_CPPUNWIND) && !defined(SPP_NO_EXCEPTIONS)
+        #define SPP_NO_EXCEPTIONS
+    #endif
+
+    //
+    // __int64 support:
+    //
+    #define SPP_HAS_MS_INT64
+    #if defined(_MSC_EXTENSIONS) || (_MSC_VER >= 1400)
+        #define SPP_HAS_LONG_LONG
+    #else
+        #define SPP_NO_LONG_LONG
+    #endif
+
+    #if (_MSC_VER >= 1400) && !defined(_DEBUG)
+        #define SPP_HAS_NRVO
+    #endif
+
+    #if _MSC_VER >= 1500  // 150X == VC++ 9.0
+        #define SPP_HAS_PRAGMA_DETECT_MISMATCH
+    #endif
+
+    //
+    // disable Win32 API's if compiler extensions are
+    // turned off:
+    //
+    #if !defined(_MSC_EXTENSIONS) && !defined(SPP_DISABLE_WIN32)
+        #define SPP_DISABLE_WIN32
+    #endif
+
+    #if !defined(_CPPRTTI) && !defined(SPP_NO_RTTI)
+        #define SPP_NO_RTTI
+    #endif
+
+    //
+    // TR1 features:
+    //
+    #if _MSC_VER >= 1700
+        //      #define SPP_HAS_TR1_HASH	// don't know if this is true yet.
+        //      #define SPP_HAS_TR1_TYPE_TRAITS	// don't know if this is true yet.
+        #define SPP_HAS_TR1_UNORDERED_MAP
+        #define SPP_HAS_TR1_UNORDERED_SET
+    #endif
+
+    //
+    // C++0x features
+    //
+    //   See above for SPP_NO_LONG_LONG
+
+    // C++ features supported by VC++ 10 (aka 2010)
+    //
+    #if _MSC_VER < 1600
+        #define SPP_NO_CXX11_AUTO_DECLARATIONS
+        #define SPP_NO_CXX11_AUTO_MULTIDECLARATIONS
+        #define SPP_NO_CXX11_LAMBDAS
+        #define SPP_NO_CXX11_RVALUE_REFERENCES
+        #define SPP_NO_CXX11_STATIC_ASSERT
+        #define SPP_NO_CXX11_DECLTYPE
+    #endif // _MSC_VER < 1600
+
+    #if _MSC_VER >= 1600
+        #define SPP_HAS_STDINT_H
+    #endif
+
+    // C++11 features supported by VC++ 11 (aka 2012)
+    //
+    #if _MSC_VER < 1700
+        #define SPP_NO_CXX11_FINAL
+    #endif // _MSC_VER < 1700
+
+    // C++11 features supported by VC++ 12 (aka 2013).
+    //
+    #if _MSC_FULL_VER < 180020827
+        #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS
+        #define SPP_NO_CXX11_DELETED_FUNCTIONS
+        #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS
+        #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS
+        #define SPP_NO_CXX11_RAW_LITERALS
+        #define SPP_NO_CXX11_TEMPLATE_ALIASES
+        #define SPP_NO_CXX11_TRAILING_RESULT_TYPES
+        #define SPP_NO_CXX11_VARIADIC_TEMPLATES
+        #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX
+        #define SPP_NO_CXX11_DECLTYPE_N3276
+    #endif
+
+    // C++11 features supported by VC++ 14 (aka 2014) CTP1
+    #if (_MSC_FULL_VER < 190021730)
+        #define SPP_NO_CXX11_REF_QUALIFIERS
+        #define SPP_NO_CXX11_USER_DEFINED_LITERALS
+        #define SPP_NO_CXX11_ALIGNAS
+        #define SPP_NO_CXX11_INLINE_NAMESPACES
+        #define SPP_NO_CXX14_DECLTYPE_AUTO
+        #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES
+        #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION
+        #define SPP_NO_CXX11_HDR_INITIALIZER_LIST
+    #endif
+
+    // C++11 features not supported in any version
+    #define SPP_NO_CXX11_CHAR16_T
+    #define SPP_NO_CXX11_CHAR32_T
+    #define SPP_NO_CXX11_CONSTEXPR
+    #define SPP_NO_SFINAE_EXPR
+    #define SPP_NO_TWO_PHASE_NAME_LOOKUP
+
+    // C++ 14:
+    #if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304)
+        #define SPP_NO_CXX14_BINARY_LITERALS
+    #endif
+
+    #if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304)
+        #define SPP_NO_CXX14_CONSTEXPR
+    #endif
+
+    #if (__cplusplus < 201304) // There's no SD6 check for this....
+        #define SPP_NO_CXX14_DIGIT_SEPARATORS
+    #endif
+
+    #if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304)
+        #define SPP_NO_CXX14_GENERIC_LAMBDAS
+    #endif
+
+    #if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304)
+         #define SPP_NO_CXX14_VARIABLE_TEMPLATES
+    #endif
+
+#endif
+
+// from boost/config/suffix.hpp
+// ----------------------------
+#ifndef SPP_ATTRIBUTE_UNUSED
+    #define SPP_ATTRIBUTE_UNUSED
+#endif
+
+/*
+  Try to persuade compilers to inline. 
+*/
+#ifndef SPP_FORCEINLINE
+    #if defined(__GNUC__)
+        #define SPP_FORCEINLINE __inline __attribute__ ((always_inline))
+    #elif defined(_MSC_VER)
+        #define SPP_FORCEINLINE __forceinline
+    #else
+        #define SPP_FORCEINLINE inline
+    #endif
+#endif
+
+
+#endif // spp_config_h_guard
diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_dlalloc.h b/resources/3rdparty/sparsepp/sparsepp/spp_dlalloc.h
new file mode 100755
index 000000000..8e063fbab
--- /dev/null
+++ b/resources/3rdparty/sparsepp/sparsepp/spp_dlalloc.h
@@ -0,0 +1,4023 @@
+#ifndef spp_dlalloc__h_
+#define spp_dlalloc__h_
+
+/* This is a C++ allocator created from Doug Lea's dlmalloc
+   (Version 2.8.6 Wed Aug 29 06:57:58 2012)
+   see: http://g.oswego.edu/dl/html/malloc.html
+*/
+
+#include <sparsepp/spp_utils.h>
+#include <sparsepp/spp_smartptr.h>
+
+
+#ifndef SPP_FORCEINLINE
+    #if defined(__GNUC__)
+        #define SPP_FORCEINLINE __inline __attribute__ ((always_inline))
+    #elif defined(_MSC_VER)
+        #define SPP_FORCEINLINE __forceinline
+    #else
+        #define SPP_FORCEINLINE inline
+    #endif
+#endif
+
+
+#ifndef SPP_IMPL
+    #define SPP_IMPL SPP_FORCEINLINE
+#endif
+
+#ifndef SPP_API
+    #define SPP_API  static
+#endif
+
+
+namespace spp
+{
+    // ---------------------- allocator internal API -----------------------
+    typedef void* mspace;
+
+    /*
+      create_mspace creates and returns a new independent space with the
+      given initial capacity, or, if 0, the default granularity size.  It
+      returns null if there is no system memory available to create the
+      space.  If argument locked is non-zero, the space uses a separate
+      lock to control access. The capacity of the space will grow
+      dynamically as needed to service mspace_malloc requests.  You can
+      control the sizes of incremental increases of this space by
+      compiling with a different SPP_DEFAULT_GRANULARITY or dynamically
+      setting with mallopt(M_GRANULARITY, value).
+    */
+    SPP_API mspace create_mspace(size_t capacity, int locked);
+    SPP_API size_t destroy_mspace(mspace msp);
+    SPP_API void*  mspace_malloc(mspace msp, size_t bytes);
+    SPP_API void   mspace_free(mspace msp, void* mem);
+    SPP_API void*  mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+#if 0
+    SPP_API mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+    SPP_API int    mspace_track_large_chunks(mspace msp, int enable);
+    SPP_API void*  mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+    SPP_API void*  mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+    SPP_API void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                             size_t elem_size, void* chunks[]);
+    SPP_API void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                               size_t sizes[], void* chunks[]);
+    SPP_API size_t mspace_footprint(mspace msp);
+    SPP_API size_t mspace_max_footprint(mspace msp);
+    SPP_API size_t mspace_usable_size(const void* mem);
+    SPP_API int    mspace_trim(mspace msp, size_t pad);
+    SPP_API int    mspace_mallopt(int, int);
+#endif
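+
+    // Usage sketch for the internal API above (illustrative; within sparsepp it
+    // is normally driven by spp_allocator further below rather than called directly):
+    //
+    //     spp::mspace sp = spp::create_mspace(0, 0);   // default capacity, no locking
+    //     void* p = spp::mspace_malloc(sp, 128);
+    //     p = spp::mspace_realloc(sp, p, 256);
+    //     spp::mspace_free(sp, p);
+    //     spp::destroy_mspace(sp);                     // releases the whole space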
+
+    // -----------------------------------------------------------
+    // -----------------------------------------------------------
+    template<class T>
+    class spp_allocator
+    {
+    public:
+        typedef T         value_type;
+        typedef T*        pointer;
+        typedef ptrdiff_t difference_type;
+        typedef const T*  const_pointer;
+        typedef size_t    size_type;
+
+        spp_allocator() : _space(new MSpace) {}
+
+        void swap(spp_allocator &o)
+        {
+            std::swap(_space, o._space);
+        }
+
+        pointer allocate(size_t n, const_pointer  /* unused */ = 0)
+        {
+            pointer res = static_cast<pointer>(mspace_malloc(_space->_sp, n * sizeof(T)));
+            if (!res)
+                throw std::bad_alloc();
+            return res;
+        }
+
+        void deallocate(pointer p, size_t /* unused */)
+        {
+            mspace_free(_space->_sp, p);
+        }
+
+        pointer reallocate(pointer p, size_t new_size)
+        {
+            pointer res = static_cast<pointer>(mspace_realloc(_space->_sp, p, new_size * sizeof(T)));
+            if (!res)
+                throw std::bad_alloc();
+            return res;
+        }
+
+        size_type max_size() const
+        {
+            return static_cast<size_type>(-1) / sizeof(value_type);
+        }
+
+        void construct(pointer p, const value_type& val)
+        {
+            new (p) value_type(val);
+        }
+
+        void destroy(pointer p) { p->~value_type(); }
+
+        template<class U>
+        struct rebind
+        {
+            // rebind to libc_allocator because we want to use malloc_inspect_all in destructive_iterator 
+            // to reduce peak memory usage (we don't want <group_items> mixed with value_type when 
+            // we traverse the allocated memory).
+            typedef spp::spp_allocator<U> other;
+        };
+
+        mspace space() const { return _space->_sp; }
+
+        // check if we can clear the whole allocator memory at once => works only if the allocator 
+    // is not shared. If can_clear() returns true, we expect that the next allocator call
+        // will be clear() - not allocate() or deallocate()
+        bool can_clear()
+        {
+            assert(!_space_to_clear);
+            _space_to_clear.reset();
+            _space_to_clear.swap(_space);
+            if (_space_to_clear->count() == 1)
+                return true;
+            else
+                _space_to_clear.swap(_space);
+            return false;
+        }
+
+        void clear()
+        {
+            assert(!_space && _space_to_clear);
+            _space_to_clear.reset();
+            _space = new MSpace;
+        }
+        
+    private:
+        struct MSpace : public spp_rc
+        {
+            MSpace() :
+                _sp(create_mspace(0, 0))
+            {}
+
+            ~MSpace()
+            {
+                destroy_mspace(_sp);
+            }
+
+            mspace _sp;
+        };
+
+        spp_sptr<MSpace> _space;
+        spp_sptr<MSpace> _space_to_clear;
+    };
+}
+
+
+// allocators are "equal" whenever memory allocated with one can be deallocated with the other
+template<class T>
+inline bool operator==(const spp_::spp_allocator<T> &a, const spp_::spp_allocator<T> &b)
+{
+    return a.space() == b.space();
+}
+
+template<class T>
+inline bool operator!=(const spp_::spp_allocator<T> &a, const spp_::spp_allocator<T> &b)
+{
+    return !(a == b);
+}
+
+namespace std
+{
+    template <class T>
+    inline void swap(spp_::spp_allocator<T> &a, spp_::spp_allocator<T> &b)
+    {
+        a.swap(b);
+    }
+}
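+
+// Usage sketch for spp_allocator on its own (illustrative; in sparsepp it is
+// normally selected indirectly via SPP_DEFAULT_ALLOCATOR in spp_config.h):
+//
+//     spp::spp_allocator<int> alloc;
+//     int* p = alloc.allocate(4);        // space for 4 ints from alloc's private mspace
+//     alloc.construct(p, 42);            // placement-new the first element
+//     alloc.destroy(p);
+//     alloc.deallocate(p, 4);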
+
+#if !defined(SPP_EXCLUDE_IMPLEMENTATION)
+
+#ifndef WIN32
+    #ifdef _WIN32
+        #define WIN32 1
+    #endif
+    #ifdef _WIN32_WCE
+        #define SPP_LACKS_FCNTL_H
+        #define WIN32 1
+    #endif
+#endif
+
+#ifdef WIN32
+    #define WIN32_LEAN_AND_MEAN
+    #include <windows.h>
+    #include <tchar.h>
+    #define SPP_HAVE_MMAP 1
+    #define SPP_LACKS_UNISTD_H
+    #define SPP_LACKS_SYS_PARAM_H
+    #define SPP_LACKS_SYS_MMAN_H
+    #define SPP_LACKS_STRING_H
+    #define SPP_LACKS_STRINGS_H
+    #define SPP_LACKS_SYS_TYPES_H
+    #define SPP_LACKS_ERRNO_H
+    #define SPP_LACKS_SCHED_H
+    #ifndef SPP_MALLOC_FAILURE_ACTION
+        #define SPP_MALLOC_FAILURE_ACTION
+    #endif
+    #ifndef SPP_MMAP_CLEARS
+        #ifdef _WIN32_WCE /* WINCE reportedly does not clear */
+            #define SPP_MMAP_CLEARS 0
+        #else
+            #define SPP_MMAP_CLEARS 1
+        #endif
+    #endif
+#endif
+
+#if defined(DARWIN) || defined(_DARWIN)
+    #define SPP_HAVE_MMAP 1
+    /* OSX allocators provide 16 byte alignment */
+    #ifndef SPP_MALLOC_ALIGNMENT
+        #define SPP_MALLOC_ALIGNMENT ((size_t)16U)
+    #endif
+#endif
+
+#ifndef SPP_LACKS_SYS_TYPES_H
+    #include <sys/types.h>  /* For size_t */
+#endif
+
+#ifndef SPP_MALLOC_ALIGNMENT
+    #define SPP_MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *)))
+#endif
+
+/* ------------------- size_t and alignment properties -------------------- */
+static const size_t spp_max_size_t = ~(size_t)0;
+static const size_t spp_size_t_bitsize = sizeof(size_t) << 3;
+static const size_t spp_half_max_size_t = spp_max_size_t / 2U;
+static const size_t spp_chunk_align_mask = SPP_MALLOC_ALIGNMENT - 1;
+
+#if defined(SPP_DEBUG) || !defined(NDEBUG)
+static bool spp_is_aligned(void *p) { return ((size_t)p & spp_chunk_align_mask) == 0; }
+#endif
+
+// the number of bytes to offset an address to align it
+static size_t align_offset(void *p)
+{
+    return (((size_t)p & spp_chunk_align_mask) == 0) ? 0 :
+           ((SPP_MALLOC_ALIGNMENT - ((size_t)p & spp_chunk_align_mask)) & spp_chunk_align_mask);
+}
+
+
+#ifndef SPP_FOOTERS
+    #define SPP_FOOTERS 0
+#endif
+
+#ifndef SPP_ABORT
+    #define SPP_ABORT  abort()
+#endif
+
+#ifndef SPP_ABORT_ON_ASSERT_FAILURE
+    #define SPP_ABORT_ON_ASSERT_FAILURE 1
+#endif
+
+#ifndef SPP_PROCEED_ON_ERROR
+    #define SPP_PROCEED_ON_ERROR 0
+#endif
+
+#ifndef SPP_INSECURE
+    #define SPP_INSECURE 0
+#endif
+
+#ifndef SPP_MALLOC_INSPECT_ALL
+    #define SPP_MALLOC_INSPECT_ALL 0
+#endif
+
+#ifndef SPP_HAVE_MMAP
+    #define SPP_HAVE_MMAP 1
+#endif
+
+#ifndef SPP_MMAP_CLEARS
+    #define SPP_MMAP_CLEARS 1
+#endif
+
+#ifndef SPP_HAVE_MREMAP
+    #ifdef linux
+        #define SPP_HAVE_MREMAP 1
+        #ifndef _GNU_SOURCE
+            #define _GNU_SOURCE /* Turns on mremap() definition */
+        #endif
+    #else
+        #define SPP_HAVE_MREMAP 0
+    #endif
+#endif
+
+#ifndef SPP_MALLOC_FAILURE_ACTION
+    #define SPP_MALLOC_FAILURE_ACTION  errno = ENOMEM
+#endif
+
+
+#ifndef SPP_DEFAULT_GRANULARITY
+    #if defined(WIN32)
+        #define SPP_DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
+    #else
+        #define SPP_DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+    #endif
+#endif
+
+#ifndef SPP_DEFAULT_TRIM_THRESHOLD
+    #define SPP_DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#endif
+
+#ifndef SPP_DEFAULT_MMAP_THRESHOLD
+    #if SPP_HAVE_MMAP
+        #define SPP_DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
+    #else
+        #define SPP_DEFAULT_MMAP_THRESHOLD spp_max_size_t
+    #endif
+#endif
+
+#ifndef SPP_MAX_RELEASE_CHECK_RATE
+    #if SPP_HAVE_MMAP
+        #define SPP_MAX_RELEASE_CHECK_RATE 4095
+    #else
+        #define SPP_MAX_RELEASE_CHECK_RATE spp_max_size_t
+    #endif
+#endif
+
+#ifndef SPP_USE_BUILTIN_FFS
+    #define SPP_USE_BUILTIN_FFS 0
+#endif
+
+#ifndef SPP_USE_DEV_RANDOM
+    #define SPP_USE_DEV_RANDOM 0
+#endif
+
+#ifndef SPP_NO_SEGMENT_TRAVERSAL
+    #define SPP_NO_SEGMENT_TRAVERSAL 0
+#endif
+
+
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef _MSC_VER
+    #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif
+#ifndef SPP_LACKS_ERRNO_H
+    #include <errno.h>       /* for SPP_MALLOC_FAILURE_ACTION */
+#endif
+
+#ifdef SPP_DEBUG
+    #if SPP_ABORT_ON_ASSERT_FAILURE
+        #undef assert
+        #define assert(x) if(!(x)) SPP_ABORT
+    #else
+        #include <assert.h>
+    #endif
+#else
+    #ifndef assert
+        #define assert(x)
+    #endif
+    #define SPP_DEBUG 0
+#endif
+
+#if !defined(WIN32) && !defined(SPP_LACKS_TIME_H)
+    #include <time.h>        /* for magic initialization */
+#endif
+
+#ifndef SPP_LACKS_STDLIB_H
+    #include <stdlib.h>      /* for abort() */
+#endif
+
+#ifndef SPP_LACKS_STRING_H
+    #include <string.h>      /* for memset etc */
+#endif
+
+#if SPP_USE_BUILTIN_FFS
+    #ifndef SPP_LACKS_STRINGS_H
+        #include <strings.h>     /* for ffs */
+    #endif
+#endif
+
+#if SPP_HAVE_MMAP
+    #ifndef SPP_LACKS_SYS_MMAN_H
+        /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
+        #if (defined(linux) && !defined(__USE_GNU))
+            #define __USE_GNU 1
+            #include <sys/mman.h>    /* for mmap */
+            #undef __USE_GNU
+        #else
+            #include <sys/mman.h>    /* for mmap */
+        #endif
+    #endif
+    #ifndef SPP_LACKS_FCNTL_H
+        #include <fcntl.h>
+    #endif
+#endif
+
+#ifndef SPP_LACKS_UNISTD_H
+    #include <unistd.h>     /* for sbrk, sysconf */
+#else
+    #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+        extern void*     sbrk(ptrdiff_t);
+    #endif
+#endif
+
+#include <new>
+
+namespace spp
+{
+
+/* Declarations for bit scanning on win32 */
+#if defined(_MSC_VER) && _MSC_VER>=1300
+    #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
+        extern "C" {
+            unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
+            unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
+        }
+        
+        #define BitScanForward _BitScanForward
+        #define BitScanReverse _BitScanReverse
+        #pragma intrinsic(_BitScanForward)
+        #pragma intrinsic(_BitScanReverse)
+    #endif /* BitScanForward */
+#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
+
+#ifndef WIN32
+    #ifndef malloc_getpagesize
+        #ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
+            #ifndef _SC_PAGE_SIZE
+                #define _SC_PAGE_SIZE _SC_PAGESIZE
+            #endif
+        #endif
+        #ifdef _SC_PAGE_SIZE
+            #define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+        #else
+            #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+                extern size_t getpagesize();
+                #define malloc_getpagesize getpagesize()
+            #else
+                #ifdef WIN32 /* use supplied emulation of getpagesize */
+                    #define malloc_getpagesize getpagesize()
+                #else
+                    #ifndef SPP_LACKS_SYS_PARAM_H
+                        #include <sys/param.h>
+                    #endif
+                    #ifdef EXEC_PAGESIZE
+                        #define malloc_getpagesize EXEC_PAGESIZE
+                    #else
+                        #ifdef NBPG
+                            #ifndef CLSIZE
+                                #define malloc_getpagesize NBPG
+                            #else
+                                #define malloc_getpagesize (NBPG * CLSIZE)
+                            #endif
+                        #else
+                            #ifdef NBPC
+                                #define malloc_getpagesize NBPC
+                            #else
+                                #ifdef PAGESIZE
+                                    #define malloc_getpagesize PAGESIZE
+                                #else /* just guess */
+                                    #define malloc_getpagesize ((size_t)4096U)
+                                #endif
+                            #endif
+                        #endif
+                    #endif
+                #endif
+            #endif
+        #endif
+    #endif
+#endif
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+   If SPP_HAVE_MORECORE or SPP_HAVE_MMAP is false, we just define calls and
+   checks to fail so the compiler optimizer can delete code rather than
+   using so many "#if"s.
+*/
+
+
+/* MMAP must return mfail on failure */
+static void *mfail  = (void*)spp_max_size_t;
+static char *cmfail = (char*)mfail;
+
+#if SPP_HAVE_MMAP
+
+#ifndef WIN32
+    #define SPP_MUNMAP_DEFAULT(a, s)  munmap((a), (s))
+    #define SPP_MMAP_PROT            (PROT_READ | PROT_WRITE)
+    #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+        #define MAP_ANONYMOUS        MAP_ANON
+    #endif
+    
+    #ifdef MAP_ANONYMOUS
+        #define SPP_MMAP_FLAGS           (MAP_PRIVATE | MAP_ANONYMOUS)
+        #define SPP_MMAP_DEFAULT(s)       mmap(0, (s), SPP_MMAP_PROT, SPP_MMAP_FLAGS, -1, 0)
+    #else /* MAP_ANONYMOUS */
+        /*
+           Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+           is unlikely to be needed, but is supplied just in case.
+        */
+        #define SPP_MMAP_FLAGS           (MAP_PRIVATE)
+        static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+        static SPP_FORCEINLINE void* SPP_MMAP_DEFAULT(size_t s)
+        {
+            if (dev_zero_fd < 0)
+                dev_zero_fd = open("/dev/zero", O_RDWR);
+            return mmap(0, s, SPP_MMAP_PROT, SPP_MMAP_FLAGS, dev_zero_fd, 0);
+        }
+    #endif /* MAP_ANONYMOUS */
+    
+    #define SPP_DIRECT_MMAP_DEFAULT(s) SPP_MMAP_DEFAULT(s)
+    
+#else /* WIN32 */
+    
+    /* Win32 MMAP via VirtualAlloc */
+    static SPP_FORCEINLINE void* win32mmap(size_t size)
+    {
+        void* ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+        return (ptr != 0) ? ptr : mfail;
+    }
+    
+    /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+    static SPP_FORCEINLINE void* win32direct_mmap(size_t size)
+    {
+        void* ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
+                                 PAGE_READWRITE);
+        return (ptr != 0) ? ptr : mfail;
+    }
+    
+    /* This function supports releasing coalesced segments */
+    static SPP_FORCEINLINE int win32munmap(void* ptr, size_t size)
+    {
+        MEMORY_BASIC_INFORMATION minfo;
+        char* cptr = (char*)ptr;
+        while (size)
+        {
+            if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+                return -1;
+            if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+                    minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+                return -1;
+            if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+                return -1;
+            cptr += minfo.RegionSize;
+            size -= minfo.RegionSize;
+        }
+        return 0;
+    }
+    
+    #define SPP_MMAP_DEFAULT(s)             win32mmap(s)
+    #define SPP_MUNMAP_DEFAULT(a, s)        win32munmap((a), (s))
+    #define SPP_DIRECT_MMAP_DEFAULT(s)      win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* SPP_HAVE_MMAP */
+
+#if SPP_HAVE_MREMAP
+    #ifndef WIN32
+        #define SPP_MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+    #endif
+#endif
+
+/**
+ * Define SPP_CALL_MMAP/SPP_CALL_MUNMAP/SPP_CALL_DIRECT_MMAP
+ */
+#if SPP_HAVE_MMAP
+    #define USE_MMAP_BIT                1
+
+    #ifdef SPP_MMAP
+        #define SPP_CALL_MMAP(s)        SPP_MMAP(s)
+    #else
+        #define SPP_CALL_MMAP(s)        SPP_MMAP_DEFAULT(s)
+    #endif
+
+    #ifdef SPP_MUNMAP
+        #define SPP_CALL_MUNMAP(a, s)   SPP_MUNMAP((a), (s))
+    #else
+        #define SPP_CALL_MUNMAP(a, s)   SPP_MUNMAP_DEFAULT((a), (s))
+    #endif
+
+    #ifdef SPP_DIRECT_MMAP
+        #define SPP_CALL_DIRECT_MMAP(s) SPP_DIRECT_MMAP(s)
+    #else
+        #define SPP_CALL_DIRECT_MMAP(s) SPP_DIRECT_MMAP_DEFAULT(s)
+    #endif
+
+#else  /* SPP_HAVE_MMAP */
+    #define USE_MMAP_BIT            0
+
+    #define SPP_MMAP(s)                 mfail
+    #define SPP_MUNMAP(a, s)            (-1)
+    #define SPP_DIRECT_MMAP(s)          mfail
+    #define SPP_CALL_DIRECT_MMAP(s)     SPP_DIRECT_MMAP(s)
+    #define SPP_CALL_MMAP(s)            SPP_MMAP(s)
+    #define SPP_CALL_MUNMAP(a, s)       SPP_MUNMAP((a), (s))
+#endif
+
+/**
+ * Define SPP_CALL_MREMAP
+ */
+#if SPP_HAVE_MMAP && SPP_HAVE_MREMAP
+    #ifdef MREMAP
+        #define SPP_CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
+    #else
+        #define SPP_CALL_MREMAP(addr, osz, nsz, mv) SPP_MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
+    #endif
+#else
+    #define SPP_CALL_MREMAP(addr, osz, nsz, mv)     mfail
+#endif
+
+/* mstate bit set if contiguous morecore disabled or failed */
+static const unsigned USE_NONCONTIGUOUS_BIT = 4U;
+
+/* segment bit set in create_mspace_with_base */
+static const unsigned EXTERN_BIT = 8U;
+
+
+/* --------------------------- flags ------------------------ */
+
+static const unsigned PINUSE_BIT = 1;
+static const unsigned CINUSE_BIT = 2;
+static const unsigned FLAG4_BIT  = 4;
+static const unsigned INUSE_BITS = (PINUSE_BIT | CINUSE_BIT);
+static const unsigned FLAG_BITS  = (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT);
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#if SPP_FOOTERS
+    static const unsigned CHUNK_OVERHEAD = 2 * sizeof(size_t);
+#else
+    static const unsigned CHUNK_OVERHEAD = sizeof(size_t);
+#endif
+
+/* MMapped chunks need a second word of overhead ... */
+static const unsigned SPP_MMAP_CHUNK_OVERHEAD = 2 * sizeof(size_t);
+
+/* ... and additional padding for fake next-chunk at foot */
+static const unsigned SPP_MMAP_FOOT_PAD = 4 * sizeof(size_t);
+
+// ===============================================================================
+struct malloc_chunk_header
+{
+    void set_size_and_pinuse_of_free_chunk(size_t s)
+    {
+        _head = s | PINUSE_BIT;
+        set_foot(s);
+    }
+
+    void set_foot(size_t s)
+    {
+        ((malloc_chunk_header *)((char*)this + s))->_prev_foot = s;
+    }
+
+    // extraction of fields from head words
+    bool cinuse() const        { return !!(_head & CINUSE_BIT); }
+    bool pinuse() const        { return !!(_head & PINUSE_BIT); }
+    bool flag4inuse() const    { return !!(_head & FLAG4_BIT); }
+    bool is_inuse() const      { return (_head & INUSE_BITS) != PINUSE_BIT; }
+    bool is_mmapped() const    { return (_head & INUSE_BITS) == 0; }
+
+    size_t chunksize() const   { return _head & ~(FLAG_BITS); }
+
+    void clear_pinuse()        { _head &= ~PINUSE_BIT; }
+    void set_flag4()           { _head |= FLAG4_BIT; }
+    void clear_flag4()         { _head &= ~FLAG4_BIT; }
+
+    // Treat space at ptr +/- offset as a chunk
+    malloc_chunk_header * chunk_plus_offset(size_t s)
+    {
+        return (malloc_chunk_header *)((char*)this + s);
+    }
+    malloc_chunk_header * chunk_minus_offset(size_t s)
+    {
+        return (malloc_chunk_header *)((char*)this - s);
+    }
+
+    // Ptr to next or previous physical malloc_chunk.
+    malloc_chunk_header * next_chunk()
+    {
+        return (malloc_chunk_header *)((char*)this + (_head & ~FLAG_BITS));
+    }
+    malloc_chunk_header * prev_chunk()
+    {
+        return (malloc_chunk_header *)((char*)this - (_prev_foot));
+    }
+
+    // extract next chunk's pinuse bit
+    size_t next_pinuse()  { return next_chunk()->_head & PINUSE_BIT; }
+
+    size_t   _prev_foot;  // Size of previous chunk (if free).
+    size_t   _head;       // Size and inuse bits.
+};
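+
+// Worked example of the head encoding above (illustrative): for a free 64-byte
+// chunk whose previous neighbor is in use,
+//
+//     _head == 64 | PINUSE_BIT               // == 65
+//     chunksize() == 64                      // FLAG_BITS masked out
+//     pinuse() == true,  cinuse() == false,  is_inuse() == false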
+
+// ===============================================================================
+struct malloc_chunk : public malloc_chunk_header
+{
+    // Set size, pinuse bit, foot, and clear next pinuse
+    void set_free_with_pinuse(size_t s, malloc_chunk* n)
+    {
+        n->clear_pinuse();
+        set_size_and_pinuse_of_free_chunk(s);
+    }
+
+    // Get the internal overhead associated with chunk p
+    size_t overhead_for() { return is_mmapped() ? SPP_MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD; }
+
+    // Return true if malloced space is not necessarily cleared
+    bool calloc_must_clear()
+    {
+#if SPP_MMAP_CLEARS
+        return !is_mmapped();
+#else
+        return true;
+#endif
+    }
+
+    struct malloc_chunk* _fd;         // double links -- used only if free.
+    struct malloc_chunk* _bk;
+};
+
+static const unsigned MCHUNK_SIZE = sizeof(malloc_chunk);
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+static const unsigned MIN_CHUNK_SIZE = (MCHUNK_SIZE + spp_chunk_align_mask) & ~spp_chunk_align_mask;
+
+typedef malloc_chunk  mchunk;
+typedef malloc_chunk* mchunkptr;
+typedef malloc_chunk_header *hchunkptr;
+typedef malloc_chunk* sbinptr;         // The type of bins of chunks
+typedef unsigned int bindex_t;         // Described below
+typedef unsigned int binmap_t;         // Described below
+typedef unsigned int flag_t;           // The type of various bit flag sets
+
+// conversion from malloc headers to user pointers, and back
+static SPP_FORCEINLINE void *chunk2mem(const void *p)       { return (void *)((char *)p + 2 * sizeof(size_t)); }
+static SPP_FORCEINLINE mchunkptr mem2chunk(const void *mem) { return (mchunkptr)((char *)mem - 2 * sizeof(size_t)); }
+
+// chunk associated with aligned address A
+static SPP_FORCEINLINE mchunkptr align_as_chunk(char *A)    { return (mchunkptr)(A + align_offset(chunk2mem(A))); }
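+
+// Layout sketch (illustrative, assuming 64-bit size_t): the two header words
+// _prev_foot and _head sit directly before the user memory, so
+//
+//     chunk2mem(c)  == (char*)c + 16          // skip the two size_t fields
+//     mem2chunk(chunk2mem(c)) == c            // round trip recovers the chunk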
+
+// Bounds on request (not chunk) sizes.
+static const unsigned MAX_REQUEST = (-MIN_CHUNK_SIZE) << 2;
+static const unsigned MIN_REQUEST = MIN_CHUNK_SIZE - CHUNK_OVERHEAD - 1;
+
+// pad request bytes into a usable size
+static SPP_FORCEINLINE size_t pad_request(size_t req)
+{
+    return (req + CHUNK_OVERHEAD + spp_chunk_align_mask) & ~spp_chunk_align_mask;
+}
+
+// pad request, checking for minimum (but not maximum)
+static SPP_FORCEINLINE size_t request2size(size_t req)
+{
+    return req < MIN_REQUEST ? MIN_CHUNK_SIZE : pad_request(req);
+}
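+
+// Worked numbers (illustrative, assuming 64-bit: sizeof(size_t) == 8, SPP_FOOTERS == 0
+// so CHUNK_OVERHEAD == 8, SPP_MALLOC_ALIGNMENT == 16 and MIN_CHUNK_SIZE == 32):
+//
+//     pad_request(20)  == (20 + 8 + 15) & ~15 == 32
+//     request2size(5)  == MIN_CHUNK_SIZE == 32      // 5 < MIN_REQUEST (== 23)
+//     request2size(40) == pad_request(40) == 48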
+
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+/*
+  The head field of a chunk is or'ed with PINUSE_BIT when previous
+  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
+  use, unless mmapped, in which case both bits are cleared.
+
+  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
+*/
+
+// Head value for fenceposts
+static const unsigned FENCEPOST_HEAD = INUSE_BITS | sizeof(size_t);
+
+
+/* ---------------------- Overlaid data structures ----------------------- */
+
+/*
+  When chunks are not in use, they are treated as nodes of either
+  lists or trees.
+
+  "Small"  chunks are stored in circular doubly-linked lists, and look
+  like this:
+
+    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Size of previous chunk                            |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `head:' |             Size of chunk, in bytes                         |P|
+      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Forward pointer to next chunk in list             |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Back pointer to previous chunk in list            |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Unused space (may be 0 bytes long)                .
+            .                                                               .
+            .                                                               |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `foot:' |             Size of chunk, in bytes                           |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+  Larger chunks are kept in a form of bitwise digital trees (aka
+  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
+  free chunks greater than 256 bytes, their size doesn't impose any
+  constraints on user chunk sizes.  Each node looks like:
+
+    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Size of previous chunk                            |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `head:' |             Size of chunk, in bytes                         |P|
+      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Forward pointer to next chunk of same size        |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Back pointer to previous chunk of same size       |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Pointer to left child (child[0])                  |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Pointer to right child (child[1])                 |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Pointer to parent                                 |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             bin index of this chunk                           |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Unused space                                      .
+            .                                                               |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `foot:' |             Size of chunk, in bytes                           |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
+  of the same size are arranged in a circularly-linked list, with only
+  the oldest chunk (the next to be used, in our FIFO ordering)
+  actually in the tree.  (Tree members are distinguished by a non-null
+  parent pointer.)  If a chunk with the same size as an existing node
+  is inserted, it is linked off the existing node using pointers that
+  work in the same way as fd/bk pointers of small chunks.
+
+  Each tree contains a power of 2 sized range of chunk sizes (the
+  smallest is 0x100 <= x < 0x180), which is divided in half at each
+  tree level, with the chunks in the smaller half of the range (0x100
+  <= x < 0x140 for the top node) in the left subtree and the larger
+  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
+  done by inspecting individual bits.
+
+  Using these rules, each node's left subtree contains all smaller
+  sizes than its right subtree.  However, the node at the root of each
+  subtree has no particular ordering relationship to either.  (The
+  dividing line between the subtree sizes is based on trie relation.)
+  If we remove the last chunk of a given size from the interior of the
+  tree, we need to replace it with a leaf node.  The tree ordering
+  rules permit a node to be replaced by any leaf below it.
+
+  The smallest chunk in a tree (a common operation in a best-fit
+  allocator) can be found by walking a path to the leftmost leaf in
+  the tree.  Unlike a usual binary tree, where we follow left child
+  pointers until we reach a null, here we follow the right child
+  pointer any time the left one is null, until we reach a leaf with
+  both child pointers null. The smallest chunk in the tree will be
+  somewhere along that path.
+
+  The worst case number of steps to add, find, or remove a node is
+  bounded by the number of bits differentiating chunks within
+  bins. Under current bin calculations, this ranges from 6 up to 21
+  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
+  is of course much better.
+*/
+
+// ===============================================================================
+struct malloc_tree_chunk : public malloc_chunk_header
+{
+    malloc_tree_chunk *leftmost_child()
+    {
+        return _child[0] ? _child[0] : _child[1];
+    }
+
+
+    malloc_tree_chunk* _fd;
+    malloc_tree_chunk* _bk;
+
+    malloc_tree_chunk* _child[2];
+    malloc_tree_chunk* _parent;
+    bindex_t           _index;
+};
+
+typedef malloc_tree_chunk  tchunk;
+typedef malloc_tree_chunk* tchunkptr;
+typedef malloc_tree_chunk* tbinptr; // The type of bins of trees
+
+/* ----------------------------- Segments -------------------------------- */
+
+/*
+  Each malloc space may include non-contiguous segments, held in a
+  list headed by an embedded malloc_segment record representing the
+  top-most space. Segments also include flags holding properties of
+  the space. Large chunks that are directly allocated by mmap are not
+  included in this list. They are instead independently created and
+  destroyed without otherwise keeping track of them.
+
+  Segment management mainly comes into play for spaces allocated by
+  MMAP.  Any call to MMAP might or might not return memory that is
+  adjacent to an existing segment.  MORECORE normally contiguously
+  extends the current space, so this space is almost always adjacent,
+  which is simpler and faster to deal with. (This is why MORECORE is
+  used preferentially to MMAP when both are available -- see
+  sys_alloc.)  When allocating using MMAP, we don't use any of the
+  hinting mechanisms (inconsistently) supported in various
+  implementations of unix mmap, or distinguish reserving from
+  committing memory. Instead, we just ask for space, and exploit
+  contiguity when we get it.  It is probably possible to do
+  better than this on some systems, but no general scheme seems
+  to be significantly better.
+
+  Management entails a simpler variant of the consolidation scheme
+  used for chunks to reduce fragmentation -- new adjacent memory is
+  normally prepended or appended to an existing segment. However,
+  there are limitations compared to chunk consolidation that mostly
+  reflect the fact that segment processing is relatively infrequent
+  (occurring only when getting memory from system) and that we
+  don't expect to have huge numbers of segments:
+
+  * Segments are not indexed, so traversal requires linear scans.  (It
+    would be possible to index these, but is not worth the extra
+    overhead and complexity for most programs on most platforms.)
+  * New segments are only appended to old ones when holding top-most
+    memory; if they cannot be prepended to others, they are held in
+    different segments.
+
+  Except for the top-most segment of an mstate, each segment record
+  is kept at the tail of its segment. Segments are added by pushing
+  segment records onto the list headed by the _seg member of the
+  containing mstate.
+
+  Segment flags control allocation/merge/deallocation policies:
+  * If EXTERN_BIT set, then we did not allocate this segment,
+    and so should not try to deallocate or merge with others.
+    (This currently holds only for the initial segment passed
+    into create_mspace_with_base.)
+  * If USE_MMAP_BIT set, the segment may be merged with
+    other surrounding mmapped segments and trimmed/de-allocated
+    using munmap.
+  * If neither bit is set, then the segment was obtained using
+    MORECORE so can be merged with surrounding MORECORE'd segments
+    and deallocated/trimmed using MORECORE with negative arguments.
+*/
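+
+/*
+  A hypothetical helper (not part of the allocator) showing how the flag
+  bits above translate into the deallocation policy:
+
+      static bool may_unmap_segment(const malloc_segment* s)
+      {
+          if (s->_sflags & EXTERN_BIT)   // not allocated by us: never deallocate or merge
+              return false;
+          // mmapped segments can be munmap'd; MORECORE'd ones are trimmed via MORECORE instead
+          return (s->_sflags & USE_MMAP_BIT) != 0;
+      }
+
+  release_unused_segments() further below makes the same distinction via
+  is_mmapped_segment() / is_extern_segment().
+*/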
+
+// ===============================================================================
+struct malloc_segment
+{
+    bool is_mmapped_segment()  { return !!(_sflags & USE_MMAP_BIT); }
+    bool is_extern_segment()   { return !!(_sflags & EXTERN_BIT); }
+
+    char*           _base;          // base address
+    size_t          _size;          // allocated size
+    malloc_segment* _next;          // ptr to next segment
+    flag_t          _sflags;        // mmap and extern flag
+};
+
+typedef malloc_segment  msegment;
+typedef malloc_segment* msegmentptr;
+
+/* ------------- Malloc_params ------------------- */
+
+/*
+  malloc_params holds global properties, including those that can be
+  dynamically set using mallopt. There is a single instance, mparams,
+  initialized in init_mparams. Note that the non-zeroness of "magic"
+  also serves as an initialization flag.
+*/
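+
+/*
+  The alignment helpers below round a size up to a power-of-two boundary
+  with the usual mask trick.  A worked example, assuming a 4096-byte
+  page size:
+
+      page_align(1)    == 4096    // (1 + 4095) & ~4095
+      page_align(4096) == 4096
+      page_align(4097) == 8192
+
+  granularity_align (and the mmap_align macro further below) follow the
+  same pattern with the allocation granularity instead of the page size.
+*/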
+
+// ===============================================================================
+struct malloc_params
+{
+    malloc_params() : _magic(0) {}
+
+    void ensure_initialization()
+    {
+        if (!_magic)
+            _init();
+    }
+    
+    SPP_IMPL int change(int param_number, int value);
+
+    size_t page_align(size_t sz)
+    {
+        return (sz + (_page_size - 1)) & ~(_page_size - 1);
+    }
+
+    size_t granularity_align(size_t sz)
+    {
+        return (sz + (_granularity - 1)) & ~(_granularity - 1);
+    }
+
+    bool is_page_aligned(char *S)
+    {
+        return ((size_t)S & (_page_size - 1)) == 0;
+    }
+
+    SPP_IMPL int _init();
+
+    size_t _magic;
+    size_t _page_size;
+    size_t _granularity;
+    size_t _mmap_threshold;
+    size_t _trim_threshold;
+    flag_t _default_mflags;
+};
+
+static malloc_params mparams;
+
+/* ---------------------------- malloc_state ----------------------------- */
+
+/*
+   A malloc_state holds all of the bookkeeping for a space.
+   The main fields are:
+
+  Top
+    The topmost chunk of the currently active segment. Its size is
+    cached in topsize.  The actual size of topmost space is
+    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
+    fenceposts and segment records if necessary when getting more
+    space from the system.  The size at which to autotrim top is
+    cached from mparams in trim_check, except that it is disabled if
+    an autotrim fails.
+
+  Designated victim (dv)
+    This is the preferred chunk for servicing small requests that
+    don't have exact fits.  It is normally the chunk split off most
+    recently to service another small request.  Its size is cached in
+    dvsize. The link fields of this chunk are not maintained since it
+    is not kept in a bin.
+
+  SmallBins
+    An array of bin headers for free chunks.  These bins hold chunks
+    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
+    chunks of all the same size, spaced 8 bytes apart.  To simplify
+    use in double-linked lists, each bin header acts as a malloc_chunk
+    pointing to the real first node, if it exists (else pointing to
+    itself).  This avoids special-casing for headers.  But to avoid
+    waste, we allocate only the fd/bk pointers of bins, and then use
+    repositioning tricks to treat these as the fields of a chunk.
+
+  TreeBins
+    Treebins are pointers to the roots of trees holding a range of
+    sizes. There are 2 equally spaced treebins for each power of two
+    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
+    larger.
+
+  Bin maps
+    There is one bit map for small bins ("smallmap") and one for
+    treebins ("treemap).  Each bin sets its bit when non-empty, and
+    clears the bit when empty.  Bit operations are then used to avoid
+    bin-by-bin searching -- nearly all "search" is done without ever
+    looking at bins that won't be selected.  The bit maps
+    conservatively use 32 bits per map word, even on 64-bit systems.
+    For a good description of some of the bit-based techniques used
+    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
+    supplement at http://hackersdelight.org/). Many of these are
+    intended to reduce the branchiness of paths through malloc etc, as
+    well as to reduce the number of memory locations read or written.
+
+  Segments
+    A list of segments headed by an embedded malloc_segment record
+    representing the initial space.
+
+  Address check support
+    The least_addr field is the least address ever obtained from
+    MORECORE or MMAP. Attempted frees and reallocs of any address less
+    than this are trapped (unless SPP_INSECURE is defined).
+
+  Magic tag
+    A cross-check field that should always hold same value as mparams._magic.
+
+  Max allowed footprint
+    The maximum allowed bytes to allocate from system (zero means no limit)
+
+  Flags
+    Bits recording whether to use MMAP, locks, or contiguous MORECORE
+
+  Statistics
+    Each space keeps track of current and maximum system memory
+    obtained via MORECORE or MMAP.
+
+  Trim support
+    Fields holding the amount of unused topmost memory that should trigger
+    trimming, and a counter to force periodic scanning to release unused
+    non-topmost segments.
+
+  Extension support
+    A void* pointer and a size_t field that can be used to help implement
+    extensions to this malloc.
+*/
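+
+/*
+  A minimal sketch of the bin-map lookup described above (it is not part
+  of the allocator; idx2bit, least_bit and compute_bit2idx are private
+  helpers of malloc_state defined further below):
+
+      // index of the first non-empty small bin at or above index i, or -1
+      static int first_nonempty_bin(binmap_t smallmap, bindex_t i)
+      {
+          binmap_t candidates = smallmap & ~(idx2bit(i) - 1); // keep bits i and above
+          if (candidates == 0)
+              return -1;
+          return (int)compute_bit2idx(least_bit(candidates));
+      }
+
+  This is the kind of lookup the bin maps enable: one mask, one isolate,
+  one bit-scan, and no bin-by-bin scanning.
+*/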
+
+
+// ================================================================================
+class malloc_state
+{
+public:
+    /* ----------------------- _malloc, _free, etc... --- */
+    SPP_FORCEINLINE void* _malloc(size_t bytes);
+    SPP_FORCEINLINE void  _free(mchunkptr p);
+
+
+    /* ------------------------ Relays to internal calls to malloc/free from realloc, memalign etc */
+    void *internal_malloc(size_t b) { return mspace_malloc(this, b); }
+    void internal_free(void *mem)   { mspace_free(this, mem); }
+
+    /* ------------------------ ----------------------- */
+
+    SPP_IMPL void      init_top(mchunkptr p, size_t psize);
+    SPP_IMPL void      init_bins();
+    SPP_IMPL void      init(char* tbase, size_t tsize);
+
+    /* ------------------------ System alloc/dealloc -------------------------- */
+    SPP_IMPL void*     sys_alloc(size_t nb);
+    SPP_IMPL size_t    release_unused_segments();
+    SPP_IMPL int       sys_trim(size_t pad);
+    SPP_IMPL void      dispose_chunk(mchunkptr p, size_t psize);
+
+    /* ----------------------- Internal support for realloc, memalign, etc --- */
+    SPP_IMPL mchunkptr try_realloc_chunk(mchunkptr p, size_t nb, int can_move);
+    SPP_IMPL void*     internal_memalign(size_t alignment, size_t bytes);
+    SPP_IMPL void**    ialloc(size_t n_elements, size_t* sizes, int opts, void* chunks[]);
+    SPP_IMPL size_t    internal_bulk_free(void* array[], size_t nelem);
+    SPP_IMPL void      internal_inspect_all(void(*handler)(void *start, void *end,
+                                                           size_t used_bytes, void* callback_arg),
+                                            void* arg);
+
+    /* -------------------------- system alloc setup (Operations on mflags) ----- */
+    bool      use_lock() const { return false; }
+    void      enable_lock()    {}
+    void      set_lock(int)    {}
+    void      disable_lock()   {}
+
+    bool      use_mmap() const { return !!(_mflags & USE_MMAP_BIT); }
+    void      enable_mmap()    { _mflags |=  USE_MMAP_BIT; }
+
+#if SPP_HAVE_MMAP
+    void      disable_mmap()   { _mflags &= ~USE_MMAP_BIT; }
+#else
+    void      disable_mmap()   {}
+#endif
+
+    /* ----------------------- Runtime Check Support ------------------------- */
+
+    /*
+      For security, the main invariant is that malloc/free/etc never
+      writes to a static address other than malloc_state, unless static
+      malloc_state itself has been corrupted, which cannot occur via
+      malloc (because of these checks). In essence this means that we
+      believe all pointers, sizes, maps etc held in malloc_state, but
+      check all of those linked or offsetted from other embedded data
+      structures.  These checks are interspersed with main code in a way
+      that tends to minimize their run-time cost.
+
+      When SPP_FOOTERS is defined, in addition to range checking, we also
+      verify footer fields of inuse chunks, which can be used to guarantee
+      that the mstate controlling malloc/free is intact.  This is a
+      streamlined version of the approach described by William Robertson
+      et al in "Run-time Detection of Heap-based Overflows" LISA'03
+      http://www.usenix.org/events/lisa03/tech/robertson.html The footer
+      of an inuse chunk holds the xor of its mstate and a random seed,
+      that is checked upon calls to free() and realloc().  This is
+      (probabilistically) unguessable from outside the program, but can be
+      computed by any code successfully malloc'ing any chunk, so does not
+      itself provide protection against code that has already broken
+      security through some other means.  Unlike Robertson et al, we
+      always dynamically check addresses of all offset chunks (previous,
+      next, etc). This turns out to be cheaper than relying on hashes.
+    */
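+
+    /*
+      A minimal sketch of the footer scheme described above -- it is just
+      an xor round-trip (the real code is mark_inuse_foot() below and
+      get_mstate_for() after this class):
+
+          stored_foot = (size_t)owning_mstate ^ mparams._magic;   // written past the chunk
+          recovered   = (malloc_state*)(stored_foot ^ mparams._magic);
+
+      If recovered does not match the mstate performing the free, the
+      footer was clobbered (or the pointer never came from this mstate).
+    */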
+
+
+#if !SPP_INSECURE
+    // Check if address a is at least as high as any from MORECORE or MMAP
+    bool        ok_address(void *a) const { return (char *)a >= _least_addr; }
+
+    // Check if address of next chunk n is higher than base chunk p
+    static bool ok_next(void *p, void *n) { return p < n; }
+
+    // Check if p has inuse status
+    static bool ok_inuse(mchunkptr p)     { return p->is_inuse(); }
+
+    // Check if p has its pinuse bit on
+    static bool ok_pinuse(mchunkptr p)    { return p->pinuse(); }
+
+    // Check if (alleged) mstate m has expected magic field
+    bool        ok_magic() const          { return _magic == mparams._magic; }
+
+    // In gcc, use __builtin_expect to minimize impact of checks
+  #if defined(__GNUC__) && __GNUC__ >= 3
+    static bool rtcheck(bool e)       { return __builtin_expect(e, 1); }
+  #else
+    static bool rtcheck(bool e)       { return e; }
+  #endif
+#else
+    static bool ok_address(void *)       { return true; }
+    static bool ok_next(void *, void *)  { return true; }
+    static bool ok_inuse(mchunkptr)      { return true; }
+    static bool ok_pinuse(mchunkptr)     { return true; }
+    static bool ok_magic()               { return true; }
+    static bool rtcheck(bool)            { return true; }
+#endif
+
+    bool is_initialized() const           { return _top != 0; }
+
+    bool use_noncontiguous()  const       { return !!(_mflags & USE_NONCONTIGUOUS_BIT); }
+    void disable_contiguous()             { _mflags |=  USE_NONCONTIGUOUS_BIT; }
+
+    // Return segment holding given address
+    msegmentptr segment_holding(char* addr) const
+    {
+        msegmentptr sp = (msegmentptr)&_seg;
+        for (;;)
+        {
+            if (addr >= sp->_base && addr < sp->_base + sp->_size)
+                return sp;
+            if ((sp = sp->_next) == 0)
+                return 0;
+        }
+    }
+
+    // Return true if segment contains a segment link
+    int has_segment_link(msegmentptr ss) const
+    {
+        msegmentptr sp = (msegmentptr)&_seg;
+        for (;;)
+        {
+            if ((char*)sp >= ss->_base && (char*)sp < ss->_base + ss->_size)
+                return 1;
+            if ((sp = sp->_next) == 0)
+                return 0;
+        }
+    }
+
+    bool should_trim(size_t s) const { return s > _trim_check; }
+
+    /* -------------------------- Debugging setup ---------------------------- */
+
+#if ! SPP_DEBUG
+    void check_free_chunk(mchunkptr) {}
+    void check_inuse_chunk(mchunkptr) {}
+    void check_malloced_chunk(void*, size_t) {}
+    void check_mmapped_chunk(mchunkptr) {}
+    void check_malloc_state() {}
+    void check_top_chunk(mchunkptr) {}
+#else /* SPP_DEBUG */
+    void check_free_chunk(mchunkptr p)       { do_check_free_chunk(p); }
+    void check_inuse_chunk(mchunkptr p)      { do_check_inuse_chunk(p); }
+    void check_malloced_chunk(void* p, size_t s) { do_check_malloced_chunk(p, s); }
+    void check_mmapped_chunk(mchunkptr p)    { do_check_mmapped_chunk(p); }
+    void check_malloc_state()                { do_check_malloc_state(); }
+    void check_top_chunk(mchunkptr p)        { do_check_top_chunk(p); }
+
+    void do_check_any_chunk(mchunkptr p) const;
+    void do_check_top_chunk(mchunkptr p) const;
+    void do_check_mmapped_chunk(mchunkptr p) const;
+    void do_check_inuse_chunk(mchunkptr p) const;
+    void do_check_free_chunk(mchunkptr p) const;
+    void do_check_malloced_chunk(void* mem, size_t s) const;
+    void do_check_tree(tchunkptr t);
+    void do_check_treebin(bindex_t i);
+    void do_check_smallbin(bindex_t i);
+    void do_check_malloc_state();
+    int  bin_find(mchunkptr x);
+    size_t traverse_and_check();
+#endif
+
+private:
+
+    /* ---------------------------- Indexing Bins ---------------------------- */
+
+    static bool  is_small(size_t s)          { return (s >> SMALLBIN_SHIFT) < NSMALLBINS; }
+    static bindex_t  small_index(size_t s)   { return (bindex_t)(s  >> SMALLBIN_SHIFT); }
+    static size_t small_index2size(size_t i) { return i << SMALLBIN_SHIFT; }
+    static bindex_t  MIN_SMALL_INDEX()       { return small_index(MIN_CHUNK_SIZE); }
+
+    // assign tree index for size S to variable I. Use x86 asm if possible
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+    SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S)
+    {
+        unsigned int X = S >> TREEBIN_SHIFT;
+        if (X == 0)
+            return 0;
+        else if (X > 0xFFFF)
+            return NTREEBINS - 1;
+
+        unsigned int K = (unsigned) sizeof(X) * __CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X);
+        return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)));
+    }
+
+#elif defined (__INTEL_COMPILER)
+    SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S)
+    {
+        size_t X = S >> TREEBIN_SHIFT;
+        if (X == 0)
+            return 0;
+        else if (X > 0xFFFF)
+            return NTREEBINS - 1;
+
+        unsigned int K = _bit_scan_reverse(X);
+        return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)));
+    }
+
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+    SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S)
+    {
+        size_t X = S >> TREEBIN_SHIFT;
+        if (X == 0)
+            return 0;
+        else if (X > 0xFFFF)
+            return NTREEBINS - 1;
+
+        unsigned int K;
+        _BitScanReverse((DWORD *) &K, (DWORD) X);
+        return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)));
+    }
+
+#else // GNUC
+    SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S)
+    {
+        size_t X = S >> TREEBIN_SHIFT;
+        if (X == 0)
+            return 0;
+        else if (X > 0xFFFF)
+            return NTREEBINS - 1;
+
+        unsigned int Y = (unsigned int)X;
+        unsigned int N = ((Y - 0x100) >> 16) & 8;
+        unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;
+        N += K;
+        N += K = (((Y <<= K) - 0x4000) >> 16) & 2;
+        K = 14 - N + ((Y <<= K) >> 15);
+        return (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1));
+    }
+#endif
+
+    // Shift placing maximum resolved bit in a treebin at i as sign bit
+    static bindex_t leftshift_for_tree_index(bindex_t i)
+    {
+        return (i == NTREEBINS - 1) ? 0 :
+               ((spp_size_t_bitsize - 1) - ((i >> 1) + TREEBIN_SHIFT - 2));
+    }
+
+    // The size of the smallest chunk held in bin with index i
+    static bindex_t minsize_for_tree_index(bindex_t i)
+    {
+        return ((size_t)1 << ((i >> 1) + TREEBIN_SHIFT)) |
+               (((size_t)(i & 1)) << ((i >> 1) + TREEBIN_SHIFT - 1));
+    }
+
+
+    // ----------- isolate the least set bit of a bitmap
+    static binmap_t least_bit(binmap_t x) { return x & -x; }
+
+    // ----------- mask with all bits to left of least bit of x on
+    static binmap_t left_bits(binmap_t x) { return (x << 1) | -(x << 1); }
+
+    // index corresponding to given bit. Use x86 asm if possible
+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+    static bindex_t compute_bit2idx(binmap_t X)
+    {
+        unsigned int J;
+        J = __builtin_ctz(X);
+        return (bindex_t)J;
+    }
+
+#elif defined (__INTEL_COMPILER)
+    static bindex_t compute_bit2idx(binmap_t X)
+    {
+        unsigned int J;
+        J = _bit_scan_forward(X);
+        return (bindex_t)J;
+    }
+
+#elif defined(_MSC_VER) && _MSC_VER>=1300
+    static bindex_t compute_bit2idx(binmap_t X)
+    {
+        unsigned int J;
+        _BitScanForward((DWORD *) &J, X);
+        return (bindex_t)J;
+    }
+
+#elif SPP_USE_BUILTIN_FFS
+    static bindex_t compute_bit2idx(binmap_t X) { return ffs(X) - 1; }
+
+#else
+    static bindex_t compute_bit2idx(binmap_t X)
+    {
+        unsigned int Y = X - 1;
+        unsigned int K = Y >> (16 - 4) & 16;
+        unsigned int N = K;        Y >>= K;
+        N += K = Y >> (8 - 3) &  8;  Y >>= K;
+        N += K = Y >> (4 - 2) &  4;  Y >>= K;
+        N += K = Y >> (2 - 1) &  2;  Y >>= K;
+        N += K = Y >> (1 - 0) &  1;  Y >>= K;
+        return (bindex_t)(N + Y);
+    }
+#endif
+
+    /* ------------------------ Set up inuse chunks with or without footers ---*/
+#if !SPP_FOOTERS
+    void mark_inuse_foot(malloc_chunk_header *, size_t) {}
+#else
+    // Set foot of inuse chunk to be xor of mstate and seed
+    void  mark_inuse_foot(malloc_chunk_header *p, size_t s)
+    {
+        (((mchunkptr)((char*)p + s))->prev_foot = (size_t)this ^ mparams._magic);
+    }
+#endif
+
+    void set_inuse(malloc_chunk_header *p, size_t s)
+    {
+        p->_head = (p->_head & PINUSE_BIT) | s | CINUSE_BIT;
+        ((mchunkptr)(((char*)p) + s))->_head |= PINUSE_BIT;
+        mark_inuse_foot(p, s);
+    }
+
+    void set_inuse_and_pinuse(malloc_chunk_header *p, size_t s)
+    {
+        p->_head = s | PINUSE_BIT | CINUSE_BIT;
+        ((mchunkptr)(((char*)p) + s))->_head |= PINUSE_BIT;
+        mark_inuse_foot(p, s);
+    }
+
+    void set_size_and_pinuse_of_inuse_chunk(malloc_chunk_header *p, size_t s)
+    {
+        p->_head = s | PINUSE_BIT | CINUSE_BIT;
+        mark_inuse_foot(p, s);
+    }
+
+    /* ------------------------ Addressing by index. See above about smallbin repositioning --- */
+    sbinptr  smallbin_at(bindex_t i) const { return (sbinptr)((char*)&_smallbins[i << 1]); }
+    tbinptr* treebin_at(bindex_t i)  { return &_treebins[i]; }
+
+    /* ----------------------- bit corresponding to given index ---------*/
+    static binmap_t idx2bit(bindex_t i) { return ((binmap_t)1 << i); }
+
+    // --------------- Mark/Clear bits with given index
+    void     mark_smallmap(bindex_t i)      { _smallmap |=  idx2bit(i); }
+    void     clear_smallmap(bindex_t i)     { _smallmap &= ~idx2bit(i); }
+    binmap_t smallmap_is_marked(bindex_t i) const { return _smallmap & idx2bit(i); }
+
+    void     mark_treemap(bindex_t i)       { _treemap  |=  idx2bit(i); }
+    void     clear_treemap(bindex_t i)      { _treemap  &= ~idx2bit(i); }
+    binmap_t treemap_is_marked(bindex_t i)  const { return _treemap & idx2bit(i); }
+
+    /* ------------------------ ----------------------- */
+    SPP_FORCEINLINE void insert_small_chunk(mchunkptr P, size_t S);
+    SPP_FORCEINLINE void unlink_small_chunk(mchunkptr P, size_t S);
+    SPP_FORCEINLINE void unlink_first_small_chunk(mchunkptr B, mchunkptr P, bindex_t I);
+    SPP_FORCEINLINE void replace_dv(mchunkptr P, size_t S);
+
+    /* ------------------------- Operations on trees ------------------------- */
+    SPP_FORCEINLINE void insert_large_chunk(tchunkptr X, size_t S);
+    SPP_FORCEINLINE void unlink_large_chunk(tchunkptr X);
+
+    /* ------------------------ Relays to large vs small bin operations */
+    SPP_FORCEINLINE void insert_chunk(mchunkptr P, size_t S);
+    SPP_FORCEINLINE void unlink_chunk(mchunkptr P, size_t S);
+
+    /* -----------------------  Direct-mmapping chunks ----------------------- */
+    SPP_IMPL void*       mmap_alloc(size_t nb);
+    SPP_IMPL mchunkptr   mmap_resize(mchunkptr oldp, size_t nb, int flags);
+
+    SPP_IMPL void        reset_on_error();
+    SPP_IMPL void*       prepend_alloc(char* newbase, char* oldbase, size_t nb);
+    SPP_IMPL void        add_segment(char* tbase, size_t tsize, flag_t mmapped);
+
+    /* ------------------------ malloc --------------------------- */
+    SPP_IMPL void*       tmalloc_large(size_t nb);
+    SPP_IMPL void*       tmalloc_small(size_t nb);
+
+    /* ------------------------Bin types, widths and sizes -------- */
+    static const size_t NSMALLBINS      = 32;
+    static const size_t NTREEBINS       = 32;
+    static const size_t SMALLBIN_SHIFT  = 3;
+    static const size_t SMALLBIN_WIDTH  = 1 << SMALLBIN_SHIFT;
+    static const size_t TREEBIN_SHIFT   = 8;
+    static const size_t MIN_LARGE_SIZE  = 1 << TREEBIN_SHIFT;
+    static const size_t MAX_SMALL_SIZE  = (MIN_LARGE_SIZE - 1);
+    static const size_t MAX_SMALL_REQUEST = (MAX_SMALL_SIZE - spp_chunk_align_mask - CHUNK_OVERHEAD);
+
+    /* ------------------------ data members --------------------------- */
+    binmap_t   _smallmap;
+    binmap_t   _treemap;
+    size_t     _dvsize;
+    size_t     _topsize;
+    char*      _least_addr;
+    mchunkptr  _dv;
+    mchunkptr  _top;
+    size_t     _trim_check;
+    size_t     _release_checks;
+    size_t     _magic;
+    mchunkptr  _smallbins[(NSMALLBINS + 1) * 2];
+    tbinptr    _treebins[NTREEBINS];
+public:
+    size_t     _footprint;
+    size_t     _max_footprint;
+    size_t     _footprint_limit; // zero means no limit
+    flag_t     _mflags;
+
+    msegment   _seg;
+
+private:
+    void*      _extp;      // Unused but available for extensions
+    size_t     _exts;
+};
+
+typedef malloc_state*    mstate;
+
+/* ------------- end malloc_state ------------------- */
+
+#if SPP_FOOTERS
+static malloc_state* get_mstate_for(malloc_chunk_header *p)
+{
+    return (malloc_state*)(((mchunkptr)((char*)(p) +
+                                        (p->chunksize())))->prev_foot ^ mparams._magic);
+}
+#endif
+
+/* -------------------------- system alloc setup ------------------------- */
+
+
+
+// For mmap, use granularity alignment on Windows, else page-align
+#ifdef WIN32
+    #define mmap_align(S) mparams.granularity_align(S)
+#else
+    #define mmap_align(S) mparams.page_align(S)
+#endif
+
+//  True if segment S holds address A
+static bool segment_holds(msegmentptr S, mchunkptr A)
+{
+    return (char*)A >= S->_base && (char*)A < S->_base + S->_size;
+}
+
+/*
+  top_foot_size is padding at the end of a segment, including space
+  that may be needed to place segment records and fenceposts when new
+  noncontiguous segments are added.
+*/
+static SPP_FORCEINLINE size_t top_foot_size()
+{
+    return align_offset(chunk2mem((void *)0)) + 
+        pad_request(sizeof(struct malloc_segment)) + 
+        MIN_CHUNK_SIZE;
+}
+
+
+// For sys_alloc, enough padding to ensure we can malloc the request on success
+static SPP_FORCEINLINE size_t sys_alloc_padding()
+{
+    return  top_foot_size() + SPP_MALLOC_ALIGNMENT;
+}
+
+
+#define SPP_USAGE_ERROR_ACTION(m,p) SPP_ABORT
+
+/* ---------------------------- setting mparams -------------------------- */
+
+// Initialize mparams
+int malloc_params::_init()
+{
+#ifdef NEED_GLOBAL_LOCK_INIT
+    if (malloc_global_mutex_status <= 0)
+        init_malloc_global_mutex();
+#endif
+
+    if (_magic == 0)
+    {
+        size_t magic;
+        size_t psize;
+        size_t gsize;
+
+#ifndef WIN32
+        psize = malloc_getpagesize;
+        gsize = ((SPP_DEFAULT_GRANULARITY != 0) ? SPP_DEFAULT_GRANULARITY : psize);
+#else
+        {
+            SYSTEM_INFO system_info;
+            GetSystemInfo(&system_info);
+            psize = system_info.dwPageSize;
+            gsize = ((SPP_DEFAULT_GRANULARITY != 0) ?
+                     SPP_DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
+        }
+#endif
+
+        /* Sanity-check configuration:
+           size_t must be unsigned and as wide as pointer type.
+           ints must be at least 4 bytes.
+           alignment must be at least 8.
+           Alignment, min chunk size, and page size must all be powers of 2.
+        */
+        if ((sizeof(size_t) != sizeof(char*)) ||
+                (spp_max_size_t < MIN_CHUNK_SIZE)  ||
+                (sizeof(int) < 4)  ||
+                (SPP_MALLOC_ALIGNMENT < (size_t)8U) ||
+                ((SPP_MALLOC_ALIGNMENT & (SPP_MALLOC_ALIGNMENT - 1)) != 0) ||
+                ((MCHUNK_SIZE      & (MCHUNK_SIZE - 1))      != 0) ||
+                ((gsize            & (gsize - 1))            != 0) ||
+                ((psize            & (psize - 1))            != 0))
+            SPP_ABORT;
+        _granularity = gsize;
+        _page_size = psize;
+        _mmap_threshold = SPP_DEFAULT_MMAP_THRESHOLD;
+        _trim_threshold = SPP_DEFAULT_TRIM_THRESHOLD;
+        _default_mflags = USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
+
+        {
+#if SPP_USE_DEV_RANDOM
+            int fd;
+            unsigned char buf[sizeof(size_t)];
+            // Try to use /dev/urandom, else fall back on using time
+            if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
+                    read(fd, buf, sizeof(buf)) == sizeof(buf))
+            {
+                magic = *((size_t *) buf);
+                close(fd);
+            }
+            else
+#endif
+            {
+#ifdef WIN32
+                magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
+#elif defined(SPP_LACKS_TIME_H)
+                magic = (size_t)&magic ^ (size_t)0x55555555U;
+#else
+                magic = (size_t)(time(0) ^ (size_t)0x55555555U);
+#endif
+            }
+            magic |= (size_t)8U;    // ensure nonzero
+            magic &= ~(size_t)7U;   // improve chances of fault for bad values
+            // Until memory modes commonly available, use volatile-write
+            (*(volatile size_t *)(&(_magic))) = magic;
+        }
+    }
+
+    return 1;
+}
+
+/*
+  mallopt tuning options.  SVID/XPG defines four standard parameter
+  numbers for mallopt, normally defined in malloc.h.  None of these
+  are used in this malloc, so setting them has no effect. But this
+  malloc does support the following options.
+*/
+static const int  m_trim_threshold = -1;
+static const int  m_granularity    = -2;
+static const int  m_mmap_threshold = -3;
+
+// support for mallopt
+int malloc_params::change(int param_number, int value)
+{
+    size_t val;
+    ensure_initialization();
+    val = (value == -1) ? spp_max_size_t : (size_t)value;
+
+    switch (param_number)
+    {
+    case m_trim_threshold:
+        _trim_threshold = val;
+        return 1;
+
+    case m_granularity:
+        if (val >= _page_size && ((val & (val - 1)) == 0))
+        {
+            _granularity = val;
+            return 1;
+        }
+        else
+            return 0;
+
+    case m_mmap_threshold:
+        _mmap_threshold = val;
+        return 1;
+
+    default:
+        return 0;
+    }
+}
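+
+/*
+  A usage sketch for the options above (the values are arbitrary
+  examples, and the granularity case assumes a page size of 4 KiB or
+  less):
+
+      mparams.change(m_trim_threshold, 1 << 20);    // release topmost memory once >= 1 MiB is unused
+      mparams.change(m_mmap_threshold, 256 * 1024); // mmap requests of >= 256 KiB directly
+      mparams.change(m_granularity,    64 * 1024);  // accepted: power of 2, >= page size
+      mparams.change(m_granularity,    3000);       // rejected (returns 0): not a power of 2
+
+  change() returns 1 on success and 0 for an unknown parameter number or
+  a rejected value.
+*/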
+
+#if SPP_DEBUG
+/* ------------------------- Debugging Support --------------------------- */
+
+// Check properties of any chunk, whether free, inuse, mmapped etc
+void malloc_state::do_check_any_chunk(mchunkptr p)  const
+{
+    assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD));
+    assert(ok_address(p));
+}
+
+// Check properties of top chunk
+void malloc_state::do_check_top_chunk(mchunkptr p) const
+{
+    msegmentptr sp = segment_holding((char*)p);
+    size_t  sz = p->_head & ~INUSE_BITS; // third-lowest bit can be set!
+    assert(sp != 0);
+    assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD));
+    assert(ok_address(p));
+    assert(sz == _topsize);
+    assert(sz > 0);
+    assert(sz == ((sp->_base + sp->_size) - (char*)p) - top_foot_size());
+    assert(p->pinuse());
+    assert(!p->chunk_plus_offset(sz)->pinuse());
+}
+
+// Check properties of (inuse) mmapped chunks
+void malloc_state::do_check_mmapped_chunk(mchunkptr p) const
+{
+    size_t  sz = p->chunksize();
+    size_t len = (sz + (p->_prev_foot) + SPP_MMAP_FOOT_PAD);
+    assert(p->is_mmapped());
+    assert(use_mmap());
+    assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD));
+    assert(ok_address(p));
+    assert(!is_small(sz));
+    assert((len & (mparams._page_size - 1)) == 0);
+    assert(p->chunk_plus_offset(sz)->_head == FENCEPOST_HEAD);
+    assert(p->chunk_plus_offset(sz + sizeof(size_t))->_head == 0);
+}
+
+// Check properties of inuse chunks
+void malloc_state::do_check_inuse_chunk(mchunkptr p) const
+{
+    do_check_any_chunk(p);
+    assert(p->is_inuse());
+    assert(p->next_pinuse());
+    // If not pinuse and not mmapped, previous chunk has OK offset
+    assert(p->is_mmapped() || p->pinuse() || (mchunkptr)p->prev_chunk()->next_chunk() == p);
+    if (p->is_mmapped())
+        do_check_mmapped_chunk(p);
+}
+
+// Check properties of free chunks
+void malloc_state::do_check_free_chunk(mchunkptr p) const
+{
+    size_t sz = p->chunksize();
+    mchunkptr next = (mchunkptr)p->chunk_plus_offset(sz);
+    do_check_any_chunk(p);
+    assert(!p->is_inuse());
+    assert(!p->next_pinuse());
+    assert(!p->is_mmapped());
+    if (p != _dv && p != _top)
+    {
+        if (sz >= MIN_CHUNK_SIZE)
+        {
+            assert((sz & spp_chunk_align_mask) == 0);
+            assert(spp_is_aligned(chunk2mem(p)));
+            assert(next->_prev_foot == sz);
+            assert(p->pinuse());
+            assert(next == _top || next->is_inuse());
+            assert(p->_fd->_bk == p);
+            assert(p->_bk->_fd == p);
+        }
+        else  // markers are always of size sizeof(size_t)
+            assert(sz == sizeof(size_t));
+    }
+}
+
+// Check properties of malloced chunks at the point they are malloced
+void malloc_state::do_check_malloced_chunk(void* mem, size_t s) const
+{
+    if (mem != 0)
+    {
+        mchunkptr p = mem2chunk(mem);
+        size_t sz = p->_head & ~INUSE_BITS;
+        do_check_inuse_chunk(p);
+        assert((sz & spp_chunk_align_mask) == 0);
+        assert(sz >= MIN_CHUNK_SIZE);
+        assert(sz >= s);
+        // unless mmapped, size is less than MIN_CHUNK_SIZE more than request
+        assert(p->is_mmapped() || sz < (s + MIN_CHUNK_SIZE));
+    }
+}
+
+// Check a tree and its subtrees.
+void malloc_state::do_check_tree(tchunkptr t)
+{
+    tchunkptr head = 0;
+    tchunkptr u = t;
+    bindex_t tindex = t->_index;
+    size_t tsize = t->chunksize();
+    bindex_t idx = compute_tree_index(tsize);
+    assert(tindex == idx);
+    assert(tsize >= MIN_LARGE_SIZE);
+    assert(tsize >= minsize_for_tree_index(idx));
+    assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1))));
+
+    do
+    {
+        // traverse through chain of same-sized nodes
+        do_check_any_chunk((mchunkptr)u);
+        assert(u->_index == tindex);
+        assert(u->chunksize() == tsize);
+        assert(!u->is_inuse());
+        assert(!u->next_pinuse());
+        assert(u->_fd->_bk == u);
+        assert(u->_bk->_fd == u);
+        if (u->_parent == 0)
+        {
+            assert(u->_child[0] == 0);
+            assert(u->_child[1] == 0);
+        }
+        else
+        {
+            assert(head == 0); // only one node on chain has parent
+            head = u;
+            assert(u->_parent != u);
+            assert(u->_parent->_child[0] == u ||
+                   u->_parent->_child[1] == u ||
+                   *((tbinptr*)(u->_parent)) == u);
+            if (u->_child[0] != 0)
+            {
+                assert(u->_child[0]->_parent == u);
+                assert(u->_child[0] != u);
+                do_check_tree(u->_child[0]);
+            }
+            if (u->_child[1] != 0)
+            {
+                assert(u->_child[1]->_parent == u);
+                assert(u->_child[1] != u);
+                do_check_tree(u->_child[1]);
+            }
+            if (u->_child[0] != 0 && u->_child[1] != 0)
+                assert(u->_child[0]->chunksize() < u->_child[1]->chunksize());
+        }
+        u = u->_fd;
+    }
+    while (u != t);
+    assert(head != 0);
+}
+
+//  Check all the chunks in a treebin.
+void malloc_state::do_check_treebin(bindex_t i)
+{
+    tbinptr* tb = (tbinptr*)treebin_at(i);
+    tchunkptr t = *tb;
+    int empty = (_treemap & (1U << i)) == 0;
+    if (t == 0)
+        assert(empty);
+    if (!empty)
+        do_check_tree(t);
+}
+
+//  Check all the chunks in a smallbin.
+void malloc_state::do_check_smallbin(bindex_t i)
+{
+    sbinptr b = smallbin_at(i);
+    mchunkptr p = b->_bk;
+    unsigned int empty = (_smallmap & (1U << i)) == 0;
+    if (p == b)
+        assert(empty);
+    if (!empty)
+    {
+        for (; p != b; p = p->_bk)
+        {
+            size_t size = p->chunksize();
+            mchunkptr q;
+            // each chunk claims to be free
+            do_check_free_chunk(p);
+            // chunk belongs in bin
+            assert(small_index(size) == i);
+            assert(p->_bk == b || p->_bk->chunksize() == p->chunksize());
+            // chunk is followed by an inuse chunk
+            q = (mchunkptr)p->next_chunk();
+            if (q->_head != FENCEPOST_HEAD)
+                do_check_inuse_chunk(q);
+        }
+    }
+}
+
+// Find x in a bin. Used in other check functions.
+int malloc_state::bin_find(mchunkptr x)
+{
+    size_t size = x->chunksize();
+    if (is_small(size))
+    {
+        bindex_t sidx = small_index(size);
+        sbinptr b = smallbin_at(sidx);
+        if (smallmap_is_marked(sidx))
+        {
+            mchunkptr p = b;
+            do
+            {
+                if (p == x)
+                    return 1;
+            }
+            while ((p = p->_fd) != b);
+        }
+    }
+    else
+    {
+        bindex_t tidx = compute_tree_index(size);
+        if (treemap_is_marked(tidx))
+        {
+            tchunkptr t = *treebin_at(tidx);
+            size_t sizebits = size << leftshift_for_tree_index(tidx);
+            while (t != 0 && t->chunksize() != size)
+            {
+                t = t->_child[(sizebits >> (spp_size_t_bitsize - 1)) & 1];
+                sizebits <<= 1;
+            }
+            if (t != 0)
+            {
+                tchunkptr u = t;
+                do
+                {
+                    if (u == (tchunkptr)x)
+                        return 1;
+                }
+                while ((u = u->_fd) != t);
+            }
+        }
+    }
+    return 0;
+}
+
+// Traverse each chunk and check it; return total
+size_t malloc_state::traverse_and_check()
+{
+    size_t sum = 0;
+    if (is_initialized())
+    {
+        msegmentptr s = (msegmentptr)&_seg;
+        sum += _topsize + top_foot_size();
+        while (s != 0)
+        {
+            mchunkptr q = align_as_chunk(s->_base);
+            mchunkptr lastq = 0;
+            assert(q->pinuse());
+            while (segment_holds(s, q) &&
+                    q != _top && q->_head != FENCEPOST_HEAD)
+            {
+                sum += q->chunksize();
+                if (q->is_inuse())
+                {
+                    assert(!bin_find(q));
+                    do_check_inuse_chunk(q);
+                }
+                else
+                {
+                    assert(q == _dv || bin_find(q));
+                    assert(lastq == 0 || lastq->is_inuse()); // Not 2 consecutive free
+                    do_check_free_chunk(q);
+                }
+                lastq = q;
+                q = (mchunkptr)q->next_chunk();
+            }
+            s = s->_next;
+        }
+    }
+    return sum;
+}
+
+
+// Check all properties of malloc_state.
+void malloc_state::do_check_malloc_state()
+{
+    bindex_t i;
+    size_t total;
+    // check bins
+    for (i = 0; i < NSMALLBINS; ++i)
+        do_check_smallbin(i);
+    for (i = 0; i < NTREEBINS; ++i)
+        do_check_treebin(i);
+
+    if (_dvsize != 0)
+    {
+        // check dv chunk
+        do_check_any_chunk(_dv);
+        assert(_dvsize == _dv->chunksize());
+        assert(_dvsize >= MIN_CHUNK_SIZE);
+        assert(bin_find(_dv) == 0);
+    }
+
+    if (_top != 0)
+    {
+        // check top chunk
+        do_check_top_chunk(_top);
+        //assert(topsize == top->chunksize()); redundant
+        assert(_topsize > 0);
+        assert(bin_find(_top) == 0);
+    }
+
+    total = traverse_and_check();
+    assert(total <= _footprint);
+    assert(_footprint <= _max_footprint);
+}
+#endif // SPP_DEBUG
+
+/* ----------------------- Operations on smallbins ----------------------- */
+
+/*
+  Various forms of linking and unlinking are defined as macros.  Even
+  the ones for trees, which are very long but have very short typical
+  paths.  This is ugly but reduces reliance on inlining support of
+  compilers.
+*/
+
+// Link a free chunk into a smallbin
+void malloc_state::insert_small_chunk(mchunkptr p, size_t s)
+{
+    bindex_t I  = small_index(s);
+    mchunkptr B = smallbin_at(I);
+    mchunkptr F = B;
+    assert(s >= MIN_CHUNK_SIZE);
+    if (!smallmap_is_marked(I))
+        mark_smallmap(I);
+    else if (rtcheck(ok_address(B->_fd)))
+        F = B->_fd;
+    else
+        SPP_ABORT;
+    B->_fd = p;
+    F->_bk = p;
+    p->_fd = F;
+    p->_bk = B;
+}
+
+// Unlink a chunk from a smallbin
+void malloc_state::unlink_small_chunk(mchunkptr p, size_t s)
+{
+    mchunkptr F = p->_fd;
+    mchunkptr B = p->_bk;
+    bindex_t I = small_index(s);
+    assert(p != B);
+    assert(p != F);
+    assert(p->chunksize() == small_index2size(I));
+    if (rtcheck(F == smallbin_at(I) || (ok_address(F) && F->_bk == p)))
+    {
+        if (B == F)
+            clear_smallmap(I);
+        else if (rtcheck(B == smallbin_at(I) ||
+                         (ok_address(B) && B->_fd == p)))
+        {
+            F->_bk = B;
+            B->_fd = F;
+        }
+        else
+            SPP_ABORT;
+    }
+    else
+        SPP_ABORT;
+}
+
+// Unlink the first chunk from a smallbin
+void malloc_state::unlink_first_small_chunk(mchunkptr B, mchunkptr p, bindex_t I)
+{
+    mchunkptr F = p->_fd;
+    assert(p != B);
+    assert(p != F);
+    assert(p->chunksize() == small_index2size(I));
+    if (B == F)
+        clear_smallmap(I);
+    else if (rtcheck(ok_address(F) && F->_bk == p))
+    {
+        F->_bk = B;
+        B->_fd = F;
+    }
+    else
+        SPP_ABORT;
+}
+
+// Replace dv node, binning the old one
+// Used only when dvsize known to be small
+void malloc_state::replace_dv(mchunkptr p, size_t s)
+{
+    size_t DVS = _dvsize;
+    assert(is_small(DVS));
+    if (DVS != 0)
+    {
+        mchunkptr DV = _dv;
+        insert_small_chunk(DV, DVS);
+    }
+    _dvsize = s;
+    _dv = p;
+}
+
+/* ------------------------- Operations on trees ------------------------- */
+
+// Insert chunk into tree
+void malloc_state::insert_large_chunk(tchunkptr X, size_t s)
+{
+    tbinptr* H;
+    bindex_t I = compute_tree_index(s);
+    H = treebin_at(I);
+    X->_index = I;
+    X->_child[0] = X->_child[1] = 0;
+    if (!treemap_is_marked(I))
+    {
+        mark_treemap(I);
+        *H = X;
+        X->_parent = (tchunkptr)H;
+        X->_fd = X->_bk = X;
+    }
+    else
+    {
+        tchunkptr T = *H;
+        size_t K = s << leftshift_for_tree_index(I);
+        for (;;)
+        {
+            if (T->chunksize() != s)
+            {
+                tchunkptr* C = &(T->_child[(K >> (spp_size_t_bitsize - 1)) & 1]);
+                K <<= 1;
+                if (*C != 0)
+                    T = *C;
+                else if (rtcheck(ok_address(C)))
+                {
+                    *C = X;
+                    X->_parent = T;
+                    X->_fd = X->_bk = X;
+                    break;
+                }
+                else
+                {
+                    SPP_ABORT;
+                    break;
+                }
+            }
+            else
+            {
+                tchunkptr F = T->_fd;
+                if (rtcheck(ok_address(T) && ok_address(F)))
+                {
+                    T->_fd = F->_bk = X;
+                    X->_fd = F;
+                    X->_bk = T;
+                    X->_parent = 0;
+                    break;
+                }
+                else
+                {
+                    SPP_ABORT;
+                    break;
+                }
+            }
+        }
+    }
+}
+
+/*
+  Unlink steps:
+
+  1. If x is a chained node, unlink it from its same-sized fd/bk links
+     and choose its bk node as its replacement.
+  2. If x was the last node of its size, but not a leaf node, it must
+     be replaced with a leaf node (not merely one with an open left or
+     right), to make sure that lefts and rights of descendants
+     correspond properly to bit masks.  We use the rightmost descendant
+     of x.  We could use any other leaf, but this is easy to locate and
+     tends to counteract removal of leftmosts elsewhere, and so keeps
+     paths shorter than minimally guaranteed.  This doesn't loop much
+     because on average a node in a tree is near the bottom.
+  3. If x is the base of a chain (i.e., has parent links) relink
+     x's parent and children to x's replacement (or null if none).
+*/
+
+void malloc_state::unlink_large_chunk(tchunkptr X)
+{
+    tchunkptr XP = X->_parent;
+    tchunkptr R;
+    if (X->_bk != X)
+    {
+        tchunkptr F = X->_fd;
+        R = X->_bk;
+        if (rtcheck(ok_address(F) && F->_bk == X && R->_fd == X))
+        {
+            F->_bk = R;
+            R->_fd = F;
+        }
+        else
+            SPP_ABORT;
+    }
+    else
+    {
+        tchunkptr* RP;
+        if (((R = *(RP = &(X->_child[1]))) != 0) ||
+                ((R = *(RP = &(X->_child[0]))) != 0))
+        {
+            tchunkptr* CP;
+            while ((*(CP = &(R->_child[1])) != 0) ||
+                    (*(CP = &(R->_child[0])) != 0))
+                R = *(RP = CP);
+            if (rtcheck(ok_address(RP)))
+                *RP = 0;
+            else
+                SPP_ABORT;
+        }
+    }
+    if (XP != 0)
+    {
+        tbinptr* H = treebin_at(X->_index);
+        if (X == *H)
+        {
+            if ((*H = R) == 0)
+                clear_treemap(X->_index);
+        }
+        else if (rtcheck(ok_address(XP)))
+        {
+            if (XP->_child[0] == X)
+                XP->_child[0] = R;
+            else
+                XP->_child[1] = R;
+        }
+        else
+            SPP_ABORT;
+        if (R != 0)
+        {
+            if (rtcheck(ok_address(R)))
+            {
+                tchunkptr C0, C1;
+                R->_parent = XP;
+                if ((C0 = X->_child[0]) != 0)
+                {
+                    if (rtcheck(ok_address(C0)))
+                    {
+                        R->_child[0] = C0;
+                        C0->_parent = R;
+                    }
+                    else
+                        SPP_ABORT;
+                }
+                if ((C1 = X->_child[1]) != 0)
+                {
+                    if (rtcheck(ok_address(C1)))
+                    {
+                        R->_child[1] = C1;
+                        C1->_parent = R;
+                    }
+                    else
+                        SPP_ABORT;
+                }
+            }
+            else
+                SPP_ABORT;
+        }
+    }
+}
+
+// Relays to large vs small bin operations
+
+void malloc_state::insert_chunk(mchunkptr p, size_t s)
+{
+    if (is_small(s))
+        insert_small_chunk(p, s);
+    else
+    {
+        tchunkptr tp = (tchunkptr)(p);
+        insert_large_chunk(tp, s);
+    }
+}
+
+void malloc_state::unlink_chunk(mchunkptr p, size_t s)
+{
+    if (is_small(s))
+        unlink_small_chunk(p, s);
+    else
+    {
+        tchunkptr tp = (tchunkptr)(p);
+        unlink_large_chunk(tp);
+    }
+}
+
+
+/* -----------------------  Direct-mmapping chunks ----------------------- */
+
+/*
+  Directly mmapped chunks are set up with an offset to the start of
+  the mmapped region stored in the prev_foot field of the chunk. This
+  allows reconstruction of the required argument to MUNMAP when freed,
+  and also allows adjustment of the returned chunk to meet alignment
+  requirements (especially in memalign).
+*/
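+
+/*
+  A small sketch of the bookkeeping described above, matching what
+  mmap_alloc()/mmap_resize() below store and recompute:
+
+      char*  region_base = (char*)p - p->_prev_foot;
+      size_t region_size = p->chunksize() + p->_prev_foot + SPP_MMAP_FOOT_PAD;
+
+  Nothing beyond the chunk header itself is needed to recover the exact
+  arguments for MUNMAP (or MREMAP) when the chunk is freed or resized.
+*/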
+
+// Malloc using mmap
+void* malloc_state::mmap_alloc(size_t nb)
+{
+    size_t mmsize = mmap_align(nb + 6 * sizeof(size_t) + spp_chunk_align_mask);
+    if (_footprint_limit != 0)
+    {
+        size_t fp = _footprint + mmsize;
+        if (fp <= _footprint || fp > _footprint_limit)
+            return 0;
+    }
+    if (mmsize > nb)
+    {
+        // Check for wrap around 0
+        char* mm = (char*)(SPP_CALL_DIRECT_MMAP(mmsize));
+        if (mm != cmfail)
+        {
+            size_t offset = align_offset(chunk2mem(mm));
+            size_t psize = mmsize - offset - SPP_MMAP_FOOT_PAD;
+            mchunkptr p = (mchunkptr)(mm + offset);
+            p->_prev_foot = offset;
+            p->_head = psize;
+            mark_inuse_foot(p, psize);
+            p->chunk_plus_offset(psize)->_head = FENCEPOST_HEAD;
+            p->chunk_plus_offset(psize + sizeof(size_t))->_head = 0;
+
+            if (_least_addr == 0 || mm < _least_addr)
+                _least_addr = mm;
+            if ((_footprint += mmsize) > _max_footprint)
+                _max_footprint = _footprint;
+            assert(spp_is_aligned(chunk2mem(p)));
+            check_mmapped_chunk(p);
+            return chunk2mem(p);
+        }
+    }
+    return 0;
+}
+
+// Realloc using mmap
+mchunkptr malloc_state::mmap_resize(mchunkptr oldp, size_t nb, int flags)
+{
+    size_t oldsize = oldp->chunksize();
+    (void)flags;      // placate people compiling -Wunused
+    if (is_small(nb)) // Can't shrink mmap regions below small size
+        return 0;
+
+    // Keep old chunk if big enough but not too big
+    if (oldsize >= nb + sizeof(size_t) &&
+            (oldsize - nb) <= (mparams._granularity << 1))
+        return oldp;
+    else
+    {
+        size_t offset = oldp->_prev_foot;
+        size_t oldmmsize = oldsize + offset + SPP_MMAP_FOOT_PAD;
+        size_t newmmsize = mmap_align(nb + 6 * sizeof(size_t) + spp_chunk_align_mask);
+        char* cp = (char*)SPP_CALL_MREMAP((char*)oldp - offset,
+                                      oldmmsize, newmmsize, flags);
+        if (cp != cmfail)
+        {
+            mchunkptr newp = (mchunkptr)(cp + offset);
+            size_t psize = newmmsize - offset - SPP_MMAP_FOOT_PAD;
+            newp->_head = psize;
+            mark_inuse_foot(newp, psize);
+            newp->chunk_plus_offset(psize)->_head = FENCEPOST_HEAD;
+            newp->chunk_plus_offset(psize + sizeof(size_t))->_head = 0;
+
+            if (cp < _least_addr)
+                _least_addr = cp;
+            if ((_footprint += newmmsize - oldmmsize) > _max_footprint)
+                _max_footprint = _footprint;
+            check_mmapped_chunk(newp);
+            return newp;
+        }
+    }
+    return 0;
+}
+
+
+/* -------------------------- mspace management -------------------------- */
+
+// Initialize top chunk and its size
+void malloc_state::init_top(mchunkptr p, size_t psize)
+{
+    // Ensure alignment
+    size_t offset = align_offset(chunk2mem(p));
+    p = (mchunkptr)((char*)p + offset);
+    psize -= offset;
+
+    _top = p;
+    _topsize = psize;
+    p->_head = psize | PINUSE_BIT;
+    // set size of fake trailing chunk holding overhead space only once
+    p->chunk_plus_offset(psize)->_head = top_foot_size();
+    _trim_check = mparams._trim_threshold; // reset on each update
+}
+
+// Initialize bins for a new mstate that is otherwise zeroed out
+void malloc_state::init_bins()
+{
+    // Establish circular links for smallbins
+    bindex_t i;
+    for (i = 0; i < NSMALLBINS; ++i)
+    {
+        sbinptr bin = smallbin_at(i);
+        bin->_fd = bin->_bk = bin;
+    }
+}
+
+#if SPP_PROCEED_ON_ERROR
+
+// default corruption action
+void malloc_state::reset_on_error()
+{
+    int i;
+    ++malloc_corruption_error_count;
+    // Reinitialize fields to forget about all memory
+    _smallmap = _treemap = 0;
+    _dvsize = _topsize = 0;
+    _seg._base = 0;
+    _seg._size = 0;
+    _seg._next = 0;
+    _top = _dv = 0;
+    for (i = 0; i < NTREEBINS; ++i)
+        *treebin_at(i) = 0;
+    init_bins();
+}
+#endif
+
+/* Allocate chunk and prepend remainder with chunk in successor base. */
+void* malloc_state::prepend_alloc(char* newbase, char* oldbase, size_t nb)
+{
+    mchunkptr p = align_as_chunk(newbase);
+    mchunkptr oldfirst = align_as_chunk(oldbase);
+    size_t psize = (char*)oldfirst - (char*)p;
+    mchunkptr q = (mchunkptr)p->chunk_plus_offset(nb);
+    size_t qsize = psize - nb;
+    set_size_and_pinuse_of_inuse_chunk(p, nb);
+
+    assert((char*)oldfirst > (char*)q);
+    assert(oldfirst->pinuse());
+    assert(qsize >= MIN_CHUNK_SIZE);
+
+    // consolidate remainder with first chunk of old base
+    if (oldfirst == _top)
+    {
+        size_t tsize = _topsize += qsize;
+        _top = q;
+        q->_head = tsize | PINUSE_BIT;
+        check_top_chunk(q);
+    }
+    else if (oldfirst == _dv)
+    {
+        size_t dsize = _dvsize += qsize;
+        _dv = q;
+        q->set_size_and_pinuse_of_free_chunk(dsize);
+    }
+    else
+    {
+        if (!oldfirst->is_inuse())
+        {
+            size_t nsize = oldfirst->chunksize();
+            unlink_chunk(oldfirst, nsize);
+            oldfirst = (mchunkptr)oldfirst->chunk_plus_offset(nsize);
+            qsize += nsize;
+        }
+        q->set_free_with_pinuse(qsize, oldfirst);
+        insert_chunk(q, qsize);
+        check_free_chunk(q);
+    }
+
+    check_malloced_chunk(chunk2mem(p), nb);
+    return chunk2mem(p);
+}
+
+// Add a segment to hold a new noncontiguous region
+void malloc_state::add_segment(char* tbase, size_t tsize, flag_t mmapped)
+{
+    // Determine locations and sizes of segment, fenceposts, old top
+    char* old_top = (char*)_top;
+    msegmentptr oldsp = segment_holding(old_top);
+    char* old_end = oldsp->_base + oldsp->_size;
+    size_t ssize = pad_request(sizeof(struct malloc_segment));
+    char* rawsp = old_end - (ssize + 4 * sizeof(size_t) + spp_chunk_align_mask);
+    size_t offset = align_offset(chunk2mem(rawsp));
+    char* asp = rawsp + offset;
+    char* csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
+    mchunkptr sp = (mchunkptr)csp;
+    msegmentptr ss = (msegmentptr)(chunk2mem(sp));
+    mchunkptr tnext = (mchunkptr)sp->chunk_plus_offset(ssize);
+    mchunkptr p = tnext;
+    int nfences = 0;
+
+    // reset top to new space
+    init_top((mchunkptr)tbase, tsize - top_foot_size());
+
+    // Set up segment record
+    assert(spp_is_aligned(ss));
+    set_size_and_pinuse_of_inuse_chunk(sp, ssize);
+    *ss = _seg; // Push current record
+    _seg._base = tbase;
+    _seg._size = tsize;
+    _seg._sflags = mmapped;
+    _seg._next = ss;
+
+    // Insert trailing fenceposts
+    for (;;)
+    {
+        mchunkptr nextp = (mchunkptr)p->chunk_plus_offset(sizeof(size_t));
+        p->_head = FENCEPOST_HEAD;
+        ++nfences;
+        if ((char*)(&(nextp->_head)) < old_end)
+            p = nextp;
+        else
+            break;
+    }
+    assert(nfences >= 2);
+
+    // Insert the rest of old top into a bin as an ordinary free chunk
+    if (csp != old_top)
+    {
+        mchunkptr q = (mchunkptr)old_top;
+        size_t psize = csp - old_top;
+        mchunkptr tn = (mchunkptr)q->chunk_plus_offset(psize);
+        q->set_free_with_pinuse(psize, tn);
+        insert_chunk(q, psize);
+    }
+
+    check_top_chunk(_top);
+}
+
+/* -------------------------- System allocation -------------------------- */
+
+// Get memory from system using MMAP
+void* malloc_state::sys_alloc(size_t nb)
+{
+    char* tbase = cmfail;
+    size_t tsize = 0;
+    flag_t mmap_flag = 0;
+    size_t asize; // allocation size
+
+    mparams.ensure_initialization();
+
+    // Directly map large chunks, but only if already initialized
+    if (use_mmap() && nb >= mparams._mmap_threshold && _topsize != 0)
+    {
+        void* mem = mmap_alloc(nb);
+        if (mem != 0)
+            return mem;
+    }
+
+    asize = mparams.granularity_align(nb + sys_alloc_padding());
+    if (asize <= nb)
+        return 0; // wraparound
+    if (_footprint_limit != 0)
+    {
+        size_t fp = _footprint + asize;
+        if (fp <= _footprint || fp > _footprint_limit)
+            return 0;
+    }
+
+    /*
+      Try getting memory by calling MMAP to map new space (disabled if
+      SPP_HAVE_MMAP is not set).  We need to request enough bytes from
+      the system to ensure we can malloc nb bytes upon success, so pad
+      with enough space for top_foot, plus an alignment pad so that we
+      don't lose bytes if we are not on a boundary, and round this up to
+      a granularity unit.
+    */
+
+    if (SPP_HAVE_MMAP && tbase == cmfail)
+    {
+        // Try MMAP
+        char* mp = (char*)(SPP_CALL_MMAP(asize));
+        if (mp != cmfail)
+        {
+            tbase = mp;
+            tsize = asize;
+            mmap_flag = USE_MMAP_BIT;
+        }
+    }
+
+    if (tbase != cmfail)
+    {
+
+        if ((_footprint += tsize) > _max_footprint)
+            _max_footprint = _footprint;
+
+        if (!is_initialized())
+        {
+            // first-time initialization
+            if (_least_addr == 0 || tbase < _least_addr)
+                _least_addr = tbase;
+            _seg._base = tbase;
+            _seg._size = tsize;
+            _seg._sflags = mmap_flag;
+            _magic = mparams._magic;
+            _release_checks = SPP_MAX_RELEASE_CHECK_RATE;
+            init_bins();
+
+            // Offset top by embedded malloc_state
+            mchunkptr mn = (mchunkptr)mem2chunk(this)->next_chunk();
+            init_top(mn, (size_t)((tbase + tsize) - (char*)mn) - top_foot_size());
+        }
+
+        else
+        {
+            // Try to merge with an existing segment
+            msegmentptr sp = &_seg;
+            // Only consider most recent segment if traversal suppressed
+            while (sp != 0 && tbase != sp->_base + sp->_size)
+                sp = (SPP_NO_SEGMENT_TRAVERSAL) ? 0 : sp->_next;
+            if (sp != 0 &&
+                    !sp->is_extern_segment() &&
+                    (sp->_sflags & USE_MMAP_BIT) == mmap_flag &&
+                    segment_holds(sp, _top))
+            {
+                // append
+                sp->_size += tsize;
+                init_top(_top, _topsize + tsize);
+            }
+            else
+            {
+                if (tbase < _least_addr)
+                    _least_addr = tbase;
+                sp = &_seg;
+                while (sp != 0 && sp->_base != tbase + tsize)
+                    sp = (SPP_NO_SEGMENT_TRAVERSAL) ? 0 : sp->_next;
+                if (sp != 0 &&
+                        !sp->is_extern_segment() &&
+                        (sp->_sflags & USE_MMAP_BIT) == mmap_flag)
+                {
+                    char* oldbase = sp->_base;
+                    sp->_base = tbase;
+                    sp->_size += tsize;
+                    return prepend_alloc(tbase, oldbase, nb);
+                }
+                else
+                    add_segment(tbase, tsize, mmap_flag);
+            }
+        }
+
+        if (nb < _topsize)
+        {
+            // Allocate from new or extended top space
+            size_t rsize = _topsize -= nb;
+            mchunkptr p = _top;
+            mchunkptr r = _top = (mchunkptr)p->chunk_plus_offset(nb);
+            r->_head = rsize | PINUSE_BIT;
+            set_size_and_pinuse_of_inuse_chunk(p, nb);
+            check_top_chunk(_top);
+            check_malloced_chunk(chunk2mem(p), nb);
+            return chunk2mem(p);
+        }
+    }
+
+    SPP_MALLOC_FAILURE_ACTION;
+    return 0;
+}
+
+/* -----------------------  system deallocation -------------------------- */
+
+// Unmap and unlink any mmapped segments that don't contain used chunks
+size_t malloc_state::release_unused_segments()
+{
+    size_t released = 0;
+    int nsegs = 0;
+    msegmentptr pred = &_seg;
+    msegmentptr sp = pred->_next;
+    while (sp != 0)
+    {
+        char* base = sp->_base;
+        size_t size = sp->_size;
+        msegmentptr next = sp->_next;
+        ++nsegs;
+        if (sp->is_mmapped_segment() && !sp->is_extern_segment())
+        {
+            mchunkptr p = align_as_chunk(base);
+            size_t psize = p->chunksize();
+            // Can unmap if first chunk holds entire segment and not pinned
+            if (!p->is_inuse() && (char*)p + psize >= base + size - top_foot_size())
+            {
+                tchunkptr tp = (tchunkptr)p;
+                assert(segment_holds(sp, p));
+                if (p == _dv)
+                {
+                    _dv = 0;
+                    _dvsize = 0;
+                }
+                else
+                    unlink_large_chunk(tp);
+                if (SPP_CALL_MUNMAP(base, size) == 0)
+                {
+                    released += size;
+                    _footprint -= size;
+                    // unlink obsoleted record
+                    sp = pred;
+                    sp->_next = next;
+                }
+                else
+                {
+                    // back out if cannot unmap
+                    insert_large_chunk(tp, psize);
+                }
+            }
+        }
+        if (SPP_NO_SEGMENT_TRAVERSAL) // scan only first segment
+            break;
+        pred = sp;
+        sp = next;
+    }
+    // Reset check counter
+    _release_checks = (((size_t) nsegs > (size_t) SPP_MAX_RELEASE_CHECK_RATE) ?
+                       (size_t) nsegs : (size_t) SPP_MAX_RELEASE_CHECK_RATE);
+    return released;
+}
+
+int malloc_state::sys_trim(size_t pad)
+{
+    size_t released = 0;
+    mparams.ensure_initialization();
+    if (pad < MAX_REQUEST && is_initialized())
+    {
+        pad += top_foot_size(); // ensure enough room for segment overhead
+
+        if (_topsize > pad)
+        {
+            // Shrink top space in granularity-size units, keeping at least one
+            size_t unit = mparams._granularity;
+            size_t extra = ((_topsize - pad + (unit - 1)) / unit -
+                            1) * unit;
+            msegmentptr sp = segment_holding((char*)_top);
+
+            if (!sp->is_extern_segment())
+            {
+                if (sp->is_mmapped_segment())
+                {
+                    if (SPP_HAVE_MMAP &&
+                        sp->_size >= extra &&
+                        !has_segment_link(sp))
+                    {
+                        // can't shrink if pinned
+                        size_t newsize = sp->_size - extra;
+                        (void)newsize; // placate people compiling -Wunused-variable
+                        // Prefer mremap, fall back to munmap
+                        if ((SPP_CALL_MREMAP(sp->_base, sp->_size, newsize, 0) != mfail) ||
+                            (SPP_CALL_MUNMAP(sp->_base + newsize, extra) == 0))
+                            released = extra;
+                    }
+                }
+            }
+
+            if (released != 0)
+            {
+                sp->_size -= released;
+                _footprint -= released;
+                init_top(_top, _topsize - released);
+                check_top_chunk(_top);
+            }
+        }
+
+        // Unmap any unused mmapped segments
+        if (SPP_HAVE_MMAP)
+            released += release_unused_segments();
+
+        // On failure, disable autotrim to avoid repeated failed future calls
+        if (released == 0 && _topsize > _trim_check)
+            _trim_check = spp_max_size_t;
+    }
+
+    return (released != 0) ? 1 : 0;
+}
+
+/* Consolidate and bin a chunk. Differs from exported versions
+   of free mainly in that the chunk need not be marked as inuse.
+*/
+void malloc_state::dispose_chunk(mchunkptr p, size_t psize)
+{
+    mchunkptr next = (mchunkptr)p->chunk_plus_offset(psize);
+    if (!p->pinuse())
+    {
+        mchunkptr prev;
+        size_t prevsize = p->_prev_foot;
+        if (p->is_mmapped())
+        {
+            psize += prevsize + SPP_MMAP_FOOT_PAD;
+            if (SPP_CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+                _footprint -= psize;
+            return;
+        }
+        prev = (mchunkptr)p->chunk_minus_offset(prevsize);
+        psize += prevsize;
+        p = prev;
+        if (rtcheck(ok_address(prev)))
+        {
+            // consolidate backward
+            if (p != _dv)
+                unlink_chunk(p, prevsize);
+            else if ((next->_head & INUSE_BITS) == INUSE_BITS)
+            {
+                _dvsize = psize;
+                p->set_free_with_pinuse(psize, next);
+                return;
+            }
+        }
+        else
+        {
+            SPP_ABORT;
+            return;
+        }
+    }
+    if (rtcheck(ok_address(next)))
+    {
+        if (!next->cinuse())
+        {
+            // consolidate forward
+            if (next == _top)
+            {
+                size_t tsize = _topsize += psize;
+                _top = p;
+                p->_head = tsize | PINUSE_BIT;
+                if (p == _dv)
+                {
+                    _dv = 0;
+                    _dvsize = 0;
+                }
+                return;
+            }
+            else if (next == _dv)
+            {
+                size_t dsize = _dvsize += psize;
+                _dv = p;
+                p->set_size_and_pinuse_of_free_chunk(dsize);
+                return;
+            }
+            else
+            {
+                size_t nsize = next->chunksize();
+                psize += nsize;
+                unlink_chunk(next, nsize);
+                p->set_size_and_pinuse_of_free_chunk(psize);
+                if (p == _dv)
+                {
+                    _dvsize = psize;
+                    return;
+                }
+            }
+        }
+        else
+            p->set_free_with_pinuse(psize, next);
+        insert_chunk(p, psize);
+    }
+    else
+        SPP_ABORT;
+}
+
+/* ---------------------------- malloc --------------------------- */
+
+// allocate a large request from the best fitting chunk in a treebin
+void* malloc_state::tmalloc_large(size_t nb)
+{
+    tchunkptr v = 0;
+    size_t rsize = -nb; // Unsigned negation
+    tchunkptr t;
+    bindex_t idx = compute_tree_index(nb);
+    if ((t = *treebin_at(idx)) != 0)
+    {
+        // Traverse tree for this bin looking for node with size == nb
+        size_t sizebits = nb << leftshift_for_tree_index(idx);
+        tchunkptr rst = 0;  // The deepest untaken right subtree
+        for (;;)
+        {
+            tchunkptr rt;
+            size_t trem = t->chunksize() - nb;
+            if (trem < rsize)
+            {
+                v = t;
+                if ((rsize = trem) == 0)
+                    break;
+            }
+            rt = t->_child[1];
+            t = t->_child[(sizebits >> (spp_size_t_bitsize - 1)) & 1];
+            if (rt != 0 && rt != t)
+                rst = rt;
+            if (t == 0)
+            {
+                t = rst; // set t to least subtree holding sizes > nb
+                break;
+            }
+            sizebits <<= 1;
+        }
+    }
+    if (t == 0 && v == 0)
+    {
+        // set t to root of next non-empty treebin
+        binmap_t leftbits = left_bits(idx2bit(idx)) & _treemap;
+        if (leftbits != 0)
+        {
+            binmap_t leastbit = least_bit(leftbits);
+            bindex_t i = compute_bit2idx(leastbit);
+            t = *treebin_at(i);
+        }
+    }
+
+    while (t != 0)
+    {
+        // find smallest of tree or subtree
+        size_t trem = t->chunksize() - nb;
+        if (trem < rsize)
+        {
+            rsize = trem;
+            v = t;
+        }
+        t = t->leftmost_child();
+    }
+
+    //  If dv is a better fit, return 0 so malloc will use it
+    if (v != 0 && rsize < (size_t)(_dvsize - nb))
+    {
+        if (rtcheck(ok_address(v)))
+        {
+            // split
+            mchunkptr r = (mchunkptr)v->chunk_plus_offset(nb);
+            assert(v->chunksize() == rsize + nb);
+            if (rtcheck(ok_next(v, r)))
+            {
+                unlink_large_chunk(v);
+                if (rsize < MIN_CHUNK_SIZE)
+                    set_inuse_and_pinuse(v, (rsize + nb));
+                else
+                {
+                    set_size_and_pinuse_of_inuse_chunk(v, nb);
+                    r->set_size_and_pinuse_of_free_chunk(rsize);
+                    insert_chunk(r, rsize);
+                }
+                return chunk2mem(v);
+            }
+        }
+        SPP_ABORT;
+    }
+    return 0;
+}
+
+// allocate a small request from the best fitting chunk in a treebin
+void* malloc_state::tmalloc_small(size_t nb)
+{
+    tchunkptr t, v;
+    size_t rsize;
+    binmap_t leastbit = least_bit(_treemap);
+    bindex_t i = compute_bit2idx(leastbit);
+    v = t = *treebin_at(i);
+    rsize = t->chunksize() - nb;
+
+    while ((t = t->leftmost_child()) != 0)
+    {
+        size_t trem = t->chunksize() - nb;
+        if (trem < rsize)
+        {
+            rsize = trem;
+            v = t;
+        }
+    }
+
+    if (rtcheck(ok_address(v)))
+    {
+        mchunkptr r = (mchunkptr)v->chunk_plus_offset(nb);
+        assert(v->chunksize() == rsize + nb);
+        if (rtcheck(ok_next(v, r)))
+        {
+            unlink_large_chunk(v);
+            if (rsize < MIN_CHUNK_SIZE)
+                set_inuse_and_pinuse(v, (rsize + nb));
+            else
+            {
+                set_size_and_pinuse_of_inuse_chunk(v, nb);
+                r->set_size_and_pinuse_of_free_chunk(rsize);
+                replace_dv(r, rsize);
+            }
+            return chunk2mem(v);
+        }
+    }
+
+    SPP_ABORT;
+    return 0;
+}
+
+/* ---------------------------- malloc --------------------------- */
+
+void* malloc_state::_malloc(size_t bytes)
+{
+    if (1)
+    {
+        void* mem;
+        size_t nb;
+        if (bytes <= MAX_SMALL_REQUEST)
+        {
+            bindex_t idx;
+            binmap_t smallbits;
+            nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
+            idx = small_index(nb);
+            smallbits = _smallmap >> idx;
+
+            if ((smallbits & 0x3U) != 0)
+            {
+                // Remainderless fit to a smallbin.
+                mchunkptr b, p;
+                idx += ~smallbits & 1;       // Uses next bin if idx empty
+                b = smallbin_at(idx);
+                p = b->_fd;
+                assert(p->chunksize() == small_index2size(idx));
+                unlink_first_small_chunk(b, p, idx);
+                set_inuse_and_pinuse(p, small_index2size(idx));
+                mem = chunk2mem(p);
+                check_malloced_chunk(mem, nb);
+                goto postaction;
+            }
+
+            else if (nb > _dvsize)
+            {
+                if (smallbits != 0)
+                {
+                    // Use chunk in next nonempty smallbin
+                    mchunkptr b, p, r;
+                    size_t rsize;
+                    binmap_t leftbits = (smallbits << idx) & left_bits(malloc_state::idx2bit(idx));
+                    binmap_t leastbit = least_bit(leftbits);
+                    bindex_t i = compute_bit2idx(leastbit);
+                    b = smallbin_at(i);
+                    p = b->_fd;
+                    assert(p->chunksize() == small_index2size(i));
+                    unlink_first_small_chunk(b, p, i);
+                    rsize = small_index2size(i) - nb;
+                    // Fit here cannot be remainderless if sizes are 4 bytes
+                    if (sizeof(size_t) != 4 && rsize < MIN_CHUNK_SIZE)
+                        set_inuse_and_pinuse(p, small_index2size(i));
+                    else
+                    {
+                        set_size_and_pinuse_of_inuse_chunk(p, nb);
+                        r = (mchunkptr)p->chunk_plus_offset(nb);
+                        r->set_size_and_pinuse_of_free_chunk(rsize);
+                        replace_dv(r, rsize);
+                    }
+                    mem = chunk2mem(p);
+                    check_malloced_chunk(mem, nb);
+                    goto postaction;
+                }
+
+                else if (_treemap != 0 && (mem = tmalloc_small(nb)) != 0)
+                {
+                    check_malloced_chunk(mem, nb);
+                    goto postaction;
+                }
+            }
+        }
+        else if (bytes >= MAX_REQUEST)
+            nb = spp_max_size_t; // Too big to allocate. Force failure (in sys alloc)
+        else
+        {
+            nb = pad_request(bytes);
+            if (_treemap != 0 && (mem = tmalloc_large(nb)) != 0)
+            {
+                check_malloced_chunk(mem, nb);
+                goto postaction;
+            }
+        }
+
+        if (nb <= _dvsize)
+        {
+            size_t rsize = _dvsize - nb;
+            mchunkptr p = _dv;
+            if (rsize >= MIN_CHUNK_SIZE)
+            {
+                // split dv
+                mchunkptr r = _dv = (mchunkptr)p->chunk_plus_offset(nb);
+                _dvsize = rsize;
+                r->set_size_and_pinuse_of_free_chunk(rsize);
+                set_size_and_pinuse_of_inuse_chunk(p, nb);
+            }
+            else   // exhaust dv
+            {
+                size_t dvs = _dvsize;
+                _dvsize = 0;
+                _dv = 0;
+                set_inuse_and_pinuse(p, dvs);
+            }
+            mem = chunk2mem(p);
+            check_malloced_chunk(mem, nb);
+            goto postaction;
+        }
+
+        else if (nb < _topsize)
+        {
+            // Split top
+            size_t rsize = _topsize -= nb;
+            mchunkptr p = _top;
+            mchunkptr r = _top = (mchunkptr)p->chunk_plus_offset(nb);
+            r->_head = rsize | PINUSE_BIT;
+            set_size_and_pinuse_of_inuse_chunk(p, nb);
+            mem = chunk2mem(p);
+            check_top_chunk(_top);
+            check_malloced_chunk(mem, nb);
+            goto postaction;
+        }
+
+        mem = sys_alloc(nb);
+
+postaction:
+        return mem;
+    }
+
+    return 0;
+}
+
+/* ---------------------------- free --------------------------- */
+
+void malloc_state::_free(mchunkptr p)
+{
+    if (1)
+    {
+        check_inuse_chunk(p);
+        if (rtcheck(ok_address(p) && ok_inuse(p)))
+        {
+            size_t psize = p->chunksize();
+            mchunkptr next = (mchunkptr)p->chunk_plus_offset(psize);
+            if (!p->pinuse())
+            {
+                size_t prevsize = p->_prev_foot;
+                if (p->is_mmapped())
+                {
+                    psize += prevsize + SPP_MMAP_FOOT_PAD;
+                    if (SPP_CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+                        _footprint -= psize;
+                    goto postaction;
+                }
+                else
+                {
+                    mchunkptr prev = (mchunkptr)p->chunk_minus_offset(prevsize);
+                    psize += prevsize;
+                    p = prev;
+                    if (rtcheck(ok_address(prev)))
+                    {
+                        // consolidate backward
+                        if (p != _dv)
+                            unlink_chunk(p, prevsize);
+                        else if ((next->_head & INUSE_BITS) == INUSE_BITS)
+                        {
+                            _dvsize = psize;
+                            p->set_free_with_pinuse(psize, next);
+                            goto postaction;
+                        }
+                    }
+                    else
+                        goto erroraction;
+                }
+            }
+
+            if (rtcheck(ok_next(p, next) && ok_pinuse(next)))
+            {
+                if (!next->cinuse())
+                {
+                    // consolidate forward
+                    if (next == _top)
+                    {
+                        size_t tsize = _topsize += psize;
+                        _top = p;
+                        p->_head = tsize | PINUSE_BIT;
+                        if (p == _dv)
+                        {
+                            _dv = 0;
+                            _dvsize = 0;
+                        }
+                        if (should_trim(tsize))
+                            sys_trim(0);
+                        goto postaction;
+                    }
+                    else if (next == _dv)
+                    {
+                        size_t dsize = _dvsize += psize;
+                        _dv = p;
+                        p->set_size_and_pinuse_of_free_chunk(dsize);
+                        goto postaction;
+                    }
+                    else
+                    {
+                        size_t nsize = next->chunksize();
+                        psize += nsize;
+                        unlink_chunk(next, nsize);
+                        p->set_size_and_pinuse_of_free_chunk(psize);
+                        if (p == _dv)
+                        {
+                            _dvsize = psize;
+                            goto postaction;
+                        }
+                    }
+                }
+                else
+                    p->set_free_with_pinuse(psize, next);
+
+                if (is_small(psize))
+                {
+                    insert_small_chunk(p, psize);
+                    check_free_chunk(p);
+                }
+                else
+                {
+                    tchunkptr tp = (tchunkptr)p;
+                    insert_large_chunk(tp, psize);
+                    check_free_chunk(p);
+                    if (--_release_checks == 0)
+                        release_unused_segments();
+                }
+                goto postaction;
+            }
+        }
+erroraction:
+        SPP_USAGE_ERROR_ACTION(this, p);
+postaction:
+        ;
+    }
+}
+
+/* ------------ Internal support for realloc, memalign, etc -------------- */
+
+// Try to realloc; only in-place unless can_move is true
+mchunkptr malloc_state::try_realloc_chunk(mchunkptr p, size_t nb, int can_move)
+{
+    mchunkptr newp = 0;
+    size_t oldsize = p->chunksize();
+    mchunkptr next = (mchunkptr)p->chunk_plus_offset(oldsize);
+    if (rtcheck(ok_address(p) && ok_inuse(p) &&
+                ok_next(p, next) && ok_pinuse(next)))
+    {
+        if (p->is_mmapped())
+            newp = mmap_resize(p, nb, can_move);
+        else if (oldsize >= nb)
+        {
+            // already big enough
+            size_t rsize = oldsize - nb;
+            if (rsize >= MIN_CHUNK_SIZE)
+            {
+                // split off remainder
+                mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb);
+                set_inuse(p, nb);
+                set_inuse(r, rsize);
+                dispose_chunk(r, rsize);
+            }
+            newp = p;
+        }
+        else if (next == _top)
+        {
+            // extend into top
+            if (oldsize + _topsize > nb)
+            {
+                size_t newsize = oldsize + _topsize;
+                size_t newtopsize = newsize - nb;
+                mchunkptr newtop = (mchunkptr)p->chunk_plus_offset(nb);
+                set_inuse(p, nb);
+                newtop->_head = newtopsize | PINUSE_BIT;
+                _top = newtop;
+                _topsize = newtopsize;
+                newp = p;
+            }
+        }
+        else if (next == _dv)
+        {
+            // extend into dv
+            size_t dvs = _dvsize;
+            if (oldsize + dvs >= nb)
+            {
+                size_t dsize = oldsize + dvs - nb;
+                if (dsize >= MIN_CHUNK_SIZE)
+                {
+                    mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb);
+                    mchunkptr n = (mchunkptr)r->chunk_plus_offset(dsize);
+                    set_inuse(p, nb);
+                    r->set_size_and_pinuse_of_free_chunk(dsize);
+                    n->clear_pinuse();
+                    _dvsize = dsize;
+                    _dv = r;
+                }
+                else
+                {
+                    // exhaust dv
+                    size_t newsize = oldsize + dvs;
+                    set_inuse(p, newsize);
+                    _dvsize = 0;
+                    _dv = 0;
+                }
+                newp = p;
+            }
+        }
+        else if (!next->cinuse())
+        {
+            // extend into next free chunk
+            size_t nextsize = next->chunksize();
+            if (oldsize + nextsize >= nb)
+            {
+                size_t rsize = oldsize + nextsize - nb;
+                unlink_chunk(next, nextsize);
+                if (rsize < MIN_CHUNK_SIZE)
+                {
+                    size_t newsize = oldsize + nextsize;
+                    set_inuse(p, newsize);
+                }
+                else
+                {
+                    mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb);
+                    set_inuse(p, nb);
+                    set_inuse(r, rsize);
+                    dispose_chunk(r, rsize);
+                }
+                newp = p;
+            }
+        }
+    }
+    else
+        SPP_USAGE_ERROR_ACTION(this, chunk2mem(p));
+    return newp;
+}
+
+void* malloc_state::internal_memalign(size_t alignment, size_t bytes)
+{
+    void* mem = 0;
+    if (alignment < MIN_CHUNK_SIZE) // must be at least a minimum chunk size
+        alignment = MIN_CHUNK_SIZE;
+    if ((alignment & (alignment - 1)) != 0)
+    {
+        // Ensure a power of 2
+        size_t a = SPP_MALLOC_ALIGNMENT << 1;
+        while (a < alignment)
+            a <<= 1;
+        alignment = a;
+    }
+    if (bytes >= MAX_REQUEST - alignment)
+        SPP_MALLOC_FAILURE_ACTION;
+    else
+    {
+        size_t nb = request2size(bytes);
+        size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
+        mem = internal_malloc(req);
+        if (mem != 0)
+        {
+            mchunkptr p = mem2chunk(mem);
+            if ((((size_t)(mem)) & (alignment - 1)) != 0)
+            {
+                // misaligned
+                /*
+                  Find an aligned spot inside chunk.  Since we need to give
+                  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
+                  the first calculation places us at a spot with less than
+                  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
+                  We've allocated enough total room so that this is always
+                  possible.
+                */
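+                // Illustration (hypothetical case): with alignment == 64, br is the
+                // chunk whose user address is the next 64-byte boundary above mem;
+                // if that leaves a leader smaller than MIN_CHUNK_SIZE, pos steps one
+                // further alignment unit up so the leader can be given back as a chunk.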
+                char* br = (char*)mem2chunk((void *)(((size_t)((char*)mem + alignment - 1)) &
+                                                     -alignment));
+                char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE) ?
+                            br : br + alignment;
+                mchunkptr newp = (mchunkptr)pos;
+                size_t leadsize = pos - (char*)(p);
+                size_t newsize = p->chunksize() - leadsize;
+
+                if (p->is_mmapped())
+                {
+                    // For mmapped chunks, just adjust offset
+                    newp->_prev_foot = p->_prev_foot + leadsize;
+                    newp->_head = newsize;
+                }
+                else
+                {
+                    // Otherwise, give back leader, use the rest
+                    set_inuse(newp, newsize);
+                    set_inuse(p, leadsize);
+                    dispose_chunk(p, leadsize);
+                }
+                p = newp;
+            }
+
+            // Give back spare room at the end
+            if (!p->is_mmapped())
+            {
+                size_t size = p->chunksize();
+                if (size > nb + MIN_CHUNK_SIZE)
+                {
+                    size_t remainder_size = size - nb;
+                    mchunkptr remainder = (mchunkptr)p->chunk_plus_offset(nb);
+                    set_inuse(p, nb);
+                    set_inuse(remainder, remainder_size);
+                    dispose_chunk(remainder, remainder_size);
+                }
+            }
+
+            mem = chunk2mem(p);
+            assert(p->chunksize() >= nb);
+            assert(((size_t)mem & (alignment - 1)) == 0);
+            check_inuse_chunk(p);
+        }
+    }
+    return mem;
+}
+
+/*
+  Common support for independent_X routines, handling
+    all of the combinations that can result.
+  The opts arg has:
+    bit 0 set if all elements are same size (using sizes[0])
+    bit 1 set if elements should be zeroed
+*/
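+// For reference: the mspace_independent_calloc wrapper further below (currently
+// under "#if 0") calls this with opts == 3 (same-size elements, zeroed), and
+// mspace_independent_comalloc calls it with opts == 0 (per-element sizes, not zeroed).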
+void** malloc_state::ialloc(size_t n_elements, size_t* sizes, int opts,
+                            void* chunks[])
+{
+
+    size_t    element_size;   // chunksize of each element, if all same
+    size_t    contents_size;  // total size of elements
+    size_t    array_size;     // request size of pointer array
+    void*     mem;            // malloced aggregate space
+    mchunkptr p;              // corresponding chunk
+    size_t    remainder_size; // remaining bytes while splitting
+    void**    marray;         // either "chunks" or malloced ptr array
+    mchunkptr array_chunk;    // chunk for malloced ptr array
+    flag_t    was_enabled;    // to disable mmap
+    size_t    size;
+    size_t    i;
+
+    mparams.ensure_initialization();
+    // compute array length, if needed
+    if (chunks != 0)
+    {
+        if (n_elements == 0)
+            return chunks; // nothing to do
+        marray = chunks;
+        array_size = 0;
+    }
+    else
+    {
+        // if empty req, must still return chunk representing empty array
+        if (n_elements == 0)
+            return (void**)internal_malloc(0);
+        marray = 0;
+        array_size = request2size(n_elements * (sizeof(void*)));
+    }
+
+    // compute total element size
+    if (opts & 0x1)
+    {
+        // all-same-size
+        element_size = request2size(*sizes);
+        contents_size = n_elements * element_size;
+    }
+    else
+    {
+        // add up all the sizes
+        element_size = 0;
+        contents_size = 0;
+        for (i = 0; i != n_elements; ++i)
+            contents_size += request2size(sizes[i]);
+    }
+
+    size = contents_size + array_size;
+
+    /*
+      Allocate the aggregate chunk.  First disable direct-mmapping so
+      malloc won't use it, since we would not be able to later
+      free/realloc space internal to a segregated mmap region.
+    */
+    was_enabled = use_mmap();
+    disable_mmap();
+    mem = internal_malloc(size - CHUNK_OVERHEAD);
+    if (was_enabled)
+        enable_mmap();
+    if (mem == 0)
+        return 0;
+
+    p = mem2chunk(mem);
+    remainder_size = p->chunksize();
+
+    assert(!p->is_mmapped());
+
+    if (opts & 0x2)
+    {
+        // optionally clear the elements
+        memset((size_t*)mem, 0, remainder_size - sizeof(size_t) - array_size);
+    }
+
+    // If not provided, allocate the pointer array as final part of chunk
+    if (marray == 0)
+    {
+        size_t  array_chunk_size;
+        array_chunk = (mchunkptr)p->chunk_plus_offset(contents_size);
+        array_chunk_size = remainder_size - contents_size;
+        marray = (void**)(chunk2mem(array_chunk));
+        set_size_and_pinuse_of_inuse_chunk(array_chunk, array_chunk_size);
+        remainder_size = contents_size;
+    }
+
+    // split out elements
+    for (i = 0; ; ++i)
+    {
+        marray[i] = chunk2mem(p);
+        if (i != n_elements - 1)
+        {
+            if (element_size != 0)
+                size = element_size;
+            else
+                size = request2size(sizes[i]);
+            remainder_size -= size;
+            set_size_and_pinuse_of_inuse_chunk(p, size);
+            p = (mchunkptr)p->chunk_plus_offset(size);
+        }
+        else
+        {
+            // the final element absorbs any overallocation slop
+            set_size_and_pinuse_of_inuse_chunk(p, remainder_size);
+            break;
+        }
+    }
+
+#if SPP_DEBUG
+    if (marray != chunks)
+    {
+        // final element must have exactly exhausted chunk
+        if (element_size != 0)
+            assert(remainder_size == element_size);
+        else
+            assert(remainder_size == request2size(sizes[i]));
+        check_inuse_chunk(mem2chunk(marray));
+    }
+    for (i = 0; i != n_elements; ++i)
+        check_inuse_chunk(mem2chunk(marray[i]));
+
+#endif
+
+    return marray;
+}
+
+/* Try to free all pointers in the given array.
+   Note: this could be made faster, by delaying consolidation,
+   at the price of disabling some user integrity checks. We
+   still optimize some consolidations by combining adjacent
+   chunks before freeing, which will occur often if allocated
+   with ialloc or the array is sorted.
+*/
+size_t malloc_state::internal_bulk_free(void* array[], size_t nelem)
+{
+    size_t unfreed = 0;
+    if (1)
+    {
+        void** a;
+        void** fence = &(array[nelem]);
+        for (a = array; a != fence; ++a)
+        {
+            void* mem = *a;
+            if (mem != 0)
+            {
+                mchunkptr p = mem2chunk(mem);
+                size_t psize = p->chunksize();
+#if SPP_FOOTERS
+                if (get_mstate_for(p) != this)
+                {
+                    ++unfreed;
+                    continue;
+                }
+#endif
+                check_inuse_chunk(p);
+                *a = 0;
+                if (rtcheck(ok_address(p) && ok_inuse(p)))
+                {
+                    void ** b = a + 1; // try to merge with next chunk
+                    mchunkptr next = (mchunkptr)p->next_chunk();
+                    if (b != fence && *b == chunk2mem(next))
+                    {
+                        size_t newsize = next->chunksize() + psize;
+                        set_inuse(p, newsize);
+                        *b = chunk2mem(p);
+                    }
+                    else
+                        dispose_chunk(p, psize);
+                }
+                else
+                {
+                    SPP_ABORT;
+                    break;
+                }
+            }
+        }
+        if (should_trim(_topsize))
+            sys_trim(0);
+    }
+    return unfreed;
+}
+
+void malloc_state::init(char* tbase, size_t tsize)
+{
+    _seg._base = _least_addr = tbase;
+    _seg._size = _footprint = _max_footprint = tsize;
+    _magic    = mparams._magic;
+    _release_checks = SPP_MAX_RELEASE_CHECK_RATE;
+    _mflags   = mparams._default_mflags;
+    _extp     = 0;
+    _exts     = 0;
+    disable_contiguous();
+    init_bins();
+    mchunkptr mn = (mchunkptr)mem2chunk(this)->next_chunk();
+    init_top(mn, (size_t)((tbase + tsize) - (char*)mn) - top_foot_size());
+    check_top_chunk(_top);
+}
+
+/* Traversal */
+#if SPP_MALLOC_INSPECT_ALL
+void malloc_state::internal_inspect_all(void(*handler)(void *start, void *end,
+                                        size_t used_bytes,
+                                        void* callback_arg),
+                                        void* arg)
+{
+    if (is_initialized())
+    {
+        mchunkptr top = _top;
+        msegmentptr s;
+        for (s = &_seg; s != 0; s = s->_next)
+        {
+            mchunkptr q = align_as_chunk(s->_base);
+            while (segment_holds(s, q) && q->_head != FENCEPOST_HEAD)
+            {
+                mchunkptr next = (mchunkptr)q->next_chunk();
+                size_t sz = q->chunksize();
+                size_t used;
+                void* start;
+                if (q->is_inuse())
+                {
+                    used = sz - CHUNK_OVERHEAD; // must not be mmapped
+                    start = chunk2mem(q);
+                }
+                else
+                {
+                    used = 0;
+                    if (is_small(sz))
+                    {
+                        // offset by possible bookkeeping
+                        start = (void*)((char*)q + sizeof(struct malloc_chunk));
+                    }
+                    else
+                        start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
+                }
+                if (start < (void*)next)  // skip if all space is bookkeeping
+                    handler(start, next, used, arg);
+                if (q == top)
+                    break;
+                q = next;
+            }
+        }
+    }
+}
+#endif // SPP_MALLOC_INSPECT_ALL
+
+
+
+/* ----------------------------- user mspaces ---------------------------- */
+
+static mstate init_user_mstate(char* tbase, size_t tsize)
+{
+    size_t msize = pad_request(sizeof(malloc_state));
+    mchunkptr msp = align_as_chunk(tbase);
+    mstate m = (mstate)(chunk2mem(msp));
+    memset(m, 0, msize);
+    msp->_head = (msize | INUSE_BITS);
+    m->init(tbase, tsize);
+    return m;
+}
+
+SPP_API mspace create_mspace(size_t capacity, int locked)
+{
+    mstate m = 0;
+    size_t msize;
+    mparams.ensure_initialization();
+    msize = pad_request(sizeof(malloc_state));
+    if (capacity < (size_t) - (msize + top_foot_size() + mparams._page_size))
+    {
+        size_t rs = ((capacity == 0) ? mparams._granularity :
+                     (capacity + top_foot_size() + msize));
+        size_t tsize = mparams.granularity_align(rs);
+        char* tbase = (char*)(SPP_CALL_MMAP(tsize));
+        if (tbase != cmfail)
+        {
+            m = init_user_mstate(tbase, tsize);
+            m->_seg._sflags = USE_MMAP_BIT;
+            m->set_lock(locked);
+        }
+    }
+    return (mspace)m;
+}
+
+SPP_API size_t destroy_mspace(mspace msp)
+{
+    size_t freed = 0;
+    mstate ms = (mstate)msp;
+    if (ms->ok_magic())
+    {
+        msegmentptr sp = &ms->_seg;
+        while (sp != 0)
+        {
+            char* base = sp->_base;
+            size_t size = sp->_size;
+            flag_t flag = sp->_sflags;
+            (void)base; // placate people compiling -Wunused-variable
+            sp = sp->_next;
+            if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
+                SPP_CALL_MUNMAP(base, size) == 0)
+                freed += size;
+        }
+    }
+    else
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+    return freed;
+}
+
+/* ----------------------------  mspace versions of malloc/calloc/free routines -------------------- */
+SPP_API void* mspace_malloc(mspace msp, size_t bytes)
+{
+    mstate ms = (mstate)msp;
+    if (!ms->ok_magic())
+    {
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+        return 0;
+    }
+    return ms->_malloc(bytes);
+}
+
+SPP_API void mspace_free(mspace msp, void* mem)
+{
+    if (mem != 0)
+    {
+        mchunkptr p  = mem2chunk(mem);
+#if SPP_FOOTERS
+        mstate fm = get_mstate_for(p);
+        (void)msp; // placate people compiling -Wunused
+#else
+        mstate fm = (mstate)msp;
+#endif
+        if (!fm->ok_magic())
+        {
+            SPP_USAGE_ERROR_ACTION(fm, p);
+            return;
+        }
+        fm->_free(p);
+    }
+}
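+// Minimal usage sketch of the mspace API above (illustrative only):
+//
+//     mspace msp = create_mspace(0, 0);      // capacity 0 => one granularity unit
+//     void*  p   = mspace_malloc(msp, 128);
+//     mspace_free(msp, p);
+//     destroy_mspace(msp);                   // unmaps the whole space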
+
+SPP_API void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
+{
+    void* mem;
+    size_t req = 0;
+    mstate ms = (mstate)msp;
+    if (!ms->ok_magic())
+    {
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+        return 0;
+    }
+    if (n_elements != 0)
+    {
+        req = n_elements * elem_size;
+        if (((n_elements | elem_size) & ~(size_t)0xffff) &&
+                (req / n_elements != elem_size))
+            req = spp_max_size_t; // force downstream failure on overflow
+    }
+    mem = ms->internal_malloc(req);
+    if (mem != 0 && mem2chunk(mem)->calloc_must_clear())
+        memset(mem, 0, req);
+    return mem;
+}
+
+SPP_API void* mspace_realloc(mspace msp, void* oldmem, size_t bytes)
+{
+    void* mem = 0;
+    if (oldmem == 0)
+        mem = mspace_malloc(msp, bytes);
+    else if (bytes >= MAX_REQUEST)
+        SPP_MALLOC_FAILURE_ACTION;
+#ifdef REALLOC_ZERO_BYTES_FREES
+    else if (bytes == 0)
+        mspace_free(msp, oldmem);
+#endif
+    else
+    {
+        size_t nb = request2size(bytes);
+        mchunkptr oldp = mem2chunk(oldmem);
+#if ! SPP_FOOTERS
+        mstate m = (mstate)msp;
+#else
+        mstate m = get_mstate_for(oldp);
+        if (!m->ok_magic())
+        {
+            SPP_USAGE_ERROR_ACTION(m, oldmem);
+            return 0;
+        }
+#endif
+        if (1)
+        {
+            mchunkptr newp = m->try_realloc_chunk(oldp, nb, 1);
+            if (newp != 0)
+            {
+                m->check_inuse_chunk(newp);
+                mem = chunk2mem(newp);
+            }
+            else
+            {
+                mem = mspace_malloc(m, bytes);
+                if (mem != 0)
+                {
+                    size_t oc = oldp->chunksize() - oldp->overhead_for();
+                    memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
+                    mspace_free(m, oldmem);
+                }
+            }
+        }
+    }
+    return mem;
+}
+
+#if 0
+
+SPP_API mspace create_mspace_with_base(void* base, size_t capacity, int locked)
+{
+    mstate m = 0;
+    size_t msize;
+    mparams.ensure_initialization();
+    msize = pad_request(sizeof(malloc_state));
+    if (capacity > msize + top_foot_size() &&
+        capacity < (size_t) - (msize + top_foot_size() + mparams._page_size))
+    {
+        m = init_user_mstate((char*)base, capacity);
+        m->_seg._sflags = EXTERN_BIT;
+        m->set_lock(locked);
+    }
+    return (mspace)m;
+}
+
+SPP_API int mspace_track_large_chunks(mspace msp, int enable)
+{
+    int ret = 0;
+    mstate ms = (mstate)msp;
+    if (1)
+    {
+        if (!ms->use_mmap())
+            ret = 1;
+        if (!enable)
+            ms->enable_mmap();
+        else
+            ms->disable_mmap();
+    }
+    return ret;
+}
+
+SPP_API void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes)
+{
+    void* mem = 0;
+    if (oldmem != 0)
+    {
+        if (bytes >= MAX_REQUEST)
+            SPP_MALLOC_FAILURE_ACTION;
+        else
+        {
+            size_t nb = request2size(bytes);
+            mchunkptr oldp = mem2chunk(oldmem);
+#if ! SPP_FOOTERS
+            mstate m = (mstate)msp;
+#else
+            mstate m = get_mstate_for(oldp);
+            (void)msp; // placate people compiling -Wunused
+            if (!m->ok_magic())
+            {
+                SPP_USAGE_ERROR_ACTION(m, oldmem);
+                return 0;
+            }
+#endif
+            if (1)
+            {
+                mchunkptr newp = m->try_realloc_chunk(oldp, nb, 0);
+                if (newp == oldp)
+                {
+                    m->check_inuse_chunk(newp);
+                    mem = oldmem;
+                }
+            }
+        }
+    }
+    return mem;
+}
+
+SPP_API void* mspace_memalign(mspace msp, size_t alignment, size_t bytes)
+{
+    mstate ms = (mstate)msp;
+    if (!ms->ok_magic())
+    {
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+        return 0;
+    }
+    if (alignment <= SPP_MALLOC_ALIGNMENT)
+        return mspace_malloc(msp, bytes);
+    return ms->internal_memalign(alignment, bytes);
+}
+
+SPP_API void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                        size_t elem_size, void* chunks[])
+{
+    size_t sz = elem_size; // serves as 1-element array
+    mstate ms = (mstate)msp;
+    if (!ms->ok_magic())
+    {
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+        return 0;
+    }
+    return ms->ialloc(n_elements, &sz, 3, chunks);
+}
+
+SPP_API void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                          size_t sizes[], void* chunks[])
+{
+    mstate ms = (mstate)msp;
+    if (!ms->ok_magic())
+    {
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+        return 0;
+    }
+    return ms->ialloc(n_elements, sizes, 0, chunks);
+}
+
+#endif
+
+SPP_API size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem)
+{
+    return ((mstate)msp)->internal_bulk_free(array, nelem);
+}
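+// Minimal usage sketch (illustrative; 'msp' is an existing mspace):
+//
+//     void* ptrs[3];
+//     ptrs[0] = mspace_malloc(msp, 16);
+//     ptrs[1] = mspace_malloc(msp, 32);
+//     ptrs[2] = mspace_malloc(msp, 64);
+//     size_t unfreed = mspace_bulk_free(msp, ptrs, 3); // freed entries are zeroed in ptrs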
+
+#if SPP_MALLOC_INSPECT_ALL
+SPP_API void mspace_inspect_all(mspace msp,
+                                void(*handler)(void *start,
+                                               void *end,
+                                               size_t used_bytes,
+                                               void* callback_arg),
+                                void* arg)
+{
+    mstate ms = (mstate)msp;
+    if (ms->ok_magic())
+        ms->internal_inspect_all(handler, arg);
+    else
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+}
+#endif
+
+SPP_API int mspace_trim(mspace msp, size_t pad)
+{
+    int result = 0;
+    mstate ms = (mstate)msp;
+    if (ms->ok_magic())
+        result = ms->sys_trim(pad);
+    else
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+    return result;
+}
+
+SPP_API size_t mspace_footprint(mspace msp)
+{
+    size_t result = 0;
+    mstate ms = (mstate)msp;
+    if (ms->ok_magic())
+        result = ms->_footprint;
+    else
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+    return result;
+}
+
+SPP_API size_t mspace_max_footprint(mspace msp)
+{
+    size_t result = 0;
+    mstate ms = (mstate)msp;
+    if (ms->ok_magic())
+        result = ms->_max_footprint;
+    else
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+    return result;
+}
+
+SPP_API size_t mspace_footprint_limit(mspace msp)
+{
+    size_t result = 0;
+    mstate ms = (mstate)msp;
+    if (ms->ok_magic())
+    {
+        size_t maf = ms->_footprint_limit;
+        result = (maf == 0) ? spp_max_size_t : maf;
+    }
+    else
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+    return result;
+}
+
+SPP_API size_t mspace_set_footprint_limit(mspace msp, size_t bytes)
+{
+    size_t result = 0;
+    mstate ms = (mstate)msp;
+    if (ms->ok_magic())
+    {
+        if (bytes == 0)
+            result = mparams.granularity_align(1); // Use minimal size
+        else if (bytes == spp_max_size_t)
+            result = 0;                    // disable
+        else
+            result = mparams.granularity_align(bytes);
+        ms->_footprint_limit = result;
+    }
+    else
+        SPP_USAGE_ERROR_ACTION(ms, ms);
+    return result;
+}
+
+SPP_API size_t mspace_usable_size(const void* mem)
+{
+    if (mem != 0)
+    {
+        mchunkptr p = mem2chunk(mem);
+        if (p->is_inuse())
+            return p->chunksize() - p->overhead_for();
+    }
+    return 0;
+}
+
+SPP_API int mspace_mallopt(int param_number, int value)
+{
+    return mparams.change(param_number, value);
+}
+
+} // spp_ namespace
+
+
+#endif // SPP_EXCLUDE_IMPLEMENTATION
+
+#endif // spp_dlalloc__h_
diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_memory.h b/resources/3rdparty/sparsepp/sparsepp/spp_memory.h
new file mode 100755
index 000000000..f208e73cb
--- /dev/null
+++ b/resources/3rdparty/sparsepp/sparsepp/spp_memory.h
@@ -0,0 +1,121 @@
+#if !defined(spp_memory_h_guard)
+#define spp_memory_h_guard
+
+#include <cstdint>
+#include <cstdio>   // fopen/fgets/fclose used by GetProcessMemoryUsed() below
+#include <cstring>
+#include <cstdlib>
+
+#if defined(_WIN32) || defined( __CYGWIN__)
+    #define SPP_WIN
+#endif
+
+#ifdef SPP_WIN
+    #include <windows.h>
+    #include <Psapi.h>
+    #undef min
+    #undef max
+#else
+    #include <sys/types.h>
+    #include <sys/sysinfo.h>
+#endif
+
+namespace spp
+{
+    inline uint64_t GetSystemMemory()
+    {
+#ifdef SPP_WIN
+        MEMORYSTATUSEX memInfo;
+        memInfo.dwLength = sizeof(MEMORYSTATUSEX);
+        GlobalMemoryStatusEx(&memInfo);
+        return static_cast<uint64_t>(memInfo.ullTotalPageFile);
+#else
+        struct sysinfo memInfo;
+        sysinfo (&memInfo);
+        auto totalVirtualMem = memInfo.totalram;
+
+        totalVirtualMem += memInfo.totalswap;
+        totalVirtualMem *= memInfo.mem_unit;
+        return static_cast<uint64_t>(totalVirtualMem);
+#endif
+    }
+
+    inline uint64_t GetTotalMemoryUsed()
+    {
+#ifdef SPP_WIN
+        MEMORYSTATUSEX memInfo;
+        memInfo.dwLength = sizeof(MEMORYSTATUSEX);
+        GlobalMemoryStatusEx(&memInfo);
+        return static_cast<uint64_t>(memInfo.ullTotalPageFile - memInfo.ullAvailPageFile);
+#else
+        struct sysinfo memInfo;
+        sysinfo(&memInfo);
+        auto virtualMemUsed = memInfo.totalram - memInfo.freeram;
+
+        virtualMemUsed += memInfo.totalswap - memInfo.freeswap;
+        virtualMemUsed *= memInfo.mem_unit;
+
+        return static_cast<uint64_t>(virtualMemUsed);
+#endif
+    }
+
+    inline uint64_t GetProcessMemoryUsed()
+    {
+#ifdef SPP_WIN
+        PROCESS_MEMORY_COUNTERS_EX pmc;
+        GetProcessMemoryInfo(GetCurrentProcess(), reinterpret_cast<PPROCESS_MEMORY_COUNTERS>(&pmc), sizeof(pmc));
+        return static_cast<uint64_t>(pmc.PrivateUsage);
+#else
+        auto parseLine = 
+            [](char* line)->int
+            {
+                // line is of the form "VmSize:    1234 kB"; scan with a separate
+                // pointer so line[i-3] below still indexes the original string
+                auto i = strlen(line);
+                const char* p = line;
+
+                while (*p < '0' || *p > '9') 
+                {
+                    p++;
+                }
+
+                line[i-3] = '\0'; // strip the trailing " kB"
+                return atoi(p);
+            };
+
+        auto file = fopen("/proc/self/status", "r");
+        auto result = -1;
+        char line[128];
+
+        while(fgets(line, 128, file) != nullptr)
+        {
+            if(strncmp(line, "VmSize:", 7) == 0)
+            {
+                result = parseLine(line);
+                break;
+            }
+        }
+
+        fclose(file);
+        return static_cast<uint64_t>(result) * 1024;
+#endif
+    }
+
+    inline uint64_t GetPhysicalMemory()
+    {
+#ifdef SPP_WIN
+        MEMORYSTATUSEX memInfo;
+        memInfo.dwLength = sizeof(MEMORYSTATUSEX);
+        GlobalMemoryStatusEx(&memInfo);
+        return static_cast<uint64_t>(memInfo.ullTotalPhys);
+#else
+        struct sysinfo memInfo;
+        sysinfo(&memInfo);
+
+        auto totalPhysMem = memInfo.totalram;
+
+        totalPhysMem *= memInfo.mem_unit;
+        return static_cast<uint64_t>(totalPhysMem);
+#endif
+    }
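+    // Minimal usage sketch (illustrative): all helpers above report sizes in bytes.
+    //
+    //     uint64_t total = spp::GetSystemMemory();      // RAM + swap / commit limit
+    //     uint64_t used  = spp::GetTotalMemoryUsed();   // system-wide usage
+    //     uint64_t self  = spp::GetProcessMemoryUsed(); // current process only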
+
+}
+
+#endif // spp_memory_h_guard
diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_smartptr.h b/resources/3rdparty/sparsepp/sparsepp/spp_smartptr.h
new file mode 100755
index 000000000..28c4588e4
--- /dev/null
+++ b/resources/3rdparty/sparsepp/sparsepp/spp_smartptr.h
@@ -0,0 +1,76 @@
+#if !defined(spp_smartptr_h_guard)
+#define spp_smartptr_h_guard
+
+
+/* -----------------------------------------------------------------------------------------------
+ * quick version of intrusive_ptr
+ * -----------------------------------------------------------------------------------------------
+ */
+
+#include <cassert>
+#include <sparsepp/spp_config.h>
+
+// ------------------------------------------------------------------------
+class spp_rc
+{
+public:
+    spp_rc() : _cnt(0) {}
+    spp_rc(const spp_rc &) : _cnt(0) {}
+    void increment() const { ++_cnt; }
+    void decrement() const { assert(_cnt); if (--_cnt == 0) delete this; }
+    unsigned count() const { return _cnt; }
+
+protected:
+    virtual ~spp_rc() {}
+
+private:
+    mutable unsigned _cnt;
+};
+
+// ------------------------------------------------------------------------
+template <class T>
+class spp_sptr
+{
+public:
+    spp_sptr() : _p(0) {}
+    spp_sptr(T *p) : _p(p)                  { if (_p) _p->increment(); }
+    spp_sptr(const spp_sptr &o) : _p(o._p)  { if (_p) _p->increment(); }
+#ifndef SPP_NO_CXX11_RVALUE_REFERENCES 
+    spp_sptr(spp_sptr &&o) : _p(o._p)       { o._p = (T *)0; }
+    spp_sptr& operator=(spp_sptr &&o)
+    {
+        if (_p) _p->decrement(); 
+        _p = o._p;
+        o._p = (T *)0; 
+        return *this;
+    }
+#endif    
+    ~spp_sptr()                             { if (_p) _p->decrement(); }
+    spp_sptr& operator=(const spp_sptr &o)  { reset(o._p); return *this; }
+    T* get() const                          { return _p; }
+    void swap(spp_sptr &o)                  { T *tmp = _p; _p = o._p; o._p = tmp; }
+    void reset(const T *p = 0)             
+    { 
+        if (p == _p) 
+            return; 
+        if (_p) _p->decrement(); 
+        _p = (T *)p; 
+        if (_p) _p->increment();
+    }
+    T*   operator->() const { return const_cast<T *>(_p); }
+    bool operator!()  const { return _p == 0; }
+
+private:
+    T *_p;
+};    
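+// Minimal usage sketch (illustrative; 'Node' is a hypothetical type):
+//
+//     struct Node : public spp_rc { int value; };
+//
+//     spp_sptr<Node> a(new Node);  // refcount == 1
+//     spp_sptr<Node> b(a);         // refcount == 2
+//     a.reset();                   // refcount == 1
+//     // the Node is deleted once 'b' goes out of scope (refcount reaches 0)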
+
+// ------------------------------------------------------------------------
+namespace std
+{
+    template <class T>
+    inline void swap(spp_sptr<T> &a, spp_sptr<T> &b)
+    {
+        a.swap(b);
+    }
+}
+
+#endif // spp_smartptr_h_guard
diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_stdint.h b/resources/3rdparty/sparsepp/sparsepp/spp_stdint.h
new file mode 100755
index 000000000..500d3d35b
--- /dev/null
+++ b/resources/3rdparty/sparsepp/sparsepp/spp_stdint.h
@@ -0,0 +1,16 @@
+#if !defined(spp_stdint_h_guard)
+#define spp_stdint_h_guard
+
+#include <sparsepp/spp_config.h>
+
+#if defined(SPP_HAS_CSTDINT) && (__cplusplus >= 201103)
+    #include <cstdint>
+#else
+    #if defined(__FreeBSD__) || defined(__IBMCPP__) || defined(_AIX)
+        #include <inttypes.h>
+    #else
+        #include <stdint.h>
+    #endif
+#endif
+
+#endif // spp_stdint_h_guard
diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_timer.h b/resources/3rdparty/sparsepp/sparsepp/spp_timer.h
new file mode 100755
index 000000000..48180f4d0
--- /dev/null
+++ b/resources/3rdparty/sparsepp/sparsepp/spp_timer.h
@@ -0,0 +1,58 @@
+/**
+   Copyright (c) 2016 Mariano Gonzalez
+
+   Permission is hereby granted, free of charge, to any person obtaining a copy
+   of this software and associated documentation files (the "Software"), to deal
+   in the Software without restriction, including without limitation the rights
+   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+   copies of the Software, and to permit persons to whom the Software is
+   furnished to do so, subject to the following conditions:
+
+   The above copyright notice and this permission notice shall be included in all
+   copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+*/
+
+#ifndef spp_timer_h_guard
+#define spp_timer_h_guard
+
+#include <chrono>
+
+namespace spp
+{
+    template<typename time_unit = std::milli>
+    class Timer 
+    {
+    public:
+        Timer()                 { reset(); }
+        void reset()            { _start = _snap = clock::now();  }
+        void snap()             { _snap = clock::now();  }
+
+        float get_total() const { return get_diff<float>(_start, clock::now()); }
+        float get_delta() const { return get_diff<float>(_snap, clock::now());  }
+        
+    private:
+        using clock = std::chrono::high_resolution_clock;
+        using point = std::chrono::time_point<clock>;
+
+        template<typename T>
+        static T get_diff(const point& start, const point& end) 
+        {
+            using duration_t = std::chrono::duration<T, time_unit>;
+
+            return std::chrono::duration_cast<duration_t>(end - start).count();
+        }
+
+        point _start;
+        point _snap;
+    };
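+    // Minimal usage sketch (illustrative): measuring elapsed milliseconds.
+    //
+    //     spp::Timer<std::milli> timer;
+    //     // ... do some work ...
+    //     float total_ms = timer.get_total();  // since construction / reset()
+    //     timer.snap();
+    //     // ... do more work ...
+    //     float delta_ms = timer.get_delta();  // since the last snap()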
+}
+
+#endif // spp_timer_h_guard
diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_traits.h b/resources/3rdparty/sparsepp/sparsepp/spp_traits.h
new file mode 100755
index 000000000..bd105093f
--- /dev/null
+++ b/resources/3rdparty/sparsepp/sparsepp/spp_traits.h
@@ -0,0 +1,122 @@
+#if !defined(spp_traits_h_guard)
+#define spp_traits_h_guard
+
+#include <sparsepp/spp_config.h>
+
+template<int S, int H> class HashObject; // for Google's benchmark, not in spp namespace!
+
+namespace spp_
+{
+
+// ---------------------------------------------------------------------------
+//                       type_traits we need
+// ---------------------------------------------------------------------------
+template<class T, T v>
+struct integral_constant { static const T value = v; };
+
+template <class T, T v> const T integral_constant<T, v>::value;
+
+typedef integral_constant<bool, true>  true_type;
+typedef integral_constant<bool, false> false_type;
+
+typedef integral_constant<int, 0>      zero_type;
+typedef integral_constant<int, 1>      one_type;
+typedef integral_constant<int, 2>      two_type;
+typedef integral_constant<int, 3>      three_type;
+
+template<typename T, typename U> struct is_same : public false_type { };
+template<typename T> struct is_same<T, T> : public true_type { };
+
+template<typename T> struct remove_const { typedef T type; };
+template<typename T> struct remove_const<T const> { typedef T type; };
+
+template<typename T> struct remove_volatile { typedef T type; };
+template<typename T> struct remove_volatile<T volatile> { typedef T type; };
+
+template<typename T> struct remove_cv 
+{
+    typedef typename remove_const<typename remove_volatile<T>::type>::type type;
+};
+
+// ---------------- is_integral ----------------------------------------
+template <class T> struct is_integral;
+template <class T> struct is_integral         : false_type { };
+template<> struct is_integral<bool>           : true_type { };
+template<> struct is_integral<char>           : true_type { };
+template<> struct is_integral<unsigned char>  : true_type { };
+template<> struct is_integral<signed char>    : true_type { };
+template<> struct is_integral<short>          : true_type { };
+template<> struct is_integral<unsigned short> : true_type { };
+template<> struct is_integral<int>            : true_type { };
+template<> struct is_integral<unsigned int>   : true_type { };
+template<> struct is_integral<long>           : true_type { };
+template<> struct is_integral<unsigned long>  : true_type { };
+#ifdef SPP_HAS_LONG_LONG
+    template<> struct is_integral<long long>  : true_type { };
+    template<> struct is_integral<unsigned long long> : true_type { };
+#endif
+template <class T> struct is_integral<const T>          : is_integral<T> { };
+template <class T> struct is_integral<volatile T>       : is_integral<T> { };
+template <class T> struct is_integral<const volatile T> : is_integral<T> { };
+
+// ---------------- is_floating_point ----------------------------------------
+template <class T> struct is_floating_point;
+template <class T> struct is_floating_point      : false_type { };
+template<> struct is_floating_point<float>       : true_type { };
+template<> struct is_floating_point<double>      : true_type { };
+template<> struct is_floating_point<long double> : true_type { };
+template <class T> struct is_floating_point<const T> :        is_floating_point<T> { };
+template <class T> struct is_floating_point<volatile T>       : is_floating_point<T> { };
+template <class T> struct is_floating_point<const volatile T> : is_floating_point<T> { };
+
+//  ---------------- is_pointer ----------------------------------------
+template <class T> struct is_pointer;
+template <class T> struct is_pointer     : false_type { };
+template <class T> struct is_pointer<T*> : true_type { };
+template <class T> struct is_pointer<const T>          : is_pointer<T> { };
+template <class T> struct is_pointer<volatile T>       : is_pointer<T> { };
+template <class T> struct is_pointer<const volatile T> : is_pointer<T> { };
+
+//  ---------------- is_reference ----------------------------------------
+template <class T> struct is_reference;
+template<typename T> struct is_reference     : false_type {};
+template<typename T> struct is_reference<T&> : true_type {};
+
+//  ---------------- is_relocatable ----------------------------------------
+// Relocatable values can be moved around in memory using memcpy and remain
+// correct. Most types are relocatable; an example of a type that is not is a
+// struct that contains a pointer to a buffer inside itself - this is the
+// case for std::string in gcc 5.
+// ------------------------------------------------------------------------
+template <class T> struct is_relocatable;
+template <class T> struct is_relocatable :
+     integral_constant<bool, (is_integral<T>::value || is_floating_point<T>::value)>
+{ };
+
+template<int S, int H> struct is_relocatable<HashObject<S, H> > : true_type { };
+
+template <class T> struct is_relocatable<const T>          : is_relocatable<T> { };
+template <class T> struct is_relocatable<volatile T>       : is_relocatable<T> { };
+template <class T> struct is_relocatable<const volatile T> : is_relocatable<T> { };
+template <class A, int N> struct is_relocatable<A[N]>      : is_relocatable<A> { };
+template <class T, class U> struct is_relocatable<std::pair<T, U> > :
+     integral_constant<bool, (is_relocatable<T>::value && is_relocatable<U>::value)>
+{ };
+
+// A template helper used to select A or B based on a condition.
+// ------------------------------------------------------------
+template<bool cond, typename A, typename B>
+struct if_
+{
+    typedef A type;
+};
+
+template<typename A, typename B>
+struct if_<false, A, B> 
+{
+    typedef B type;
+};
+
+}  // spp_ namespace
+
+#endif // spp_traits_h_guard
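As a side note, a small compile-time illustration (hypothetical, not part of the patch) of the hand-rolled traits defined in spp_traits.h above; every name used below appears in that header:

    #include <utility>
    #include <sparsepp/spp_traits.h>

    // compile-time checks against the spp_ traits
    static_assert(spp_::is_integral<unsigned long>::value, "integral");
    static_assert(spp_::is_floating_point<double>::value, "floating point");
    static_assert(spp_::is_same<spp_::remove_cv<const volatile int>::type, int>::value, "cv stripped");
    static_assert(spp_::is_relocatable<std::pair<int, float> >::value, "pair of relocatable members");

    // if_<cond, A, B>::type selects A when the condition is true, otherwise B
    typedef spp_::if_<true, int, double>::type chosen;
    static_assert(spp_::is_same<chosen, int>::value, "if_ picks the first type when the condition holds");

    int main() { return 0; }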
diff --git a/resources/3rdparty/sparsepp/spp_utils.h b/resources/3rdparty/sparsepp/sparsepp/spp_utils.h
old mode 100644
new mode 100755
similarity index 67%
rename from resources/3rdparty/sparsepp/spp_utils.h
rename to resources/3rdparty/sparsepp/sparsepp/spp_utils.h
index 96a8f5bf3..743ab7bca
--- a/resources/3rdparty/sparsepp/spp_utils.h
+++ b/resources/3rdparty/sparsepp/sparsepp/spp_utils.h
@@ -1,7 +1,7 @@
 // ----------------------------------------------------------------------
 // Copyright (c) 2016, Steven Gregory Popovitch - greg7mdp@gmail.com
 // All rights reserved.
-// 
+//
 // Code derived from Boost libraries.
 // Boost software licence reproduced below.
 //
@@ -15,8 +15,8 @@
 // copyright notice, this list of conditions and the following disclaimer
 // in the documentation and/or other materials provided with the
 // distribution.
-//     * The name of Steven Gregory Popovitch may not be used to 
-// endorse or promote products derived from this software without 
+//     * The name of Steven Gregory Popovitch may not be used to
+// endorse or promote products derived from this software without
 // specific prior written permission.
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@@ -34,21 +34,21 @@
 
 // ---------------------------------------------------------------------------
 // Boost Software License - Version 1.0 - August 17th, 2003
-// 
+//
 // Permission is hereby granted, free of charge, to any person or organization
 // obtaining a copy of the software and accompanying documentation covered by
 // this license (the "Software") to use, reproduce, display, distribute,
 // execute, and transmit the Software, and to prepare derivative works of the
 // Software, and to permit third-parties to whom the Software is furnished to
 // do so, all subject to the following:
-// 
+//
 // The copyright notices in the Software and this entire statement, including
 // the above license grant, this restriction and the following disclaimer,
 // must be included in all copies of the Software, in whole or in part, and
 // all derivative works of the Software, unless such copies or derivative
 // works are solely in the form of machine-executable object code generated by
 // a source language processor.
-// 
+//
 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 // FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
@@ -63,16 +63,12 @@
 //                  ----------------------------
 //
 //    Implements spp::spp_hash() and spp::hash_combine()
-//
-//    The exact same code is duplicated in sparsepp.h.
-//
-//    WARNING: Any change here has to be duplicated in sparsepp.h.
 //  ----------------------------------------------------------------------
 
 #if !defined(spp_utils_h_guard_)
 #define spp_utils_h_guard_
 
-#if defined(_MSC_VER) 
+#if defined(_MSC_VER)
     #if (_MSC_VER >= 1600 )                      // vs2010 (1900 is vs2015)
         #include <functional>
         #define SPP_HASH_CLASS std::hash
@@ -83,6 +79,18 @@
     #if (_MSC_FULL_VER < 190021730)
         #define SPP_NO_CXX11_NOEXCEPT
     #endif
+#elif defined __clang__
+    #if __has_feature(cxx_noexcept)  // what to use here?
+       #include <functional>
+       #define SPP_HASH_CLASS  std::hash
+    #else
+       #include <tr1/unordered_map>
+       #define SPP_HASH_CLASS std::tr1::hash
+    #endif
+
+    #if !__has_feature(cxx_noexcept)
+        #define SPP_NO_CXX11_NOEXCEPT
+    #endif
 #elif defined(__GNUC__)
     #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L)
         #include <functional>
@@ -96,13 +104,6 @@
         #define SPP_HASH_CLASS std::tr1::hash
         #define SPP_NO_CXX11_NOEXCEPT
     #endif
-#elif defined __clang__
-    #include <functional>
-    #define SPP_HASH_CLASS  std::hash
-
-    #if !__has_feature(cxx_noexcept)
-        #define SPP_NO_CXX11_NOEXCEPT
-    #endif
 #else
     #include <functional>
     #define SPP_HASH_CLASS  std::hash
@@ -122,17 +123,20 @@
 
 #define SPP_INLINE
 
-#ifndef SPP_NAMESPACE
-    #define SPP_NAMESPACE spp
+#ifndef spp_
+    #define spp_ spp
 #endif
 
-namespace SPP_NAMESPACE
+namespace spp_
 {
 
+template <class T>  T spp_min(T a, T b) { return a < b  ? a : b; }
+template <class T>  T spp_max(T a, T b) { return a >= b ? a : b; }
+
 template <class T>
 struct spp_hash
 {
-    SPP_INLINE size_t operator()(const T &__v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(const T &__v) const SPP_NOEXCEPT
     {
         SPP_HASH_CLASS<T> hasher;
         return hasher(__v);
@@ -142,10 +146,10 @@ struct spp_hash
 template <class T>
 struct spp_hash<T *>
 {
-    static size_t spp_log2 (size_t val) SPP_NOEXCEPT 
+    static size_t spp_log2 (size_t val) SPP_NOEXCEPT
     {
         size_t res = 0;
-        while (val > 1) 
+        while (val > 1)
         {
             val >>= 1;
             res++;
@@ -153,15 +157,16 @@ struct spp_hash<T *>
         return res;
     }
 
-    SPP_INLINE size_t operator()(const T *__v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(const T *__v) const SPP_NOEXCEPT
     {
         static const size_t shift = 3; // spp_log2(1 + sizeof(T)); // T might be incomplete!
-        return static_cast<size_t>((*(reinterpret_cast<const uintptr_t *>(&__v))) >> shift);
+        const uintptr_t i = (const uintptr_t)__v;
+        return static_cast<size_t>(i >> shift);
     }
 };
 
 // from http://burtleburtle.net/bob/hash/integer.html
-// fast and efficient for power of two table sizes where we always 
+// fast and efficient for power of two table sizes where we always
 // consider the last bits.
 // ---------------------------------------------------------------
 inline size_t spp_mix_32(uint32_t a)
@@ -172,7 +177,7 @@ inline size_t spp_mix_32(uint32_t a)
     return static_cast<size_t>(a);
 }
 
-// Maybe we should do a more thorough scrambling as described in 
+// Maybe we should do a more thorough scrambling as described in
 // https://gist.github.com/badboy/6267743
 // -------------------------------------------------------------
 inline size_t spp_mix_64(uint64_t a)
@@ -180,83 +185,83 @@ inline size_t spp_mix_64(uint64_t a)
     a = a ^ (a >> 4);
     a = (a ^ 0xdeadbeef) + (a << 5);
     a = a ^ (a >> 11);
-    return a;
+    return (size_t)a;
 }
 
 template <>
 struct spp_hash<bool> : public std::unary_function<bool, size_t>
 {
-    SPP_INLINE size_t operator()(bool __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(bool __v) const SPP_NOEXCEPT
     { return static_cast<size_t>(__v); }
 };
 
 template <>
 struct spp_hash<char> : public std::unary_function<char, size_t>
 {
-    SPP_INLINE size_t operator()(char __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(char __v) const SPP_NOEXCEPT
     { return static_cast<size_t>(__v); }
 };
 
 template <>
 struct spp_hash<signed char> : public std::unary_function<signed char, size_t>
 {
-    SPP_INLINE size_t operator()(signed char __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(signed char __v) const SPP_NOEXCEPT
     { return static_cast<size_t>(__v); }
 };
 
 template <>
 struct spp_hash<unsigned char> : public std::unary_function<unsigned char, size_t>
 {
-    SPP_INLINE size_t operator()(unsigned char __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(unsigned char __v) const SPP_NOEXCEPT
     { return static_cast<size_t>(__v); }
 };
 
 template <>
 struct spp_hash<wchar_t> : public std::unary_function<wchar_t, size_t>
 {
-    SPP_INLINE size_t operator()(wchar_t __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(wchar_t __v) const SPP_NOEXCEPT
     { return static_cast<size_t>(__v); }
 };
 
 template <>
 struct spp_hash<int16_t> : public std::unary_function<int16_t, size_t>
 {
-    SPP_INLINE size_t operator()(int16_t __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(int16_t __v) const SPP_NOEXCEPT
     { return spp_mix_32(static_cast<uint32_t>(__v)); }
 };
 
-template <> 
+template <>
 struct spp_hash<uint16_t> : public std::unary_function<uint16_t, size_t>
 {
-    SPP_INLINE size_t operator()(uint16_t __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(uint16_t __v) const SPP_NOEXCEPT
     { return spp_mix_32(static_cast<uint32_t>(__v)); }
 };
 
 template <>
 struct spp_hash<int32_t> : public std::unary_function<int32_t, size_t>
 {
-    SPP_INLINE size_t operator()(int32_t __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(int32_t __v) const SPP_NOEXCEPT
     { return spp_mix_32(static_cast<uint32_t>(__v)); }
 };
 
 template <>
 struct spp_hash<uint32_t> : public std::unary_function<uint32_t, size_t>
 {
-    SPP_INLINE size_t operator()(uint32_t __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(uint32_t __v) const SPP_NOEXCEPT
     { return spp_mix_32(static_cast<uint32_t>(__v)); }
 };
 
 template <>
 struct spp_hash<int64_t> : public std::unary_function<int64_t, size_t>
 {
-    SPP_INLINE size_t operator()(int64_t __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(int64_t __v) const SPP_NOEXCEPT
     { return spp_mix_64(static_cast<uint64_t>(__v)); }
 };
 
 template <>
 struct spp_hash<uint64_t> : public std::unary_function<uint64_t, size_t>
 {
-    SPP_INLINE size_t operator()(uint64_t __v) const SPP_NOEXCEPT 
+    SPP_INLINE size_t operator()(uint64_t __v) const SPP_NOEXCEPT
     { return spp_mix_64(static_cast<uint64_t>(__v)); }
 };
 
@@ -306,12 +311,136 @@ template <class T> struct Combiner<T, 8>
 template <class T>
 inline void hash_combine(std::size_t& seed, T const& v)
 {
-    spp::spp_hash<T> hasher;
+    spp_::spp_hash<T> hasher;
     Combiner<std::size_t, sizeof(std::size_t)> combiner;
 
     combiner(seed, hasher(v));
 }
-    
+
+static inline uint32_t s_spp_popcount_default(uint32_t i) SPP_NOEXCEPT
+{
+    i = i - ((i >> 1) & 0x55555555);
+    i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
+    return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
+}
+
+static inline uint32_t s_spp_popcount_default(uint64_t x) SPP_NOEXCEPT
+{
+    const uint64_t m1  = uint64_t(0x5555555555555555); // binary: 0101...
+    const uint64_t m2  = uint64_t(0x3333333333333333); // binary: 00110011..
+    const uint64_t m4  = uint64_t(0x0f0f0f0f0f0f0f0f); // binary:  4 zeros,  4 ones ...
+    const uint64_t h01 = uint64_t(0x0101010101010101); // the sum of 256 to the power of 0,1,2,3...
+
+    x -= (x >> 1) & m1;             // put count of each 2 bits into those 2 bits
+    x = (x & m2) + ((x >> 2) & m2); // put count of each 4 bits into those 4 bits 
+    x = (x + (x >> 4)) & m4;        // put count of each 8 bits into those 8 bits 
+    return (x * h01)>>56;           // returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24)+...
+}
+
+#ifdef __APPLE__
+    static inline uint32_t count_trailing_zeroes(size_t v) SPP_NOEXCEPT
+    {
+        size_t x = (v & -v) - 1;
+        // sadly sizeof() required to build on macos 
+        return sizeof(size_t) == 8 ? s_spp_popcount_default((uint64_t)x) : s_spp_popcount_default((uint32_t)x);
+    }
+
+    static inline uint32_t s_popcount(size_t v) SPP_NOEXCEPT
+    {
+        // sadly sizeof() required to build on macos 
+        return sizeof(size_t) == 8 ? s_spp_popcount_default((uint64_t)v) : s_spp_popcount_default((uint32_t)v);
+    }
+#else
+    static inline uint32_t count_trailing_zeroes(size_t v) SPP_NOEXCEPT
+    {
+        return s_spp_popcount_default((v & -(intptr_t)v) - 1);
+    }
+
+    static inline uint32_t s_popcount(size_t v) SPP_NOEXCEPT
+    {
+        return s_spp_popcount_default(v);
+    }
+#endif
+
+// -----------------------------------------------------------
+// -----------------------------------------------------------
+template<class T>
+class libc_allocator
+{
+public:
+    typedef T         value_type;
+    typedef T*        pointer;
+    typedef ptrdiff_t difference_type;
+    typedef const T*  const_pointer;
+    typedef size_t    size_type;
+
+    libc_allocator() {}
+    libc_allocator(const libc_allocator &) {}
+    libc_allocator& operator=(const libc_allocator &) { return *this; }
+
+#ifndef SPP_NO_CXX11_RVALUE_REFERENCES    
+    libc_allocator(libc_allocator &&) {}
+    libc_allocator& operator=(libc_allocator &&) { return *this; }
+#endif
+
+    pointer allocate(size_t n, const_pointer  /* unused */= 0) 
+    {
+        return static_cast<pointer>(malloc(n * sizeof(T)));
+    }
+
+    void deallocate(pointer p, size_t /* unused */) 
+    {
+        free(p);
+    }
+
+    pointer reallocate(pointer p, size_t new_size) 
+    {
+        return static_cast<pointer>(realloc(p, new_size * sizeof(T)));
+    }
+
+    // extra API to match spp_allocator interface
+    pointer reallocate(pointer p, size_t /* old_size */, size_t new_size) 
+    {
+        return static_cast<pointer>(realloc(p, new_size * sizeof(T)));
+    }
+
+    size_type max_size() const
+    {
+        return static_cast<size_type>(-1) / sizeof(value_type);
+    }
+
+    void construct(pointer p, const value_type& val)
+    {
+        new(p) value_type(val);
+    }
+
+    void destroy(pointer p) { p->~value_type(); }
+
+    template<class U>
+    struct rebind
+    {
+        typedef spp_::libc_allocator<U> other;
+    };
+
+};
+
+// forward declaration
+// -------------------
+template<class T>
+class spp_allocator;
+
+}
+
+template<class T>
+inline bool operator==(const spp_::libc_allocator<T> &, const spp_::libc_allocator<T> &)
+{
+    return true;
+}
+
+template<class T>
+inline bool operator!=(const spp_::libc_allocator<T> &, const spp_::libc_allocator<T> &)
+{
+    return false;
 }
 
 #endif // spp_utils_h_guard_
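As a quick illustration of the hash_combine() helper implemented in this header (a sketch; Point and PointHash are hypothetical names, the spp calls are the ones defined above):

    #include <cstdio>
    #include <sparsepp/spp_utils.h>

    struct Point { int x; int y; };

    // hash a composite key by mixing the member hashes with spp::hash_combine
    struct PointHash
    {
        size_t operator()(const Point& p) const
        {
            size_t seed = 0;
            spp::hash_combine(seed, p.x);
            spp::hash_combine(seed, p.y);
            return seed;
        }
    };

    int main()
    {
        Point p = {3, 4};
        printf("hash = %zu\n", PointHash()(p));
        return 0;
    }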
diff --git a/resources/3rdparty/sparsepp/spp.natvis b/resources/3rdparty/sparsepp/spp.natvis
new file mode 100755
index 000000000..1ca15df6f
--- /dev/null
+++ b/resources/3rdparty/sparsepp/spp.natvis
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="utf-8"?>  
+
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">  
+  <!-- VC 2015 -->
+  <Type Name="spp::sparse_hash_set&lt;*,*,*,*&gt;">  
+  <AlternativeType Name="spp::sparse_hash_map&lt;*,*,*,*,*&gt;" />  
+      <DisplayString>{{size = {rep.table._num_buckets}}}</DisplayString>  
+      <Expand>  
+        <CustomListItems MaxItemsPerView="1000" ExcludeView="Test">  
+          <Variable Name="grp" InitialValue="rep.table._first_group" />  
+          <Variable Name="last_grp" InitialValue="rep.table._last_group" />  
+          <Variable Name="item_ptr" InitialValue="rep.table._first_group-&gt;_group" />  
+          <Variable Name="cnt" InitialValue="-1" />  
+    
+          <Size>rep.table._num_buckets</Size>  
+          <Loop>  
+              <Break Condition="grp == last_grp" />  
+              <Exec>item_ptr = grp-&gt;_group</Exec>  
+              <Exec>cnt = grp-&gt;_num_buckets</Exec>  
+              <Loop>
+                <Break Condition="cnt == 0" />  
+                <Item>item_ptr,na</Item>  
+                <Exec>item_ptr++</Exec>  
+                <Exec>cnt--</Exec>  
+              </Loop>  
+              <Exec>++grp</Exec>  
+          </Loop>
+        </CustomListItems>  
+      </Expand>  
+  </Type>  
+    
+  <Type Name="spp::Two_d_iterator&lt;*,*,*,*&gt;">
+    <DisplayString Condition="row_current==0">end()</DisplayString>
+    <DisplayString Condition="row_current->_group == -1">end()</DisplayString>
+    <DisplayString>{*col_current}</DisplayString>
+    <Expand>
+      <ExpandedItem Condition="row_current->_group != -1">*col_current</ExpandedItem>
+    </Expand>
+  </Type>
+
+</AutoVisualizer>  
diff --git a/resources/3rdparty/sparsepp/tests/makefile b/resources/3rdparty/sparsepp/tests/makefile
new file mode 100755
index 000000000..df4eb6f6c
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/makefile
@@ -0,0 +1,27 @@
+CXXFLAGS     = -O2 -std=c++11 -I..
+CXXFLAGS    += -Wall -pedantic -Wextra -D_XOPEN_SOURCE=700 
+SPP_DEPS_1   =  spp.h spp_utils.h spp_dlalloc.h spp_traits.h spp_config.h
+SPP_DEPS     = $(addprefix ../sparsepp/,$(SPP_DEPS_1))
+TARGETS      = spp_test spp_alloc_test spp_bitset_test perftest1 bench
+
+
+ifeq ($(OS),Windows_NT)
+    LDFLAGS  = -lpsapi
+endif
+
+def: spp_test 
+
+all: $(TARGETS)
+
+clean:
+	rm -rf $(TARGETS) vsprojects/x64/* vsprojects/x86/*
+
+test:
+	./spp_test
+
+spp_test: spp_test.cc $(SPP_DEPS) makefile
+	$(CXX) $(CXXFLAGS) -D_CRT_SECURE_NO_WARNINGS spp_test.cc -o spp_test
+
+%: %.cc $(SPP_DEPS) makefile
+	$(CXX) $(CXXFLAGS) -DNDEBUG $< -o $@ $(LDFLAGS)
+
diff --git a/resources/3rdparty/sparsepp/tests/perftest1.cc b/resources/3rdparty/sparsepp/tests/perftest1.cc
new file mode 100755
index 000000000..ae8609e79
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/perftest1.cc
@@ -0,0 +1,162 @@
+// compile on linux with: g++ -std=c++11 -O2 perftest1.cc -o perftest1
+// -----------------------------------------------------------------------
+#include <fstream>
+#include <iostream>
+#include <ctime>
+#include <cstdio>
+#include <climits>
+#include <functional>
+#include <vector>
+#include <utility>
+
+#include <sparsepp/spp_timer.h>
+
+#define SPP 1
+#define DENSE 0
+#define SPARSE 0
+#define STD 0
+
+#if SPP
+    #include <sparsepp/spp.h>
+#elif DENSE
+    #include <google/dense_hash_map>
+#elif SPARSE
+    #include <google/sparse_hash_map>
+#elif STD
+    #include <unordered_map>
+#endif
+
+using std::make_pair;
+
+template <class T>
+void test(T &s, int count) 
+{
+    spp::Timer<std::milli> timer;
+
+    timer.snap();
+    srand(0);
+    for (int i = 0; i < count; ++i) 
+        s.insert(make_pair(rand(), i));
+
+    printf("%d random inserts         in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    timer.snap();
+    srand(0);
+    for (int i = 0; i < count; ++i)
+        s.find(rand()); 
+
+    printf("%d random finds           in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    timer.snap();
+    srand(1);
+    for (int i = 0; i < count; ++i)
+        s.find(rand());
+    printf("%d random not-finds       in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    s.clear();
+    timer.snap();
+    srand(0);
+    for (int i = 0; i < count; ++i) 
+        s.insert(make_pair(i, i));
+    printf("%d sequential inserts     in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    timer.snap();
+    srand(0);
+    for (int i = 0; i < count; ++i)
+        s.find(i);
+
+    printf("%d sequential finds       in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    timer.snap();
+    srand(1);
+    for (int i = 0; i < count; ++i) 
+    { 
+        int x = rand();
+        s.find(x);
+    }
+    printf("%d random not-finds       in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    s.clear();
+    timer.snap();
+    srand(0);
+    for (int i = 0; i < count; ++i) 
+        s.insert(make_pair(-i, -i));
+
+    printf("%d neg sequential inserts in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    timer.snap();
+    srand(0);
+    for (int i = 0; i < count; ++i)
+        s.find(-i);
+
+    printf("%d neg sequential finds   in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    timer.snap();
+    srand(1);
+    for (int i = 0; i < count; ++i) 
+        s.find(rand());
+    printf("%d random not-finds       in %5.2f seconds\n", count, timer.get_delta() / 1000);
+
+    s.clear();    
+}
+
+
+struct Hasher64 {
+    size_t operator()(uint64_t k) const { return (k ^ 14695981039346656037ULL) * 1099511628211ULL; }
+};
+
+struct Hasher32 {
+    size_t operator()(uint32_t k) const { return (k ^ 2166136261U)  * 16777619UL; }
+};
+
+struct Hasheri32 {
+    size_t operator()(int k) const 
+    {
+        return (k ^ 2166136261U)  * 16777619UL; 
+    }
+};
+
+struct Hasher_32 {
+    size_t operator()(int k) const 
+    {
+        uint32_t a = (uint32_t)k;
+#if 0
+        a = (a ^ 61) ^ (a >> 16);
+        a = a + (a << 3);
+        a = a ^ (a >> 4);
+        a = a * 0x27d4eb2d;
+        a = a ^ (a >> 15);
+        return a;
+#else
+        a = a ^ (a >> 4);
+        a = (a ^ 0xdeadbeef) + (a << 5);
+        a = a ^ (a >> 11);
+        return a;
+#endif
+    }
+};
+
+int main() 
+{
+#if SPP
+    spp::sparse_hash_map<int, int /*, Hasheri32 */> s;
+    printf ("Testing spp::sparse_hash_map\n");
+#elif DENSE
+    google::dense_hash_map<int, int/* , Hasher_32 */> s;
+    s.set_empty_key(-INT_MAX); 
+    s.set_deleted_key(-(INT_MAX - 1));
+    printf ("Testing google::dense_hash_map\n");
+#elif SPARSE
+    google::sparse_hash_map<int, int/* , Hasher_32 */> s;
+    s.set_deleted_key(-INT_MAX); 
+    printf ("Testing google::sparse_hash_map\n");
+#elif STD
+    std::unordered_map<int, int/* , Hasher_32 */> s;
+    printf ("Testing std::unordered_map\n");
+#endif
+    printf ("------------------------------\n");
+    test(s, 50000000);
+
+
+    return 0;
+}
diff --git a/resources/3rdparty/sparsepp/tests/spp_alloc_test.cc b/resources/3rdparty/sparsepp/tests/spp_alloc_test.cc
new file mode 100755
index 000000000..06b23ac80
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/spp_alloc_test.cc
@@ -0,0 +1,189 @@
+#include <memory>
+#include <cassert>
+#include <cstdio>
+#include <stdlib.h> 
+#include <algorithm> 
+#include <vector>
+
+// enable debugging code in spp_bitset.h
+#define SPP_TEST 1
+
+#include <sparsepp/spp_timer.h>
+#include <sparsepp/spp_memory.h>
+#include <sparsepp/spp_dlalloc.h>
+
+using namespace std;
+
+static float _to_mb(uint64_t m) { return (float)((double)m / (1024 * 1024)); }
+
+// -----------------------------------------------------------
+// -----------------------------------------------------------
+template <class T, class A>
+class TestAlloc
+{
+public:
+    TestAlloc(size_t num_alloc = 8000000) : 
+        _num_alloc(num_alloc)
+    {
+        _allocated.resize(_num_alloc, nullptr);
+        _sizes.resize(_num_alloc, 0);
+        _start_mem_usage = spp::GetProcessMemoryUsed();
+    }
+
+    void run()
+    {
+        srand(43); // always same sequence of random numbers
+
+        for (size_t i=0; i<_num_alloc; ++i)
+            _sizes[i] = std::max(2, (rand() % 5) * 2);
+                                 
+        spp::Timer<std::milli> timer;
+
+        // allocate small buffers
+        // ----------------------
+        for (size_t i=0; i<_num_alloc; ++i)
+        {
+            _allocated[i] = _allocator.allocate(_sizes[i]);
+            _set_buf(_allocated[i], _sizes[i]);
+        }
+        
+#if 1
+        // and grow the buffers to a max size of 24 each
+        // ---------------------------------------------
+        for (uint32_t j=4; j<26; j += 2)
+        {
+            for (size_t i=0; i<_num_alloc; ++i)
+            {
+                // if ( _sizes[i] < j)                    // windows allocator friendly!
+                if ((rand() % 4) != 3 && _sizes[i] < j)   // really messes up windows allocator
+                {
+                    _allocated[i] = _allocator.reallocate(_allocated[i], j);
+                    _check_buf(_allocated[i], _sizes[i]);
+                    _set_buf(_allocated[i], j);
+                    _sizes[i] = j;
+                }
+            }
+        }
+#endif
+
+#if 0
+        // test erase (shrinking the buffers)
+        // ---------------------------------------------
+        for (uint32_t j=28; j>4; j -= 2)
+        {
+            for (size_t i=0; i<_num_alloc; ++i)
+            {
+                // if ( _sizes[i] < j)                    // windows allocator friendly!
+                if ((rand() % 4) != 3 && _sizes[i] > j)   // really messes up windows allocator
+                {
+                    _allocated[i] = _allocator.reallocate(_allocated[i], j);
+                    _check_buf1(_allocated[i], _sizes[i]);
+                    _set_buf(_allocated[i], j);
+                    _sizes[i] = j;
+                }
+            }
+        }
+#endif
+
+#if 0
+        // and grow the buffers back to a max size of 24 each
+        // --------------------------------------------------
+        for (uint32_t j=4; j<26; j += 2)
+        {
+            for (size_t i=0; i<_num_alloc; ++i)
+            {
+                // if ( _sizes[i] < j)                    // windows allocator friendly!
+                if ((rand() % 4) != 3 && _sizes[i] < j)   // really messes up windows allocator
+                {
+                    _allocated[i] = _allocator.reallocate(_allocated[i], j);
+                    _check_buf(_allocated[i], _sizes[i]);
+                    _set_buf(_allocated[i], j);
+                    _sizes[i] = j;
+                }
+            }
+        }
+#endif
+
+        size_t total_units = 0;
+        for (size_t i=0; i<_num_alloc; ++i)
+            total_units += _sizes[i];
+        
+        uint64_t mem_usage          = spp::GetProcessMemoryUsed();
+        uint64_t alloc_mem_usage    = mem_usage - _start_mem_usage;
+        uint64_t expected_mem_usage = total_units * sizeof(T);
+
+        // finally free the memory
+        // -----------------------
+        for (size_t i=0; i<_num_alloc; ++i)
+        {
+            _check_buf(_allocated[i], _sizes[i]);
+            _allocator.deallocate(_allocated[i], _sizes[i]);
+        }
+
+        uint64_t mem_usage_end = spp::GetProcessMemoryUsed();
+
+        printf("allocated %zd entities of size %zd\n", total_units, sizeof(T));
+        printf("done in %3.2f seconds, mem_usage %4.1f/%4.1f/%4.1f MB\n", 
+               timer.get_total() / 1000, _to_mb(_start_mem_usage),  _to_mb(mem_usage),  _to_mb(mem_usage_end));
+        printf("expected mem usage: %4.1f\n", _to_mb(expected_mem_usage));
+        if (expected_mem_usage <= alloc_mem_usage)
+            printf("overhead: %4.1f%%\n", 
+                   (float)((double)(alloc_mem_usage - expected_mem_usage) / expected_mem_usage) * 100);
+        else
+            printf("bug: alloc_mem_usage <= expected_mem_usage\n");
+        
+        std::vector<T *>().swap(_allocated);
+        std::vector<uint32_t>().swap(_sizes);
+
+        printf("\nmem usage after freeing vectors: %4.1f\n", _to_mb(spp::GetProcessMemoryUsed()));
+    }
+
+private:
+
+    void _set_buf(T *buff, uint32_t sz) { *buff = (T)sz; buff[sz - 1] = (T)sz; }
+    void _check_buf1(T *buff, uint32_t sz) 
+    { 
+        assert(*buff == (T)sz); 
+        (void)(buff + sz); // silence warning
+    }
+    void _check_buf(T *buff, uint32_t sz) 
+    { 
+        assert(*buff == (T)sz &&  buff[sz - 1] == (T)sz); 
+        (void)(buff + sz); // silence warning
+    }
+
+    size_t                _num_alloc;
+    uint64_t              _start_mem_usage;
+    std::vector<T *>      _allocated;
+    std::vector<uint32_t> _sizes;
+    A                     _allocator;
+};
+
+// -----------------------------------------------------------
+// -----------------------------------------------------------
+template <class X, class A>
+void run_test(const char *alloc_name)
+{
+    printf("\n---------------- testing %s\n\n", alloc_name);
+
+    printf("\nmem usage before the alloc test: %4.1f\n", 
+           _to_mb(spp::GetProcessMemoryUsed()));
+    {
+        TestAlloc< X, A >  test_alloc;
+        test_alloc.run();
+    }
+    printf("mem usage after the alloc test: %4.1f\n",
+           _to_mb(spp::GetProcessMemoryUsed()));
+
+    printf("\n\n");
+}
+
+// -----------------------------------------------------------
+// -----------------------------------------------------------
+int main()
+{
+    typedef uint64_t X;
+
+    run_test<X, spp::libc_allocator<X>>("libc_allocator");
+    run_test<X, spp::spp_allocator<X>>("spp_allocator");
+}
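For reference, a stripped-down version of what spp_alloc_test.cc exercises above — the allocate / reallocate / deallocate cycle common to spp::libc_allocator and spp::spp_allocator (a sketch only; the test's buffer checks and memory accounting are omitted):

    #include <cstdio>
    #include <sparsepp/spp_dlalloc.h>   // provides spp::spp_allocator / spp::libc_allocator as used in the test

    int main()
    {
        spp::libc_allocator<int> alloc;     // spp::spp_allocator<int> follows the same interface

        int* p = alloc.allocate(8);         // room for 8 ints
        for (int i = 0; i < 8; ++i)
            p[i] = i;

        p = alloc.reallocate(p, 16);        // grow the buffer; existing contents are kept
        printf("p[7] = %d\n", p[7]);

        alloc.deallocate(p, 16);
        return 0;
    }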
diff --git a/resources/3rdparty/sparsepp/tests/spp_bitset_test.cc b/resources/3rdparty/sparsepp/tests/spp_bitset_test.cc
new file mode 100755
index 000000000..3c775f3f3
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/spp_bitset_test.cc
@@ -0,0 +1,284 @@
+#include <memory>
+#include <cassert>
+#include <cstdio>
+#include <stdlib.h> 
+#include <algorithm> 
+#include <vector>
+
+// enable debugging code in spp_bitset.h
+#define SPP_TEST 1
+
+#include <sparsepp/spp_timer.h>
+#include <sparsepp/spp_memory.h>
+#include <sparsepp/spp_bitset.h>
+
+using namespace std;
+
+// -----------------------------------------------------------
+// -----------------------------------------------------------
+template <size_t N>
+class TestBitset
+{
+public:
+    typedef spp::spp_bitset<N> BS;
+
+    TestBitset()
+    {}
+
+    void test_set(size_t num_iter)
+    {
+        size_t num_errors = 0;
+        BS bs, bs2;
+
+        printf("testing set on spp_bitset<%zu>  , num_iter=%6zu -> ", N, num_iter);
+
+        for (size_t i=0; i<num_iter; ++i)
+        {
+            bs.reset();
+            bs2.reset();
+            size_t start = rand() % N;
+            size_t to = start + rand() % (N - start);  
+            bs.set(start, to);
+            bs2.set_naive(start, to);
+            bool same = bs == bs2;
+            if (!same)
+                ++num_errors;
+            assert(same);
+        }
+        printf("num_errors = %zu\n", num_errors);
+    }
+
+    void test_reset(size_t num_iter)
+    {
+        size_t num_errors = 0;
+        BS bs, bs2;
+        printf("testing reset on spp_bitset<%zu>, num_iter=%6zu -> ", N, num_iter);
+
+        for (size_t i=0; i<num_iter; ++i)
+        {
+            bs.set();
+            bs2.set();
+            size_t start = rand() % N;
+            size_t to = start + rand() % (N - start);  
+            bs.reset(start, to);
+            bs2.reset_naive(start, to);
+            bool same = bs == bs2;
+            if (!same)
+                ++num_errors;
+            assert(same);
+        }
+        printf("num_errors = %zu\n", num_errors);
+    }
+
+    void test_all(size_t num_iter)
+    {
+        size_t num_errors = 0;
+        BS bs;
+        printf("testing all() on spp_bitset<%zu>, num_iter=%6zu -> ", N, num_iter);
+
+        for (size_t i=0; i<4 * N; ++i)
+        {
+            bs.set(rand() % N);
+            if (i > 2 * N)
+            {
+                for (size_t j=0; j<num_iter; ++j)
+                {
+                    size_t start = rand() % N;
+                    size_t to = start + rand() % (N - start);  
+                    bool same = bs.all(start, to) == bs.all_naive(start, to);
+                    if (!same)
+                        ++num_errors;
+                    assert(same);                  
+                }
+
+                size_t start = 0, start_naive = 1;
+                bs.all(start);
+                bs.all_naive(start_naive);
+                bool same = (start == start_naive);
+                if (!same)
+                    ++num_errors;
+                assert(same);   
+            }
+        }
+        printf("num_errors = %zu\n", num_errors);
+    }
+
+    void test_any(size_t num_iter)
+    {
+        size_t num_errors = 0;
+        BS bs;
+        printf("testing any() on spp_bitset<%zu>, num_iter=%6zu -> ", N, num_iter);
+
+        for (size_t i=0; i<num_iter; ++i)
+        {
+            bs.set(rand() % N);
+            for (size_t j=0; j<100; ++j)
+            {
+                size_t start = rand() % N;
+                size_t to = start + rand() % (N - start);  
+                bool same = bs.any(start, to) == bs.any_naive(start, to);
+                if (!same)
+                    ++num_errors;
+                assert(same);      
+            }
+        }
+        printf("num_errors = %zu\n", num_errors);
+    }
+
+    void test_longest(size_t num_iter)
+    {
+        size_t num_errors = 0;
+        BS bs, bs2;
+        assert(bs.longest_zero_sequence() == N);
+        bs.set(0);
+        assert(bs.longest_zero_sequence() == N-1);
+        bs.set(10);
+        assert(bs.find_next_n(3, 8) == 11);
+        assert(bs.find_next_n(3, 6) == 6);
+        assert(bs.find_next_n(3, N-2) == 1);
+        assert(bs.longest_zero_sequence() == N-11);
+        if (N > 1000)
+        {
+            bs.set(1000);
+            size_t longest = bs.longest_zero_sequence();
+            assert(longest == 1000-11 || longest == N-1001);
+            if (!(longest == 1000-11 || longest == N-1001))
+                ++num_errors;
+        }
+
+        spp::Timer<std::milli> timer_lz;
+        spp::Timer<std::milli> timer_lz_slow;
+        float lz_time(0), lz_time_slow(0);
+
+        printf("testing longest_zero_sequence()  , num_iter=%6zu -> ", num_iter);
+        srand(1);
+        for (size_t i=0; i<num_iter; ++i)
+        {
+            bs.reset();
+            for (size_t j=0; j<N; ++j)
+            {
+                bs.set(rand() % N);
+
+                timer_lz.snap();
+                size_t lz1 = bs.longest_zero_sequence();
+                lz_time += timer_lz.get_delta();
+
+                timer_lz_slow.snap();
+                size_t lz2 = bs.longest_zero_sequence_naive();
+                lz_time_slow += timer_lz_slow.get_delta();
+
+                num_errors += (lz1 != lz2);
+                assert(!num_errors);
+            }
+        } 
+
+       printf("num_errors = %zu, time=%7.1f, slow_time=%7.1f\n", num_errors, lz_time, lz_time_slow); 
+    }
+
+    void test_longest2(size_t num_iter)
+    {
+        size_t num_errors = 0;
+        BS bs, bs2;
+        assert(bs.longest_zero_sequence() == N);
+        bs.set(0);
+        assert(bs.longest_zero_sequence() == N-1);
+        bs.set(10);
+        assert(bs.find_next_n(3, 8) == 11);
+        assert(bs.find_next_n(3, 6) == 6);
+        assert(bs.find_next_n(3, N-2) == 1);
+        assert(bs.longest_zero_sequence() == N-11);
+        if (N > 1000)
+        {
+            bs.set(1000);
+            size_t longest = bs.longest_zero_sequence();
+            assert(longest == 1000-11 || longest == N-1001);
+            if (!(longest == 1000-11 || longest == N-1001))
+                ++num_errors;
+        }
+
+        spp::Timer<std::milli> timer_lz;
+        spp::Timer<std::milli> timer_lz_slow;
+        float lz_time(0), lz_time_slow(0);
+
+        printf("testing longest_zero_sequence2() , num_iter=%6zu -> ", num_iter);
+        srand(1);
+        for (size_t i=0; i<num_iter; ++i)
+        {
+            bs.reset();
+            for (size_t j=0; j<N; ++j)
+            {
+                bs.set(rand() % N);
+                size_t start_pos1 = 0, start_pos2 = 0;
+
+                timer_lz.snap();
+                size_t lz1 = bs.longest_zero_sequence(64, start_pos1);
+                lz_time += timer_lz.get_delta();
+
+                timer_lz_slow.snap();
+                size_t lz2 = bs.longest_zero_sequence_naive(64, start_pos2);
+                lz_time_slow += timer_lz_slow.get_delta();
+                
+                assert(start_pos1 == start_pos2);
+
+                num_errors += (lz1 != lz2) || (start_pos1 != start_pos2);
+                assert(!num_errors);
+            }
+        } 
+
+       printf("num_errors = %zu, time=%7.1f, slow_time=%7.1f\n", num_errors, lz_time, lz_time_slow); 
+    }
+
+    void test_ctz(size_t num_iter) 
+    {
+        size_t num_errors = 0;
+
+        spp::Timer<std::milli> timer_ctz;
+        spp::Timer<std::milli> timer_ctz_slow;
+        float ctz_time(0), ctz_time_slow(0);
+
+        printf("testing count_trailing_zeroes()  , num_iter=%6zu -> ", num_iter);
+        for (size_t i=0; i<num_iter; ++i)
+        {
+            size_t v = rand() ^ (rand() << 16);
+
+            timer_ctz.snap();
+            uint32_t ctz1 = spp::count_trailing_zeroes(v);
+            ctz_time += timer_ctz.get_delta();
+
+            timer_ctz_slow.snap();
+            size_t ctz2 = spp::count_trailing_zeroes_naive(v);
+            ctz_time_slow += timer_ctz_slow.get_delta();
+
+            num_errors += (ctz1 != ctz2);
+            assert(!num_errors);
+        } 
+
+        printf("num_errors = %zu, time=%7.1f, slow_time=%7.1f\n", num_errors, ctz_time, ctz_time_slow); 
+            
+    }
+
+    void run()
+    {
+        test_ctz(10000);
+        test_all(10000);
+        test_any(1000);
+        test_set(1000);
+        test_reset(1000);
+        test_longest(200);
+        test_longest2(200);
+    }
+};
+
+// -----------------------------------------------------------
+// -----------------------------------------------------------
+int main()
+{
+    TestBitset<1024> test_bitset_1024;
+    test_bitset_1024.run();
+
+    TestBitset<4096> test_bitset_4096;
+    test_bitset_4096.run();
+
+    //TestBitset<8192> test_bitset_8192;
+    //test_bitset_8192.run();
+}
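A condensed view of the spp_bitset interface that spp_bitset_test.cc drives above (a sketch based solely on the calls appearing in the test; spp_bitset.h itself is not part of this hunk, so exact range semantics may differ):

    #include <cstdio>
    #include <sparsepp/spp_bitset.h>

    int main()
    {
        spp::spp_bitset<1024> bs;           // fixed-size bitset, as in TestBitset<1024>

        bs.set(0);                          // set a single bit
        bs.set(10, 20);                     // set a range of bits, as exercised by test_set()
        printf("any bit set between 10 and 20? %d\n", (int)bs.any(10, 20));
        printf("all bits set between 10 and 20? %d\n", (int)bs.all(10, 20));
        printf("longest zero run: %zu\n", bs.longest_zero_sequence());

        bs.reset();                         // clear all bits again
        return 0;
    }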
diff --git a/resources/3rdparty/sparsepp/spp_test.cc b/resources/3rdparty/sparsepp/tests/spp_test.cc
old mode 100644
new mode 100755
similarity index 98%
rename from resources/3rdparty/sparsepp/spp_test.cc
rename to resources/3rdparty/sparsepp/tests/spp_test.cc
index e17eb0f82..279dd0163
--- a/resources/3rdparty/sparsepp/spp_test.cc
+++ b/resources/3rdparty/sparsepp/tests/spp_test.cc
@@ -41,7 +41,7 @@
     #pragma warning( disable : 4996 ) // 'fopen': This function or variable may be unsafe
 #endif
 
-#include "sparsepp.h"
+#include <sparsepp/spp.h>
 
 #ifdef _MSC_VER 
     #pragma warning( disable : 4127 ) // conditional expression is constant
@@ -357,7 +357,6 @@ public:
 
     void set_deleted_key(const key_type& k) { ht_.set_deleted_key(k); }
     void clear_deleted_key() { ht_.clear_deleted_key(); }
-    key_type deleted_key() const { return ht_.deleted_key(); }
 
     size_type erase(const key_type& key)   { return ht_.erase(key); }
     void erase(typename HT::iterator it)   { ht_.erase(it); }
@@ -432,9 +431,9 @@ protected:
 // ---------------------------------------------------------------------
 // ---------------------------------------------------------------------
 template <class Key, class T,
-          class HashFcn = SPP_HASH_CLASS<Key>,
+          class HashFcn  = SPP_HASH_CLASS<Key>,
           class EqualKey = std::equal_to<Key>,
-          class Alloc = spp::libc_allocator_with_realloc<std::pair<const Key, T> > >
+          class Alloc    = SPP_DEFAULT_ALLOCATOR<std::pair<const Key, T> > >
 class HashtableInterface_SparseHashMap
     : public BaseHashtableInterface< sparse_hash_map<Key, T, HashFcn,
                                                      EqualKey, Alloc> >
@@ -518,9 +517,9 @@ void swap(HashtableInterface_SparseHashMap<K,T,H,E,A>& a,
 // ---------------------------------------------------------------------
 // ---------------------------------------------------------------------
 template <class Value,
-          class HashFcn = SPP_HASH_CLASS<Value>,
+          class HashFcn  = SPP_HASH_CLASS<Value>,
           class EqualKey = std::equal_to<Value>,
-          class Alloc = spp::libc_allocator_with_realloc<Value> >
+          class Alloc    = SPP_DEFAULT_ALLOCATOR<Value> >
 class HashtableInterface_SparseHashSet
     : public BaseHashtableInterface< sparse_hash_set<Value, HashFcn,
                                                      EqualKey, Alloc> > 
@@ -749,8 +748,8 @@ void EXPECT_TRUE(bool cond)
     }
 }
 
-SPP_START_NAMESPACE
-
+namespace spp_
+{
 
 namespace testing 
 {
@@ -897,8 +896,7 @@ class Test { };
 
 } // namespace testing
 
-SPP_END_NAMESPACE
-
+} // namespace spp_
 
 namespace testing = SPP_NAMESPACE::testing;
 
@@ -1287,7 +1285,7 @@ namespace {
 
 #define INT_HASHTABLES                                                  \
   HashtableInterface_SparseHashMap<int, int, Hasher, Hasher,            \
-                                   Alloc<int> >,                        \
+                                   Alloc<std::pair<const int, int> > >,  \
   HashtableInterface_SparseHashSet<int, Hasher, Hasher,                 \
                                    Alloc<int> >,                        \
   /* This is a table where the key associated with a value is -value */ \
@@ -1297,7 +1295,7 @@ namespace {
 
 #define STRING_HASHTABLES                                               \
   HashtableInterface_SparseHashMap<string, string, Hasher, Hasher,      \
-                                   Alloc<string> >,                     \
+                                   Alloc<std::pair<const string, string> > >,                     \
   HashtableInterface_SparseHashSet<string, Hasher, Hasher,              \
                                    Alloc<string> >,                     \
   /* This is a table where the key associated with a value is Cap(value) */ \
@@ -1312,7 +1310,7 @@ namespace {
 // ---------------------------------------------------------------------
 #define CHARSTAR_HASHTABLES                                             \
   HashtableInterface_SparseHashMap<const char*, ValueType,              \
-                                   Hasher, Hasher, Alloc<const char*> >, \
+                                   Hasher, Hasher, Alloc<std::pair<const char* const, ValueType> > >, \
   HashtableInterface_SparseHashSet<const char*, Hasher, Hasher,         \
                                    Alloc<const char*> >,                \
   HashtableInterface_SparseHashtable<const char*, const char*,          \
@@ -1436,7 +1434,6 @@ TYPED_TEST(HashtableIntTest, Typedefs)
     // different, arbitrary function that returns the type.  Sometimes
     // the type isn't used at all, and there's no good way to use the
     // variable.
-    kt = this->ht_.deleted_key();
     (void)vt;   // value_type may not be copyable.  Easiest not to try.
     h = this->ht_.hash_funct();
     ke = this->ht_.key_eq();
@@ -1551,6 +1548,38 @@ TEST(HashtableTest, ReferenceWrapper)
 }
 #endif
 
+#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES)
+class CNonCopyable
+{
+public:
+    CNonCopyable(CNonCopyable const &) = delete;
+    const CNonCopyable& operator=(CNonCopyable const &) = delete;
+    CNonCopyable() = default;
+};
+
+
+struct Probe : CNonCopyable
+{
+    Probe() {}
+    Probe(Probe &&) {}
+    void operator=(Probe &&)	{}
+
+private:
+    Probe(const Probe &);
+    Probe& operator=(const Probe &);
+};
+
+TEST(HashtableTest, NonCopyable) 
+{
+    typedef spp::sparse_hash_map<uint64_t, Probe> THashMap;
+    THashMap probes;
+    
+    probes.insert(THashMap::value_type(27, Probe()));
+    EXPECT_EQ(probes.begin()->first, 27);
+}
+
+#endif
+
 
 TEST(HashtableTest, ModifyViaIterator) 
 {
@@ -1642,7 +1671,7 @@ TYPED_TEST(HashtableIntTest, Constructors)
     // placement-news we have to do below.
     Hasher hasher(1);   // 1 is a unique id
     int alloc_count = 0;
-    Alloc<typename TypeParam::key_type> alloc(2, &alloc_count);
+    Alloc<typename TypeParam::value_type> alloc(2, &alloc_count);
 
     TypeParam ht_noarg;
     TypeParam ht_onearg(100);
@@ -1793,9 +1822,6 @@ TYPED_TEST(HashtableAllTest, Swap)
 
     this->ht_.swap(other_ht);
 
-    EXPECT_EQ(this->UniqueKey(2), this->ht_.deleted_key());
-    EXPECT_EQ(this->UniqueKey(1), other_ht.deleted_key());
-
     EXPECT_EQ(1, this->ht_.hash_funct().id());
     EXPECT_EQ(0, other_ht.hash_funct().id());
 
@@ -1826,8 +1852,6 @@ TYPED_TEST(HashtableAllTest, Swap)
     std::swap(this->ht_, other_ht);
 #endif
 
-    EXPECT_EQ(this->UniqueKey(1), this->ht_.deleted_key());
-    EXPECT_EQ(this->UniqueKey(2), other_ht.deleted_key());
     EXPECT_EQ(0, this->ht_.hash_funct().id());
     EXPECT_EQ(1, other_ht.hash_funct().id());
     EXPECT_EQ(1996u, this->ht_.size());
@@ -2251,24 +2275,6 @@ TYPED_TEST(HashtableStringTest, EmptyKey)
     EXPECT_EQ(kEmptyString, this->ht_.empty_key());
 }
 
-TYPED_TEST(HashtableAllTest, DeletedKey) 
-{
-    if (!this->ht_.supports_deleted_key())
-        return;
-    this->ht_.insert(this->UniqueObject(10));
-    this->ht_.insert(this->UniqueObject(20));
-    this->ht_.set_deleted_key(this->UniqueKey(1));
-    EXPECT_EQ(this->ht_.deleted_key(), this->UniqueKey(1));
-    EXPECT_EQ(2u, this->ht_.size());
-    this->ht_.erase(this->UniqueKey(20));
-    EXPECT_EQ(1u, this->ht_.size());
-
-    // Changing the deleted key is fine.
-    this->ht_.set_deleted_key(this->UniqueKey(2));
-    EXPECT_EQ(this->ht_.deleted_key(), this->UniqueKey(2));
-    EXPECT_EQ(1u, this->ht_.size());
-}
-
 TYPED_TEST(HashtableAllTest, Erase) 
 {
     this->ht_.set_deleted_key(this->UniqueKey(1));
@@ -2329,7 +2335,7 @@ TYPED_TEST(HashtableAllTest, Equals)
 
     // The choice of allocator/etc doesn't matter either.
     Hasher hasher(1);
-    Alloc<typename TypeParam::key_type> alloc(2, NULL);
+    Alloc<typename TypeParam::value_type> alloc(2, NULL);
     TypeParam ht3(5, hasher, hasher, alloc);
     EXPECT_TRUE(ht1 == ht3);
     EXPECT_FALSE(ht1 != ht3);
@@ -2746,7 +2752,7 @@ TEST(HashtableTest, SimpleDataTypeOptimizations)
     // Only sparsehashtable optimizes moves in this way.
     sparse_hash_map<int, Memmove, Hasher, Hasher> memmove;
     sparse_hash_map<int, NoMemmove, Hasher, Hasher> nomemmove;
-    sparse_hash_map<int, Memmove, Hasher, Hasher, Alloc<int> >
+    sparse_hash_map<int, Memmove, Hasher, Hasher, Alloc<std::pair<const int, Memmove> > >
         memmove_nonstandard_alloc;
 
     Memmove::num_copies = 0;
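The hunks above consistently change the allocator's template argument from the key type to the map's value_type (std::pair<const Key, T>). A minimal sketch of what this looks like in user code that passes an explicit allocator (types taken from this patch; the example itself is hypothetical):

    #include <cstdio>
    #include <functional>
    #include <utility>
    #include <sparsepp/spp.h>

    int main()
    {
        // the allocator is instantiated with the map's value_type, not the key type
        typedef std::pair<const int, int> value_t;
        typedef spp::sparse_hash_map<int, int,
                                     spp::spp_hash<int>,
                                     std::equal_to<int>,
                                     spp::libc_allocator<value_t> > map_t;

        map_t m;
        m[1] = 10;
        m[2] = 20;
        printf("size = %zu\n", (size_t)m.size());
        return 0;
    }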
diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp.sln b/resources/3rdparty/sparsepp/tests/vsprojects/spp.sln
new file mode 100755
index 000000000..06da8666e
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp.sln
@@ -0,0 +1,38 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio 14
+VisualStudioVersion = 14.0.25420.1
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "spp_test", "spp_test.vcxproj", "{9863A521-E9DB-4775-A276-CADEF726CF11}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "spp_alloc_test", "spp_alloc_test.vcxproj", "{19BC4240-15ED-4C76-BC57-34BB70FE163B}"
+EndProject
+Global
+	GlobalSection(SolutionConfigurationPlatforms) = preSolution
+		Debug|x64 = Debug|x64
+		Debug|x86 = Debug|x86
+		Release|x64 = Release|x64
+		Release|x86 = Release|x86
+	EndGlobalSection
+	GlobalSection(ProjectConfigurationPlatforms) = postSolution
+		{9863A521-E9DB-4775-A276-CADEF726CF11}.Debug|x64.ActiveCfg = Debug|x64
+		{9863A521-E9DB-4775-A276-CADEF726CF11}.Debug|x64.Build.0 = Debug|x64
+		{9863A521-E9DB-4775-A276-CADEF726CF11}.Debug|x86.ActiveCfg = Debug|Win32
+		{9863A521-E9DB-4775-A276-CADEF726CF11}.Debug|x86.Build.0 = Debug|Win32
+		{9863A521-E9DB-4775-A276-CADEF726CF11}.Release|x64.ActiveCfg = Release|x64
+		{9863A521-E9DB-4775-A276-CADEF726CF11}.Release|x64.Build.0 = Release|x64
+		{9863A521-E9DB-4775-A276-CADEF726CF11}.Release|x86.ActiveCfg = Release|Win32
+		{9863A521-E9DB-4775-A276-CADEF726CF11}.Release|x86.Build.0 = Release|Win32
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x64.ActiveCfg = Debug|x64
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x64.Build.0 = Debug|x64
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x86.ActiveCfg = Debug|Win32
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x86.Build.0 = Debug|Win32
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x64.ActiveCfg = Release|x64
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x64.Build.0 = Release|x64
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x86.ActiveCfg = Release|Win32
+		{19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x86.Build.0 = Release|Win32
+	EndGlobalSection
+	GlobalSection(SolutionProperties) = preSolution
+		HideSolutionNode = FALSE
+	EndGlobalSection
+EndGlobal
diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj b/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj
new file mode 100755
index 000000000..609710ffe
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj
@@ -0,0 +1,176 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\spp_alloc_test.cc" />
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\sparsepp\spp.h" />
+    <ClInclude Include="..\..\sparsepp\spp_alloc.h" />
+    <ClInclude Include="..\..\sparsepp\spp_bitset.h" />
+    <ClInclude Include="..\..\sparsepp\spp_memory.h" />
+    <ClInclude Include="..\..\sparsepp\spp_timer.h" />
+    <ClInclude Include="..\..\sparsepp\spp_utils.h" />
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{19BC4240-15ED-4C76-BC57-34BB70FE163B}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <CharacterSet>MultiByte</CharacterSet>
+    <PlatformToolset>v140</PlatformToolset>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup>
+    <_ProjectFileVersion>14.0.23107.0</_ProjectFileVersion>
+  </PropertyGroup>
+  <PropertyGroup>
+     <IntDirSharingDetected>None</IntDirSharingDetected>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <OutDir>$(SolutionDir)$(Configuration)\</OutDir>
+    <IntDir>$(Configuration)\</IntDir>
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <OutDir>$(SolutionDir)$(Configuration)\</OutDir>
+    <IntDir>$(Configuration)\</IntDir>
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <ClCompile>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <MinimalRebuild>true</MinimalRebuild>
+      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+      <PrecompiledHeader />
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_alloc_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <ProgramDatabaseFile>$(OutDir)spp_alloc_test.pdb</ProgramDatabaseFile>
+      <SubSystem>Console</SubSystem>
+      <TargetMachine>MachineX86</TargetMachine>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_alloc_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <ProgramDatabaseFile>$(OutDir)spp_alloc_test.pdb</ProgramDatabaseFile>
+      <SubSystem>Console</SubSystem>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <ClCompile>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+      <PrecompiledHeader />
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_alloc_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <SubSystem>Console</SubSystem>
+      <OptimizeReferences>true</OptimizeReferences>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <TargetMachine>MachineX86</TargetMachine>
+      <Profile>true</Profile>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_alloc_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <SubSystem>Console</SubSystem>
+      <OptimizeReferences>true</OptimizeReferences>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <Profile>true</Profile>
+    </Link>
+  </ItemDefinitionGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj.filters b/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj.filters
new file mode 100755
index 000000000..8c773fa94
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj.filters
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{c644622a-f598-4fcf-861c-199b4b988881}</UniqueIdentifier>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\sparsepp\spp.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_alloc.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_bitset.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_memory.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_timer.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_utils.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj b/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj
new file mode 100755
index 000000000..c510a10cf
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj
@@ -0,0 +1,175 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup Label="ProjectConfigurations">
+    <ProjectConfiguration Include="Debug|Win32">
+      <Configuration>Debug</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Debug|x64">
+      <Configuration>Debug</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|Win32">
+      <Configuration>Release</Configuration>
+      <Platform>Win32</Platform>
+    </ProjectConfiguration>
+    <ProjectConfiguration Include="Release|x64">
+      <Configuration>Release</Configuration>
+      <Platform>x64</Platform>
+    </ProjectConfiguration>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\spp_test.cc" />
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\sparsepp\spp.h" />
+    <ClInclude Include="..\..\sparsepp\spp_alloc.h" />
+    <ClInclude Include="..\..\sparsepp\spp_bitset.h" />
+    <ClInclude Include="..\..\sparsepp\spp_memory.h" />
+    <ClInclude Include="..\..\sparsepp\spp_timer.h" />
+    <ClInclude Include="..\..\sparsepp\spp_utils.h" />
+  </ItemGroup>
+  <PropertyGroup Label="Globals">
+    <ProjectGuid>{9863A521-E9DB-4775-A276-CADEF726CF11}</ProjectGuid>
+    <Keyword>Win32Proj</Keyword>
+    <WindowsTargetPlatformVersion>8.1</WindowsTargetPlatformVersion>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <CharacterSet>MultiByte</CharacterSet>
+    <PlatformToolset>v140</PlatformToolset>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+    <ConfigurationType>Application</ConfigurationType>
+    <PlatformToolset>v140</PlatformToolset>
+    <CharacterSet>MultiByte</CharacterSet>
+  </PropertyGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+  <ImportGroup Label="ExtensionSettings">
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+  </ImportGroup>
+  <PropertyGroup Label="UserMacros" />
+  <PropertyGroup>
+    <_ProjectFileVersion>14.0.23107.0</_ProjectFileVersion>
+  </PropertyGroup>
+  <PropertyGroup>
+     <IntDirSharingDetected>None</IntDirSharingDetected>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <OutDir>$(SolutionDir)$(Configuration)\</OutDir>
+    <IntDir>$(Configuration)\</IntDir>
+    <LinkIncremental>true</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <LinkIncremental>true</LinkIncremental>
+    <CodeAnalysisRuleSet>AllRules.ruleset</CodeAnalysisRuleSet>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <OutDir>$(SolutionDir)$(Configuration)\</OutDir>
+    <IntDir>$(Configuration)\</IntDir>
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <LinkIncremental>false</LinkIncremental>
+  </PropertyGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+    <ClCompile>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <MinimalRebuild>true</MinimalRebuild>
+      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+      <PrecompiledHeader />
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <ProgramDatabaseFile>$(OutDir)spp_test.pdb</ProgramDatabaseFile>
+      <SubSystem>Console</SubSystem>
+      <TargetMachine>MachineX86</TargetMachine>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+    <ClCompile>
+      <Optimization>Disabled</Optimization>
+      <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>EnableAllWarnings</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <ProgramDatabaseFile>$(OutDir)spp_test.pdb</ProgramDatabaseFile>
+      <SubSystem>Console</SubSystem>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+    <ClCompile>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+      <PrecompiledHeader />
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <SubSystem>Console</SubSystem>
+      <OptimizeReferences>true</OptimizeReferences>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+      <TargetMachine>MachineX86</TargetMachine>
+    </Link>
+  </ItemDefinitionGroup>
+  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+    <ClCompile>
+      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+      <PrecompiledHeader>
+      </PrecompiledHeader>
+      <WarningLevel>Level3</WarningLevel>
+      <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+      <AdditionalIncludeDirectories>../..</AdditionalIncludeDirectories>
+    </ClCompile>
+    <Link>
+      <OutputFile>$(OutDir)spp_test.exe</OutputFile>
+      <GenerateDebugInformation>true</GenerateDebugInformation>
+      <SubSystem>Console</SubSystem>
+      <OptimizeReferences>true</OptimizeReferences>
+      <EnableCOMDATFolding>true</EnableCOMDATFolding>
+    </Link>
+  </ItemDefinitionGroup>
+  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <ImportGroup Label="ExtensionTargets">
+  </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj.filters b/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj.filters
new file mode 100755
index 000000000..70934ad0c
--- /dev/null
+++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj.filters
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+  <ItemGroup>
+    <Filter Include="Header Files">
+      <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+      <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
+    </Filter>
+  </ItemGroup>
+  <ItemGroup>
+    <ClCompile Include="..\spp_test.cc" />
+  </ItemGroup>
+  <ItemGroup>
+    <ClInclude Include="..\..\sparsepp\spp.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_alloc.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_bitset.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_memory.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_timer.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\..\sparsepp\spp_utils.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+  </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/resources/3rdparty/sylvan/.travis.yml b/resources/3rdparty/sylvan/.travis.yml
index 835aaf383..e85d3b077 100755
--- a/resources/3rdparty/sylvan/.travis.yml
+++ b/resources/3rdparty/sylvan/.travis.yml
@@ -58,12 +58,12 @@ install:
 script:
 - ${CC} --version
 - ${CXX} --version
-- cmake . -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSYLVAN_STATS=${SYLVAN_STATS} -DWITH_COVERAGE=${COVERAGE} -DSYLVAN_BUILD_DOCS=${SYLVAN_BUILD_DOCS}
+- cmake . -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DSYLVAN_STATS=${SYLVAN_STATS} -DWITH_COVERAGE=${COVERAGE} -DSYLVAN_BUILD_DOCS=${SYLVAN_BUILD_DOCS} -DSYLVAN_BUILD_EXAMPLES=ON
 - make -j 2
 - make test
 - examples/simple
-- examples/mc models/schedule_world.2.8-rgs.bdd -w 2 | tee /dev/fd/2 | grep -q "1,570,340"
-- examples/lddmc models/blocks.2.ldd -w 2 | tee /dev/fd/2 | grep -q "7057 states"
+- examples/mc models/schedule_world.2.bdd -w 2 | tee /dev/fd/2 | grep -q "1,570,340"
+- examples/lddmc models/blocks.2.ldd -w 2 | tee /dev/fd/2 | grep -q "7,057 states"
 
 notifications:
   email: false
diff --git a/resources/3rdparty/sylvan/CHANGELOG.md b/resources/3rdparty/sylvan/CHANGELOG.md
index e4d58ae65..7cc6e1228 100755
--- a/resources/3rdparty/sylvan/CHANGELOG.md
+++ b/resources/3rdparty/sylvan/CHANGELOG.md
@@ -2,12 +2,60 @@
 All notable changes to Sylvan will be documented in this file.
 
 ## [Unreleased]
+### Changed
+- We now implement twisted tabulation as the hash function for the nodes table. The old hash function is still available and the default behavior can be changed in `sylvan_table.h`.
+
+## [1.4.0] - 2017-07-12
+### Added
+- Function `mtbdd_cmpl` that computes the complement for MTBDDs. (0 becomes 1, non-0 becomes 0)
+
+### Changed
+- Changed file formats used by the examples to match the changes in LTSmin.
+- Function `mtbdd_satcount` no longer counts assignments leading to 0. Perhaps in the future we will make this configurable (as in CuDD).
+- Slightly improved C++ support by wrapping header files in the namespace sylvan.
+
+### Fixed
+- There was a bug where Lace tasks were overwritten during SYNC, which caused problems during garbage collection. Lace reusing the bucket during SYNC is by design and is difficult to change. We fixed the issue by checking during garbage collection whether the stored task is still the same function, which in the worst case marks more nodes than needed.
+- Band-aid patch for hashing; very similar nodes were hashing to similar positions and strides, causing early garbage collections and full tables. The patch works for now, but we need a more robust solution.
+
+### Removed
+- Removed support for HWLOC (pinning on NUMA machines). Planning to bring this back as an option, but in its current form it prevents multiple Sylvan programs from running simultaneously on the same machine.
+
+## [1.3.3] - 2017-06-03
+### Changed
+- Changed file format for .bdd files in the MC example.
+
+### Fixed
+- A major bug in `lddmc_match_sat_par` has been fixed.
+- A bug in the saturation algorithm in the model checking example has been fixed.
+- A major bug in the hash table rehashing implementation has been fixed.
+
+## [1.3.2] - 2017-05-23
+### Added
+- Now implements `lddmc_protect` and `lddmc_unprotect` for external pointer references.
+- Now implements `lddmc_refs_pushptr` and `lddmc_refs_popptr` for internal pointer references.
+
+### Changed
+- New version of Lace has slightly different API for manually created threads.
+
+## [1.3.1] - 2017-05-22
+### Fixed
+- A bug in `mtbdd_refs_ptrs_up` caused a segfault. This has been fixed.
+
+## [1.3.0] - 2017-05-16
 ### Added
 - The embedded work-stealing framework now explicitly checks for stack overflows and aborts with an appropriate error message written to stderr.
 - New functions `sylvan_project` and `sylvan_and_project` for BDDs, a dual of existential quantification, where instead of the variables to remove, the given set of variables are the variables to keep.
+- New functions `mtbdd_refs_pushptr` and `mtbdd_refs_popptr` allow thread-specific referencing of pointers.
 
 ### Changed
 - Rewritten initialization of Sylvan. Before the call to `sylvan_init_package`, table sizes must be initialized either using `sylvan_set_sizes` or with the new function `sylvan_set_limits`. This new function allows the user to set a maximum number of bytes allocated for the nodes table and for the operation cache.
+- Rewritten MTBDD referencing system.
+- Rewritten MTBDD map and set functions (no API change except renaming `mtbdd_map_addall` to `mtbdd_map_update`, with backward compatibility).
+- The lock-free unique table now uses double hashing instead of rehashing. This can improve the performance for custom leaves and improves the hash spread.
+
+### Fixed
+- A bug in `llmsset_lookup` affecting custom leaves has been fixed.
 
 ## [1.2.0] - 2017-02-03
 ### Added
diff --git a/resources/3rdparty/sylvan/CMakeLists.txt b/resources/3rdparty/sylvan/CMakeLists.txt
old mode 100755
new mode 100644
index a251eadec..7332217d3
--- a/resources/3rdparty/sylvan/CMakeLists.txt
+++ b/resources/3rdparty/sylvan/CMakeLists.txt
@@ -1,6 +1,6 @@
 cmake_minimum_required(VERSION 3.1)
 
-project(sylvan LANGUAGES C CXX VERSION 1.2.0)
+project(sylvan LANGUAGES C CXX VERSION 1.4.0)
 
 set(PROJECT_DESCRIPTION "Sylvan, a parallel decision diagram library")
 set(PROJECT_URL "https://github.com/trolando/sylvan")
@@ -25,8 +25,8 @@ set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake/")
 option(SYLVAN_PORTABLE "If set, the created library will be portable." OFF)
 option(USE_CARL "Sets whether carl should be included." ON)
 
-set(CMAKE_C_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing -fPIC ${CMAKE_C_FLAGS}")
-set(CMAKE_CXX_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing -Wno-deprecated -fPIC ${CMAKE_CXX_FLAGS}")
+set(CMAKE_C_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing ${CMAKE_C_FLAGS}")
+set(CMAKE_CXX_FLAGS "-O2 -Wextra -Wall -fno-strict-aliasing -Wno-deprecated ${CMAKE_CXX_FLAGS}")
 
 if (NOT SYLVAN_PORTABLE)
     set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
@@ -73,18 +73,17 @@ include_directories("${PROJECT_BINARY_DIR}/../../../include")
 include_directories(src)
 
 add_subdirectory(src)
-option(SYLVAN_BUILD_TESTS "Build example tools" ON)
+option(SYLVAN_BUILD_TESTS "Build tests" ON)
 if(SYLVAN_BUILD_TESTS)
     add_subdirectory(test)
 endif()
-    
 
-option(SYLVAN_BUILD_EXAMPLES "Build example tools" ON)
+option(SYLVAN_BUILD_EXAMPLES "Build example tools" OFF)
 if(SYLVAN_BUILD_EXAMPLES)
     add_subdirectory(examples)
 endif()
 
-option(SYLVAN_BUILD_DOCS "Build documentation" ON)
+option(SYLVAN_BUILD_DOCS "Build documentation" OFF)
 if(SYLVAN_BUILD_DOCS)
     configure_file("docs/conf.py.in" "docs/conf.py" @ONLY)
     find_package(Sphinx REQUIRED)
diff --git a/resources/3rdparty/sylvan/cmake/FindGMP.cmake b/resources/3rdparty/sylvan/cmake/FindGMP.cmake
index 7d787a84d..7dcd75afd 100755
--- a/resources/3rdparty/sylvan/cmake/FindGMP.cmake
+++ b/resources/3rdparty/sylvan/cmake/FindGMP.cmake
@@ -8,16 +8,13 @@
 find_package(PkgConfig)
 pkg_check_modules(PC_GMP QUIET gmp)
 
-set(GMP_INCLUDE "" CACHE PATH "include dir")
-set(GMP_LOCATION "" CACHE PATH "location dir")
-
 set(GMP_DEFINITIONS ${PC_GMP_CFLAGS_OTHER})
 
 find_path(GMP_INCLUDE_DIR gmp.h
-          HINTS ${GMP_INCLUDE} ${PC_GMP_INCLUDEDIR} ${PC_GMP_INCLUDE_DIRS})
+          HINTS ${PC_GMP_INCLUDEDIR} ${PC_GMP_INCLUDE_DIRS})
 
 find_library(GMP_LIBRARIES NAMES gmp libgmp
-             HINTS ${GMP_LOCATION} ${PC_GMP_LIBDIR} ${PC_GMP_LIBRARY_DIRS} NO_CMAKE_PATH NO_CMAKE_ENVIRONMENT_PATH)
+             HINTS ${PC_GMP_LIBDIR} ${PC_GMP_LIBRARY_DIRS})
 
 include(FindPackageHandleStandardArgs)
 # handle the QUIETLY and REQUIRED arguments and set GMP_FOUND to TRUE
diff --git a/resources/3rdparty/sylvan/docs/index.rst b/resources/3rdparty/sylvan/docs/index.rst
index cb2a33aa7..f15c2e85e 100755
--- a/resources/3rdparty/sylvan/docs/index.rst
+++ b/resources/3rdparty/sylvan/docs/index.rst
@@ -31,15 +31,19 @@ Bindings for other languages than C/C++ also exist:
 Dependencies
 ------------
 
-Sylvan has the following required dependencies:
+Sylvan has the following dependencies:
 
 - **CMake** for compiling.
 - **gmp** (``libgmp-dev``) for the GMP leaves in MTBDDs.
-- **hwloc** (``libhwloc-dev``) for pinning worker threads to processors.
+- **Sphinx** if you want to build the documentation.
 
 Sylvan depends on the `work-stealing framework
 Lace <http://fmt.ewi.utwente.nl/tools/lace>`__ for its implementation.
 Lace is embedded in the Sylvan distribution.
+Lace requires one additional library:
+
+- **hwloc** (``libhwloc-dev``) for pinning worker threads to processors.
+
 
 Building
 --------
@@ -71,14 +75,12 @@ To use Sylvan, the library and its dependency Lace must be initialized:
         lace_init(n_workers, 0);
         lace_startup(0, NULL, NULL);
 
-        size_t nodes_minsize = 1LL<<22;
-        size_t nodes_maxsize = 1LL<<26;
-        size_t cache_minsize = 1LL<<23;
-        size_t cache_maxsize = 1LL<<27;
-        sylvan_init_package(nodes_minsize, nodes_maxsize, cache_minsize, cache_maxsize);
+        // use at most 512 MB, nodes:cache ratio 2:1, initial size 1/32 of maximum
+        sylvan_set_limits(512*1024*1024, 1, 5);
+        sylvan_init_package();
         sylvan_init_mtbdd();
 
-        ...
+        /* ... do stuff ... */
 
         sylvan_stats_report(stdout);
         sylvan_quit();
@@ -90,19 +92,20 @@ for work-stealing. The parameter ``n_workers`` can be set to 0 for auto-detectio
 function ``lace_startup`` then creates all other worker threads. The worker threads run
 until ``lace_exit`` is called. Lace must be started before Sylvan can be initialized.
 
-Sylvan is initialized with a call to ``sylvan_init_package``. Here we choose the initial
-and maximum sizes of the nodes table and the operation cache. In the example, we choose a maximum
-nodes table size of 2^26 and a maximum cache size of 2^27. The initial sizes are
-set to 2^22 and 2^23, respectively. The sizes must be powers of 2.
-Sylvan allocates memory for the maximum sizes *in virtual memory* but only uses the space
-needed for the initial sizes. The sizes are doubled during garbage collection, until the maximum
-size has been reached.
+Sylvan is initialized with a call to ``sylvan_init_package``. Before this call, Sylvan needs to know
+how much memory to allocate for the nodes table and the operation cache. In this example, we use the
+``sylvan_set_limits`` function to tell Sylvan that it may allocate at most 512 MB for these tables.
+The second parameter sets the ratio between the nodes table and the operation cache: each increment
+doubles the size of the nodes table relative to the cache, and negative numbers double the size of the
+operation cache instead. In the example, we want the nodes table to be twice as big as the operation cache.
+The third parameter controls how many times garbage collection may double the table sizes before their
+maximum size is reached. The value 5 means that the initial tables are 1/32 of the maximum size.
+By default, every execution of garbage collection doubles the table sizes.
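+
+For illustration, the parameters could be varied as follows (assuming the semantics just described):
+
+.. code:: c
+
+        // 2 GB total, nodes table 8x as big as the cache, initial tables 1/64 of the maximum
+        sylvan_set_limits(2LL*1024*1024*1024, 3, 6);
+        // 256 MB total, operation cache twice as big as the nodes table, initial tables 1/32 of the maximum
+        sylvan_set_limits(256*1024*1024, -1, 5);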
 
-After ``sylvan_init_package``, the subpackages ``mtbdd`` and ``ldd`` can be initialized with
-``sylvan_init_mtbdd`` and ``sylvan_init_ldd``. This mainly allocates auxiliary datastructures for
-garbage collection.
+After ``sylvan_init_package``, subpackages like ``mtbdd`` and ``ldd`` can be initialized with
+``sylvan_init_mtbdd`` and ``sylvan_init_ldd``. This allocates auxiliary datastructures.
 
-If you enable statistics generation (via CMake) then you can use ``sylvan_stats_report`` to report
+If you enabled statistics generation (via CMake), then you can use ``sylvan_stats_report`` to report
 the obtained statistics to a given ``FILE*``.
 
 The Lace framework
@@ -110,7 +113,7 @@ The Lace framework
 
 Sylvan uses the Lace framework to offer 'automatic' parallelization of decision diagram operations.
 Many functions in Sylvan are Lace tasks. To call a Lace task, the variables 
-``__lace_worker`` and ``__lace_dq_head`` must be initialized **locally**.
+``__lace_worker`` and ``__lace_dq_head`` must be initialized as **local** variables of the current function.
 Use the macro ``LACE_ME`` to initialize the variables in every function that calls Sylvan functions
 and is not itself a Lace task.
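+
+A minimal sketch of this pattern (the function name ``intersect`` is only illustrative):
+
+.. code:: c
+
+    /* a regular function (not a Lace task) that calls a Sylvan operation */
+    BDD intersect(BDD a, BDD b)
+    {
+        LACE_ME; // declares __lace_worker and __lace_dq_head as local variables
+        return sylvan_and(a, b);
+    }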
 
@@ -121,98 +124,207 @@ Like all decision diagram implementations, Sylvan performs garbage collection.
 Garbage collection is triggered when trying to insert a new node and no
 empty space can be found in the table within a reasonable upper bound.
 
+Garbage collection can be disabled with ``sylvan_gc_disable`` and enabled again with ``sylvan_gc_enable``.
+Call ``sylvan_gc`` to manually trigger garbage collection.
+
 To ensure that no decision diagram nodes are overwritten, you must ensure that
 Sylvan knows which decision diagrams you care about.
-The easiest way to do this is with ``sylvan_protect`` and ``sylvan_unprotect`` to protect
-a given pointer.
-These functions protect the decision diagram referenced to by that pointer at the time
-that garbage collection is performed.
-Unlike some other implementations of decision diagrams,
-you can modify the variable between the calls to ``sylvan_protect`` and ``sylvan_unprotect``
-without explicitly changing the reference.
-
-To manually trigger garbage collection, call ``sylvan_gc``.
-You can use ``sylvan_gc_disable`` and ``sylvan_gc_enable`` to disable garbage collection or
-enable it again. If garbage collection is disabled, the program will abort when the nodes table
-is full.
+Each subpackage implements mechanisms to store references to decision diagrams that must be kept.
+For example, the *mtbdd* subpackage implements ``mtbdd_protect`` and ``mtbdd_unprotect`` to store pointers to
+MTBDD variables.
+
+.. code:: c
+
+    MTBDD* allocate_var() {
+        MTBDD* my_var = (MTBDD*)calloc(1, sizeof(MTBDD));
+        mtbdd_protect(my_var);
+        return my_var;
+    }
+
+    void free_var(MTBDD* my_var) {
+        mtbdd_unprotect(my_var);
+        free(my_var);
+    }
+
+If you use ``mtbdd_protect`` you do not need to update the reference every time the value changes.
+
+The *mtbdd* subpackage also implements thread-local stacks to temporarily store pointers and results of tasks:
+
+.. code:: c
+
+    MTBDD some_thing = ...;
+    mtbdd_refs_pushptr(&some_thing);
+    MTBDD result_param1 = mtbdd_false, result_param2 = mtbdd_false;
+    mtbdd_refs_pushptr(&result_param1);
+    mtbdd_refs_pushptr(&result_param2);
+    while (some_condition) {
+        mtbdd_refs_spawn(SPAWN(an_operation, some_thing, param1));
+        result_param2 = CALL(an_operation, some_thing, param2);
+        result_param1 = mtbdd_refs_sync(SYNC(an_operation));
+        some_thing = CALL(another_operation, result_param1, result_param2);
+    }
+    mtbdd_refs_popptr(3);
+    return some_thing;
+
+It is recommended to use the thread-local stacks for local variables, and to use the ``protect`` and ``unprotect``
+functions for other variables. Every SPAWN and SYNC of a Lace task that returns an MTBDD must be decorated with
+``mtbdd_refs_spawn`` and ``mtbdd_refs_sync`` as in the above example.
+
+References to decision diagrams must be added before a worker may cooperate on garbage collection.
+Workers can cooperate on garbage collection during ``SYNC``, when functions create nodes, and when ``sylvan_gc_test`` is used to test whether to assist in garbage collection.
+Functions for adding or removing references never perform garbage collection.
+Furthermore, only ``mtbdd_makenode`` (and the other node-making primitives) implicitly references its parameters; no other function references its parameters.
+Nesting Sylvan functions (including ``sylvan_ithvar``) is bad practice and should be avoided.
+
 **Warning**: Sylvan is a multi-threaded library and all workers must cooperate for garbage collection. If you use locking mechanisms in your code, beware of deadlocks!
+You can explicitly cooperate on garbage collection with ``sylvan_gc_test()``.
+
+Basic BDD/MTBDD functionality
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Basic BDD functionality
-~~~~~~~~~~~~~~~~~~~~~~~
+In Sylvan, BDDs are special cases of MTBDDs.
+Several functions are specific to BDDs and start with ``sylvan_``, whereas generic MTBDD functions start
+with ``mtbdd_``.
 
 To create new BDDs, you can use:
 
-- ``sylvan_true``: representation of constant ``true``.
-- ``sylvan_false``: representation of constant ``false``.
+- ``mtbdd_true``: representation of constant ``true``.
+- ``mtbdd_false``: representation of constant ``false``.
 - ``sylvan_ithvar(var)``: representation of literal <var> (negated: ``sylvan_nithvar(var)``)
 
 To follow the BDD edges and obtain the variable at the root of a BDD,
-you can use (only for internal nodes, not for leaves ``sylvan_true`` and ``sylvan_false``):
+you can use (only for internal nodes, not for leaves ``mtbdd_true`` and ``mtbdd_false``):
 
-- ``sylvan_var(bdd)``: obtain the variable of the root node of <bdd>.
-- ``sylvan_high(bdd)``: follow the high edge of <bdd>.
-- ``sylvan_low(bdd)``: follow the low edge of <bdd>.
+- ``mtbdd_getvar(bdd)``: obtain the variable of the root node of <bdd>.
+- ``mtbdd_gethigh(bdd)``: follow the high edge of <bdd>.
+- ``mtbdd_getlow(bdd)``: follow the low edge of <bdd>.
 
 You need to manually reference BDDs that you want to keep during garbage
-collection:
+collection (see the above explanation):
 
-- ``sylvan_protect(bddptr)``: add a pointer reference to <bddptr>.
-- ``sylvan_unprotect(bddptr)``: remove a pointer reference to <bddptr>.
-- ``sylvan_ref(bdd)``: add a reference to <bdd>.
-- ``sylvan_deref(bdd)``: remove a reference to <bdd>.
+- ``mtbdd_protect(bddptr)``: add a pointer reference to <bddptr>.
+- ``mtbdd_unprotect(bddptr)``: remove a pointer reference to <bddptr>.
+- ``mtbdd_refs_pushptr(bddptr)``: add a local pointer reference to <bddptr>.
+- ``mtbdd_refs_popptr(amount)``: remove the last <amount> local pointer references.
+- ``mtbdd_refs_spawn(SPAWN(...))``: spawn a task that returns a BDD/MTBDD.
+- ``mtbdd_refs_sync(SYNC(...))``: sync a task that returns a BDD/MTBDD.
 
-It is recommended to use ``sylvan_protect`` and ``sylvan_unprotect``.
+It is recommended to use ``mtbdd_protect`` and ``mtbdd_unprotect``.
 The C++ objects (defined in ``sylvan_obj.hpp``) handle this automatically.
+For local variables, we recommend ``mtbdd_refs_pushptr`` and ``mtbdd_refs_popptr``.
 
-The following basic operations are implemented:
+The following basic BDD operations are implemented:
 
 - ``sylvan_not(bdd)``: compute the negation of <bdd>.
 - ``sylvan_ite(a,b,c)``: compute 'if <a> then <b> else <c>'.
-- ``sylvan_and(a, b)``: compute '<a> and <b>'
-- ``sylvan_or(a, b)``: compute '<a> or <b>'
-- ``sylvan_nand(a, b)``: compute 'not (<a> and <b>)'
-- ``sylvan_nor(a, b)``: compute 'not (<a> or <b>)'
-- ``sylvan_imp(a, b)``: compute '<a> then <b>'
-- ``sylvan_invimp(a, b)``: compute '<b> then <a>'
-- ``sylvan_xor(a, b)``: compute '<a> xor <b>'
-- ``sylvan_equiv(a, b)``: compute '<a> = <b>'
-- ``sylvan_diff(a, b)``: compute '<a> and not <b>'
-- ``sylvan_less(a, b)``: compute '<b> and not <a>'
+- ``sylvan_and(a, b)``: compute '<a> and <b>'.
+- ``sylvan_or(a, b)``: compute '<a> or <b>'.
+- ``sylvan_nand(a, b)``: compute 'not (<a> and <b>)'.
+- ``sylvan_nor(a, b)``: compute 'not (<a> or <b>)'.
+- ``sylvan_imp(a, b)``: compute '<a> then <b>'.
+- ``sylvan_invimp(a, b)``: compute '<b> then <a>'.
+- ``sylvan_xor(a, b)``: compute '<a> xor <b>'.
+- ``sylvan_equiv(a, b)``: compute '<a> = <b>'.
+- ``sylvan_diff(a, b)``: compute '<a> and not <b>'.
+- ``sylvan_less(a, b)``: compute '<b> and not <a>'.
 - ``sylvan_exists(bdd, vars)``: existential quantification of <bdd> with respect to variables <vars>.
 - ``sylvan_forall(bdd, vars)``: universal quantification of <bdd> with respect to variables <vars>.
+- ``sylvan_project(bdd, vars)``: the dual of ``sylvan_exists``, projects the <bdd> to the variable domain <vars>.
 
 A set of variables (like <vars> above) is a BDD representing the conjunction of the variables.
-
-Other BDD operations
-~~~~~~~~~~~~~~~~~~~~
-
-See ``src/sylvan_bdd.h`` for other operations on BDDs, especially operations
-that are relevant for model checking.
-
-Basic MTBDD functionality
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-See ``src/sylvan_mtbdd.h`` for operations on multi-terminal BDDs.
-
-Basic LDD functionality
-~~~~~~~~~~~~~~~~~~~~~~~
-
-See ``src/sylvan_ldd.h`` for operations on List DDs.
+A number of convenience functions are defined to manipulate sets of variables (a short sketch follows the list):
+
+- ``mtbdd_set_empty()``: obtain an empty set.
+- ``mtbdd_set_isempty(set)``: compute whether the set is empty.
+- ``mtbdd_set_first(set)``: obtain the first variable of the set.
+- ``mtbdd_set_next(set)``: obtain the subset without the first variable.
+- ``mtbdd_set_from_array(arr, len)``: create a set from a given array.
+- ``mtbdd_set_to_array(set, arr)``: write the set to the given array.
+- ``mtbdd_set_add(set, var)``: compute the set plus the variable.
+- ``mtbdd_set_union(set1, set2)``: compute the union of two sets.
+- ``mtbdd_set_remove(set, var)``: compute the set minus the variable.
+- ``mtbdd_set_minus(set1, set2)``: compute the set <set1> minus the variables in <set2>.
+- ``mtbdd_set_count(set)``: compute the number of variables in the set.
+- ``mtbdd_set_contains(set, var)``: compute whether the set contains the variable.
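+
+A short sketch of a few of these functions (a fragment; it assumes ``LACE_ME`` is in scope, and ``arr`` and ``vars`` are only illustrative names):
+
+.. code:: c
+
+    /* build the variable set {1, 3, 5} and query it */
+    uint32_t arr[3] = {1, 3, 5};
+    MTBDD vars = mtbdd_set_from_array(arr, 3);
+    mtbdd_refs_pushptr(&vars);               // keep the set during garbage collection
+    int has3 = mtbdd_set_contains(vars, 3);  // 1: the set contains variable 3
+    size_t n = mtbdd_set_count(vars);        // 3: the number of variables in the set
+    mtbdd_refs_popptr(1);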
+
+Sylvan also implements composition and substitution/variable renaming using an "MTBDD map". An MTBDD map is a structure
+built from dedicated MTBDD nodes that stores a mapping from variables (uint32_t) to MTBDDs. Like sets of variables and MTBDDs, MTBDD maps must
+also be referenced for garbage collection. The following functions are related to MTBDD maps (a renaming sketch follows the list):
+
+- ``mtbdd_compose(dd, map)``: apply the map to the given decision diagram, transforming every node with a variable that is associated with some function F in the map by ``if <F> then <high> else <low>``.
+- ``sylvan_compose(dd, map)``: same as ``mtbdd_compose``, but assumes the decision diagram only has Boolean leaves.
+- ``mtbdd_map_empty()``: obtain an empty map.
+- ``mtbdd_map_isempty(map)``: compute whether the map is empty.
+- ``mtbdd_map_key(map)``: obtain the key of the first pair of the map.
+- ``mtbdd_map_value(map)``: obtain the value of the first pair of the map.
+- ``mtbdd_map_next(map)``: obtain the submap without the first pair.
+- ``mtbdd_map_add(map, key, value)``: compute the map plus the given key-value pair.
+- ``mtbdd_map_update(map1, map2)``: compute the union of two maps, with priority to map2 if both maps share variables.
+- ``mtbdd_map_remove(map, var)``: compute the map minus the variable.
+- ``mtbdd_map_removeall(map, set)``: compute the map minus the given variables.
+- ``mtbdd_map_count(map)``: compute the number of pairs in the map.
+- ``mtbdd_map_contains(map, var)``: compute whether the map contains the variable.
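+
+A sketch of variable renaming with an MTBDD map (``some_bdd`` is assumed to be an existing BDD):
+
+.. code:: c
+
+    /* rename variable 0 to variable 2 in some_bdd */
+    MTBDD map = mtbdd_map_empty(), v2 = mtbdd_false, renamed = mtbdd_false;
+    mtbdd_refs_pushptr(&map);
+    mtbdd_refs_pushptr(&v2);
+    mtbdd_refs_pushptr(&renamed);
+    v2 = sylvan_ithvar(2);                   // store intermediate results, do not nest Sylvan calls
+    map = mtbdd_map_add(map, 0, v2);
+    renamed = sylvan_compose(some_bdd, map); // some_bdd only has Boolean leaves
+    mtbdd_refs_popptr(3);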
+
+Sylvan implements a number of counting operations:
+
+- ``mtbdd_satcount(bdd, number_of_vars)``: compute the number of minterms (assignments that lead to True) for a function with <number_of_vars> variables; we don't need to know the exact variables that may be in the BDD, just how many there are.
+- ``sylvan_pathcount(bdd)``: compute the number of distinct paths to True.
+- ``mtbdd_nodecount(bdd)``: compute the number of nodes (and leaves) in the BDD.
+- ``mtbdd_nodecount_more(array, length)``: compute the number of nodes (and leaves) in the array of BDDs.
+
+Sylvan implements various advanced operations (one of them is sketched after this list):
+
+- ``sylvan_and_exists(bdd_a, bdd_b, vars)``: compute ``sylvan_exists(sylvan_and(bdd_a, bdd_b), vars)`` in one step.
+- ``sylvan_and_project(bdd_a, bdd_b, vars)``: compute ``sylvan_project(sylvan_and(bdd_a, bdd_b), vars)`` in one step.
+- ``sylvan_cube(vars, values)``: compute a cube (to leaf True) of the given variables, where the array <values> indicates for each variable whether to use it in negative form (value 0), in positive form (value 1), or to skip it (don't care, value 2).
+- ``sylvan_union_cube(set, vars, values)``: compute ``sylvan_or(set, sylvan_cube(vars, values))`` in one step.
+- ``sylvan_constrain(bdd_f, bdd_c)``: compute the generic cofactor of F constrained by C, i.e, set F to False for all assignments not in C.
+- ``sylvan_restrict(bdd_f, bdd_c)``: compute Coudert and Madre's restrict algorithm, which tries to minimize bdd_f according to a care set C using sibling substitution; the invariant is ``restrict(f, c) \and c == f \and c``; the result of this algorithm is often but not always smaller than the original.
+- ``sylvan_pick_cube(bdd)`` or ``sylvan_sat_one_bdd(bdd)``: extract a single path to True from the BDD (returns the BDD of this path).
+- ``sylvan_pick_single_cube(bdd, vars)`` or ``sylvan_sat_single(bdd, vars)``: extract a single minterm from the BDD (returns the BDD of this assignment).
+- ``sylvan_sat_one(bdd, vars, array)``: extract a single minterm from the BDD given the set of variables and write the values of the variables in order to the given array, with 0 when it is negative, 1 when it is positive, and 2 when it is dontcare.
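+
+For example (``vars`` is assumed to be the variable set {1, 3, 5} from the earlier sketch):
+
+.. code:: c
+
+    /* the cube  x1 and (not x3), with variable 5 as don't-care */
+    uint8_t values[3] = {1, 0, 2};
+    BDD cube = sylvan_cube(vars, values);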
+
+Sylvan implements several operations for transition systems. These operations assume an interleaved variable ordering, such that *source* or *unprimed* variables have even parity (0, 2, 4...) and matching *target* or *primed* variables have odd parity (1, 3, 5...).
+The transition relations may be partial transition relations that only manipulate a subset of the variables; hence, the operations also require the set of variables (a short sketch follows the list).
+
+- ``sylvan_relnext(set, relation, vars)``: apply the (partial) relation on the given variables to the set.
+- ``sylvan_relprev(relation, set, vars)``: apply the (partial) relation in reverse to the set; this computes predecessors but can also concatenate relations as follows: ``sylvan_relprev(rel1, rel2, rel1_vars)``.
+- ``sylvan_closure(relation)``: compute the transitive closure of the given relation recursively (see Matsunaga et al., DAC 1993).
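+
+A minimal sketch (``states``, ``relation`` and ``rel_vars`` are assumed to exist):
+
+.. code:: c
+
+    /* one image step (successors) and one pre-image step (predecessors) */
+    BDD successors   = sylvan_relnext(states, relation, rel_vars);
+    BDD predecessors = sylvan_relprev(relation, states, rel_vars);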
+
+See ``src/sylvan_bdd.h`` and ``src/sylvan_mtbdd.h`` for other operations on BDDs and MTBDDs.
+
+Custom leaves
+~~~~~~~~~~~~~
+
+See ``src/sylvan_mt.h`` and the example in ``src/sylvan_gmp.h`` and ``src/sylvan_gmp.c`` for custom leaves in MTBDDs.
+
+Custom decision diagram operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Adding custom decision diagram operations is easy. Include ``sylvan_int.h`` for the internal functions. See ``sylvan_cache.h``
+for how to use the operation cache.
+
+List decision diagrams
+~~~~~~~~~~~~~~~~~~~~~~
+
+See ``src/sylvan_ldd.h`` for operations on list decision diagrams.
+
+File I/O
+~~~~~~~~
+
+You can store and load BDDs using a number of methods, which are documented in the header files ``sylvan_mtbdd.h`` and ``sylvan_ldd.h``.
 
 Support for C++
 ~~~~~~~~~~~~~~~
 
 See ``src/sylvan_obj.hpp`` for the C++ interface.
 
-.. Adding custom decision diagram operations
-.. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
 Table resizing
 ~~~~~~~~~~~~~~
 
 During garbage collection, it is possible to resize the nodes table and
-the cache. Sylvan provides two default implementations: an aggressive
-version that resizes every time garbage collection is performed, and a
+the cache. By default, Sylvan doubles the table sizes during every garbage
+collection until the maximum table size has been reached. There is also a
 less aggressive version that only resizes when at least half the table is
 full. This can be configured in ``src/sylvan_config.h``. It is not
 possible to decrease the size of the nodes table and the cache.
diff --git a/resources/3rdparty/sylvan/examples/ldd2bdd.c b/resources/3rdparty/sylvan/examples/ldd2bdd.c
index 545be9792..b23311bd2 100755
--- a/resources/3rdparty/sylvan/examples/ldd2bdd.c
+++ b/resources/3rdparty/sylvan/examples/ldd2bdd.c
@@ -13,15 +13,15 @@ static int workers = 0; // autodetect
 static int verbose = 0;
 static char* model_filename = NULL; // filename of model
 static char* bdd_filename = NULL; // filename of output BDD
-static char* sizes = "22,27,21,26"; // default sizes
 static int check_results = 0;
+static int no_reachable = 0;
 
 /* argp configuration */
 static struct argp_option options[] =
 {
     {"workers", 'w', "<workers>", 0, "Number of workers (default=0: autodetect)", 0},
-    {"table-sizes", 1, "<tablesize>,<tablemax>,<cachesize>,<cachemax>", 0, "Sizes of nodes table and operation cache as powers of 2", 0},
-    {"check-results", 2, 0, 0, "Check new transition relations ", 0},
+    {"check-results", 2, 0, 0, "Check new transition relations", 0},
+    {"no-reachable", 1, 0, 0, "Do not write reachable states", 0},
     {"verbose", 'v', 0, 0, "Set verbose", 0},
     {0, 0, 0, 0, 0, 0}
 };
@@ -37,7 +37,7 @@ parse_opt(int key, char *arg, struct argp_state *state)
         verbose = 1;
         break;
     case 1:
-        sizes = arg;
+        no_reachable = 1;
         break;
     case 2:
         check_results = 1;
@@ -58,67 +58,112 @@ parse_opt(int key, char *arg, struct argp_state *state)
 
 static struct argp argp = { options, parse_opt, "<model> [<output-bdd>]", 0, 0, 0, 0 };
 
-/* Globals */
+/**
+ * Types (set and relation)
+ */
 typedef struct set
 {
-    MDD mdd;
-    MDD proj;
+    MDD dd;
 } *set_t;
 
 typedef struct relation
 {
-    MDD mdd;
-    MDD meta;
+    MDD dd;
+    MDD meta; // for relprod
+    int r_k, w_k, *r_proj, *w_proj;
 } *rel_t;
 
-static size_t vector_size; // size of vector
+static int vector_size; // size of vector
 static int next_count; // number of partitions of the transition relation
 static rel_t *next; // each partition of the transition relation
 static int actionbits = 0;
 static int has_actions = 0;
 
-#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); }
+#define Abort(...) { fprintf(stderr, __VA_ARGS__); fprintf(stderr, "Abort at line %d!\n", __LINE__); exit(-1); }
 
 /* Load a set from file */
 #define set_load(f) CALL(set_load, f)
 TASK_1(set_t, set_load, FILE*, f)
 {
-    lddmc_serialize_fromfile(f);
-
-    size_t mdd;
-    size_t proj;
-    int size;
+    set_t set = (set_t)malloc(sizeof(struct set));
 
-    if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
-    if (fread(&proj, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
-    if (fread(&size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
+    int k;
+    if (fread(&k, sizeof(int), 1, f) != 1) Abort("Invalid input file!");
+    if (k != -1) Abort("Invalid input file!");
 
-    set_t set = (set_t)malloc(sizeof(struct set));
-    set->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd));
-    set->proj = lddmc_ref(lddmc_serialize_get_reversed(proj));
+    lddmc_serialize_fromfile(f);
+    size_t dd;
+    if (fread(&dd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!");
+    set->dd = lddmc_serialize_get_reversed(dd);
+    lddmc_protect(&set->dd);
 
     return set;
 }
 
 /* Load a relation from file */
-#define rel_load(f) CALL(rel_load, f)
-TASK_1(rel_t, rel_load, FILE*, f)
+#define rel_load_proj(f) CALL(rel_load_proj, f)
+TASK_1(rel_t, rel_load_proj, FILE*, f)
 {
-    lddmc_serialize_fromfile(f);
+    int r_k, w_k;
+    if (fread(&r_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
+    if (fread(&w_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
 
-    size_t mdd;
-    size_t meta;
+    rel_t rel = (rel_t)malloc(sizeof(struct relation));
+    rel->r_k = r_k;
+    rel->w_k = w_k;
+    rel->r_proj = (int*)malloc(sizeof(int[rel->r_k]));
+    rel->w_proj = (int*)malloc(sizeof(int[rel->w_k]));
+
+    if (fread(rel->r_proj, sizeof(int), rel->r_k, f) != (size_t)rel->r_k) Abort("Invalid file format.");
+    if (fread(rel->w_proj, sizeof(int), rel->w_k, f) != (size_t)rel->w_k) Abort("Invalid file format.");
+
+    int *r_proj = rel->r_proj;
+    int *w_proj = rel->w_proj;
+
+    /* Compute the meta */
+    uint32_t meta[vector_size*2+2];
+    memset(meta, 0, sizeof(uint32_t[vector_size*2+2]));
+    int r_i=0, w_i=0, i=0, j=0;
+    for (;;) {
+        int type = 0;
+        if (r_i < r_k && r_proj[r_i] == i) {
+            r_i++;
+            type += 1; // read
+        }
+        if (w_i < w_k && w_proj[w_i] == i) {
+            w_i++;
+            type += 2; // write
+        }
+        if (type == 0) meta[j++] = 0;
+        else if (type == 1) { meta[j++] = 3; }
+        else if (type == 2) { meta[j++] = 4; }
+        else if (type == 3) { meta[j++] = 1; meta[j++] = 2; }
+        if (r_i == r_k && w_i == w_k) {
+            meta[j++] = 5; // action label
+            meta[j++] = (uint32_t)-1;
+            break;
+        }
+        i++;
+    }
 
-    if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
-    if (fread(&meta, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
+    rel->meta = lddmc_cube((uint32_t*)meta, j);
+    rel->dd = lddmc_false;
 
-    rel_t rel = (rel_t)malloc(sizeof(struct relation));
-    rel->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd));
-    rel->meta = lddmc_ref(lddmc_serialize_get_reversed(meta));
+    lddmc_protect(&rel->meta);
+    lddmc_protect(&rel->dd);
 
     return rel;
 }
 
+#define rel_load(f, rel) CALL(rel_load, f, rel)
+VOID_TASK_2(rel_load, FILE*, f, rel_t, rel)
+{
+    lddmc_serialize_fromfile(f);
+    size_t dd;
+    if (fread(&dd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!");
+    rel->dd = lddmc_serialize_get_reversed(dd);
+}
+
 /**
  * Compute the highest value for each variable level.
  * This method is called for the set of reachable states.
@@ -199,7 +244,7 @@ VOID_TASK_3(compute_highest_action, MDD, dd, MDD, meta, uint32_t*, target)
  */
 static uint64_t bdd_from_ldd_id;
 #define bdd_from_ldd(dd, bits, firstvar) CALL(bdd_from_ldd, dd, bits, firstvar)
-TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
+TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_dd, uint32_t, firstvar)
 {
     /* simple for leaves */
     if (dd == lddmc_false) return mtbdd_false;
@@ -208,16 +253,16 @@ TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
     MTBDD result;
     /* get from cache */
     /* note: some assumptions about the encoding... */
-    if (cache_get3(bdd_from_ldd_id, dd, bits_mdd, firstvar, &result)) return result;
+    if (cache_get3(bdd_from_ldd_id, dd, bits_dd, firstvar, &result)) return result;
 
     mddnode_t n = LDD_GETNODE(dd);
-    mddnode_t nbits = LDD_GETNODE(bits_mdd);
+    mddnode_t nbits = LDD_GETNODE(bits_dd);
     int bits = (int)mddnode_getvalue(nbits);
 
-    /* spawn right, same bits_mdd and firstvar */
-    mtbdd_refs_spawn(SPAWN(bdd_from_ldd, mddnode_getright(n), bits_mdd, firstvar));
+    /* spawn right, same bits_dd and firstvar */
+    mtbdd_refs_spawn(SPAWN(bdd_from_ldd, mddnode_getright(n), bits_dd, firstvar));
 
-    /* call down, with next bits_mdd and firstvar */
+    /* call down, with next bits_dd and firstvar */
     MTBDD down = CALL(bdd_from_ldd, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits);
 
     /* encode current value */
@@ -239,7 +284,7 @@ TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
     mtbdd_refs_pop(2);
 
     /* put in cache */
-    cache_put3(bdd_from_ldd_id, dd, bits_mdd, firstvar, result);
+    cache_put3(bdd_from_ldd_id, dd, bits_dd, firstvar, result);
 
     return result;
 }
@@ -249,7 +294,7 @@ TASK_3(MTBDD, bdd_from_ldd, MDD, dd, MDD, bits_mdd, uint32_t, firstvar)
  */
 static uint64_t bdd_from_ldd_rel_id;
 #define bdd_from_ldd_rel(dd, bits, firstvar, meta) CALL(bdd_from_ldd_rel, dd, bits, firstvar, meta)
-TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD, meta)
+TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_dd, uint32_t, firstvar, MDD, meta)
 {
     if (dd == lddmc_false) return mtbdd_false;
     if (dd == lddmc_true) return mtbdd_true;
@@ -266,11 +311,11 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
 
     MTBDD result;
     /* note: assumptions */
-    if (cache_get4(bdd_from_ldd_rel_id, dd, bits_mdd, firstvar, meta, &result)) return result;
+    if (cache_get4(bdd_from_ldd_rel_id, dd, bits_dd, firstvar, meta, &result)) return result;
 
     const mddnode_t n = LDD_GETNODE(dd);
     const mddnode_t nmeta = LDD_GETNODE(meta);
-    const mddnode_t nbits = LDD_GETNODE(bits_mdd);
+    const mddnode_t nbits = LDD_GETNODE(bits_dd);
     const int bits = (int)mddnode_getvalue(nbits);
 
     const uint32_t vmeta = mddnode_getvalue(nmeta);
@@ -285,10 +330,10 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
         assert(mddnode_getright(n) != mtbdd_true);
 
         /* spawn right */
-        mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
+        mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_dd, firstvar, meta));
 
         /* compute down with same bits / firstvar */
-        MTBDD down = bdd_from_ldd_rel(mddnode_getdown(n), bits_mdd, firstvar, mddnode_getdown(nmeta));
+        MTBDD down = bdd_from_ldd_rel(mddnode_getdown(n), bits_dd, firstvar, mddnode_getdown(nmeta));
         mtbdd_refs_push(down);
 
         /* encode read value */
@@ -319,7 +364,7 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
 
         /* spawn right */
         assert(mddnode_getright(n) != mtbdd_true);
-        mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
+        mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_dd, firstvar, meta));
 
         /* get recursive result */
         MTBDD down = CALL(bdd_from_ldd_rel, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits, mddnode_getdown(nmeta));
@@ -358,7 +403,7 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
         assert(!mddnode_getcopy(n));  // do not process read copy nodes
 
         /* spawn right */
-        mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_mdd, firstvar, meta));
+        mtbdd_refs_spawn(SPAWN(bdd_from_ldd_rel, mddnode_getright(n), bits_dd, firstvar, meta));
 
         /* get recursive result */
         MTBDD down = CALL(bdd_from_ldd_rel, mddnode_getdown(n), mddnode_getdown(nbits), firstvar + 2*bits, mddnode_getdown(nmeta));
@@ -402,7 +447,7 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
         assert(vmeta <= 5);
     }
 
-    cache_put4(bdd_from_ldd_rel_id, dd, bits_mdd, firstvar, meta, result);
+    cache_put4(bdd_from_ldd_rel_id, dd, bits_dd, firstvar, meta, result);
 
     return result;
 }
@@ -411,7 +456,7 @@ TASK_4(MTBDD, bdd_from_ldd_rel, MDD, dd, MDD, bits_mdd, uint32_t, firstvar, MDD,
  * Compute the BDD equivalent of the meta variable (to a variables cube)
  */
 MTBDD
-meta_to_bdd(MDD meta, MDD bits_mdd, uint32_t firstvar)
+meta_to_bdd(MDD meta, MDD bits_dd, uint32_t firstvar)
 {
     if (meta == lddmc_false || meta == lddmc_true) return mtbdd_true;
 
@@ -430,10 +475,10 @@ meta_to_bdd(MDD meta, MDD bits_mdd, uint32_t firstvar)
     
     if (vmeta == 1) {
         /* return recursive result, don't go down on bits */
-        return meta_to_bdd(mddnode_getdown(nmeta), bits_mdd, firstvar);
+        return meta_to_bdd(mddnode_getdown(nmeta), bits_dd, firstvar);
     }
 
-    const mddnode_t nbits = LDD_GETNODE(bits_mdd);
+    const mddnode_t nbits = LDD_GETNODE(bits_dd);
     const int bits = (int)mddnode_getvalue(nbits);
 
     /* compute recursive result */
@@ -450,16 +495,6 @@ meta_to_bdd(MDD meta, MDD bits_mdd, uint32_t firstvar)
     return res;
 }
 
-static char*
-to_h(double size, char *buf)
-{
-    const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
-    int i = 0;
-    for (;size>1024;size/=1024) i++;
-    sprintf(buf, "%.*f %s", i, size, units[i]);
-    return buf;
-}
-
 VOID_TASK_0(gc_start)
 {
     printf("Starting garbage collection\n");
@@ -475,37 +510,13 @@ main(int argc, char **argv)
 {
     argp_parse(&argp, argc, argv, 0, 0, 0);
 
-    // Parse table sizes
-    int tablesize, maxtablesize, cachesize, maxcachesize;
-    if (sscanf(sizes, "%d,%d,%d,%d", &tablesize, &maxtablesize, &cachesize, &maxcachesize) != 4) {
-        Abort("Invalid string for --table-sizes, try e.g. --table-sizes=23,28,22,27");
-    }
-    if (tablesize < 10 || maxtablesize < 10 || cachesize < 10 || maxcachesize < 10 ||
-            tablesize > 40 || maxtablesize > 40 || cachesize > 40 || maxcachesize > 40) {
-        Abort("Invalid string for --table-sizes, must be between 10 and 40");
-    }
-    if (tablesize > maxtablesize) {
-        Abort("Invalid string for --table-sizes, tablesize is larger than maxtablesize");
-    }
-    if (cachesize > maxcachesize) {
-        Abort("Invalid string for --table-sizes, cachesize is larger than maxcachesize");
-    }
-
-    // Report table sizes
-    char buf[32];
-    to_h((1ULL<<maxtablesize)*24+(1ULL<<maxcachesize)*36, buf);
-    printf("Sylvan allocates %s virtual memory for nodes table and operation cache.\n", buf);
-    to_h((1ULL<<tablesize)*24+(1ULL<<cachesize)*36, buf);
-    printf("Initial nodes table and operation cache requires %s.\n", buf);
-
     // Init Lace
     lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue
     lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup
-
     LACE_ME;
 
     // Init Sylvan
-    sylvan_set_sizes(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
+    sylvan_set_limits(1LL<<30, 1, 10);
     sylvan_init_package();
     sylvan_init_ldd();
     sylvan_init_mtbdd();
@@ -523,34 +534,20 @@ main(int argc, char **argv)
     if (f == NULL) Abort("Cannot open file '%s'!\n", model_filename);
 
     // Read integers per vector
-    if (fread(&vector_size, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
+    if (fread(&vector_size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
 
     // Read initial state
-    if (verbose) {
-        printf("Loading initial state... ");
-        fflush(stdout);
-    }
+    if (verbose) printf("Loading initial state.\n");
     set_t initial = set_load(f);
-    if (verbose) printf("done.\n");
 
     // Read number of transitions
     if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
     next = (rel_t*)malloc(sizeof(rel_t) * next_count);
 
     // Read transitions
-    if (verbose) {
-        printf("Loading transition relations... ");
-        fflush(stdout);
-    }
-    int i;
-    for (i=0; i<next_count; i++) {
-        next[i] = rel_load(f);
-        if (verbose) {
-            printf("%d, ", i);
-            fflush(stdout);
-        }
-    }
-    if (verbose) printf("done.\n");
+    if (verbose) printf("Loading transition relations.\n");
+    for (int i=0; i<next_count; i++) next[i] = rel_load_proj(f);
+    for (int i=0; i<next_count; i++) rel_load(f, next[i]);
 
     // Read whether reachable states are stored
     int has_reachable = 0;
@@ -558,16 +555,13 @@ main(int argc, char **argv)
     if (has_reachable == 0) Abort("Input file missing reachable states!\n");
 
     // Read reachable states
-    if (verbose) {
-        printf("Loading reachable states... ");
-        fflush(stdout);
-    }
+    if (verbose) printf("Loading reachable states.\n");
     set_t states = set_load(f);
-    if (verbose) printf("done.\n");
     
     // Read number of action labels
     int action_labels_count = 0;
-    if (fread(&action_labels_count, sizeof(int), 1, f) != 1) Abort("Input file missing action label count!\n");
+    if (fread(&action_labels_count, sizeof(int), 1, f) != 1) action_labels_count = 0;
+    // tolerate a missing action label count and treat it as zero
 
     // Read action labels
     char *action_labels[action_labels_count];
@@ -587,11 +581,11 @@ main(int argc, char **argv)
 
     // Report statistics
     if (verbose) {
-        printf("%zu integers per state, %d transition groups\n", vector_size, next_count);
+        printf("%d integers per state, %d transition groups\n", vector_size, next_count);
         printf("LDD nodes:\n");
-        printf("Initial states: %zu LDD nodes\n", lddmc_nodecount(initial->mdd));
-        for (i=0; i<next_count; i++) {
-            printf("Transition %d: %zu LDD nodes\n", i, lddmc_nodecount(next[i]->mdd));
+        printf("Initial states: %zu LDD nodes\n", lddmc_nodecount(initial->dd));
+        for (int i=0; i<next_count; i++) {
+            printf("Transition %d: %zu LDD nodes\n", i, lddmc_nodecount(next[i]->dd));
         }
     }
 
@@ -600,28 +594,18 @@ main(int argc, char **argv)
 
     // Compute highest value at each level (from reachable states)
     uint32_t highest[vector_size];
-    for (size_t i=0; i<vector_size; i++) highest[i] = 0;
-    compute_highest(states->mdd, highest);
+    for (int i=0; i<vector_size; i++) highest[i] = 0;
+    compute_highest(states->dd, highest);
 
     // Compute highest action label value (from transition relations)
     uint32_t highest_action = 0;
     for (int i=0; i<next_count; i++) {
-        compute_highest_action(next[i]->mdd, next[i]->meta, &highest_action);
-    }
-
-    // Report highest integers
-    /*
-    printf("Highest integer per level: ");
-    for (size_t i=0; i<vector_size; i++) {
-        if (i>0) printf(", ");
-        printf("%u", highest[i]);
+        compute_highest_action(next[i]->dd, next[i]->meta, &highest_action);
     }
-    printf("\n");
-    */
 
     // Compute number of bits for each level
     int bits[vector_size];
-    for (size_t i=0; i<vector_size; i++) {
+    for (int i=0; i<vector_size; i++) {
         bits[i] = 0;
         while (highest[i] != 0) {
             bits[i]++;
@@ -641,7 +625,7 @@ main(int argc, char **argv)
     // Report number of bits
     if (verbose) {
         printf("Bits per level: ");
-        for (size_t i=0; i<vector_size; i++) {
+        for (int i=0; i<vector_size; i++) {
             if (i>0) printf(", ");
             printf("%d", bits[i]);
         }
@@ -650,15 +634,15 @@ main(int argc, char **argv)
     }
 
     // Compute bits MDD
-    MDD bits_mdd = lddmc_true;
-    for (size_t i=0; i<vector_size; i++) {
-        bits_mdd = lddmc_makenode(bits[vector_size-i-1], bits_mdd, lddmc_false);
+    MDD bits_dd = lddmc_true;
+    for (int i=0; i<vector_size; i++) {
+        bits_dd = lddmc_makenode(bits[vector_size-i-1], bits_dd, lddmc_false);
     }
-    lddmc_ref(bits_mdd);
+    lddmc_ref(bits_dd);
 
     // Compute total number of bits
     int totalbits = 0;
-    for (size_t i=0; i<vector_size; i++) {
+    for (int i=0; i<vector_size; i++) {
         totalbits += bits[i];
     }
 
@@ -677,28 +661,23 @@ main(int argc, char **argv)
     if (f == NULL) Abort("Cannot open file '%s'!\n", bdd_filename);
 
     // Write domain...
-    int vector_size = 1;
-    fwrite(&totalbits, sizeof(int), 1, f);  // use number of bits as vector size
-    fwrite(&vector_size, sizeof(int), 1, f);  // set each to 1
+    fwrite(&vector_size, sizeof(int), 1, f);
+    fwrite(bits, sizeof(int), vector_size, f);
     fwrite(&actionbits, sizeof(int), 1, f);
 
     // Write initial state...
-    MTBDD new_initial = bdd_from_ldd(initial->mdd, bits_mdd, 0);
-    assert((size_t)mtbdd_satcount(new_initial, totalbits) == (size_t)lddmc_satcount_cached(initial->mdd));
+    MTBDD new_initial = bdd_from_ldd(initial->dd, bits_dd, 0);
+    assert((size_t)mtbdd_satcount(new_initial, totalbits) == (size_t)lddmc_satcount_cached(initial->dd));
     mtbdd_refs_push(new_initial);
     {
-        size_t a = sylvan_serialize_add(new_initial);
-        size_t b = sylvan_serialize_add(state_vars);
-        size_t s = totalbits;
-        sylvan_serialize_tofile(f);
-        fwrite(&a, sizeof(size_t), 1, f);
-        fwrite(&s, sizeof(size_t), 1, f);
-        fwrite(&b, sizeof(size_t), 1, f);
+        int k = -1;
+        fwrite(&k, sizeof(int), 1, f);
+        mtbdd_writer_tobinary(f, &new_initial, 1);
     }
 
     // Custom operation that converts to BDD given number of bits for each level
-    MTBDD new_states = bdd_from_ldd(states->mdd, bits_mdd, 0);
-    assert((size_t)mtbdd_satcount(new_states, totalbits) == (size_t)lddmc_satcount_cached(states->mdd));
+    MTBDD new_states = bdd_from_ldd(states->dd, bits_dd, 0);
+    assert((size_t)mtbdd_satcount(new_states, totalbits) == (size_t)lddmc_satcount_cached(states->dd));
     mtbdd_refs_push(new_states);
 
     // Report size of BDD
@@ -710,51 +689,52 @@ main(int argc, char **argv)
     // Write number of transitions
     fwrite(&next_count, sizeof(int), 1, f);
 
-    // Write transitions
+    // Write meta for each transition
+    for (int i=0; i<next_count; i++) {
+        fwrite(&next[i]->r_k, sizeof(int), 1, f);
+        fwrite(&next[i]->w_k, sizeof(int), 1, f);
+        fwrite(next[i]->r_proj, sizeof(int), next[i]->r_k, f);
+        fwrite(next[i]->w_proj, sizeof(int), next[i]->w_k, f);
+    }
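+    // Note: the projection headers (r_k, w_k, r_proj, w_proj) of all relations
+    // are written first; the relation BDDs themselves follow in a second pass
+    // below.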
+
+    // Write BDD for each transition
     for (int i=0; i<next_count; i++) {
         // Compute new transition relation
-        MTBDD new_rel = bdd_from_ldd_rel(next[i]->mdd, bits_mdd, 0, next[i]->meta);
+        MTBDD new_rel = bdd_from_ldd_rel(next[i]->dd, bits_dd, 0, next[i]->meta);
         mtbdd_refs_push(new_rel);
+        mtbdd_writer_tobinary(f, &new_rel, 1);
 
-        // Compute new <variables> for the current transition relation
-        MTBDD new_vars = meta_to_bdd(next[i]->meta, bits_mdd, 0);
-        mtbdd_refs_push(new_vars);
+        // Report number of nodes
+        if (verbose) printf("Transition %d: %zu BDD nodes\n", i, mtbdd_nodecount(new_rel));
 
         if (check_results) {
+            // Compute new <variables> for the current transition relation
+            MTBDD new_vars = meta_to_bdd(next[i]->meta, bits_dd, 0);
+            mtbdd_refs_push(new_vars);
+
             // Test if the transition is correctly converted
             MTBDD test = sylvan_relnext(new_states, new_rel, new_vars);
             mtbdd_refs_push(test);
-            MDD succ = lddmc_relprod(states->mdd, next[i]->mdd, next[i]->meta);
+            MDD succ = lddmc_relprod(states->dd, next[i]->dd, next[i]->meta);
             lddmc_refs_push(succ);
-            MTBDD test2 = bdd_from_ldd(succ, bits_mdd, 0);
+            MTBDD test2 = bdd_from_ldd(succ, bits_dd, 0);
             if (test != test2) Abort("Conversion error!\n");
-            mtbdd_refs_pop(1);
             lddmc_refs_pop(1);
+            mtbdd_refs_pop(2);
         }
 
-        // Report number of nodes
-        if (verbose) printf("Transition %d: %zu BDD nodes\n", i, mtbdd_nodecount(new_rel));
-
-        size_t a = sylvan_serialize_add(new_rel);
-        size_t b = sylvan_serialize_add(new_vars);
-        sylvan_serialize_tofile(f);
-        fwrite(&a, sizeof(size_t), 1, f);
-        fwrite(&b, sizeof(size_t), 1, f);
+        mtbdd_refs_pop(1);
     }
 
     // Write reachable states
-    has_reachable = 1;
+    if (no_reachable) has_reachable = 0;
     fwrite(&has_reachable, sizeof(int), 1, f);
-
-    {
-        size_t a = sylvan_serialize_add(new_states);
-        size_t b = sylvan_serialize_add(state_vars);
-        size_t s = totalbits;
-        sylvan_serialize_tofile(f);
-        fwrite(&a, sizeof(size_t), 1, f);
-        fwrite(&s, sizeof(size_t), 1, f);
-        fwrite(&b, sizeof(size_t), 1, f);
+    if (has_reachable) {
+        int k = -1;
+        fwrite(&k, sizeof(int), 1, f);
+        mtbdd_writer_tobinary(f, &new_states, 1);
     }
+    mtbdd_refs_pop(1);  // new_states
 
     // Write action labels
     fwrite(&action_labels_count, sizeof(int), 1, f);
diff --git a/resources/3rdparty/sylvan/examples/lddmc.c b/resources/3rdparty/sylvan/examples/lddmc.c
index 0035420c1..52fdb5033 100755
--- a/resources/3rdparty/sylvan/examples/lddmc.c
+++ b/resources/3rdparty/sylvan/examples/lddmc.c
@@ -1,6 +1,6 @@
 #include <argp.h>
-#include <assert.h>
 #include <inttypes.h>
+#include <locale.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -11,18 +11,19 @@
 #endif
 
 #include <getrss.h>
-#include <sylvan.h>
-#include <sylvan_table.h>
 
-/* Configuration */
+#include <sylvan_int.h>
+
+/* Configuration (via argp) */
 static int report_levels = 0; // report states at start of every level
 static int report_table = 0; // report table size at end of every level
-static int strategy = 1; // set to 1 = use PAR strategy; set to 0 = use BFS strategy
-static int check_deadlocks = 0; // set to 1 to check for deadlocks
-static int print_transition_matrix = 1; // print transition relation matrix
+static int report_nodes = 0; // report number of nodes of LDDs
+static int strategy = 2; // 0 = BFS, 1 = PAR, 2 = SAT, 3 = CHAINING
+static int check_deadlocks = 0; // set to 1 to check for deadlocks on-the-fly
+static int print_transition_matrix = 0; // print transition relation matrix
 static int workers = 0; // autodetect
 static char* model_filename = NULL; // filename of model
-static char* out_filename = NULL; // filename of output BDD
+static char* out_filename = NULL; // filename of output
 #ifdef HAVE_PROFILER
 static char* profile_filename = NULL; // filename for profiling
 #endif
@@ -31,13 +32,15 @@ static char* profile_filename = NULL; // filename for profiling
 static struct argp_option options[] =
 {
     {"workers", 'w', "<workers>", 0, "Number of workers (default=0: autodetect)", 0},
-    {"strategy", 's', "<bfs|par|sat>", 0, "Strategy for reachability (default=par)", 0},
+    {"strategy", 's', "<bfs|par|sat|chaining>", 0, "Strategy for reachability (default=par)", 0},
 #ifdef HAVE_PROFILER
     {"profiler", 'p', "<filename>", 0, "Filename for profiling", 0},
 #endif
     {"deadlocks", 3, 0, 0, "Check for deadlocks", 1},
+    {"count-nodes", 5, 0, 0, "Report #nodes for LDDs", 1},
     {"count-states", 1, 0, 0, "Report #states at each level", 1},
     {"count-table", 2, 0, 0, "Report table usage at each level", 1},
+    {"print-matrix", 4, 0, 0, "Print transition matrix", 1},
     {0, 0, 0, 0, 0, 0}
 };
 
@@ -52,8 +55,12 @@ parse_opt(int key, char *arg, struct argp_state *state)
         if (strcmp(arg, "bfs")==0) strategy = 0;
         else if (strcmp(arg, "par")==0) strategy = 1;
         else if (strcmp(arg, "sat")==0) strategy = 2;
+        else if (strcmp(arg, "chaining")==0) strategy = 3;
         else argp_usage(state);
         break;
+    case 4:
+        print_transition_matrix = 1;
+        break;
     case 3:
         check_deadlocks = 1;
         break;
@@ -63,6 +70,9 @@ parse_opt(int key, char *arg, struct argp_state *state)
     case 2:
         report_table = 1;
         break;
+    case 5:
+        report_nodes = 1;
+        break;
 #ifdef HAVE_PROFILER
     case 'p':
         profile_filename = arg;
@@ -84,119 +94,214 @@ parse_opt(int key, char *arg, struct argp_state *state)
 
 static struct argp argp = { options, parse_opt, "<model> [<output-bdd>]", 0, 0, 0, 0 };
 
-/* Globals */
+/**
+ * Types (set and relation)
+ */
 typedef struct set
 {
-    MDD mdd;
-    MDD proj;
-    int size;
+    MDD dd;
 } *set_t;
 
 typedef struct relation
 {
-    MDD mdd;
-    MDD meta;
-    int size;
+    MDD dd;
+    MDD meta; // for relprod
+    int r_k, w_k, *r_proj, *w_proj;
+    int firstvar; // for saturation/chaining
+    MDD topmeta; // for saturation
 } *rel_t;
 
-static size_t vector_size; // size of vector
+static int vector_size; // size of vector in integers
 static int next_count; // number of partitions of the transition relation
 static rel_t *next; // each partition of the transition relation
 
-#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); }
+/**
+ * Obtain current wallclock time
+ */
+static double
+wctime()
+{
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    return (tv.tv_sec + 1E-6 * tv.tv_usec);
+}
+
+static double t_start;
+#define INFO(s, ...) fprintf(stdout, "[% 8.2f] " s, wctime()-t_start, ##__VA_ARGS__)
+#define Abort(...) { fprintf(stderr, __VA_ARGS__); fprintf(stderr, "Abort at line %d!\n", __LINE__); exit(-1); }
 
-/* Load a set from file */
+/**
+ * Load a set from file
+ */
 static set_t
 set_load(FILE* f)
 {
-    lddmc_serialize_fromfile(f);
-
-    size_t mdd;
-    size_t proj;
-    int size;
-
-    if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
-    if (fread(&proj, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
-    if (fread(&size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
+    set_t set = (set_t)malloc(sizeof(struct set));
 
-    LACE_ME;
+    /* read projection; only the full state vector (k == -1) is supported */
+    int k;
+    if (fread(&k, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
+    if (k != -1) Abort("Invalid input file!\n"); // only support full vector
 
-    set_t set = (set_t)malloc(sizeof(struct set));
-    set->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd));
-    set->proj = lddmc_ref(lddmc_serialize_get_reversed(proj));
-    set->size = size;
+    /* read dd */
+    lddmc_serialize_fromfile(f);
+    size_t dd;
+    if (fread(&dd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
+    set->dd = lddmc_serialize_get_reversed(dd);
+    lddmc_protect(&set->dd);
 
     return set;
 }
 
-/* Save a set to file */
+/**
+ * Save a set to file
+ */
 static void
 set_save(FILE* f, set_t set)
 {
-    size_t mdd = lddmc_serialize_add(set->mdd);
-    size_t proj = lddmc_serialize_add(set->proj);
+    int k = -1;
+    fwrite(&k, sizeof(int), 1, f);
+    size_t dd = lddmc_serialize_add(set->dd);
     lddmc_serialize_tofile(f);
-    fwrite(&mdd, sizeof(size_t), 1, f);
-    fwrite(&proj, sizeof(size_t), 1, f);
-    fwrite(&set->size, sizeof(int), 1, f);;
+    fwrite(&dd, sizeof(size_t), 1, f);
+}
+
+/**
+ * Load a relation from file
+ */
+#define rel_load_proj(f) CALL(rel_load_proj, f)
+TASK_1(rel_t, rel_load_proj, FILE*, f)
+{
+    int r_k, w_k;
+    if (fread(&r_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
+    if (fread(&w_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
+
+    rel_t rel = (rel_t)malloc(sizeof(struct relation));
+    rel->r_k = r_k;
+    rel->w_k = w_k;
+    rel->r_proj = (int*)malloc(sizeof(int[rel->r_k]));
+    rel->w_proj = (int*)malloc(sizeof(int[rel->w_k]));
+
+    if (fread(rel->r_proj, sizeof(int), rel->r_k, f) != (size_t)rel->r_k) Abort("Invalid file format.");
+    if (fread(rel->w_proj, sizeof(int), rel->w_k, f) != (size_t)rel->w_k) Abort("Invalid file format.");
+
+    int *r_proj = rel->r_proj;
+    int *w_proj = rel->w_proj;
+
+    rel->firstvar = -1;
+
+    /* Compute the meta */
+    uint32_t meta[vector_size*2+2];
+    memset(meta, 0, sizeof(uint32_t[vector_size*2+2]));
+    int r_i=0, w_i=0, i=0, j=0;
+    for (;;) {
+        int type = 0;
+        if (r_i < r_k && r_proj[r_i] == i) {
+            r_i++;
+            type += 1; // read
+        }
+        if (w_i < w_k && w_proj[w_i] == i) {
+            w_i++;
+            type += 2; // write
+        }
+        if (type == 0) meta[j++] = 0;
+        else if (type == 1) { meta[j++] = 3; }
+        else if (type == 2) { meta[j++] = 4; }
+        else if (type == 3) { meta[j++] = 1; meta[j++] = 2; }
+        if (type != 0 && rel->firstvar == -1) rel->firstvar = i;
+        if (r_i == r_k && w_i == w_k) {
+            meta[j++] = 5; // action label
+            meta[j++] = (uint32_t)-1;
+            break;
+        }
+        i++;
+    }
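+
+    /* Example: with r_proj = {1} and w_proj = {1,2} the loop above produces
+     * meta = [0, 1, 2, 4, 5, -1] and firstvar = 1 (presumably read as: level 0
+     * untouched, level 1 read+written, level 2 write-only, then the action
+     * label and the end marker, following the usual lddmc meta convention). */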
+
+    rel->meta = lddmc_cube((uint32_t*)meta, j);
+    lddmc_protect(&rel->meta);
+    if (rel->firstvar != -1) {
+        rel->topmeta = lddmc_cube((uint32_t*)meta+rel->firstvar, j-rel->firstvar);
+        lddmc_protect(&rel->topmeta);
+    }
+    rel->dd = lddmc_false;
+    lddmc_protect(&rel->dd);
+
+    return rel;
+}
+
+#define rel_load(f, rel) CALL(rel_load, f, rel)
+VOID_TASK_2(rel_load, FILE*, f, rel_t, rel)
+{
+    lddmc_serialize_fromfile(f);
+    size_t dd;
+    if (fread(&dd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!");
+    rel->dd = lddmc_serialize_get_reversed(dd);
+}
+
+/**
+ * Save a relation to file
+ */
+static void
+rel_save_proj(FILE* f, rel_t rel)
+{
+    fwrite(&rel->r_k, sizeof(int), 1, f);
+    fwrite(&rel->w_k, sizeof(int), 1, f);
+    fwrite(rel->r_proj, sizeof(int), rel->r_k, f);
+    fwrite(rel->w_proj, sizeof(int), rel->w_k, f);
 }
 
 static void
 rel_save(FILE* f, rel_t rel)
 {
-    size_t mdd = lddmc_serialize_add(rel->mdd);
-    size_t meta = lddmc_serialize_add(rel->meta);
+    size_t dd = lddmc_serialize_add(rel->dd);
     lddmc_serialize_tofile(f);
-    fwrite(&mdd, sizeof(size_t), 1, f);
-    fwrite(&meta, sizeof(size_t), 1, f);
+    fwrite(&dd, sizeof(size_t), 1, f);
 }
 
+/**
+ * Clone a set
+ */
 static set_t
 set_clone(set_t source)
 {
     set_t set = (set_t)malloc(sizeof(struct set));
-    set->mdd = lddmc_ref(source->mdd);
-    set->proj = lddmc_ref(source->proj);
-    set->size = source->size;
+    set->dd = source->dd;
+    lddmc_protect(&set->dd);
     return set;
 }
 
-static int
-calculate_size(MDD meta)
+static char*
+to_h(double size, char *buf)
 {
-    int result = 0;
-    uint32_t val = lddmc_getvalue(meta);
-    while (val != (uint32_t)-1) {
-        if (val != 0) result += 1;
-        meta = lddmc_follow(meta, val);
-        assert(meta != lddmc_true && meta != lddmc_false);
-        val = lddmc_getvalue(meta);
-    }
-    return result;
+    const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
+    int i = 0;
+    for (;size>1024;size/=1024) i++;
+    sprintf(buf, "%.*f %s", i, size, units[i]);
+    return buf;
 }
 
-/* Load a relation from file */
-static rel_t
-rel_load(FILE* f)
+static void
+print_memory_usage(void)
 {
-    lddmc_serialize_fromfile(f);
-
-    size_t mdd;
-    size_t meta;
-
-    if (fread(&mdd, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
-    if (fread(&meta, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
-
-    LACE_ME;
-
-    rel_t rel = (rel_t)malloc(sizeof(struct relation));
-    rel->mdd = lddmc_ref(lddmc_serialize_get_reversed(mdd));
-    rel->meta = lddmc_ref(lddmc_serialize_get_reversed(meta));
-    rel->size = calculate_size(rel->meta);
+    char buf[32];
+    to_h(getCurrentRSS(), buf);
+    INFO("Memory usage: %s\n", buf);
+}
 
-    return rel;
+/**
+ * Get the first variable of the transition relation
+ */
+static int
+get_first(MDD meta)
+{
+    uint32_t val = lddmc_getvalue(meta);
+    if (val != 0) return 0;
+    return 1+get_first(lddmc_follow(meta, val));
 }
 
+/**
+ * Print a single example of a set to stdout
+ */
 static void
 print_example(MDD example)
 {
@@ -205,9 +310,8 @@ print_example(MDD example)
         uint32_t vec[vector_size];
         lddmc_sat_one(example, vec, vector_size);
 
-        size_t i;
         printf("[");
-        for (i=0; i<vector_size; i++) {
+        for (int i=0; i<vector_size; i++) {
             if (i>0) printf(",");
             printf("%" PRIu32, vec[i]);
         }
@@ -232,154 +336,176 @@ print_matrix(size_t size, MDD meta)
     }
 }
 
-static char*
-to_h(double size, char *buf)
-{
-    const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
-    int i = 0;
-    for (;size>1024;size/=1024) i++;
-    sprintf(buf, "%.*f %s", i, size, units[i]);
-    return buf;
-}
-
-static int
-get_first(MDD meta)
-{
-    uint32_t val = lddmc_getvalue(meta);
-    if (val != 0) return 0;
-    return 1+get_first(lddmc_follow(meta, val));
-}
-
-/* Straight-forward implementation of parallel reduction */
+/**
+ * Implement parallel strategy (that performs the relprod operations in parallel)
+ */
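+// (The lddmc_refs_push/pop and lddmc_refs_pushptr/popptr calls below keep
+//  intermediate MDDs and local variables visible to Sylvan's garbage
+//  collector, which may run during any subsequent MDD operation.)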
 TASK_5(MDD, go_par, MDD, cur, MDD, visited, size_t, from, size_t, len, MDD*, deadlocks)
 {
     if (len == 1) {
         // Calculate NEW successors (not in visited)
-        MDD succ = lddmc_ref(lddmc_relprod(cur, next[from]->mdd, next[from]->meta));
+        MDD succ = lddmc_relprod(cur, next[from]->dd, next[from]->meta);
+        lddmc_refs_push(succ);
         if (deadlocks) {
             // check which MDDs in deadlocks do not have a successor in this relation
-            MDD anc = lddmc_ref(lddmc_relprev(succ, next[from]->mdd, next[from]->meta, cur));
-            *deadlocks = lddmc_ref(lddmc_minus(*deadlocks, anc));
-            lddmc_deref(anc);
+            MDD anc = lddmc_relprev(succ, next[from]->dd, next[from]->meta, cur);
+            lddmc_refs_push(anc);
+            *deadlocks = lddmc_minus(*deadlocks, anc);
+            lddmc_refs_pop(1);
         }
-        MDD result = lddmc_ref(lddmc_minus(succ, visited));
-        lddmc_deref(succ);
+        MDD result = lddmc_minus(succ, visited);
+        lddmc_refs_pop(1);
         return result;
-    } else {
-        MDD deadlocks_left;
-        MDD deadlocks_right;
-        if (deadlocks) {
-            deadlocks_left = *deadlocks;
-            deadlocks_right = *deadlocks;
-        }
-
-        // Recursively calculate left+right
-        SPAWN(go_par, cur, visited, from, (len+1)/2, deadlocks ? &deadlocks_left: NULL);
-        MDD right = CALL(go_par, cur, visited, from+(len+1)/2, len/2, deadlocks ? &deadlocks_right : NULL);
-        MDD left = SYNC(go_par);
+    } else if (deadlocks != NULL) {
+        MDD deadlocks_left = *deadlocks;
+        MDD deadlocks_right = *deadlocks;
+        lddmc_refs_pushptr(&deadlocks_left);
+        lddmc_refs_pushptr(&deadlocks_right);
+
+        // Recursively compute left+right
+        lddmc_refs_spawn(SPAWN(go_par, cur, visited, from, len/2, &deadlocks_left));
+        MDD right = CALL(go_par, cur, visited, from+len/2, len-len/2, &deadlocks_right);
+        lddmc_refs_push(right);
+        MDD left = lddmc_refs_sync(SYNC(go_par));
+        lddmc_refs_push(left);
 
         // Merge results of left+right
-        MDD result = lddmc_ref(lddmc_union(left, right));
-        lddmc_deref(left);
-        lddmc_deref(right);
+        MDD result = lddmc_union(left, right);
+        lddmc_refs_pop(2);
 
-        if (deadlocks) {
-            *deadlocks = lddmc_ref(lddmc_intersect(deadlocks_left, deadlocks_right));
-            lddmc_deref(deadlocks_left);
-            lddmc_deref(deadlocks_right);
-        }
+        // Intersect deadlock sets
+        lddmc_refs_push(result);
+        *deadlocks = lddmc_intersect(deadlocks_left, deadlocks_right);
+        lddmc_refs_pop(1);
+        lddmc_refs_popptr(2);
+
+        // Return result
+        return result;
+    } else {
+        // Recursively compute left+right
+        lddmc_refs_spawn(SPAWN(go_par, cur, visited, from, len/2, NULL));
+        MDD right = CALL(go_par, cur, visited, from+len/2, len-len/2, NULL);
+        lddmc_refs_push(right);
+        MDD left = lddmc_refs_sync(SYNC(go_par));
+        lddmc_refs_push(left);
 
+        // Merge results of left+right
+        MDD result = lddmc_union(left, right);
+        lddmc_refs_pop(2);
+
+        // Return result
         return result;
     }
 }
 
-/* PAR strategy, parallel strategy (operations called in parallel *and* parallelized by Sylvan) */
+/**
+ * Implementation of the PAR strategy
+ */
 VOID_TASK_1(par, set_t, set)
 {
-    MDD visited = set->mdd;
-    MDD new = lddmc_ref(visited);
-    size_t counter = 1;
-    do {
-        char buf[32];
-        to_h(getCurrentRSS(), buf);
-        printf("Memory usage: %s\n", buf);
-        printf("Level %zu... ", counter++);
-        if (report_levels) {
-            printf("%zu states... ", (size_t)lddmc_satcount_cached(visited));
-        }
-        fflush(stdout);
-
-        // calculate successors in parallel
-        MDD cur = new;
-        MDD deadlocks = cur;
-        new = CALL(go_par, cur, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL);
-        lddmc_deref(cur);
+    /* Prepare variables */
+    MDD visited = set->dd;
+    MDD front = visited;
+    lddmc_refs_pushptr(&visited);
+    lddmc_refs_pushptr(&front);
 
+    int iteration = 1;
+    do {
         if (check_deadlocks) {
-            printf("found %zu deadlock states... ", (size_t)lddmc_satcount_cached(deadlocks));
+            // compute successors in parallel
+            MDD deadlocks = front;
+            lddmc_refs_pushptr(&deadlocks);
+            front = CALL(go_par, front, visited, 0, next_count, &deadlocks);
+            lddmc_refs_popptr(1);
+
             if (deadlocks != lddmc_false) {
+                INFO("Found %'0.0f deadlock states... ", lddmc_satcount_cached(deadlocks));
                 printf("example: ");
                 print_example(deadlocks);
-                printf("... ");
+                printf("\n");
                 check_deadlocks = 0;
             }
+        } else {
+            // compute successors in parallel
+            front = CALL(go_par, front, visited, 0, next_count, NULL);
         }
 
-        // visited = visited + new
-        MDD old_visited = visited;
-        visited = lddmc_ref(lddmc_union(visited, new));
-        lddmc_deref(old_visited);
+        // visited = visited + front
+        visited = lddmc_union(visited, front);
 
+        INFO("Level %d done", iteration);
+        if (report_levels) {
+            printf(", %'0.0f states explored", lddmc_satcount_cached(visited));
+        }
         if (report_table) {
             size_t filled, total;
             sylvan_table_usage(&filled, &total);
-            printf("done, table: %0.1f%% full (%zu nodes).\n", 100.0*(double)filled/total, filled);
-        } else {
-            printf("done.\n");
+            printf(", table: %0.1f%% full (%'zu nodes)", 100.0*(double)filled/total, filled);
         }
-    } while (new != lddmc_false);
-    lddmc_deref(new);
-    set->mdd = visited;
+        char buf[32];
+        to_h(getCurrentRSS(), buf);
+        printf(", rss=%s.\n", buf);
+        iteration++;
+    } while (front != lddmc_false);
+
+    set->dd = visited;
+    lddmc_refs_popptr(2);
 }
 
-/* Sequential version of merge-reduction */
+/**
+ * Implement sequential strategy (that performs the relprod operations one by one)
+ */
 TASK_5(MDD, go_bfs, MDD, cur, MDD, visited, size_t, from, size_t, len, MDD*, deadlocks)
 {
     if (len == 1) {
         // Calculate NEW successors (not in visited)
-        MDD succ = lddmc_ref(lddmc_relprod(cur, next[from]->mdd, next[from]->meta));
+        MDD succ = lddmc_relprod(cur, next[from]->dd, next[from]->meta);
+        lddmc_refs_push(succ);
         if (deadlocks) {
             // check which MDDs in deadlocks do not have a successor in this relation
-            MDD anc = lddmc_ref(lddmc_relprev(succ, next[from]->mdd, next[from]->meta, cur));
-            *deadlocks = lddmc_ref(lddmc_minus(*deadlocks, anc));
-            lddmc_deref(anc);
+            MDD anc = lddmc_relprev(succ, next[from]->dd, next[from]->meta, cur);
+            lddmc_refs_push(anc);
+            *deadlocks = lddmc_minus(*deadlocks, anc);
+            lddmc_refs_pop(1);
         }
-        MDD result = lddmc_ref(lddmc_minus(succ, visited));
-        lddmc_deref(succ);
+        MDD result = lddmc_minus(succ, visited);
+        lddmc_refs_pop(1);
         return result;
-    } else {
-        MDD deadlocks_left;
-        MDD deadlocks_right;
-        if (deadlocks) {
-            deadlocks_left = *deadlocks;
-            deadlocks_right = *deadlocks;
-        }
-
-        // Recursively calculate left+right
-        MDD left = CALL(go_bfs, cur, visited, from, (len+1)/2, deadlocks ? &deadlocks_left : NULL);
-        MDD right = CALL(go_bfs, cur, visited, from+(len+1)/2, len/2, deadlocks ? &deadlocks_right : NULL);
+    } else if (deadlocks != NULL) {
+        MDD deadlocks_left = *deadlocks;
+        MDD deadlocks_right = *deadlocks;
+        lddmc_refs_pushptr(&deadlocks_left);
+        lddmc_refs_pushptr(&deadlocks_right);
+
+        // Recursively compute left+right
+        MDD left = CALL(go_bfs, cur, visited, from, len/2, &deadlocks_left);
+        lddmc_refs_push(left);
+        MDD right = CALL(go_bfs, cur, visited, from+len/2, len-len/2, &deadlocks_right);
+        lddmc_refs_push(right);
 
         // Merge results of left+right
-        MDD result = lddmc_ref(lddmc_union(left, right));
-        lddmc_deref(left);
-        lddmc_deref(right);
+        MDD result = lddmc_union(left, right);
+        lddmc_refs_pop(2);
 
-        if (deadlocks) {
-            *deadlocks = lddmc_ref(lddmc_intersect(deadlocks_left, deadlocks_right));
-            lddmc_deref(deadlocks_left);
-            lddmc_deref(deadlocks_right);
-        }
+        // Intersect deadlock sets
+        lddmc_refs_push(result);
+        *deadlocks = lddmc_intersect(deadlocks_left, deadlocks_right);
+        lddmc_refs_pop(1);
+        lddmc_refs_popptr(2);
 
+        // Return result
+        return result;
+    } else {
+        // Recursively compute left+right
+        MDD left = CALL(go_bfs, cur, visited, from, len/2, NULL);
+        lddmc_refs_push(left);
+        MDD right = CALL(go_bfs, cur, visited, from+len/2, len-len/2, NULL);
+        lddmc_refs_push(right);
+
+        // Merge results of left+right
+        MDD result = lddmc_union(left, right);
+        lddmc_refs_pop(2);
+
+        // Return result
         return result;
     }
 }
@@ -387,160 +513,330 @@ TASK_5(MDD, go_bfs, MDD, cur, MDD, visited, size_t, from, size_t, len, MDD*, dea
 /* BFS strategy, sequential strategy (but operations are parallelized by Sylvan) */
 VOID_TASK_1(bfs, set_t, set)
 {
-    MDD visited = set->mdd;
-    MDD new = lddmc_ref(visited);
-    size_t counter = 1;
-    do {
-        char buf[32];
-        to_h(getCurrentRSS(), buf);
-        printf("Memory usage: %s\n", buf);
-        printf("Level %zu... ", counter++);
-        if (report_levels) {
-            printf("%zu states... ", (size_t)lddmc_satcount_cached(visited));
-        }
-        fflush(stdout);
-
-        MDD cur = new;
-        MDD deadlocks = cur;
-        new = CALL(go_bfs, cur, visited, 0, next_count, check_deadlocks ? &deadlocks : NULL);
-        lddmc_deref(cur);
+    /* Prepare variables */
+    MDD visited = set->dd;
+    MDD front = visited;
+    lddmc_refs_pushptr(&visited);
+    lddmc_refs_pushptr(&front);
 
+    int iteration = 1;
+    do {
         if (check_deadlocks) {
-            printf("found %zu deadlock states... ", (size_t)lddmc_satcount_cached(deadlocks));
+            // compute successors
+            MDD deadlocks = front;
+            lddmc_refs_pushptr(&deadlocks);
+            front = CALL(go_bfs, front, visited, 0, next_count, &deadlocks);
+            lddmc_refs_popptr(1);
+
             if (deadlocks != lddmc_false) {
+                INFO("Found %'0.0f deadlock states... ", lddmc_satcount_cached(deadlocks));
                 printf("example: ");
                 print_example(deadlocks);
-                printf("... ");
+                printf("\n");
                 check_deadlocks = 0;
             }
+        } else {
+            // compute successors
+            front = CALL(go_bfs, front, visited, 0, next_count, NULL);
         }
 
-        // visited = visited + new
-        MDD old_visited = visited;
-        visited = lddmc_ref(lddmc_union(visited, new));
-        lddmc_deref(old_visited);
+        // visited = visited + front
+        visited = lddmc_union(visited, front);
 
+        INFO("Level %d done", iteration);
+        if (report_levels) {
+            printf(", %'0.0f states explored", lddmc_satcount_cached(visited));
+        }
         if (report_table) {
             size_t filled, total;
             sylvan_table_usage(&filled, &total);
-            printf("done, table: %0.1f%% full (%zu nodes).\n", 100.0*(double)filled/total, filled);
-        } else {
-            printf("done.\n");
+            printf(", table: %0.1f%% full (%'zu nodes)", 100.0*(double)filled/total, filled);
         }
-    } while (new != lddmc_false);
-    lddmc_deref(new);
-    set->mdd = visited;
+        char buf[32];
+        to_h(getCurrentRSS(), buf);
+        printf(", rss=%s.\n", buf);
+        iteration++;
+    } while (front != lddmc_false);
+
+    set->dd = visited;
+    lddmc_refs_popptr(2);
 }
 
-/* Obtain current wallclock time */
-static double
-wctime()
+/**
+ * Implementation of (parallel) saturation
+ * (assumes relations are ordered on first variable)
+ */
+TASK_3(MDD, go_sat, MDD, set, int, idx, int, depth)
 {
-    struct timeval tv;
-    gettimeofday(&tv, NULL);
-    return (tv.tv_sec + 1E-6 * tv.tv_usec);
+    /* Terminal cases */
+    if (set == lddmc_false) return lddmc_false;
+    if (idx == next_count) return set;
+
+    /* Consult the cache */
+    MDD result;
+    const MDD _set = set;
+    if (cache_get3(201LL<<40, _set, idx, 0, &result)) return result;
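+    // (201LL<<40 puts an ad-hoc operation identifier in the upper bits of the
+    //  cache key, so these entries should not collide with Sylvan's built-in
+    //  operations or with the 200LL<<40 id used in mc.c.)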
+    lddmc_refs_pushptr(&_set);
+
+    /**
+     * Possible improvement: cache more things (like intermediate results?)
+     *   and chain-apply more of the current level before going deeper?
+     */
+
+    /* Check if the relation should be applied */
+    const int var = next[idx]->firstvar;
+    assert(depth <= var);
+    if (depth == var) {
+        /* Count the number of relations starting here */
+        int n = 1;
+        while ((idx + n) < next_count && var == next[idx + n]->firstvar) n++;
+        /*
+         * Compute until fixpoint:
+         * - SAT deeper
+         * - chain-apply all current level once
+         */
+        MDD prev = lddmc_false;
+        lddmc_refs_pushptr(&set);
+        lddmc_refs_pushptr(&prev);
+        while (prev != set) {
+            prev = set;
+            // SAT deeper
+            set = CALL(go_sat, set, idx + n, depth);
+            // chain-apply all current level once
+            for (int i=0; i<n; i++) {
+                set = lddmc_relprod_union(set, next[idx+i]->dd, next[idx+i]->topmeta, set);
+            }
+        }
+        lddmc_refs_popptr(2);
+        result = set;
+    } else {
+        /* Recursive computation */
+        lddmc_refs_spawn(SPAWN(go_sat, lddmc_getright(set), idx, depth));
+        MDD down = lddmc_refs_push(CALL(go_sat, lddmc_getdown(set), idx, depth+1));
+        MDD right = lddmc_refs_sync(SYNC(go_sat));
+        lddmc_refs_pop(1);
+        result = lddmc_makenode(lddmc_getvalue(set), down, right);
+    }
+
+    /* Store in cache */
+    cache_put3(201LL<<40, _set, idx, 0, result);
+    lddmc_refs_popptr(1);
+    return result;
+}
+
+/**
+ * Wrapper for the Saturation strategy
+ */
+VOID_TASK_1(sat, set_t, set)
+{
+    set->dd = CALL(go_sat, set->dd, 0, 0);
+}
+
+/**
+ * Implementation of the Chaining strategy (does not support deadlock detection)
+ */
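+// (Chaining applies each relation to the frontier and immediately unions the
+//  result back in, so successors found by earlier relations in a level can be
+//  expanded by later relations within the same level.)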
+VOID_TASK_1(chaining, set_t, set)
+{
+    MDD visited = set->dd;
+    MDD front = visited;
+    MDD succ = sylvan_false;
+
+    lddmc_refs_pushptr(&visited);
+    lddmc_refs_pushptr(&front);
+    lddmc_refs_pushptr(&succ);
+
+    int iteration = 1;
+    do {
+        // chain-apply each transition relation to the frontier (each relprod is itself a parallel operation)
+        for (int i=0; i<next_count; i++) {
+            succ = lddmc_relprod(front, next[i]->dd, next[i]->meta);
+            front = lddmc_union(front, succ);
+            succ = lddmc_false; // reset, for gc
+        }
+
+        // front = front - visited
+        // visited = visited + front
+        front = lddmc_minus(front, visited);
+        visited = lddmc_union(visited, front);
+
+        INFO("Level %d done", iteration);
+        if (report_levels) {
+            printf(", %'0.0f states explored", lddmc_satcount_cached(visited));
+        }
+        if (report_table) {
+            size_t filled, total;
+            sylvan_table_usage(&filled, &total);
+            printf(", table: %0.1f%% full (%'zu nodes)", 100.0*(double)filled/total, filled);
+        }
+        char buf[32];
+        to_h(getCurrentRSS(), buf);
+        printf(", rss=%s.\n", buf);
+        iteration++;
+    } while (front != lddmc_false);
+
+    set->dd = visited;
+    lddmc_refs_popptr(3);
+}
+
+VOID_TASK_0(gc_start)
+{
+    char buf[32];
+    to_h(getCurrentRSS(), buf);
+    INFO("(GC) Starting garbage collection... (rss: %s)\n", buf);
+}
+
+VOID_TASK_0(gc_end)
+{
+    char buf[32];
+    to_h(getCurrentRSS(), buf);
+    INFO("(GC) Garbage collection done.       (rss: %s)\n", buf);
 }
 
 int
 main(int argc, char **argv)
 {
+    /**
+     * Parse command line, set locale, set startup time for INFO messages.
+     */
     argp_parse(&argp, argc, argv, 0, 0, 0);
+    setlocale(LC_NUMERIC, "en_US.utf-8");
+    t_start = wctime();
+
+    /**
+     * Initialize Lace.
+     *
+     * First: setup with given number of workers (0 for autodetect) and some large size task queue.
+     * Second: start all worker threads with default settings.
+     * Third: setup local variables using the LACE_ME macro.
+     */
+    lace_init(workers, 1000000);
+    lace_startup(0, NULL, NULL);
+    LACE_ME;
+
+    /**
+     * Initialize Sylvan.
+     *
+     * First: set memory limits
+     * - 2 GB memory, nodes table twice as big as cache, initial size halved 6x
+     *   (that means it takes 6 garbage collections to get to the maximum nodes&cache size)
+     * Second: initialize package and subpackages
+     * Third: add hooks to report garbage collection
+     */
+    sylvan_set_limits(2LL<<30, 1, 6);
+    sylvan_init_package();
+    sylvan_init_ldd();
+    sylvan_gc_hook_pregc(TASK(gc_start));
+    sylvan_gc_hook_postgc(TASK(gc_end));
+
+    /**
+     * Read the model from file
+     */
 
     FILE *f = fopen(model_filename, "r");
     if (f == NULL) {
-        fprintf(stderr, "Cannot open file '%s'!\n", model_filename);
+        Abort("Cannot open file '%s'!\n", model_filename);
         return -1;
     }
 
-    // Init Lace
-    lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue
-    lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup
-
-    // Init Sylvan LDDmc
-    // Nodes table size: 24 bytes * 2**N_nodes
-    // Cache table size: 36 bytes * 2**N_cache
-    // With: N_nodes=25, N_cache=24: 1.3 GB memory
-    sylvan_set_sizes(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
-    sylvan_init_package();
-    sylvan_init_ldd();
-    sylvan_init_mtbdd();
-
-    // Read and report domain info (integers per vector and bits per integer)
-    if (fread(&vector_size, sizeof(size_t), 1, f) != 1) Abort("Invalid input file!\n");
-
-    printf("Vector size: %zu\n", vector_size);
+    /* Read domain data */
+    if (fread(&vector_size, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
 
-    // Read initial state
-    printf("Loading initial state... ");
-    fflush(stdout);
+    /* Read initial state */
     set_t initial = set_load(f);
-    set_t states = set_clone(initial);
-    printf("done.\n");
 
-    // Read transitions
+    /* Read number of transition relations */
     if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
     next = (rel_t*)malloc(sizeof(rel_t) * next_count);
 
-    printf("Loading transition relations... ");
-    fflush(stdout);
-    int i;
-    for (i=0; i<next_count; i++) {
-        next[i] = rel_load(f);
-        printf("%d, ", i);
-        fflush(stdout);
-    }
+    /* Read transition relations */
+    for (int i=0; i<next_count; i++) next[i] = rel_load_proj(f);
+    for (int i=0; i<next_count; i++) rel_load(f, next[i]);
+
+    /* We ignore the reachable states and action labels that are stored after the relations */
+
+    /* Close the file */
     fclose(f);
-    printf("done.\n");
-
-    // Report statistics
-    printf("Read file '%s'\n", argv[1]);
-    printf("%zu integers per state, %d transition groups\n", vector_size, next_count);
-    printf("MDD nodes:\n");
-    printf("Initial states: %zu MDD nodes\n", lddmc_nodecount(states->mdd));
-    for (i=0; i<next_count; i++) {
-        printf("Transition %d: %zu MDD nodes\n", i, lddmc_nodecount(next[i]->mdd));
+
+    /**
+     * Pre-processing and some statistics reporting
+     */
+
+    if (strategy == 2 || strategy == 3) {
+        // for SAT and CHAINING, sort the transition relations (gnome sort because I like gnomes)
+        int i = 1, j = 2;
+        rel_t t;
+        while (i < next_count) {
+            rel_t *p = &next[i], *q = p-1;
+            if ((*q)->firstvar > (*p)->firstvar) {
+                t = *q;
+                *q = *p;
+                *p = t;
+                if (--i) continue;
+            }
+            i = j++;
+        }
     }
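+    // Sorting on firstvar is required because go_sat assumes the relations are
+    // ordered on their first variable (see its header comment above).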
 
+    INFO("Read file '%s'\n", model_filename);
+    INFO("%d integers per state, %d transition groups\n", vector_size, next_count);
+
     if (print_transition_matrix) {
-        for (i=0; i<next_count; i++) {
+        for (int i=0; i<next_count; i++) {
+            INFO("");
             print_matrix(vector_size, next[i]->meta);
             printf(" (%d)\n", get_first(next[i]->meta));
         }
     }
 
-    LACE_ME;
+    set_t states = set_clone(initial);
 
 #ifdef HAVE_PROFILER
     if (profile_filename != NULL) ProfilerStart(profile_filename);
 #endif
-    if (strategy == 1) {
+
+    if (strategy == 0) {
+        double t1 = wctime();
+        CALL(bfs, states);
+        double t2 = wctime();
+        INFO("BFS Time: %f\n", t2-t1);
+    } else if (strategy == 1) {
         double t1 = wctime();
         CALL(par, states);
         double t2 = wctime();
-        printf("PAR Time: %f\n", t2-t1);
-    } else {
+        INFO("PAR Time: %f\n", t2-t1);
+    } else if (strategy == 2) {
         double t1 = wctime();
-        CALL(bfs, states);
+        CALL(sat, states);
         double t2 = wctime();
-        printf("BFS Time: %f\n", t2-t1);
+        INFO("SAT Time: %f\n", t2-t1);
+    } else if (strategy == 3) {
+        double t1 = wctime();
+        CALL(chaining, states);
+        double t2 = wctime();
+        INFO("CHAINING Time: %f\n", t2-t1);
+    } else {
+        Abort("Invalid strategy set?!\n");
     }
+
 #ifdef HAVE_PROFILER
     if (profile_filename != NULL) ProfilerStop();
 #endif
 
     // Now we just have states
-    printf("Final states: %zu states\n", (size_t)lddmc_satcount_cached(states->mdd));
-    printf("Final states: %zu MDD nodes\n", lddmc_nodecount(states->mdd));
+    INFO("Final states: %'0.0f states\n", lddmc_satcount_cached(states->dd));
+    if (report_nodes) {
+        INFO("Final states: %'zu MDD nodes\n", lddmc_nodecount(states->dd));
+    }
 
     if (out_filename != NULL) {
-        printf("Writing to %s.\n", out_filename);
+        INFO("Writing to %s.\n", out_filename);
 
         // Create LDD file
         FILE *f = fopen(out_filename, "w");
         lddmc_serialize_reset();
 
         // Write domain...
-        fwrite(&vector_size, sizeof(size_t), 1, f);
+        fwrite(&vector_size, sizeof(int), 1, f);
 
         // Write initial state...
         set_save(f, initial);
@@ -549,9 +845,8 @@ main(int argc, char **argv)
         fwrite(&next_count, sizeof(int), 1, f);
 
         // Write transitions
-        for (int i=0; i<next_count; i++) {
-            rel_save(f, next[i]);
-        }
+        for (int i=0; i<next_count; i++) rel_save_proj(f, next[i]);
+        for (int i=0; i<next_count; i++) rel_save(f, next[i]);
 
         // Write reachable states
         int has_reachable = 1;
@@ -562,6 +857,7 @@ main(int argc, char **argv)
         fclose(f);
     }
 
+    print_memory_usage();
     sylvan_stats_report(stdout);
 
     return 0;
diff --git a/resources/3rdparty/sylvan/examples/mc.c b/resources/3rdparty/sylvan/examples/mc.c
index 3db987bab..3578ecb37 100755
--- a/resources/3rdparty/sylvan/examples/mc.c
+++ b/resources/3rdparty/sylvan/examples/mc.c
@@ -10,15 +10,17 @@
 #include <gperftools/profiler.h>
 #endif
 
+#include <getrss.h>
+
 #include <sylvan.h>
-#include <sylvan_table.h>
+#include <sylvan_int.h>
 
-/* Configuration */
+/* Configuration (via argp) */
 static int report_levels = 0; // report states at end of every level
 static int report_table = 0; // report table size at end of every level
 static int report_nodes = 0; // report number of nodes of BDDs
-static int strategy = 1; // set to 1 = use PAR strategy; set to 0 = use BFS strategy
-static int check_deadlocks = 0; // set to 1 to check for deadlocks
+static int strategy = 2; // 0 = BFS, 1 = PAR, 2 = SAT, 3 = CHAINING
+static int check_deadlocks = 0; // set to 1 to check for deadlocks on-the-fly (only bfs/par)
 static int merge_relations = 0; // merge relations to 1 relation
 static int print_transition_matrix = 0; // print transition relation matrix
 static int workers = 0; // autodetect
@@ -31,7 +33,7 @@ static char* profile_filename = NULL; // filename for profiling
 static struct argp_option options[] =
 {
     {"workers", 'w', "<workers>", 0, "Number of workers (default=0: autodetect)", 0},
-    {"strategy", 's', "<bfs|par|sat>", 0, "Strategy for reachability (default=par)", 0},
+    {"strategy", 's', "<bfs|par|sat|chaining>", 0, "Strategy for reachability (default=sat)", 0},
 #ifdef HAVE_PROFILER
     {"profiler", 'p', "<filename>", 0, "Filename for profiling", 0},
 #endif
@@ -54,6 +56,7 @@ parse_opt(int key, char *arg, struct argp_state *state)
         if (strcmp(arg, "bfs")==0) strategy = 0;
         else if (strcmp(arg, "par")==0) strategy = 1;
         else if (strcmp(arg, "sat")==0) strategy = 2;
+        else if (strcmp(arg, "chaining")==0) strategy = 3;
         else argp_usage(state);
         break;
     case 4:
@@ -93,7 +96,9 @@ parse_opt(int key, char *arg, struct argp_state *state)
 }
 static struct argp argp = { options, parse_opt, "<model>", 0, 0, 0, 0 };
 
-/* Globals */
+/**
+ * Types (set and relation)
+ */
 typedef struct set
 {
     BDD bdd;
@@ -104,15 +109,19 @@ typedef struct relation
 {
     BDD bdd;
     BDD variables; // all variables in the relation (used by relprod)
+    int r_k, w_k, *r_proj, *w_proj;
 } *rel_t;
 
-static int vector_size; // size of vector
-static int statebits, actionbits; // number of bits for state, number of bits for action
-static int bits_per_integer; // number of bits per integer in the vector
+static int vectorsize; // size of vector in integers
+static int *statebits; // number of bits for each state integer
+static int actionbits; // number of bits for action label
+static int totalbits; // total number of bits
 static int next_count; // number of partitions of the transition relation
 static rel_t *next; // each partition of the transition relation
 
-/* Obtain current wallclock time */
+/**
+ * Obtain current wallclock time
+ */
 static double
 wctime()
 {
@@ -123,66 +132,171 @@ wctime()
 
 static double t_start;
 #define INFO(s, ...) fprintf(stdout, "[% 8.2f] " s, wctime()-t_start, ##__VA_ARGS__)
-#define Abort(...) { fprintf(stderr, __VA_ARGS__); exit(-1); }
+#define Abort(...) { fprintf(stderr, __VA_ARGS__); fprintf(stderr, "Abort at line %d!\n", __LINE__); exit(-1); }
 
-/* Load a set from file */
-#define set_load(f) CALL(set_load, f)
-TASK_1(set_t, set_load, FILE*, f)
+static char*
+to_h(double size, char *buf)
 {
-    sylvan_serialize_fromfile(f);
+    const char* units[] = {"B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"};
+    int i = 0;
+    for (;size>1024;size/=1024) i++;
+    sprintf(buf, "%.*f %s", i, size, units[i]);
+    return buf;
+}
 
-    size_t set_bdd, set_vector_size, set_state_vars;
-    if ((fread(&set_bdd, sizeof(size_t), 1, f) != 1) ||
-        (fread(&set_vector_size, sizeof(size_t), 1, f) != 1) ||
-        (fread(&set_state_vars, sizeof(size_t), 1, f) != 1)) {
-        Abort("Invalid input file!\n");
-    }
+static void
+print_memory_usage(void)
+{
+    char buf[32];
+    to_h(getCurrentRSS(), buf);
+    INFO("Memory usage: %s\n", buf);
+}
 
+/**
+ * Load a set from file
+ * The expected binary format:
+ * - int k : projection size, or -1 for full state
+ * - int[k] proj : k integers specifying the variables of the projection
+ * - MTBDD[1] BDD (mtbdd binary format)
+ */
+#define set_load(f) CALL(set_load, f)
+TASK_1(set_t, set_load, FILE*, f)
+{
+    // allocate set
     set_t set = (set_t)malloc(sizeof(struct set));
-    set->bdd = sylvan_serialize_get_reversed(set_bdd);
-    set->variables = sylvan_support(sylvan_serialize_get_reversed(set_state_vars));
-
+    set->bdd = sylvan_false;
+    set->variables = sylvan_true;
     sylvan_protect(&set->bdd);
     sylvan_protect(&set->variables);
 
+    // read k
+    int k;
+    if (fread(&k, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
+
+    if (k == -1) {
+        // create variables for a full state vector
+        uint32_t vars[totalbits];
+        for (int i=0; i<totalbits; i++) vars[i] = 2*i;
+        set->variables = sylvan_set_fromarray(vars, totalbits);
+    } else {
+        // read proj
+        int proj[k];
+        if (fread(proj, sizeof(int), k, f) != (size_t)k) Abort("Invalid input file!\n");
+        // create variables for a short/projected state vector
+        uint32_t vars[totalbits];
+        uint32_t cv = 0;
+        int j = 0, n = 0;
+        for (int i=0; i<vectorsize && j<k; i++) {
+            if (i == proj[j]) {
+                for (int x=0; x<statebits[i]; x++) vars[n++] = (cv += 2) - 2;
+                j++;
+            } else {
+                cv += 2 * statebits[i];
+            }
+        }
+        set->variables = sylvan_set_fromarray(vars, n);
+    }
+
+    // read bdd
+    if (mtbdd_reader_frombinary(f, &set->bdd, 1) != 0) Abort("Invalid input file!\n");
+
     return set;
 }
 
-/* Load a relation from file */
-#define rel_load(f) CALL(rel_load, f)
-TASK_1(rel_t, rel_load, FILE*, f)
+/**
+ * Load a relation from file
+ * This part just reads the r_k, w_k, r_proj and w_proj variables.
+ */
+#define rel_load_proj(f) CALL(rel_load_proj, f)
+TASK_1(rel_t, rel_load_proj, FILE*, f)
 {
-    sylvan_serialize_fromfile(f);
-
-    size_t rel_bdd, rel_vars;
-    if ((fread(&rel_bdd, sizeof(size_t), 1, f) != 1) ||
-        (fread(&rel_vars, sizeof(size_t), 1, f) != 1)) {
-        Abort("Invalid input file!\n");
-    }
-
     rel_t rel = (rel_t)malloc(sizeof(struct relation));
-    rel->bdd = sylvan_serialize_get_reversed(rel_bdd);
-    rel->variables = sylvan_support(sylvan_serialize_get_reversed(rel_vars));
-
+    int r_k, w_k;
+    if (fread(&r_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
+    if (fread(&w_k, sizeof(int), 1, f) != 1) Abort("Invalid file format.");
+    rel->r_k = r_k;
+    rel->w_k = w_k;
+    int *r_proj = (int*)malloc(sizeof(int[r_k]));
+    int *w_proj = (int*)malloc(sizeof(int[w_k]));
+    if (fread(r_proj, sizeof(int), r_k, f) != (size_t)r_k) Abort("Invalid file format.");
+    if (fread(w_proj, sizeof(int), w_k, f) != (size_t)w_k) Abort("Invalid file format.");
+    rel->r_proj = r_proj;
+    rel->w_proj = w_proj;
+
+    rel->bdd = sylvan_false;
     sylvan_protect(&rel->bdd);
+
+    /* Compute a_proj the union of r_proj and w_proj, and a_k the length of a_proj */
+    int a_proj[r_k+w_k];
+    int r_i = 0, w_i = 0, a_i = 0;
+    for (;r_i < r_k || w_i < w_k;) {
+        if (r_i < r_k && w_i < w_k) {
+            if (r_proj[r_i] < w_proj[w_i]) {
+                a_proj[a_i++] = r_proj[r_i++];
+            } else if (r_proj[r_i] > w_proj[w_i]) {
+                a_proj[a_i++] = w_proj[w_i++];
+            } else /* r_proj[r_i] == w_proj[w_i] */ {
+                a_proj[a_i++] = w_proj[w_i++];
+                r_i++;
+            }
+        } else if (r_i < r_k) {
+            a_proj[a_i++] = r_proj[r_i++];
+        } else if (w_i < w_k) {
+            a_proj[a_i++] = w_proj[w_i++];
+        }
+    }
+    const int a_k = a_i;
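+    /* Example (assuming r_proj and w_proj are sorted ascending, as the merge
+     * above presumes): r_proj = {0,2} and w_proj = {2,3} give
+     * a_proj = {0,2,3} and a_k = 3. */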
+
+    /* Compute all_variables, which are all variables the transition relation is defined on */
+    uint32_t all_vars[totalbits * 2];
+    uint32_t curvar = 0; // start with variable 0
+    int i=0, j=0, n=0;
+    for (; i<vectorsize && j<a_k; i++) {
+        if (i == a_proj[j]) {
+            for (int k=0; k<statebits[i]; k++) {
+                all_vars[n++] = curvar;
+                all_vars[n++] = curvar + 1;
+                curvar += 2;
+            }
+            j++;
+        } else {
+            curvar += 2 * statebits[i];
+        }
+    }
+    rel->variables = sylvan_set_fromarray(all_vars, n);
     sylvan_protect(&rel->variables);
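+    /* With this numbering, state bit b of the full vector uses unprimed BDD
+     * variable 2*b and primed (next-state) variable 2*b+1, matching the
+     * interleaving built into all_vars above. */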
 
     return rel;
 }
 
+/**
+ * Load a relation from file
+ * This part just reads the bdd of the relation
+ */
+#define rel_load(rel, f) CALL(rel_load, rel, f)
+VOID_TASK_2(rel_load, rel_t, rel, FILE*, f)
+{
+    if (mtbdd_reader_frombinary(f, &rel->bdd, 1) != 0) Abort("Invalid file format!\n");
+}
+
+/**
+ * Print a single example of a set to stdout
+ * Assumption: the example is a full vector and variables contains all state variables...
+ */
 #define print_example(example, variables) CALL(print_example, example, variables)
 VOID_TASK_2(print_example, BDD, example, BDDSET, variables)
 {
-    uint8_t str[vector_size * bits_per_integer];
+    uint8_t str[totalbits];
 
     if (example != sylvan_false) {
         sylvan_sat_one(example, variables, str);
+        int x=0;
         printf("[");
-        for (int i=0; i<vector_size; i++) {
+        for (int i=0; i<vectorsize; i++) {
             uint32_t res = 0;
-            for (int j=0; j<bits_per_integer; j++) {
-                if (str[bits_per_integer*i+j] == 1) res++;
-                res<<=1;
+            for (int j=0; j<statebits[i]; j++) {
+                if (str[x++] == 1) res++;
+                res <<= 1;
             }
             if (i>0) printf(",");
             printf("%" PRIu32, res);
@@ -191,7 +305,84 @@ VOID_TASK_2(print_example, BDD, example, BDDSET, variables)
     }
 }
 
-/* Straight-forward implementation of parallel reduction */
+/**
+ * Implementation of (parallel) saturation
+ * (assumes relations are ordered on first variable)
+ */
+TASK_2(BDD, go_sat, BDD, set, int, idx)
+{
+    /* Terminal cases */
+    if (set == sylvan_false) return sylvan_false;
+    if (idx == next_count) return set;
+
+    /* Consult the cache */
+    BDD result;
+    const BDD _set = set;
+    if (cache_get3(200LL<<40, _set, idx, 0, &result)) return result;
+    mtbdd_refs_pushptr(&_set);
+
+    /**
+     * Possible improvement: cache more things (like intermediate results?)
+     *   and chain-apply more of the current level before going deeper?
+     */
+
+    /* Check if the relation should be applied */
+    const uint32_t var = sylvan_var(next[idx]->variables);
+    if (set == sylvan_true || var <= sylvan_var(set)) {
+        /* Count the number of relations starting here */
+        int count = idx+1;
+        while (count < next_count && var == sylvan_var(next[count]->variables)) count++;
+        count -= idx;
+        /*
+         * Compute until fixpoint:
+         * - SAT deeper
+         * - chain-apply all current level once
+         */
+        BDD prev = sylvan_false;
+        BDD step = sylvan_false;
+        mtbdd_refs_pushptr(&set);
+        mtbdd_refs_pushptr(&prev);
+        mtbdd_refs_pushptr(&step);
+        while (prev != set) {
+            prev = set;
+            // SAT deeper
+            set = CALL(go_sat, set, idx+count);
+            // chain-apply all current level once
+            for (int i=0;i<count;i++) {
+                step = sylvan_relnext(set, next[idx+i]->bdd, next[idx+i]->variables);
+                set = sylvan_or(set, step);
+                step = sylvan_false; // unset, for gc
+            }
+        }
+        mtbdd_refs_popptr(3);
+        result = set;
+    } else {
+        /* Recursive computation */
+        mtbdd_refs_spawn(SPAWN(go_sat, sylvan_low(set), idx));
+        BDD high = mtbdd_refs_push(CALL(go_sat, sylvan_high(set), idx));
+        BDD low = mtbdd_refs_sync(SYNC(go_sat));
+        mtbdd_refs_pop(1);
+        result = sylvan_makenode(sylvan_var(set), low, high);
+    }
+
+    /* Store in cache */
+    cache_put3(200LL<<40, _set, idx, 0, result);
+    mtbdd_refs_popptr(1);
+    return result;
+}
+
+/**
+ * Wrapper for the Saturation strategy
+ */
+VOID_TASK_1(sat, set_t, set)
+{
+    set->bdd = CALL(go_sat, set->bdd, 0);
+}
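Both go_sat and the chaining strategy further below rely on the same least-fixpoint pattern: keep OR-ing in the image of the current set until nothing new is added. A minimal stand-alone sketch of that pattern (plain C on a toy bitmask "set", not part of this patch) looks like this:

#include <stdint.h>

/* Toy illustration of the fixpoint loop used above: 'step' is any monotone
 * successor function; we stop once applying it adds no new elements. */
typedef uint32_t toyset_t;

static toyset_t fixpoint(toyset_t s, toyset_t (*step)(toyset_t))
{
    toyset_t prev;
    do {
        prev = s;
        s |= step(s);   /* like: set = sylvan_or(set, sylvan_relnext(set, ...)) */
    } while (s != prev);
    return s;
}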
+
+/**
+ * Implement parallel strategy (that performs the relnext operations in parallel)
+ * This function does one level...
+ */
 TASK_5(BDD, go_par, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, deadlocks)
 {
     if (len == 1) {
@@ -239,7 +430,9 @@ TASK_5(BDD, go_par, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, dea
     }
 }
 
-/* PAR strategy, parallel strategy (operations called in parallel *and* parallelized by Sylvan) */
+/**
+ * Implementation of the PAR strategy
+ */
 VOID_TASK_1(par, set_t, set)
 {
     BDD visited = set->bdd;
@@ -301,7 +494,10 @@ VOID_TASK_1(par, set_t, set)
     sylvan_unprotect(&deadlocks);
 }
 
-/* Sequential version of merge-reduction */
+/**
+ * Implement sequential strategy (that performs the relnext operations one by one)
+ * This function does one level...
+ */
 TASK_5(BDD, go_bfs, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, deadlocks)
 {
     if (len == 1) {
@@ -350,7 +546,9 @@ TASK_5(BDD, go_bfs, BDD, cur, BDD, visited, size_t, from, size_t, len, BDD*, dea
     }
 }
 
-/* BFS strategy, sequential strategy (but operations are parallelized by Sylvan) */
+/**
+ * Implementation of the BFS strategy
+ */
 VOID_TASK_1(bfs, set_t, set)
 {
     BDD visited = set->bdd;
@@ -412,26 +610,77 @@ VOID_TASK_1(bfs, set_t, set)
     sylvan_unprotect(&deadlocks);
 }
 
+/**
+ * Implementation of the Chaining strategy (does not support deadlock detection)
+ */
+VOID_TASK_1(chaining, set_t, set)
+{
+    BDD visited = set->bdd;
+    BDD next_level = visited;
+    BDD succ = sylvan_false;
+
+    bdd_refs_pushptr(&visited);
+    bdd_refs_pushptr(&next_level);
+    bdd_refs_pushptr(&succ);
+
+    int iteration = 1;
+    do {
+        // chain-apply the relations one by one, accumulating successors into next_level
+        for (int i=0; i<next_count; i++) {
+            succ = sylvan_relnext(next_level, next[i]->bdd, next[i]->variables);
+            next_level = sylvan_or(next_level, succ);
+            succ = sylvan_false; // reset, for gc
+        }
+
+        // new = new - visited
+        // visited = visited + new
+        next_level = sylvan_diff(next_level, visited);
+        visited = sylvan_or(visited, next_level);
+
+        if (report_table && report_levels) {
+            size_t filled, total;
+            sylvan_table_usage(&filled, &total);
+            INFO("Level %d done, %'0.0f states explored, table: %0.1f%% full (%'zu nodes)\n",
+                iteration, sylvan_satcount(visited, set->variables),
+                100.0*(double)filled/total, filled);
+        } else if (report_table) {
+            size_t filled, total;
+            sylvan_table_usage(&filled, &total);
+            INFO("Level %d done, table: %0.1f%% full (%'zu nodes)\n",
+                iteration,
+                100.0*(double)filled/total, filled);
+        } else if (report_levels) {
+            INFO("Level %d done, %'0.0f states explored\n", iteration, sylvan_satcount(visited, set->variables));
+        } else {
+            INFO("Level %d done\n", iteration);
+        }
+        iteration++;
+    } while (next_level != sylvan_false);
+
+    set->bdd = visited;
+    bdd_refs_popptr(3);
+}
+
 /**
  * Extend a transition relation to a larger domain (using s=s')
  */
 #define extend_relation(rel, vars) CALL(extend_relation, rel, vars)
-TASK_2(BDD, extend_relation, BDD, relation, BDDSET, variables)
+TASK_2(BDD, extend_relation, MTBDD, relation, MTBDD, variables)
 {
     /* first determine which state BDD variables are in rel */
-    int has[statebits];
-    for (int i=0; i<statebits; i++) has[i] = 0;
-    BDDSET s = variables;
+    int has[totalbits];
+    for (int i=0; i<totalbits; i++) has[i] = 0;
+    MTBDD s = variables;
     while (!sylvan_set_isempty(s)) {
-        BDDVAR v = sylvan_set_first(s);
-        if (v/2 >= (unsigned)statebits) break; // action labels
+        uint32_t v = sylvan_set_first(s);
+        if (v/2 >= (unsigned)totalbits) break; // action labels
         has[v/2] = 1;
         s = sylvan_set_next(s);
     }
 
     /* create "s=s'" for all variables not in rel */
     BDD eq = sylvan_true;
-    for (int i=statebits-1; i>=0; i--) {
+    for (int i=totalbits-1; i>=0; i--) {
         if (has[i]) continue;
         BDD low = sylvan_makenode(2*i+1, eq, sylvan_false);
         bdd_refs_push(low);
@@ -463,148 +712,209 @@ TASK_2(BDD, big_union, int, first, int, count)
     return result;
 }
 
+/**
+ * Print one row of the transition matrix (for vars)
+ */
 static void
-print_matrix(BDD vars)
+print_matrix_row(rel_t rel)
 {
-    for (int i=0; i<vector_size; i++) {
-        if (sylvan_set_isempty(vars)) {
-            fprintf(stdout, "-");
-        } else {
-            BDDVAR next_s = 2*((i+1)*bits_per_integer);
-            if (sylvan_set_first(vars) < next_s) {
-                fprintf(stdout, "+");
-                for (;;) {
-                    vars = sylvan_set_next(vars);
-                    if (sylvan_set_isempty(vars)) break;
-                    if (sylvan_set_first(vars) >= next_s) break;
-                }
-            } else {
-                fprintf(stdout, "-");
-            }
+    int r_i = 0, w_i = 0;
+    for (int i=0; i<vectorsize; i++) {
+        int s = 0;
+        if (r_i < rel->r_k && rel->r_proj[r_i] == i) {
+            s |= 1;
+            r_i++;
+        }
+        if (w_i < rel->w_k && rel->w_proj[w_i] == i) {
+            s |= 2;
+            w_i++;
         }
+        if (s == 0) fprintf(stdout, "-");
+        else if (s == 1) fprintf(stdout, "r");
+        else if (s == 2) fprintf(stdout, "w");
+        else if (s == 3) fprintf(stdout, "+");
     }
 }
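For instance, in a four-variable model a relation that only reads state variable 0 and both reads and writes variable 2 is printed as the row "r-+-".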
 
 VOID_TASK_0(gc_start)
 {
-    INFO("(GC) Starting garbage collection...\n");
+    char buf[32];
+    to_h(getCurrentRSS(), buf);
+    INFO("(GC) Starting garbage collection... (rss: %s)\n", buf);
 }
 
 VOID_TASK_0(gc_end)
 {
-    INFO("(GC) Garbage collection done.\n");
+    char buf[32];
+    to_h(getCurrentRSS(), buf);
+    INFO("(GC) Garbage collection done.       (rss: %s)\n", buf);
 }
 
 int
 main(int argc, char **argv)
 {
+    /**
+     * Parse command line, set locale, set startup time for INFO messages.
+     */
     argp_parse(&argp, argc, argv, 0, 0, 0);
     setlocale(LC_NUMERIC, "en_US.utf-8");
     t_start = wctime();
 
-    FILE *f = fopen(model_filename, "r");
-    if (f == NULL) {
-        fprintf(stderr, "Cannot open file '%s'!\n", model_filename);
-        return -1;
-    }
-
-    // Init Lace
-    lace_init(workers, 1000000); // auto-detect number of workers, use a 1,000,000 size task queue
-    lace_startup(0, NULL, NULL); // auto-detect program stack, do not use a callback for startup
-
+    /**
+     * Initialize Lace.
+     *
+     * First: setup with given number of workers (0 for autodetect) and some large size task queue.
+     * Second: start all worker threads with default settings.
+     * Third: setup local variables using the LACE_ME macro.
+     */
+    lace_init(workers, 1000000);
+    lace_startup(0, NULL, NULL);
     LACE_ME;
 
-    // Init Sylvan
-    // Nodes table size: 24 bytes * 2**N_nodes
-    // Cache table size: 36 bytes * 2**N_cache
-    // With: N_nodes=25, N_cache=24: 1.3 GB memory
-    sylvan_set_sizes(1LL<<21, 1LL<<27, 1LL<<20, 1LL<<26);
+    /**
+     * Initialize Sylvan.
+     *
+     * First: set memory limits
+     * - 2 GB memory, nodes table twice as big as cache, initial size halved 6x
+     *   (that means it takes 6 garbage collections to get to the maximum nodes&cache size)
+     * Second: initialize package and subpackages
+     * Third: add hooks to report garbage collection
+     */
+    sylvan_set_limits(2LL<<30, 1, 6);
     sylvan_init_package();
-    sylvan_set_granularity(6); // granularity 6 is decent default value - 1 means "use cache for every operation"
     sylvan_init_bdd();
     sylvan_gc_hook_pregc(TASK(gc_start));
     sylvan_gc_hook_postgc(TASK(gc_end));
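For illustration only (same sylvan_set_limits signature as used above, just different numbers), a smaller configuration could look like the following sketch:

/* Illustrative variant of the call above (not part of this patch):
 * 1 GB memory cap, nodes table twice as big as the cache, initial sizes
 * halved 4x, so it takes 4 garbage collections to reach the maximum sizes. */
sylvan_set_limits(1LL<<30, 1, 4);
sylvan_init_package();
sylvan_init_bdd();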
 
-    /* Load domain information */
-    if ((fread(&vector_size, sizeof(int), 1, f) != 1) ||
-        (fread(&statebits, sizeof(int), 1, f) != 1) ||
-        (fread(&actionbits, sizeof(int), 1, f) != 1)) {
-        Abort("Invalid input file!\n");
-    }
+    /**
+     * Read the model from file
+     */
 
-    bits_per_integer = statebits;
-    statebits *= vector_size;
+    /* Open the file */
+    FILE *f = fopen(model_filename, "r");
+    if (f == NULL) Abort("Cannot open file '%s'!\n", model_filename);
 
-    // Read initial state
+    /* Read domain data */
+    if (fread(&vectorsize, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
+    statebits = (int*)malloc(sizeof(int[vectorsize]));
+    if (fread(statebits, sizeof(int), vectorsize, f) != (size_t)vectorsize) Abort("Invalid input file!\n");
+    if (fread(&actionbits, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
+    totalbits = 0;
+    for (int i=0; i<vectorsize; i++) totalbits += statebits[i];
+
+    /* Read initial state */
     set_t states = set_load(f);
 
-    // Read transitions
+    /* Read number of transition relations */
     if (fread(&next_count, sizeof(int), 1, f) != 1) Abort("Invalid input file!\n");
     next = (rel_t*)malloc(sizeof(rel_t) * next_count);
 
-    int i;
-    for (i=0; i<next_count; i++) {
-        next[i] = rel_load(f);
-    }
+    /* Read transition relations */
+    for (int i=0; i<next_count; i++) next[i] = rel_load_proj(f);
+    for (int i=0; i<next_count; i++) rel_load(next[i], f);
 
-    /* Done */
+    /* We ignore the reachable states and action labels that are stored after the relations */
+
+    /* Close the file */
     fclose(f);
 
-    if (print_transition_matrix) {
-        for (i=0; i<next_count; i++) {
-            INFO("");
-            print_matrix(next[i]->variables);
-            fprintf(stdout, "\n");
+    /**
+     * Pre-processing and some statistics reporting
+     */
+
+    if (strategy == 2 || strategy == 3) {
+        // for SAT and CHAINING, sort the transition relations (gnome sort because I like gnomes)
+        int i = 1, j = 2;
+        rel_t t;
+        while (i < next_count) {
+            rel_t *p = &next[i], *q = p-1;
+            if (sylvan_var((*q)->variables) > sylvan_var((*p)->variables)) {
+                t = *q;
+                *q = *p;
+                *p = t;
+                if (--i) continue;
+            }
+            i = j++;
         }
     }
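The hand-rolled gnome sort above could equivalently be written with the standard qsort; a sketch (using the rel_t type and sylvan_var from this file; qsort is not guaranteed to be stable, which is harmless here because go_sat only groups relations by their first variable):

#include <stdlib.h>

/* Comparator on the first BDD variable of each relation's support set. */
static int cmp_rel_first_var(const void *a, const void *b)
{
    uint32_t va = sylvan_var((*(const rel_t*)a)->variables);
    uint32_t vb = sylvan_var((*(const rel_t*)b)->variables);
    return (va > vb) - (va < vb);
}

/* usage: qsort(next, next_count, sizeof(rel_t), cmp_rel_first_var); */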
 
-    // Report statistics
     INFO("Read file '%s'\n", model_filename);
-    INFO("%d integers per state, %d bits per integer, %d transition groups\n", vector_size, bits_per_integer, next_count);
+    INFO("%d integers per state, %d bits per state, %d transition groups\n", vectorsize, totalbits, next_count);
 
-    if (merge_relations) {
-        BDD prime_variables = sylvan_set_empty();
-        for (int i=statebits-1; i>=0; i--) {
-            bdd_refs_push(prime_variables);
-            prime_variables = sylvan_set_add(prime_variables, i*2+1);
-            bdd_refs_pop(1);
+    /* if requested, print the transition matrix */
+    if (print_transition_matrix) {
+        for (int i=0; i<next_count; i++) {
+            INFO(""); // print time prefix
+            print_matrix_row(next[i]); // print row
+            fprintf(stdout, "\n"); // print newline
         }
+    }
 
-        bdd_refs_push(prime_variables);
+    /* merge all relations to one big transition relation if requested */
+    if (merge_relations) {
+        BDD newvars = sylvan_set_empty();
+        bdd_refs_pushptr(&newvars);
+        for (int i=totalbits-1; i>=0; i--) {
+            newvars = sylvan_set_add(newvars, i*2+1);
+            newvars = sylvan_set_add(newvars, i*2);
+        }
 
         INFO("Extending transition relations to full domain.\n");
         for (int i=0; i<next_count; i++) {
             next[i]->bdd = extend_relation(next[i]->bdd, next[i]->variables);
-            next[i]->variables = prime_variables;
+            next[i]->variables = newvars;
         }
 
+        bdd_refs_popptr(1);
+
         INFO("Taking union of all transition relations.\n");
         next[0]->bdd = big_union(0, next_count);
+
+        for (int i=1; i<next_count; i++) {
+            next[i]->bdd = sylvan_false;
+            next[i]->variables = sylvan_true;
+        }
         next_count = 1;
     }
 
     if (report_nodes) {
         INFO("BDD nodes:\n");
         INFO("Initial states: %zu BDD nodes\n", sylvan_nodecount(states->bdd));
-        for (i=0; i<next_count; i++) {
+        for (int i=0; i<next_count; i++) {
             INFO("Transition %d: %zu BDD nodes\n", i, sylvan_nodecount(next[i]->bdd));
         }
     }
 
+    print_memory_usage();
+
 #ifdef HAVE_PROFILER
     if (profile_filename != NULL) ProfilerStart(profile_filename);
 #endif
-    if (strategy == 1) {
+
+    if (strategy == 0) {
+        double t1 = wctime();
+        CALL(bfs, states);
+        double t2 = wctime();
+        INFO("BFS Time: %f\n", t2-t1);
+    } else if (strategy == 1) {
         double t1 = wctime();
         CALL(par, states);
         double t2 = wctime();
         INFO("PAR Time: %f\n", t2-t1);
-    } else {
+    } else if (strategy == 2) {
         double t1 = wctime();
-        CALL(bfs, states);
+        CALL(sat, states);
         double t2 = wctime();
-        INFO("BFS Time: %f\n", t2-t1);
+        INFO("SAT Time: %f\n", t2-t1);
+    } else if (strategy == 3) {
+        double t1 = wctime();
+        CALL(chaining, states);
+        double t2 = wctime();
+        INFO("CHAINING Time: %f\n", t2-t1);
+    } else {
+        Abort("Invalid strategy set?!\n");
     }
+
 #ifdef HAVE_PROFILER
     if (profile_filename != NULL) ProfilerStop();
 #endif
@@ -615,6 +925,8 @@ main(int argc, char **argv)
         INFO("Final states: %'zu BDD nodes\n", sylvan_nodecount(states->bdd));
     }
 
+    print_memory_usage();
+
     sylvan_stats_report(stdout);
 
     return 0;
diff --git a/resources/3rdparty/sylvan/models/anderson.4.bdd b/resources/3rdparty/sylvan/models/anderson.4.bdd
new file mode 100644
index 000000000..317d7200d
Binary files /dev/null and b/resources/3rdparty/sylvan/models/anderson.4.bdd differ
diff --git a/resources/3rdparty/sylvan/models/anderson.4.ldd b/resources/3rdparty/sylvan/models/anderson.4.ldd
new file mode 100644
index 000000000..42d5ad9c2
Binary files /dev/null and b/resources/3rdparty/sylvan/models/anderson.4.ldd differ
diff --git a/resources/3rdparty/sylvan/models/anderson.6.ldd b/resources/3rdparty/sylvan/models/anderson.6.ldd
new file mode 100644
index 000000000..985e88aae
Binary files /dev/null and b/resources/3rdparty/sylvan/models/anderson.6.ldd differ
diff --git a/resources/3rdparty/sylvan/models/anderson.8.ldd b/resources/3rdparty/sylvan/models/anderson.8.ldd
new file mode 100644
index 000000000..8a96e89b3
Binary files /dev/null and b/resources/3rdparty/sylvan/models/anderson.8.ldd differ
diff --git a/resources/3rdparty/sylvan/models/at.5.8-rgs.bdd b/resources/3rdparty/sylvan/models/at.5.8-rgs.bdd
deleted file mode 100755
index 8a0c19500..000000000
Binary files a/resources/3rdparty/sylvan/models/at.5.8-rgs.bdd and /dev/null differ
diff --git a/resources/3rdparty/sylvan/models/at.6.8-rgs.bdd b/resources/3rdparty/sylvan/models/at.6.8-rgs.bdd
deleted file mode 100755
index 71ef84a77..000000000
Binary files a/resources/3rdparty/sylvan/models/at.6.8-rgs.bdd and /dev/null differ
diff --git a/resources/3rdparty/sylvan/models/at.7.8-rgs.bdd b/resources/3rdparty/sylvan/models/at.7.8-rgs.bdd
deleted file mode 100755
index c8c29628e..000000000
Binary files a/resources/3rdparty/sylvan/models/at.7.8-rgs.bdd and /dev/null differ
diff --git a/resources/3rdparty/sylvan/models/bakery.4.bdd b/resources/3rdparty/sylvan/models/bakery.4.bdd
new file mode 100644
index 000000000..f5eb0937e
Binary files /dev/null and b/resources/3rdparty/sylvan/models/bakery.4.bdd differ
diff --git a/resources/3rdparty/sylvan/models/bakery.4.ldd b/resources/3rdparty/sylvan/models/bakery.4.ldd
new file mode 100644
index 000000000..98f797c42
Binary files /dev/null and b/resources/3rdparty/sylvan/models/bakery.4.ldd differ
diff --git a/resources/3rdparty/sylvan/models/bakery.5.ldd b/resources/3rdparty/sylvan/models/bakery.5.ldd
new file mode 100644
index 000000000..242e73556
Binary files /dev/null and b/resources/3rdparty/sylvan/models/bakery.5.ldd differ
diff --git a/resources/3rdparty/sylvan/models/bakery.6.ldd b/resources/3rdparty/sylvan/models/bakery.6.ldd
new file mode 100644
index 000000000..73e8ffe90
Binary files /dev/null and b/resources/3rdparty/sylvan/models/bakery.6.ldd differ
diff --git a/resources/3rdparty/sylvan/models/bakery.7.ldd b/resources/3rdparty/sylvan/models/bakery.7.ldd
new file mode 100644
index 000000000..b3e55346f
Binary files /dev/null and b/resources/3rdparty/sylvan/models/bakery.7.ldd differ
diff --git a/resources/3rdparty/sylvan/models/blocks.2.ldd b/resources/3rdparty/sylvan/models/blocks.2.ldd
index 1379ed1a3..0048b17f6 100755
Binary files a/resources/3rdparty/sylvan/models/blocks.2.ldd and b/resources/3rdparty/sylvan/models/blocks.2.ldd differ
diff --git a/resources/3rdparty/sylvan/models/blocks.3.ldd b/resources/3rdparty/sylvan/models/blocks.3.ldd
new file mode 100644
index 000000000..1bf94bcc2
Binary files /dev/null and b/resources/3rdparty/sylvan/models/blocks.3.ldd differ
diff --git a/resources/3rdparty/sylvan/models/blocks.4.ldd b/resources/3rdparty/sylvan/models/blocks.4.ldd
index 7270052c4..2a62be037 100755
Binary files a/resources/3rdparty/sylvan/models/blocks.4.ldd and b/resources/3rdparty/sylvan/models/blocks.4.ldd differ
diff --git a/resources/3rdparty/sylvan/models/collision.4.9-rgs.bdd b/resources/3rdparty/sylvan/models/collision.4.9-rgs.bdd
deleted file mode 100755
index 6db7602d1..000000000
Binary files a/resources/3rdparty/sylvan/models/collision.4.9-rgs.bdd and /dev/null differ
diff --git a/resources/3rdparty/sylvan/models/collision.4.bdd b/resources/3rdparty/sylvan/models/collision.4.bdd
new file mode 100644
index 000000000..98b89317d
Binary files /dev/null and b/resources/3rdparty/sylvan/models/collision.4.bdd differ
diff --git a/resources/3rdparty/sylvan/models/collision.4.ldd b/resources/3rdparty/sylvan/models/collision.4.ldd
new file mode 100644
index 000000000..13ff031ad
Binary files /dev/null and b/resources/3rdparty/sylvan/models/collision.4.ldd differ
diff --git a/resources/3rdparty/sylvan/models/collision.5.9-rgs.bdd b/resources/3rdparty/sylvan/models/collision.5.9-rgs.bdd
deleted file mode 100755
index 7c32c293e..000000000
Binary files a/resources/3rdparty/sylvan/models/collision.5.9-rgs.bdd and /dev/null differ
diff --git a/resources/3rdparty/sylvan/models/collision.5.bdd b/resources/3rdparty/sylvan/models/collision.5.bdd
new file mode 100644
index 000000000..02203d477
Binary files /dev/null and b/resources/3rdparty/sylvan/models/collision.5.bdd differ
diff --git a/resources/3rdparty/sylvan/models/collision.5.ldd b/resources/3rdparty/sylvan/models/collision.5.ldd
new file mode 100644
index 000000000..e72ae4a9a
Binary files /dev/null and b/resources/3rdparty/sylvan/models/collision.5.ldd differ
diff --git a/resources/3rdparty/sylvan/models/collision.6.bdd b/resources/3rdparty/sylvan/models/collision.6.bdd
new file mode 100644
index 000000000..a781db689
Binary files /dev/null and b/resources/3rdparty/sylvan/models/collision.6.bdd differ
diff --git a/resources/3rdparty/sylvan/models/collision.6.ldd b/resources/3rdparty/sylvan/models/collision.6.ldd
new file mode 100644
index 000000000..e7476b8a9
Binary files /dev/null and b/resources/3rdparty/sylvan/models/collision.6.ldd differ
diff --git a/resources/3rdparty/sylvan/models/lifts.6.bdd b/resources/3rdparty/sylvan/models/lifts.6.bdd
new file mode 100644
index 000000000..a8bbabe1c
Binary files /dev/null and b/resources/3rdparty/sylvan/models/lifts.6.bdd differ
diff --git a/resources/3rdparty/sylvan/models/lifts.6.ldd b/resources/3rdparty/sylvan/models/lifts.6.ldd
new file mode 100644
index 000000000..1e86ed632
Binary files /dev/null and b/resources/3rdparty/sylvan/models/lifts.6.ldd differ
diff --git a/resources/3rdparty/sylvan/models/lifts.7.bdd b/resources/3rdparty/sylvan/models/lifts.7.bdd
new file mode 100644
index 000000000..6eb3d0fd6
Binary files /dev/null and b/resources/3rdparty/sylvan/models/lifts.7.bdd differ
diff --git a/resources/3rdparty/sylvan/models/lifts.7.ldd b/resources/3rdparty/sylvan/models/lifts.7.ldd
new file mode 100644
index 000000000..0d357553f
Binary files /dev/null and b/resources/3rdparty/sylvan/models/lifts.7.ldd differ
diff --git a/resources/3rdparty/sylvan/models/schedule_world.2.8-rgs.bdd b/resources/3rdparty/sylvan/models/schedule_world.2.8-rgs.bdd
deleted file mode 100755
index 7c5354aef..000000000
Binary files a/resources/3rdparty/sylvan/models/schedule_world.2.8-rgs.bdd and /dev/null differ
diff --git a/resources/3rdparty/sylvan/models/schedule_world.2.bdd b/resources/3rdparty/sylvan/models/schedule_world.2.bdd
new file mode 100644
index 000000000..ad6165978
Binary files /dev/null and b/resources/3rdparty/sylvan/models/schedule_world.2.bdd differ
diff --git a/resources/3rdparty/sylvan/models/schedule_world.2.ldd b/resources/3rdparty/sylvan/models/schedule_world.2.ldd
new file mode 100644
index 000000000..9bd77e0e9
Binary files /dev/null and b/resources/3rdparty/sylvan/models/schedule_world.2.ldd differ
diff --git a/resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd b/resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd
deleted file mode 100755
index a4e3e444d..000000000
Binary files a/resources/3rdparty/sylvan/models/schedule_world.3.8-rgs.bdd and /dev/null differ
diff --git a/resources/3rdparty/sylvan/models/schedule_world.3.bdd b/resources/3rdparty/sylvan/models/schedule_world.3.bdd
new file mode 100644
index 000000000..3a30253ac
Binary files /dev/null and b/resources/3rdparty/sylvan/models/schedule_world.3.bdd differ
diff --git a/resources/3rdparty/sylvan/models/schedule_world.3.ldd b/resources/3rdparty/sylvan/models/schedule_world.3.ldd
new file mode 100644
index 000000000..808c2cf94
Binary files /dev/null and b/resources/3rdparty/sylvan/models/schedule_world.3.ldd differ
diff --git a/resources/3rdparty/sylvan/src/CMakeLists.txt b/resources/3rdparty/sylvan/src/CMakeLists.txt
old mode 100755
new mode 100644
index 0c516c483..51fea96d0
--- a/resources/3rdparty/sylvan/src/CMakeLists.txt
+++ b/resources/3rdparty/sylvan/src/CMakeLists.txt
@@ -37,6 +37,8 @@ set(HEADERS
     sylvan_table.h
     sylvan_tls.h
     storm_wrapper.h
+    sylvan_bdd_storm.h
+    sylvan_mtbdd_storm.h
     sylvan_storm_rational_function.h
     sylvan_storm_rational_number.h
 )
@@ -47,10 +49,9 @@ option(BUILD_STATIC_LIBS "Enable/disable creation of static libraries" ON)
 add_library(sylvan ${SOURCES})
 
 find_package(GMP REQUIRED)
-find_package(Hwloc REQUIRED)
 
-include_directories(sylvan ${HWLOC_INCLUDE_DIR} ${GMP_INCLUDE_DIR})
-target_link_libraries(sylvan m pthread ${GMP_LIBRARIES} ${HWLOC_LIBRARIES})
+include_directories(sylvan ${GMP_INCLUDE_DIR})
+target_link_libraries(sylvan m pthread ${GMP_LIBRARIES})
 
 if(UNIX AND NOT APPLE)
     target_link_libraries(sylvan rt)
@@ -60,12 +61,10 @@ option(SYLVAN_STATS "Collect statistics" OFF)
 if(SYLVAN_STATS)
     set_target_properties(sylvan PROPERTIES COMPILE_DEFINITIONS "SYLVAN_STATS")
 endif()
-set_target_properties(sylvan PROPERTIES COMPILE_DEFINITIONS "STORM_SILENCE_WARNINGS")
 
 install(TARGETS sylvan DESTINATION "${CMAKE_INSTALL_LIBDIR}")
 install(FILES ${HEADERS} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}")
 
-
 # MODIFICATIONS NEEDED MADE FOR STORM
 
 # We need to make sure that the binary is put into a folder that is independent of the
diff --git a/resources/3rdparty/sylvan/src/avl.h b/resources/3rdparty/sylvan/src/avl.h
index 38277523d..858859d6b 100755
--- a/resources/3rdparty/sylvan/src/avl.h
+++ b/resources/3rdparty/sylvan/src/avl.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/resources/3rdparty/sylvan/src/lace.c b/resources/3rdparty/sylvan/src/lace.c
index abdbbca39..08e9f8843 100755
--- a/resources/3rdparty/sylvan/src/lace.c
+++ b/resources/3rdparty/sylvan/src/lace.c
@@ -15,6 +15,7 @@
  * limitations under the License.
  */
 
+#define _GNU_SOURCE
 #include <errno.h> // for errno
 #include <sched.h> // for sched_getaffinity
 #include <stdio.h>  // for fprintf
@@ -26,28 +27,46 @@
 #include <unistd.h>
 #include <assert.h>
 
-// work around for missing MAP_ANONYMOUS definition in sys/mman.h on
-// older OS X versions
-#if !(defined MAP_ANONYMOUS) && defined MAP_ANON
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
 #include <lace.h>
-#include <hwloc.h>
 
-// public Worker data
-static Worker **workers = NULL;
-static size_t default_stacksize = 0; // set by lace_init
-static size_t default_dqsize = 100000;
+#if LACE_USE_HWLOC
+#include <hwloc.h>
 
+/**
+ * HWLOC information
+ */
 static hwloc_topology_t topo;
 static unsigned int n_nodes, n_cores, n_pus;
+#endif
+
+/**
+ * (public) Worker data
+ */
+static Worker **workers = NULL;
+
+/**
+ * Default sizes for program stack and task deque
+ */
+static size_t default_stacksize = 0; // 0 means "set by lace_init"
+static size_t default_dqsize = 100000;
 
+/**
+ * Verbosity flag, set with lace_set_verbosity
+ */
 static int verbosity = 0;
 
-static int n_workers = 0;
-static int enabled_workers = 0;
+/**
+ * Number of workers and number of enabled/active workers
+ */
+static unsigned int n_workers = 0;
+static unsigned int enabled_workers = 0;
 
+/**
+ * Data structure holding the task deque etc. for each worker.
+ * - first public cachelines (accessible via global "workers" variable)
+ * - then private cachelines
+ * - then the deque array
+ */
 typedef struct {
     Worker worker_public;
     char pad1[PAD(sizeof(Worker), LINE_SIZE)];
@@ -56,26 +75,51 @@ typedef struct {
     Task deque[];
 } worker_data;
 
+/**
+ * (Secret) holds pointers to the memory block allocated for each worker
+ */
 static worker_data **workers_memory = NULL;
+
+/**
+ * Number of bytes allocated for each worker's worker data.
+ */
 static size_t workers_memory_size = 0;
 
-// private Worker data (just for stats at end )
+/**
+ * (Secret) holds pointer to private Worker data, just for stats collection at end
+ */
 static WorkerP **workers_p;
 
-// set to 0 when quitting
+/**
+ * Flag to signal all workers to quit.
+ */
 static int lace_quits = 0;
 
-// for storing private Worker data
+/**
+ * Thread-specific mechanism to access current worker data
+ */
 #ifdef __linux__ // use gcc thread-local storage (i.e. __thread variables)
 static __thread WorkerP *current_worker;
 #else
 static pthread_key_t worker_key;
 #endif
+
+/**
+ * worker_attr used for creating threads
+ * - initialized by lace_init
+ * - used by lace_spawn_worker
+ */
 static pthread_attr_t worker_attr;
 
+/**
+ * The condition/mutex pair for when the root thread sleeps until the end of the program
+ */
 static pthread_cond_t wait_until_done = PTHREAD_COND_INITIALIZER;
 static pthread_mutex_t wait_until_done_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+/**
+ * Data structure that contains the stack and stack size for each worker.
+ */
 struct lace_worker_init
 {
     void* stack;
@@ -84,8 +128,14 @@ struct lace_worker_init
 
 static struct lace_worker_init *workers_init;
 
+/**
+ * Global newframe variable used for the implementation of NEWFRAME and TOGETHER
+ */
 lace_newframe_t lace_newframe;
 
+/**
+ * Get the private Worker data of the current thread
+ */
 WorkerP*
 lace_get_worker()
 {
@@ -96,14 +146,20 @@ lace_get_worker()
 #endif
 }
 
+/**
+ * Find the head of the task deque, using the given private Worker data
+ */
 Task*
 lace_get_head(WorkerP *self)
 {
     Task *dq = self->dq;
+
+    /* First check the first tasks linearly */
     if (dq[0].thief == 0) return dq;
     if (dq[1].thief == 0) return dq+1;
     if (dq[2].thief == 0) return dq+2;
 
+    /* Then fast search for a low/high bound using powers of 2: 4, 8, 16... */
     size_t low = 2;
     size_t high = self->end - self->dq;
 
@@ -118,6 +174,7 @@ lace_get_head(WorkerP *self)
         }
     }
 
+    /* Finally zoom in using binary search */
     while (low < high) {
         size_t mid = low + (high-low)/2;
         if (dq[mid].thief == 0) high = mid;
@@ -127,22 +184,27 @@ lace_get_head(WorkerP *self)
     return dq+low;
 }
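The doubling-then-binary-search pattern documented above is generic; as a stand-alone illustration (plain C, not tied to Lace), finding the first zero in an array whose non-zero entries all precede its zero entries could look like this:

/* Return the index of the first 0 in a[0..n-1], or n if there is none,
 * assuming all non-zero entries come before all zero entries. */
static size_t first_zero(const int *a, size_t n)
{
    size_t low = 0, high = n;
    /* exponential probe for an upper bound */
    for (size_t step = 1; low + step < n; step *= 2) {
        if (a[low + step] == 0) { high = low + step; break; }
        low = low + step;
    }
    /* binary search within [low, high) */
    while (low < high) {
        size_t mid = low + (high - low) / 2;
        if (a[mid] == 0) high = mid;
        else low = mid + 1;
    }
    return low;
}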
 
-size_t
+/**
+ * Get the number of workers
+ */
+unsigned int
 lace_workers()
 {
     return n_workers;
 }
 
+/**
+ * Get the default stack size (0 means automatically determined)
+ */
 size_t
 lace_default_stacksize()
 {
     return default_stacksize;
 }
 
-#ifndef cas
-#define cas(ptr, old, new) (__sync_bool_compare_and_swap((ptr),(old),(new)))
-#endif
-
+/**
+ * If we are collecting PIE times, then we need some helper functions.
+ */
 #if LACE_PIE_TIMES
 static uint64_t count_at_start, count_at_end;
 static long long unsigned us_elapsed_timer;
@@ -169,7 +231,9 @@ us_elapsed(void)
 }
 #endif
 
-/* Barrier */
+/**
+ * Lace barrier implementation that synchronizes all currently enabled workers.
+ */
 typedef struct {
     volatile int __attribute__((aligned(LINE_SIZE))) count;
     volatile int __attribute__((aligned(LINE_SIZE))) leaving;
@@ -178,11 +242,14 @@ typedef struct {
 
 barrier_t lace_bar;
 
+/**
+ * Enter the Lace barrier and wait until all workers have entered the Lace barrier.
+ */
 void
 lace_barrier()
 {
     int wait = lace_bar.wait;
-    if (enabled_workers == __sync_add_and_fetch(&lace_bar.count, 1)) {
+    if ((int)enabled_workers == __sync_add_and_fetch(&lace_bar.count, 1)) {
         lace_bar.count = 0;
         lace_bar.leaving = enabled_workers;
         lace_bar.wait = 1 - wait; // flip wait
@@ -193,12 +260,18 @@ lace_barrier()
     __sync_add_and_fetch(&lace_bar.leaving, -1);
 }
 
+/**
+ * Initialize the Lace barrier
+ */
 static void
 lace_barrier_init()
 {
     memset(&lace_bar, 0, sizeof(barrier_t));
 }
 
+/**
+ * Destroy the Lace barrier (just wait until all are exited)
+ */
 static void
 lace_barrier_destroy()
 {
@@ -206,9 +279,13 @@ lace_barrier_destroy()
     while (lace_bar.leaving != 0) continue;
 }
 
-static void
+/**
+ * For debugging purposes, check if memory is allocated on the correct memory nodes.
+ */
+static void __attribute__((unused))
 lace_check_memory(void)
 {
+#if LACE_USE_HWLOC
     // get our current worker
     WorkerP *w = lace_get_worker();
     void* mem = workers_memory[w->worker];
@@ -229,14 +306,10 @@ lace_check_memory(void)
     hwloc_membind_policy_t policy;
     int res = hwloc_get_area_membind_nodeset(topo, mem, sizeof(worker_data), memlocation, &policy, HWLOC_MEMBIND_STRICT);
     if (res == -1) {
-#ifndef STORM_SILENCE_WARNINGS
         fprintf(stderr, "Lace warning: hwloc_get_area_membind_nodeset returned -1!\n");
-#endif
     }
     if (policy != HWLOC_MEMBIND_BIND) {
-#ifndef STORM_SILENCE_WARNINGS
         fprintf(stderr, "Lace warning: Lace worker memory not bound with BIND policy!\n");
-#endif
     }
 #endif
 
@@ -258,22 +331,27 @@ lace_check_memory(void)
     hwloc_bitmap_free(cpuset);
     hwloc_bitmap_free(cpunodes);
     hwloc_bitmap_free(memlocation);
+#endif
 }
 
-WorkerP *
-lace_init_worker(int worker)
+void
+lace_pin_worker(void)
 {
-    // Get our core
+#if LACE_USE_HWLOC
+    // Get our worker
+    unsigned int worker = lace_get_worker()->worker;
+
+    // Get our core (hwloc object)
     hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_CORE, worker % n_cores);
 
     // Get our copy of the bitmap
     hwloc_cpuset_t bmp = hwloc_bitmap_dup(pu->cpuset);
 
-    // Get number of PUs in set
+    // Get number of PUs in bitmap
     int n = -1, count=0;
     while ((n=hwloc_bitmap_next(bmp, n)) != -1) count++;
 
-    // Check if we actually have logical processors
+    // Check if we actually have any logical processors
     if (count == 0) {
         fprintf(stderr, "Lace error: trying to pin a worker on an empty core?\n");
         exit(-1);
@@ -293,18 +371,46 @@ lace_init_worker(int worker)
 
     // Pin our thread...
     if (hwloc_set_cpubind(topo, bmp, HWLOC_CPUBIND_THREAD) == -1) {
-#ifndef STORM_SILENCE_WARNINGS
         fprintf(stderr, "Lace warning: hwloc_set_cpubind returned -1!\n");
-#endif
     }
 
-    // Free allocated memory
+    // Free our copy of the bitmap
     hwloc_bitmap_free(bmp);
 
-    // Get allocated memory
-    Worker *wt = &workers_memory[worker]->worker_public;
-    WorkerP *w = &workers_memory[worker]->worker_private;
+    // Pin the memory area (using the appropriate hwloc function)
+#ifdef HWLOC_MEMBIND_BYNODESET
+    int res = hwloc_set_area_membind(topo, workers_memory[worker], workers_memory_size, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE | HWLOC_MEMBIND_BYNODESET);
+#else
+    int res = hwloc_set_area_membind_nodeset(topo, workers_memory[worker], workers_memory_size, pu->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE);
+#endif
+    if (res != 0) {
+        fprintf(stderr, "Lace error: Unable to bind worker memory to node!\n");
+    }
+
+    // Check if everything is on the correct node
+    lace_check_memory();
+#endif
+}
+
+void
+lace_init_worker(unsigned int worker)
+{
+    // Allocate our memory
+    workers_memory[worker] = mmap(NULL, workers_memory_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+    if (workers_memory[worker] == MAP_FAILED) {
+        fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n");
+        exit(1);
+    }
+
+    // Set pointers
+    Worker *wt = workers[worker] = &workers_memory[worker]->worker_public;
+    WorkerP *w = workers_p[worker] = &workers_memory[worker]->worker_private;
     w->dq = workers_memory[worker]->deque;
+#ifdef __linux__
+    current_worker = w;
+#else
+    pthread_setspecific(worker_key, w);
+#endif
 
     // Initialize public worker data
     wt->dq = w->dq;
@@ -318,7 +424,11 @@ lace_init_worker(int worker)
     w->split = w->dq;
     w->allstolen = 0;
     w->worker = worker;
+#if LACE_USE_HWLOC
     w->pu = worker % n_cores;
+#else
+    w->pu = -1;
+#endif
     w->enabled = 1;
     if (workers_init[worker].stack != 0) {
         w->stack_trigger = ((size_t)workers_init[worker].stack) + workers_init[worker].stacksize/20;
@@ -328,20 +438,10 @@ lace_init_worker(int worker)
     w->rng = (((uint64_t)rand())<<32 | rand());
 
 #if LACE_COUNT_EVENTS
-    // Reset counters
+    // Initialize counters
     { int k; for (k=0; k<CTR_MAX; k++) w->ctr[k] = 0; }
 #endif
 
-    // Set pointers
-#ifdef __linux__
-    current_worker = w;
-#else
-    pthread_setspecific(worker_key, w);
-#endif
-
-    // Check if everything is on the correct node
-    lace_check_memory();
-
     // Synchronize with others
     lace_barrier();
 
@@ -350,9 +450,14 @@ lace_init_worker(int worker)
     w->level = 0;
 #endif
 
-    return w;
+    if (worker == 0) {
+        lace_time_event(w, 1);
+    }
 }
 
+/**
+ * Some OSX systems do not implement pthread_barrier_t, so we provide an implementation here.
+ */
 #if defined(__APPLE__) && !defined(pthread_barrier_t)
 
 typedef int pthread_barrierattr_t;
@@ -442,13 +547,13 @@ lace_resume()
 }
 
 /**
- * With set_workers, all workers 0..(N-1) are enabled and N..max are disabled.
- * You can never disable the current worker or reduce the number of workers below 1.
+ * Disable worker <worker>.
+ * If the given worker is the current worker, this function does nothing.
  */
 void
-lace_disable_worker(int worker)
+lace_disable_worker(unsigned int worker)
 {
-    int self = lace_get_worker()->worker;
+    unsigned int self = lace_get_worker()->worker;
     if (worker == self) return;
     if (workers_p[worker]->enabled == 1) {
         workers_p[worker]->enabled = 0;
@@ -456,10 +561,14 @@ lace_disable_worker(int worker)
     }
 }
 
+/**
+ * Enable worker <worker>.
+ * If the given worker is the current worker, this function does nothing.
+ */
 void
-lace_enable_worker(int worker)
+lace_enable_worker(unsigned int worker)
 {
-    int self = lace_get_worker()->worker;
+    unsigned int self = lace_get_worker()->worker;
     if (worker == self) return;
     if (workers_p[worker]->enabled == 0) {
         workers_p[worker]->enabled = 1;
@@ -467,26 +576,38 @@ lace_enable_worker(int worker)
     }
 }
 
+/**
+ * Enables all workers 0..(N-1) and disables workers N..max.
+ * This function _should_ be called by worker 0.
+ * Ignores the current worker if >= N.
+ * The number of workers is never reduced below 1.
+ */
 void
-lace_set_workers(int workercount)
+lace_set_workers(unsigned int workercount)
 {
     if (workercount < 1) workercount = 1;
     if (workercount > n_workers) workercount = n_workers;
     enabled_workers = workercount;
-    int self = lace_get_worker()->worker;
+    unsigned int self = lace_get_worker()->worker;
     if (self >= workercount) workercount--;
-    int i;
-    for (i=0; i<n_workers; i++) {
+    for (unsigned int i=0; i<n_workers; i++) {
         workers_p[i]->enabled = (i < workercount || i == self) ? 1 : 0;
     }
 }
 
-int
+/**
+ * Get the number of currently enabled workers.
+ */
+unsigned int
 lace_enabled_workers()
 {
     return enabled_workers;
 }
 
+/**
+ * Simple random number generator (like rand) using the given seed.
+ * (Used for thread-specific (scalable) random number generation.)
+ */
 static inline uint32_t
 rng(uint32_t *seed, int max)
 {
@@ -500,6 +621,9 @@ rng(uint32_t *seed, int max)
     return next % max;
 }
 
+/**
+ * (Try to) steal and execute a task from a random worker.
+ */
 VOID_TASK_0(lace_steal_random)
 {
     Worker *victim = workers[(__lace_worker->worker + 1 + rng(&__lace_worker->seed, n_workers-1)) % n_workers];
@@ -515,26 +639,19 @@ VOID_TASK_0(lace_steal_random)
     }
 }
 
-VOID_TASK_1(lace_steal_random_loop, int*, quit)
-{
-    while(!(*(volatile int*)quit)) {
-        lace_steal_random();
-
-        if (must_suspend) {
-            lace_barrier();
-            do {
-                pthread_barrier_wait(&suspend_barrier);
-            } while (__lace_worker->enabled == 0);
-        }
-    }
-}
-
+/**
+ * Variable to hold the main/root task.
+ */
 static lace_startup_cb main_cb;
 
+/**
+ * Wrapper around the main/root task.
+ */
 static void*
 lace_main_wrapper(void *arg)
 {
-    lace_init_main();
+    lace_init_worker(0);
+    lace_pin_worker();
     LACE_ME;
     WRAP(main_cb, arg);
     lace_exit();
@@ -547,7 +664,10 @@ lace_main_wrapper(void *arg)
     return NULL;
 }
 
-#define lace_steal_loop(quit) CALL(lace_steal_loop, quit)
+/**
+ * Main Lace worker implementation.
+ * Steal from random victims until "quit" is set.
+ */
 VOID_TASK_1(lace_steal_loop, int*, quit)
 {
     // Determine who I am
@@ -599,12 +719,12 @@ VOID_TASK_1(lace_steal_loop, int*, quit)
 
 /**
  * Initialize worker 0.
+ * Simply calls lace_init_worker(0), which also records the startup time event.
  */
 void
 lace_init_main()
 {
-    WorkerP * __attribute__((unused)) __lace_worker = lace_init_worker(0);
-    lace_time_event(__lace_worker, 1);
+    lace_init_worker(0);
 }
 
 /**
@@ -614,16 +734,13 @@ lace_init_main()
  * For worker 0, use lace_init_main
  */
 void
-lace_run_worker(int worker)
+lace_run_worker(void)
 {
-    // Initialize local datastructure
-    WorkerP *__lace_worker = lace_init_worker(worker);
-    Task *__lace_dq_head = __lace_worker->dq;
-
-    // Steal for a while
-    lace_steal_loop(&lace_quits);
+    // Run the steal loop
+    LACE_ME;
+    CALL(lace_steal_loop, &lace_quits);
 
-    // Time the quit event
+    // Time worker exit event
     lace_time_event(__lace_worker, 9);
 
     // Synchronize with lace_exit
@@ -633,7 +750,10 @@ lace_run_worker(int worker)
 static void*
 lace_default_worker_thread(void* arg)
 {
-    lace_run_worker((int)(size_t)arg);
+    int worker = (int)(size_t)arg;
+    lace_init_worker(worker);
+    lace_pin_worker();
+    lace_run_worker();
     return NULL;
 }
 
@@ -646,6 +766,7 @@ lace_spawn_worker(int worker, size_t stacksize, void* (*fun)(void*), void* arg)
     size_t pagesize = sysconf(_SC_PAGESIZE);
     stacksize = (stacksize + pagesize - 1) & ~(pagesize - 1); // ceil(stacksize, pagesize)
 
+#if LACE_USE_HWLOC
     // Get our logical processor
     hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, worker % n_pus);
 
@@ -655,6 +776,9 @@ lace_spawn_worker(int worker, size_t stacksize, void* (*fun)(void*), void* arg)
         fprintf(stderr, "Lace error: Unable to allocate memory for the pthread stack!\n");
         exit(1);
     }
+#else
+    void *stack_location = mmap(NULL, stacksize + pagesize, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+#endif
 
     if (0 != mprotect(stack_location, pagesize, PROT_NONE)) {
         fprintf(stderr, "Lace error: Unable to protect the allocated program stack with a guard page!\n");
@@ -679,22 +803,23 @@ lace_spawn_worker(int worker, size_t stacksize, void* (*fun)(void*), void* arg)
     return res;
 }
 
-static int
-get_cpu_count()
-{
-    int count = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU);
-    return count < 1 ? 1 : count;
-}
-
+/**
+ * Set the verbosity of Lace.
+ */
 void
 lace_set_verbosity(int level)
 {
     verbosity = level;
 }
 
+/**
+ * Initialize Lace for work-stealing with <n> workers, where
+ * each worker gets a task deque with <dqsize> elements.
+ */
 void
-lace_init(int _n_workers, size_t dqsize)
+lace_init(unsigned int _n_workers, size_t dqsize)
 {
+#if LACE_USE_HWLOC
     // Initialize topology and information about cpus
     hwloc_topology_init(&topo);
     hwloc_topology_load(topo);
@@ -702,15 +827,23 @@ lace_init(int _n_workers, size_t dqsize)
     n_nodes = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_NODE);
     n_cores = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_CORE);
     n_pus = hwloc_get_nbobjs_by_type(topo, HWLOC_OBJ_PU);
+#elif defined(sched_getaffinity)
+    cpu_set_t cs;
+    CPU_ZERO(&cs);
+    sched_getaffinity(0, sizeof(cs), &cs);
+    unsigned int n_pus = CPU_COUNT(&cs);
+#else
+    unsigned int n_pus = sysconf(_SC_NPROCESSORS_ONLN);
+#endif
 
     // Initialize globals
-    n_workers = _n_workers;
-    if (n_workers == 0) n_workers = get_cpu_count();
+    n_workers = _n_workers == 0 ? n_pus : _n_workers;
     enabled_workers = n_workers;
     if (dqsize != 0) default_dqsize = dqsize;
+    else dqsize = default_dqsize;
     lace_quits = 0;
 
-    // Create barrier for all workers
+    // Initialize Lace barrier
     lace_barrier_init();
 
     // Create suspend barrier
@@ -724,37 +857,9 @@ lace_init(int _n_workers, size_t dqsize)
         exit(1);
     }
 
-    // Allocate memory for each worker
+    // Compute memory size for each worker
     workers_memory_size = sizeof(worker_data) + sizeof(Task) * dqsize;
 
-    for (int i=0; i<n_workers; i++) {
-        workers_memory[i] = mmap(NULL, workers_memory_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-        if (workers_memory[i] == MAP_FAILED) {
-            fprintf(stderr, "Lace error: Unable to allocate memory for the Lace worker!\n");
-            exit(1);
-        }
-        workers[i] = &workers_memory[i]->worker_public;
-        workers_p[i] = &workers_memory[i]->worker_private;
-    }
-
-    // Pin allocated memory of each worker
-    for (int i=0; i<n_workers; i++) {
-        // Get our core
-        hwloc_obj_t core = hwloc_get_obj_by_type(topo, HWLOC_OBJ_CORE, i % n_cores);
-
-        // Pin the memory area
-#ifdef HWLOC_MEMBIND_BYNODESET
-        int res = hwloc_set_area_membind(topo, workers_memory[i], workers_memory_size, core->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE | HWLOC_MEMBIND_BYNODESET);
-#else
-        int res = hwloc_set_area_membind_nodeset(topo, workers_memory[i], workers_memory_size, core->nodeset, HWLOC_MEMBIND_BIND, HWLOC_MEMBIND_STRICT | HWLOC_MEMBIND_MIGRATE);
-#endif
-        if (res != 0) {
-#ifndef STORM_SILENCE_WARNINGS
-            fprintf(stderr, "Lace error: Unable to bind worker memory to node!\n");
-#endif
-        }
-    }
-
     // Create pthread key
 #ifndef __linux__
     pthread_key_create(&worker_key, NULL);
@@ -773,7 +878,11 @@ lace_init(int _n_workers, size_t dqsize)
     }
 
     if (verbosity) {
+#if LACE_USE_HWLOC
         fprintf(stderr, "Initializing Lace, %u nodes, %u cores, %u logical processors, %d workers.\n", n_nodes, n_cores, n_pus, n_workers);
+#else
+        fprintf(stderr, "Initializing Lace, %u available cores, %d workers.\n", n_pus, n_workers);
+#endif
     }
 
     // Prepare lace_init structure
@@ -788,11 +897,18 @@ lace_init(int _n_workers, size_t dqsize)
 #endif
 }
 
+/**
+ * Start the worker threads.
+ * If cb is set, then the current thread is suspended and Worker 0 is a new thread that starts with
+ * the given cb(arg) as the root task.
+ * If cb is not set, then the current thread is Worker 0 and this function returns.
+ */
 void
 lace_startup(size_t stacksize, lace_startup_cb cb, void *arg)
 {
     if (stacksize == 0) stacksize = default_stacksize;
 
+    /* Report startup if verbose */
     if (verbosity) {
         if (cb != 0) {
             fprintf(stderr, "Lace startup, creating %d worker threads with program stack %zu bytes.\n", n_workers, stacksize);
@@ -803,22 +919,21 @@ lace_startup(size_t stacksize, lace_startup_cb cb, void *arg)
         }
     }
 
-    /* Spawn workers */
-    int i;
-    for (i=1; i<n_workers; i++) lace_spawn_worker(i, stacksize, 0, 0);
+    /* Spawn all other workers */
+    for (unsigned int i=1; i<n_workers; i++) lace_spawn_worker(i, stacksize, 0, 0);
 
     if (cb != 0) {
+        /* If cb set, spawn worker 0 */
         main_cb = cb;
         lace_spawn_worker(0, stacksize, lace_main_wrapper, arg);
 
-        // Suspend this thread until cb returns
+        /* Suspend this thread until cb returns */
         pthread_mutex_lock(&wait_until_done_mutex);
         if (lace_quits == 0) pthread_cond_wait(&wait_until_done, &wait_until_done_mutex);
         pthread_mutex_unlock(&wait_until_done_mutex);
     } else {
-        // use this thread as worker and return control
+        /* If cb not set, use current thread as worker 0 */
         lace_init_worker(0);
-        lace_time_event(lace_get_worker(), 1);
     }
 }
 
@@ -826,6 +941,9 @@ lace_startup(size_t stacksize, lace_startup_cb cb, void *arg)
 static uint64_t ctr_all[CTR_MAX];
 #endif
 
+/**
+ * Reset the counters of Lace.
+ */
 void
 lace_count_reset()
 {
@@ -851,6 +969,9 @@ lace_count_reset()
 #endif
 }
 
+/**
+ * Report counters to the given file.
+ */
 void
 lace_count_report_file(FILE *file)
 {
@@ -948,11 +1069,15 @@ lace_count_report_file(FILE *file)
     (void)file;
 }
 
+/**
+ * End Lace. All disabled threads are re-enabled, and then all Workers are signaled to quit.
+ * This function waits until all threads are done, then returns.
+ */
 void lace_exit()
 {
     lace_time_event(lace_get_worker(), 2);
 
-    // first suspend all other threads
+    // first suspend all enabled threads
     lace_suspend();
 
     // now enable all threads and tell them to quit
@@ -1030,7 +1155,7 @@ VOID_TASK_2(lace_together_helper, Task*, t, volatile int*, finished)
 
     for (;;) {
         int f = *finished;
-        if (cas(finished, f, f-1)) break;
+        if (__sync_bool_compare_and_swap(finished, f, f-1)) break;
     }
 
     while (*finished != 0) STEAL_RANDOM();
@@ -1086,7 +1211,7 @@ lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t)
     t2->d.args.arg_1 = t;
     t2->d.args.arg_2 = &done;
 
-    while (!cas(&lace_newframe.t, 0, &_t2)) lace_yield(__lace_worker, __lace_dq_head);
+    while (!__sync_bool_compare_and_swap(&lace_newframe.t, 0, &_t2)) lace_yield(__lace_worker, __lace_dq_head);
     lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2);
 }
 
@@ -1113,10 +1238,13 @@ lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *t)
 
     compiler_barrier();
 
-    while (!cas(&lace_newframe.t, 0, &_s)) lace_yield(__lace_worker, __lace_dq_head);
+    while (!__sync_bool_compare_and_swap(&lace_newframe.t, 0, &_s)) lace_yield(__lace_worker, __lace_dq_head);
     lace_sync_and_exec(__lace_worker, __lace_dq_head, &_t2);
 }
 
+/**
+ * Called by _SPAWN functions when the Task stack is full.
+ */
 void
 lace_abort_stack_overflow(void)
 {
diff --git a/resources/3rdparty/sylvan/src/lace.h b/resources/3rdparty/sylvan/src/lace.h
index 4f49f63dc..0b4b7a06f 100755
--- a/resources/3rdparty/sylvan/src/lace.h
+++ b/resources/3rdparty/sylvan/src/lace.h
@@ -23,40 +23,281 @@
 #ifndef __LACE_H__
 #define __LACE_H__
 
+#ifdef __has_include
+#  if __has_include("lace_config.h")
+#    include <lace_config.h>
+#  else
+#    define LACE_PIE_TIMES     0
+#    define LACE_COUNT_TASKS   0
+#    define LACE_COUNT_STEALS  0
+#    define LACE_COUNT_SPLITS  0
+#    define LACE_USE_HWLOC     0
+#  endif
+#endif
+
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
 
-/* Some flags */
+/**
+ * Using Lace.
+ *
+ * Optionally set the verbosity level with lace_set_verbosity.
+ * Then call lace_init to initialize the system.
+ * - lace_init(n_workers, deque_size);
+ *   set both parameters to 0 for reasonable defaults, using all available cores.
+ *
+ * You can create Worker threads yourself or let Lace create threads with lace_startup.
+ *
+ * When creating threads yourself, call the following functions:
+ *   - lace_init_worker to allocate and initialize the worker data structures
+ *     this method returns when all workers have called lace_init_worker
+ *   - lace_pin_worker (optional) to pin the thread and memory to a core
+ * The main worker can now start its root task. All other workers:
+ *   - lace_run_worker to perform work-stealing until the main worker calls lace_exit
+ *
+ * When letting Lace create threads with lace_startup
+ * - Call lace_startup with a callback to create N threads.
+ *   Returns after the callback has returned and all created threads are destroyed
+ * - Call lace_startup without a callback to create N-1 threads.
+ *   Returns control to the caller. When lace_exit is called, all created threads are terminated.
+ */
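As a minimal sketch of the lace_startup path described above (illustrative only; "hello" is a made-up task, and the task macros used here are defined further down in this header):

#include <stdio.h>
#include <lace.h>

/* Root task run by Worker 0 once all workers are up. */
VOID_TASK_1(hello, void*, arg)
{
    (void)arg;
    printf("hello from worker %d of %u\n", LACE_WORKER_ID, lace_workers());
}

int main(void)
{
    lace_set_verbosity(1);
    lace_init(0, 0);                    /* autodetect #workers, default deque size */
    lace_startup(0, TASK(hello), NULL); /* run hello as root task, then exit Lace */
    return 0;
}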
 
-#ifndef LACE_DEBUG_PROGRAMSTACK /* Write to stderr when 95% program stack reached */
-#define LACE_DEBUG_PROGRAMSTACK 0
-#endif
+/**
+ * Type definitions used in the functions below.
+ * - WorkerP contains the (private) Worker data
+ * - Task contains a single Task
+ */
+typedef struct _WorkerP WorkerP;
+typedef struct _Task Task;
 
-#ifndef LACE_LEAP_RANDOM /* Use random leaping when leapfrogging fails */
-#define LACE_LEAP_RANDOM 0
-#endif
+/**
+ * The macro LACE_TYPEDEF_CB(typedefname, taskname, parametertypes) defines
+ * a Task for use as a callback function.
+ */
+#define LACE_TYPEDEF_CB(t, f, ...) typedef t (*f)(WorkerP *, Task *, ##__VA_ARGS__);
 
-#ifndef LACE_PIE_TIMES /* Record time spent stealing and leapfrogging */
-#define LACE_PIE_TIMES 0
-#endif
+/**
+ * The lace_startup_cb type for a void Task with one void* parameter.
+ */
+LACE_TYPEDEF_CB(void, lace_startup_cb, void*);
 
-#ifndef LACE_COUNT_TASKS /* Count number of tasks executed */
-#define LACE_COUNT_TASKS 0
-#endif
+/**
+ * Set verbosity level (0 = no startup messages, 1 = startup messages)
+ * Default level: 0
+ */
+void lace_set_verbosity(int level);
+
+/**
+ * Initialize Lace for <n_workers> workers with a deque size of <dqsize> per worker.
+ * If <n_workers> is set to 0, automatically detects available cores.
+ * If <dqsize> is set to 0, uses a reasonable default value.
+ */
+void lace_init(unsigned int n_workers, size_t dqsize);
+
+/**
+ * Let Lace create worker threads.
+ * If <stacksize> is set to 0, uses a reasonable default value.
+ * If cb, arg are set to 0, then the current thread is initialized as the main Worker (Worker 0).
+ *
+ * If cb,arg are set, then the current thread is suspended. A new thread is made for Worker 0 and
+ * the task cb with parameter arg is called; when cb returns, Lace is exited automatically.
+ */
+void lace_startup(size_t stacksize, lace_startup_cb, void* arg);
+
+/**
+ * Initialize worker <worker>, allocating memory.
+ * If <worker> is 0, then the current thread is the main worker.
+ */
+void lace_init_worker(unsigned int worker);
+
+/**
+ * Use hwloc to pin the current thread to a CPU and bind its allocated memory to the closest NUMA domain.
+ * Call this *after* lace_init_worker and *before* lace_run_worker.
+ */
+void lace_pin_worker(void);
+
+/**
+ * Perform work-stealing until lace_exit is called.
+ */
+void lace_run_worker(void);
+
+/**
+ * Steal a random task.
+ */
+#define lace_steal_random() CALL(lace_steal_random)
+void lace_steal_random_CALL(WorkerP*, Task*);
+
+/**
+ * Enter the Lace barrier. (all active workers must enter it before we can continue)
+ */
+void lace_barrier();
+
+/**
+ * Suspend all workers except the current worker.
+ * May only be used when all other workers are idle.
+ */
+void lace_suspend();
+
+/**
+ * Resume all workers.
+ */
+void lace_resume();
+
+/**
+ * When all other workers are suspended, some workers can be disabled using the following functions.
+ * With set_workers, all workers 0..(N-1) are enabled and N..max are disabled.
+ * You can never disable the current worker or reduce the number of workers below 1.
+ * You cannot add workers.
+ */
+void lace_set_workers(unsigned int workercount);
+
+/**
+ * Disable a suspended worker.
+ */
+void lace_disable_worker(unsigned int worker);
+
+/**
+ * Enable a suspended worker.
+ */
+void lace_enable_worker(unsigned int worker);
+
+/**
+ * Retrieve the number of enabled/active workers.
+ */
+unsigned int lace_enabled_workers();
+
+/**
+ * Retrieve the number of Lace workers
+ */
+unsigned int lace_workers();
+
+/**
+ * Retrieve the default program stack size
+ */
+size_t lace_default_stacksize();
+
+/**
+ * Retrieve the current worker data.
+ */
+WorkerP *lace_get_worker();
+
+/**
+ * Retrieve the current head of the deque
+ */
+Task *lace_get_head(WorkerP *);
+
+/**
+ * Exit Lace.
+ * This function is automatically called when lace_startup is called with a callback.
+ * This function must be called to exit Lace when lace_startup is called without a callback.
+ */
+void lace_exit();
+
+/**
+ * Create a pointer to a Task's main function.
+ */
+#define TASK(f)           ( f##_CALL )
+
+/**
+ * Call a Task's implementation directly (adds the Lace variables to the call)
+ */
+#define WRAP(f, ...)      ( f((WorkerP *)__lace_worker, (Task *)__lace_dq_head, ##__VA_ARGS__) )
+
+/**
+ * Sync a task.
+ */
+#define SYNC(f)           ( __lace_dq_head--, WRAP(f##_SYNC) )
+
+/**
+ * Sync a task, but if the task is not stolen, then do not execute it.
+ */
+#define DROP()            ( __lace_dq_head--, WRAP(lace_drop) )
+
+/**
+ * Spawn a task.
+ */
+#define SPAWN(f, ...)     ( WRAP(f##_SPAWN, ##__VA_ARGS__), __lace_dq_head++ )
+
+/**
+ * Directly execute a task.
+ */
+#define CALL(f, ...)      ( WRAP(f##_CALL, ##__VA_ARGS__) )
+
+/**
+ * Signal all workers to interrupt their current tasks and instead perform (a personal copy of) the given task.
+ */
+#define TOGETHER(f, ...)  ( WRAP(f##_TOGETHER, ##__VA_ARGS__) )
+
+/**
+ * Signal all workers to interrupt their current tasks and help the current thread with the given task.
+ */
+#define NEWFRAME(f, ...)  ( WRAP(f##_NEWFRAME, ##__VA_ARGS__) )
+
+/**
+ * (Try to) steal a task from a random worker.
+ */
+#define STEAL_RANDOM()    ( CALL(lace_steal_random) )
+
+/**
+ * Get the current worker id.
+ */
+#define LACE_WORKER_ID    ( __lace_worker->worker )
+
+/**
+ * Get the core where the current worker is pinned.
+ */
+#define LACE_WORKER_PU    ( __lace_worker->pu )
+
+/**
+ * Initialize local variables __lace_worker and __lace_dq_head which are required for most Lace functionality.
+ */
+#define LACE_ME WorkerP * __attribute__((unused)) __lace_worker = lace_get_worker(); Task * __attribute__((unused)) __lace_dq_head = lace_get_head(__lace_worker);
 
-#ifndef LACE_COUNT_STEALS /* Count number of steals performed */
-#define LACE_COUNT_STEALS 0
+/**
+ * Check if current tasks must be interrupted, and if so, interrupt.
+ */
+void lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head);
+#define YIELD_NEWFRAME() { if (unlikely((*(Task* volatile *)&lace_newframe.t) != NULL)) lace_yield(__lace_worker, __lace_dq_head); }
+
+/**
+ * True if the given task is stolen, False otherwise.
+ */
+#define TASK_IS_STOLEN(t) ((size_t)t->thief > 1)
+
+/**
+ * True if the given task is completed, False otherwise.
+ */
+#define TASK_IS_COMPLETED(t) ((size_t)t->thief == 2)
+
+/**
+ * Retrieves a pointer to the result of the given task.
+ */
+#define TASK_RESULT(t) (&t->d[0])
+
+/**
+ * Compute a random number using a thread-local generator (so this scales).
+ */
+#define LACE_TRNG (__lace_worker->rng = 2862933555777941757ULL * __lace_worker->rng + 3037000493ULL)
+
+/* Some flags that influence Lace behavior */
+
+#ifndef LACE_DEBUG_PROGRAMSTACK /* Write to stderr when 95% of the program stack is reached */
+#define LACE_DEBUG_PROGRAMSTACK 0
 #endif
 
-#ifndef LACE_COUNT_SPLITS /* Count number of times the split point is moved */
-#define LACE_COUNT_SPLITS 0
+#ifndef LACE_LEAP_RANDOM /* Use random leaping when leapfrogging fails */
+#define LACE_LEAP_RANDOM 1
 #endif
 
 #ifndef LACE_COUNT_EVENTS
 #define LACE_COUNT_EVENTS (LACE_PIE_TIMES || LACE_COUNT_TASKS || LACE_COUNT_STEALS || LACE_COUNT_SPLITS)
 #endif
 
+/**
+ * The implementation of Lace follows.
+ */
+
 /* Typical cacheline size of system architectures */
 #ifndef LINE_SIZE
 #define LINE_SIZE 64
@@ -167,10 +408,6 @@ typedef enum {
     CTR_MAX
 } CTR_index;
 
-struct _WorkerP;
-struct _Worker;
-struct _Task;
-
 #define THIEF_EMPTY     ((struct _Worker*)0x0)
 #define THIEF_TASK      ((struct _Worker*)0x1)
 #define THIEF_COMPLETED ((struct _Worker*)0x2)
@@ -215,7 +452,7 @@ typedef struct _WorkerP {
     size_t stack_trigger;       // for stack overflow detection
     uint64_t rng;               // my random seed (for lace_trng)
     uint32_t seed;              // my random seed (for lace_steal_random)
-    int16_t worker;             // what is my worker id?
+    uint16_t worker;            // what is my worker id?
     uint8_t allstolen;          // my allstolen
     volatile int8_t enabled;    // if this worker is enabled
 
@@ -228,145 +465,10 @@ typedef struct _WorkerP {
     int16_t pu;                 // my pu (for HWLOC)
 } WorkerP;
 
-#define LACE_TYPEDEF_CB(t, f, ...) typedef t (*f)(WorkerP *, Task *, ##__VA_ARGS__);
-LACE_TYPEDEF_CB(void, lace_startup_cb, void*);
-
-/**
- * Using Lace.
- *
- * Optionally set the verbosity level with lace_set_verbosity.
- * Call lace_init to allocate all data structures.
- *
- * You can create threads yourself or let Lace create threads with lace_startup.
- *
- * When creating threads yourself:
- * - call lace_init_main for worker 0
- *   this method returns when all other workers have started
- * - call lace_run_worker for all other workers
- *   workers perform work-stealing until worker 0 calls lace_exit
- *
- * When letting Lace create threads with lace_startup
- * - calling with startup callback creates N threads and returns
- *   after the callback has returned, and all created threads are destroyed
- * - calling without a startup callback creates N-1 threads and returns
- *   control to the caller. When lace_exit is called, all created threads are terminated.
- */
-
-/**
- * Set verbosity level (0 = no startup messages, 1 = startup messages)
- * Default level: 0
- */
-void lace_set_verbosity(int level);
-
-/**
- * Initialize master structures for Lace with <n_workers> workers
- * and default deque size of <dqsize>.
- * Does not create new threads.
- * Tries to detect number of cpus, if n_workers equals 0.
- */
-void lace_init(int n_workers, size_t dqsize);
-
-/**
- * After lace_init, start all worker threads.
- * If cb,arg are set, suspend this thread, call cb(arg) in a new thread
- * and exit Lace upon return
- * Otherwise, the current thread is initialized as worker 0.
- */
-void lace_startup(size_t stacksize, lace_startup_cb, void* arg);
-
-/**
- * Initialize worker 0. This method returns when all other workers are initialized
- * (using lace_run_worker).
- *
- * When done, run lace_exit so all worker threads return from lace_run_worker.
- */
-void lace_init_main();
-
-/**
- * Initialize the current thread as the Lace thread of worker <worker>, and perform
- * work-stealing until lace_exit is called.
- *
- * For worker 0, call lace_init_main instead.
- */
-void lace_run_worker(int worker);
-
-/**
- * Steal a random task.
- */
-#define lace_steal_random() CALL(lace_steal_random)
-void lace_steal_random_CALL(WorkerP*, Task*);
-
-/**
- * Barrier (all workers must enter it before progressing)
- */
-void lace_barrier();
-
-/**
- * Suspend and resume all other workers.
- * May only be used when all other workers are idle.
- */
-void lace_suspend();
-void lace_resume();
-
-/**
- * When all tasks are suspended, workers can be temporarily disabled.
- * With set_workers, all workers 0..(N-1) are enabled and N..max are disabled.
- * You can never disable the current worker or reduce the number of workers below 1.
- * You cannot add workers.
- */
-void lace_disable_worker(int worker);
-void lace_enable_worker(int worker);
-void lace_set_workers(int workercount);
-int lace_enabled_workers();
-
-/**
- * Retrieve number of Lace workers
- */
-size_t lace_workers();
-
-/**
- * Retrieve default program stack size
- */
-size_t lace_default_stacksize();
-
-/**
- * Retrieve current worker.
- */
-WorkerP *lace_get_worker();
-
-/**
- * Retrieve the current head of the deque
- */
-Task *lace_get_head(WorkerP *);
-
-/**
- * Exit Lace. Automatically called when started with cb,arg.
- */
-void lace_exit();
-
 #define LACE_STOLEN   ((Worker*)0)
 #define LACE_BUSY     ((Worker*)1)
 #define LACE_NOWORK   ((Worker*)2)
 
-#define TASK(f)           ( f##_CALL )
-#define WRAP(f, ...)      ( f((WorkerP *)__lace_worker, (Task *)__lace_dq_head, ##__VA_ARGS__) )
-#define SYNC(f)           ( __lace_dq_head--, WRAP(f##_SYNC) )
-#define DROP()            ( __lace_dq_head--, WRAP(lace_drop) )
-#define SPAWN(f, ...)     ( WRAP(f##_SPAWN, ##__VA_ARGS__), __lace_dq_head++ )
-#define CALL(f, ...)      ( WRAP(f##_CALL, ##__VA_ARGS__) )
-#define TOGETHER(f, ...)  ( WRAP(f##_TOGETHER, ##__VA_ARGS__) )
-#define NEWFRAME(f, ...)  ( WRAP(f##_NEWFRAME, ##__VA_ARGS__) )
-#define STEAL_RANDOM()    ( CALL(lace_steal_random) )
-#define LACE_WORKER_ID    ( __lace_worker->worker )
-#define LACE_WORKER_PU    ( __lace_worker->pu )
-
-/* Use LACE_ME to initialize Lace variables, in case you want to call multiple Lace tasks */
-#define LACE_ME WorkerP * __attribute__((unused)) __lace_worker = lace_get_worker(); Task * __attribute__((unused)) __lace_dq_head = lace_get_head(__lace_worker);
-
-#define TASK_IS_STOLEN(t) ((size_t)t->thief > 1)
-#define TASK_IS_COMPLETED(t) ((size_t)t->thief == 2)
-#define TASK_RESULT(t) (&t->d[0])
-
 #if LACE_DEBUG_PROGRAMSTACK
 static inline void CHECKSTACK(WorkerP *w)
 {
@@ -402,14 +504,6 @@ extern lace_newframe_t lace_newframe;
 void lace_do_together(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task);
 void lace_do_newframe(WorkerP *__lace_worker, Task *__lace_dq_head, Task *task);
 
-void lace_yield(WorkerP *__lace_worker, Task *__lace_dq_head);
-#define YIELD_NEWFRAME() { if (unlikely((*(Task* volatile *)&lace_newframe.t) != NULL)) lace_yield(__lace_worker, __lace_dq_head); }
-
-/**
- * Compute a random number, thread-local
- */
-#define LACE_TRNG (__lace_worker->rng = 2862933555777941757ULL * __lace_worker->rng + 3037000493ULL)
-
 /**
  * Make all tasks of the current worker shared.
  */
diff --git a/resources/3rdparty/sylvan/src/storm_wrapper.cpp b/resources/3rdparty/sylvan/src/storm_wrapper.cpp
index ae9a91bb9..09fb51ccf 100644
--- a/resources/3rdparty/sylvan/src/storm_wrapper.cpp
+++ b/resources/3rdparty/sylvan/src/storm_wrapper.cpp
@@ -18,6 +18,8 @@
 
 #if defined(STORM_HAVE_GMP) && !defined(STORM_USE_CLN_EA)
 #define RATIONAL_NUMBER_THREAD_SAFE
+#else
+#warning "Rational numbers do not appear to be thread-safe. Use in sylvan will be protected by mutexes, performance might degrade."
 #endif
 
 // A mutex that is used to lock all operations accessing rational numbers as they are not necessarily thread-safe.
@@ -123,6 +125,8 @@ int storm_rational_number_is_zero(storm_rational_number_ptr a) {
     std::lock_guard<std::mutex> lock(rationalNumberMutex);
 #endif
     
+    std::cout << "got ptr for eq check " << a << std::endl;
+    
     return storm::utility::isZero(*(storm::RationalNumber const*)a) ? 1 : 0;
 }
 
diff --git a/resources/3rdparty/sylvan/src/sylvan.h b/resources/3rdparty/sylvan/src/sylvan.h
index aac4026ae..41ae81948 100755
--- a/resources/3rdparty/sylvan/src/sylvan.h
+++ b/resources/3rdparty/sylvan/src/sylvan.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,24 +17,49 @@
 
 /**
  * Sylvan: parallel MTBDD/ListDD package.
- *
- * This is a multi-core implementation of MTBDDs with complement edges.
- *
- * This package requires parallel the work-stealing framework Lace.
- * Lace must be initialized before initializing Sylvan
+ * Include this file.
  */
 
 #include <sylvan_config.h>
 
+#include <assert.h>
+#include <stddef.h>
 #include <stdint.h>
 #include <stdio.h> // for FILE
 #include <stdlib.h> // for realloc
+#include <unistd.h>
+#include <pthread.h>
+
+#if SYLVAN_STATS
+#ifdef __MACH__
+#include <mach/mach_time.h>
+#else
+#include <time.h>
+#endif
+#endif
+
+/**
+ * Sylvan header files outside the namespace
+ */
 
 #include <lace.h>
 #include <sylvan_tls.h>
 
+#ifdef __cplusplus
+//namespace sylvan {
+#endif
+
+/**
+ * Sylvan header files inside the namespace
+ */
+
 #include <sylvan_common.h>
 #include <sylvan_stats.h>
+#include <sylvan_mt.h>
 #include <sylvan_mtbdd.h>
 #include <sylvan_bdd.h>
 #include <sylvan_ldd.h>
+
+#ifdef __cplusplus
+//}
+#endif
diff --git a/resources/3rdparty/sylvan/src/sylvan_bdd.c b/resources/3rdparty/sylvan/src/sylvan_bdd.c
index bb1aa0aed..e874d7276 100755
--- a/resources/3rdparty/sylvan/src/sylvan_bdd.c
+++ b/resources/3rdparty/sylvan/src/sylvan_bdd.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,20 +15,12 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
+#include <sylvan_int.h>
 
-#include <assert.h>
 #include <inttypes.h>
 #include <math.h>
-#include <pthread.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
 #include <string.h>
 
-#include <sylvan.h>
-#include <sylvan_int.h>
-
 #include <avl.h>
 
 static int granularity = 1; // default
@@ -45,12 +37,6 @@ sylvan_get_granularity()
     return granularity;
 }
 
-BDD
-sylvan_ithvar(BDDVAR level)
-{
-    return sylvan_makenode(level, sylvan_false, sylvan_true);
-}
-
 /**
  * Implementation of unary, binary and if-then-else operators.
  */
@@ -1834,10 +1820,10 @@ TASK_IMPL_3(BDD, sylvan_union_cube, BDD, bdd, BDDSET, vars, uint8_t *, cube)
     } else if (v > n_level) {
         BDD high = node_high(bdd, n);
         BDD low = node_low(bdd, n);
-        SPAWN(sylvan_union_cube, high, vars, cube);
+        bdd_refs_spawn(SPAWN(sylvan_union_cube, high, vars, cube));
         BDD new_low = sylvan_union_cube(low, vars, cube);
         bdd_refs_push(new_low);
-        BDD new_high = SYNC(sylvan_union_cube);
+        BDD new_high = bdd_refs_sync(SYNC(sylvan_union_cube));
         bdd_refs_pop(1);
         if (new_low != low || new_high != high) {
             result = sylvan_makenode(n_level, new_low, new_high);
diff --git a/resources/3rdparty/sylvan/src/sylvan_bdd.h b/resources/3rdparty/sylvan/src/sylvan_bdd.h
index 4d32fc910..f7ae74091 100755
--- a/resources/3rdparty/sylvan/src/sylvan_bdd.h
+++ b/resources/3rdparty/sylvan/src/sylvan_bdd.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -23,10 +23,19 @@
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
-    
+
 /* For strictly non-MT BDDs */
-#define sylvan_isconst(bdd) (bdd == sylvan_true || bdd == sylvan_false)
-#define sylvan_isnode(bdd)  (bdd != sylvan_true && bdd != sylvan_false)
+static inline int
+sylvan_isconst(MTBDD bdd)
+{
+    return bdd == mtbdd_true || bdd == mtbdd_false ? 1 : 0;
+}
+
+static inline int
+sylvan_isnode(MTBDD bdd)
+{
+    return bdd != mtbdd_true && bdd != mtbdd_false ? 1 : 0;
+}
 
 /**
  * Granularity (BDD only) determines usage of operation cache.
@@ -43,15 +52,16 @@ extern "C" {
 void sylvan_set_granularity(int granularity);
 int sylvan_get_granularity(void);
 
-/* Create a BDD representing just <var> or the negation of <var> */
-BDD sylvan_ithvar(BDDVAR var);
-#define sylvan_nithvar(var) sylvan_not(sylvan_ithvar(var))
-
 /*
  * Unary, binary and if-then-else operations.
  * These operations are all implemented by NOT, AND and XOR.
  */
-#define sylvan_not(a) (((BDD)a)^sylvan_complement)
+static inline BDD
+sylvan_not(BDD a)
+{
+    return a ^ sylvan_complement;
+}
+
 TASK_DECL_4(BDD, sylvan_ite, BDD, BDD, BDD, BDDVAR);
 #define sylvan_ite(a,b,c) (CALL(sylvan_ite,a,b,c,0))
 TASK_DECL_3(BDD, sylvan_and, BDD, BDD, BDDVAR);
@@ -68,6 +78,13 @@ TASK_DECL_3(BDD, sylvan_xor, BDD, BDD, BDDVAR);
 #define sylvan_diff(a,b) sylvan_and(a,sylvan_not(b))
 #define sylvan_less(a,b) sylvan_and(sylvan_not(a),b)
 
+/* Create a BDD representing just <var> or the negation of <var> */
+static inline BDD
+sylvan_nithvar(uint32_t var)
+{
+    return sylvan_not(sylvan_ithvar(var));
+}
+
 /**
  * Existential and universal quantification.
  */
@@ -265,7 +282,11 @@ sylvan_fprint(FILE *f, BDD bdd)
     sylvan_serialize_totext(f);
 }
 
-#define sylvan_print(dd) sylvan_fprint(stdout, dd)
+static void __attribute__((unused))
+sylvan_print(BDD bdd)
+{
+    sylvan_fprint(stdout, bdd);
+}
 
 #include "sylvan_bdd_storm.h"
     
diff --git a/resources/3rdparty/sylvan/src/sylvan_bdd_storm.c b/resources/3rdparty/sylvan/src/sylvan_bdd_storm.c
index 89f2c31a2..7e982cc1c 100644
--- a/resources/3rdparty/sylvan/src/sylvan_bdd_storm.c
+++ b/resources/3rdparty/sylvan/src/sylvan_bdd_storm.c
@@ -12,7 +12,6 @@ TASK_IMPL_3(BDD, sylvan_existsRepresentative, BDD, a, BDD, variables, BDDVAR, pr
 	if (aRegular == sylvan_false) {
 		if (aIsNegated) {
 			if (sylvan_set_isempty(variables)) {
-				//printf("return in preprocessing...2\n");
 				return sylvan_true;
 			} else {
 				//printf("return in preprocessing...3\n");
@@ -35,18 +34,22 @@ TASK_IMPL_3(BDD, sylvan_existsRepresentative, BDD, a, BDD, variables, BDDVAR, pr
 			return a;
 		}
 	} else if (sylvan_set_isempty(variables)) {
-		//printf("return in preprocessing...4\n");
 		return a;
 	}
+    
+    BDD result;
+    if (cache_get3(CACHE_MTBDD_ABSTRACT_REPRESENTATIVE, a, variables, (size_t)2, &result)) {
+        sylvan_stats_count(MTBDD_ABSTRACT_CACHED);
+        return result;
+    }
+    
 	/* From now on, f and cube are non-constant. */
 	bddnode_t na = MTBDD_GETNODE(a);
     BDDVAR level = bddnode_getvariable(na);
 
     bddnode_t nv = MTBDD_GETNODE(variables);
     BDDVAR vv = bddnode_getvariable(nv);
-
-	//printf("a level %i and cube level %i\n", level, vv);
-
+    
 	/* Abstract a variable that does not appear in f. */
     if (level > vv) {
 		BDD _v = sylvan_set_next(variables);
@@ -64,7 +67,6 @@ TASK_IMPL_3(BDD, sylvan_existsRepresentative, BDD, a, BDD, variables, BDDVAR, pr
         }
         sylvan_deref(res);
 
-		//printf("return after abstr. var that does not appear in f...\n");
        	return res1;
     }
 
@@ -128,13 +130,14 @@ TASK_IMPL_3(BDD, sylvan_existsRepresentative, BDD, a, BDD, variables, BDDVAR, pr
             return sylvan_invalid;
         }
 
-        // cuddCacheInsert2(manager, Cudd_bddExistAbstractRepresentative, f, cube, res);
-		// TODO: CACHING HERE
+        /* Store in cache */
+        if (cache_put3(CACHE_MTBDD_ABSTRACT_REPRESENTATIVE, a, variables, (size_t)2, res)) {
+            sylvan_stats_count(MTBDD_ABSTRACT_CACHEDPUT);
+        }
 		
 		sylvan_deref(res1Inf);
 		sylvan_deref(res2Inf);
 		
-		//printf("return properly computed result...\n");
         return res;
     } else { /* if (level == vv) */
         BDD res1 = CALL(sylvan_existsRepresentative, aLow, variables, level);
@@ -162,7 +165,11 @@ TASK_IMPL_3(BDD, sylvan_existsRepresentative, BDD, a, BDD, variables, BDDVAR, pr
 		sylvan_deref(res1);
 		sylvan_deref(res2);
 		
-		//printf("return of last case...\n");
+        /* Store in cache */
+        if (cache_put3(CACHE_MTBDD_ABSTRACT_REPRESENTATIVE, a, variables, (size_t)2, res)) {
+            sylvan_stats_count(MTBDD_ABSTRACT_CACHEDPUT);
+        }
+        
         return res;
     }
 	
diff --git a/resources/3rdparty/sylvan/src/sylvan_bdd_storm.h b/resources/3rdparty/sylvan/src/sylvan_bdd_storm.h
index f28259a84..737ca0c65 100644
--- a/resources/3rdparty/sylvan/src/sylvan_bdd_storm.h
+++ b/resources/3rdparty/sylvan/src/sylvan_bdd_storm.h
@@ -1,6 +1,14 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+    
 #define bdd_isnegated(dd) ((dd & sylvan_complement) ? 1 : 0)
 #define bdd_regular(dd) (dd & ~sylvan_complement)
 #define bdd_isterminal(dd) (dd == sylvan_false || dd == sylvan_true)
 
 TASK_DECL_3(BDD, sylvan_existsRepresentative, BDD, BDD, BDDVAR);
 #define sylvan_existsRepresentative(a, vars) (CALL(sylvan_existsRepresentative, a, vars, 0))
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/resources/3rdparty/sylvan/src/sylvan_cache.c b/resources/3rdparty/sylvan/src/sylvan_cache.c
index 295679f59..8a39d751a 100755
--- a/resources/3rdparty/sylvan/src/sylvan_cache.c
+++ b/resources/3rdparty/sylvan/src/sylvan_cache.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,19 +15,20 @@
  * limitations under the License.
  */
 
+#include <sylvan_int.h>
+
 #include <errno.h>  // for errno
-#include <stdio.h>  // for fprintf
-#include <stdint.h> // for uint32_t etc
-#include <stdlib.h> // for exit
 #include <string.h> // for strerror
 #include <sys/mman.h> // for mmap
 
-#include <sylvan_cache.h>
-
 #ifndef MAP_ANONYMOUS
 #define MAP_ANONYMOUS MAP_ANON
 #endif
 
+#ifndef CACHE_MASK
+#define CACHE_MASK 1
+#endif
+
 #ifndef compiler_barrier
 #define compiler_barrier() { asm volatile("" ::: "memory"); }
 #endif
diff --git a/resources/3rdparty/sylvan/src/sylvan_cache.h b/resources/3rdparty/sylvan/src/sylvan_cache.h
index e40185454..88afb1af1 100755
--- a/resources/3rdparty/sylvan/src/sylvan_cache.h
+++ b/resources/3rdparty/sylvan/src/sylvan_cache.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,21 +15,15 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
+/* Do not include this file directly. Instead, include sylvan_int.h */
 
-#include <stdint.h> // for uint32_t etc
-
-#ifndef CACHE_H
-#define CACHE_H
+#ifndef SYLVAN_CACHE_H
+#define SYLVAN_CACHE_H
 
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
 
-#ifndef CACHE_MASK
-#define CACHE_MASK 1
-#endif
-
 /**
  * Operation cache
  *
diff --git a/resources/3rdparty/sylvan/src/sylvan_common.c b/resources/3rdparty/sylvan/src/sylvan_common.c
index 38d3a8876..71f748f8c 100755
--- a/resources/3rdparty/sylvan/src/sylvan_common.c
+++ b/resources/3rdparty/sylvan/src/sylvan_common.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/resources/3rdparty/sylvan/src/sylvan_common.h b/resources/3rdparty/sylvan/src/sylvan_common.h
index bc6841134..4e55d9153 100755
--- a/resources/3rdparty/sylvan/src/sylvan_common.h
+++ b/resources/3rdparty/sylvan/src/sylvan_common.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,6 +15,8 @@
  * limitations under the License.
  */
 
+/* Do not include this file directly. Instead, include sylvan.h */
+
 #ifndef SYLVAN_COMMON_H
 #define SYLVAN_COMMON_H
 
diff --git a/resources/3rdparty/sylvan/src/sylvan_gmp.c b/resources/3rdparty/sylvan/src/sylvan_gmp.c
index bda6c2eee..394033f6f 100755
--- a/resources/3rdparty/sylvan/src/sylvan_gmp.c
+++ b/resources/3rdparty/sylvan/src/sylvan_gmp.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,14 +16,11 @@
  */
 
 #include <sylvan_int.h>
+#include <sylvan_gmp.h>
 
-#include <assert.h>
 #include <math.h>
 #include <string.h>
 
-#include <sylvan_gmp.h>
-#include <gmp.h>
-
 static uint32_t gmp_type;
 
 /**
diff --git a/resources/3rdparty/sylvan/src/sylvan_gmp.h b/resources/3rdparty/sylvan/src/sylvan_gmp.h
index 7675999c0..8bf3b909a 100755
--- a/resources/3rdparty/sylvan/src/sylvan_gmp.h
+++ b/resources/3rdparty/sylvan/src/sylvan_gmp.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -19,13 +19,14 @@
  * This is an implementation of GMP mpq custom leaves of MTBDDs
  */
 
-#ifndef SYLVAN_GMP_H
-#define SYLVAN_GMP_H
-
 #include <sylvan.h>
 #include <gmp.h>
 
+#ifndef SYLVAN_GMP_H
+#define SYLVAN_GMP_H
+
 #ifdef __cplusplus
+namespace sylvan {
 extern "C" {
 #endif /* __cplusplus */
 
@@ -185,6 +186,7 @@ TASK_DECL_2(MTBDD, gmp_strict_threshold_d, MTBDD, double);
 
 #ifdef __cplusplus
 }
+}
 #endif /* __cplusplus */
 
 #endif
diff --git a/resources/3rdparty/sylvan/src/sylvan_int.h b/resources/3rdparty/sylvan/src/sylvan_int.h
index e8a96df81..0a54d34ce 100755
--- a/resources/3rdparty/sylvan/src/sylvan_int.h
+++ b/resources/3rdparty/sylvan/src/sylvan_int.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,14 +16,22 @@
  */
 
 /**
- * Internals of Sylvan
+ * Sylvan: parallel MTBDD/ListDD package.
+ * Include this file for access to internals.
  */
 
 #include <sylvan.h>
 
+#ifdef __cplusplus
+namespace sylvan {
+#endif
+
+/**
+ * Sylvan internal header files inside the namespace
+ */
+
 #include <sylvan_cache.h>
 #include <sylvan_table.h>
-#include <sylvan_stats.h>
 
 #ifndef SYLVAN_INT_H
 #define SYLVAN_INT_H
@@ -42,68 +50,68 @@ extern llmsset_t nodes;
  */
 
 // BDD operations
-#define CACHE_BDD_ITE                   (0LL<<40)
-#define CACHE_BDD_AND                   (1LL<<40)
-#define CACHE_BDD_XOR                   (2LL<<40)
-#define CACHE_BDD_EXISTS                (3LL<<40)
-#define CACHE_BDD_PROJECT               (4LL<<40)
-#define CACHE_BDD_AND_EXISTS            (5LL<<40)
-#define CACHE_BDD_AND_PROJECT           (6LL<<40)
-#define CACHE_BDD_RELNEXT               (7LL<<40)
-#define CACHE_BDD_RELPREV               (8LL<<40)
-#define CACHE_BDD_SATCOUNT              (9LL<<40)
-#define CACHE_BDD_COMPOSE               (10LL<<40)
-#define CACHE_BDD_RESTRICT              (11LL<<40)
-#define CACHE_BDD_CONSTRAIN             (12LL<<40)
-#define CACHE_BDD_CLOSURE               (13LL<<40)
-#define CACHE_BDD_ISBDD                 (14LL<<40)
-#define CACHE_BDD_SUPPORT               (15LL<<40)
-#define CACHE_BDD_PATHCOUNT             (16LL<<40)
+static const uint64_t CACHE_BDD_ITE                 = (0LL<<40);
+static const uint64_t CACHE_BDD_AND                 = (1LL<<40);
+static const uint64_t CACHE_BDD_XOR                 = (2LL<<40);
+static const uint64_t CACHE_BDD_EXISTS              = (3LL<<40);
+static const uint64_t CACHE_BDD_PROJECT             = (4LL<<40);
+static const uint64_t CACHE_BDD_AND_EXISTS          = (5LL<<40);
+static const uint64_t CACHE_BDD_AND_PROJECT         = (6LL<<40);
+static const uint64_t CACHE_BDD_RELNEXT             = (7LL<<40);
+static const uint64_t CACHE_BDD_RELPREV             = (8LL<<40);
+static const uint64_t CACHE_BDD_SATCOUNT            = (9LL<<40);
+static const uint64_t CACHE_BDD_COMPOSE             = (10LL<<40);
+static const uint64_t CACHE_BDD_RESTRICT            = (11LL<<40);
+static const uint64_t CACHE_BDD_CONSTRAIN           = (12LL<<40);
+static const uint64_t CACHE_BDD_CLOSURE             = (13LL<<40);
+static const uint64_t CACHE_BDD_ISBDD               = (14LL<<40);
+static const uint64_t CACHE_BDD_SUPPORT             = (15LL<<40);
+static const uint64_t CACHE_BDD_PATHCOUNT           = (16LL<<40);
 
 // MDD operations
-#define CACHE_MDD_RELPROD               (20LL<<40)
-#define CACHE_MDD_MINUS                 (21LL<<40)
-#define CACHE_MDD_UNION                 (22LL<<40)
-#define CACHE_MDD_INTERSECT             (23LL<<40)
-#define CACHE_MDD_PROJECT               (24LL<<40)
-#define CACHE_MDD_JOIN                  (25LL<<40)
-#define CACHE_MDD_MATCH                 (26LL<<40)
-#define CACHE_MDD_RELPREV               (27LL<<40)
-#define CACHE_MDD_SATCOUNT              (28LL<<40)
-#define CACHE_MDD_SATCOUNTL1            (29LL<<40)
-#define CACHE_MDD_SATCOUNTL2            (30LL<<40)
+static const uint64_t CACHE_MDD_RELPROD             = (20LL<<40);
+static const uint64_t CACHE_MDD_MINUS               = (21LL<<40);
+static const uint64_t CACHE_MDD_UNION               = (22LL<<40);
+static const uint64_t CACHE_MDD_INTERSECT           = (23LL<<40);
+static const uint64_t CACHE_MDD_PROJECT             = (24LL<<40);
+static const uint64_t CACHE_MDD_JOIN                = (25LL<<40);
+static const uint64_t CACHE_MDD_MATCH               = (26LL<<40);
+static const uint64_t CACHE_MDD_RELPREV             = (27LL<<40);
+static const uint64_t CACHE_MDD_SATCOUNT            = (28LL<<40);
+static const uint64_t CACHE_MDD_SATCOUNTL1          = (29LL<<40);
+static const uint64_t CACHE_MDD_SATCOUNTL2          = (30LL<<40);
 
 // MTBDD operations
-#define CACHE_MTBDD_APPLY               (40LL<<40)
-#define CACHE_MTBDD_UAPPLY              (41LL<<40)
-#define CACHE_MTBDD_ABSTRACT            (42LL<<40)
-#define CACHE_MTBDD_ITE                 (43LL<<40)
-#define CACHE_MTBDD_AND_ABSTRACT_PLUS   (44LL<<40)
-#define CACHE_MTBDD_AND_ABSTRACT_MAX    (45LL<<40)
-#define CACHE_MTBDD_SUPPORT             (46LL<<40)
-#define CACHE_MTBDD_COMPOSE             (47LL<<40)
-#define CACHE_MTBDD_EQUAL_NORM          (48LL<<40)
-#define CACHE_MTBDD_EQUAL_NORM_REL      (49LL<<40)
-#define CACHE_MTBDD_MINIMUM             (50LL<<40)
-#define CACHE_MTBDD_MAXIMUM             (51LL<<40)
-#define CACHE_MTBDD_LEQ                 (52LL<<40)
-#define CACHE_MTBDD_LESS                (53LL<<40)
-#define CACHE_MTBDD_GEQ                 (54LL<<40)
-#define CACHE_MTBDD_GREATER             (55LL<<40)
-#define CACHE_MTBDD_EVAL_COMPOSE        (56LL<<40)
-#define CACHE_MTBDD_NONZERO_COUNT       (57LL<<40)
-#define CACHE_MTBDD_AND_EXISTS_RN       (58LL<<40)
-#define CACHE_MTBDD_MINIMUM_RN          (59LL<<40)
-#define CACHE_MTBDD_MAXIMUM_RN          (60LL<<40)
-#define CACHE_MTBDD_EQUAL_NORM_RN       (61LL<<40)
-#define CACHE_MTBDD_EQUAL_NORM_REL_RN   (62LL<<40)
-#define CACHE_MTBDD_AND_EXISTS_RF       (63LL<<40)
-#define CACHE_MTBDD_MINIMUM_RF          (64LL<<40)
-#define CACHE_MTBDD_MAXIMUM_RF          (65LL<<40)
-#define CACHE_MTBDD_EQUAL_NORM_RF       (66LL<<40)
-#define CACHE_MTBDD_EQUAL_NORM_REL_RF   (67LL<<40)
-    
-#define CACHE_MTBDD_ABSTRACT_REPRESENTATIVE (68LL<<40)
+static const uint64_t CACHE_MTBDD_APPLY             = (40LL<<40);
+static const uint64_t CACHE_MTBDD_UAPPLY            = (41LL<<40);
+static const uint64_t CACHE_MTBDD_ABSTRACT          = (42LL<<40);
+static const uint64_t CACHE_MTBDD_ITE               = (43LL<<40);
+static const uint64_t CACHE_MTBDD_AND_ABSTRACT_PLUS = (44LL<<40);
+static const uint64_t CACHE_MTBDD_AND_ABSTRACT_MAX  = (45LL<<40);
+static const uint64_t CACHE_MTBDD_SUPPORT           = (46LL<<40);
+static const uint64_t CACHE_MTBDD_COMPOSE           = (47LL<<40);
+static const uint64_t CACHE_MTBDD_EQUAL_NORM        = (48LL<<40);
+static const uint64_t CACHE_MTBDD_EQUAL_NORM_REL    = (49LL<<40);
+static const uint64_t CACHE_MTBDD_MINIMUM           = (50LL<<40);
+static const uint64_t CACHE_MTBDD_MAXIMUM           = (51LL<<40);
+static const uint64_t CACHE_MTBDD_LEQ               = (52LL<<40);
+static const uint64_t CACHE_MTBDD_LESS              = (53LL<<40);
+static const uint64_t CACHE_MTBDD_GEQ               = (54LL<<40);
+static const uint64_t CACHE_MTBDD_GREATER           = (55LL<<40);
+static const uint64_t CACHE_MTBDD_EVAL_COMPOSE      = (56LL<<40);
+static const uint64_t CACHE_MTBDD_NONZERO_COUNT     = (57LL<<40);
+static const uint64_t CACHE_MTBDD_AND_EXISTS_RN     = (58LL<<40);
+static const uint64_t CACHE_MTBDD_MINIMUM_RN        = (59LL<<40);
+static const uint64_t CACHE_MTBDD_MAXIMUM_RN        = (60LL<<40);
+static const uint64_t CACHE_MTBDD_EQUAL_NORM_RN     = (61LL<<40);
+static const uint64_t CACHE_MTBDD_EQUAL_NORM_REL_RN = (62LL<<40);
+static const uint64_t CACHE_MTBDD_AND_EXISTS_RF     = (63LL<<40);
+static const uint64_t CACHE_MTBDD_MINIMUM_RF        = (64LL<<40);
+static const uint64_t CACHE_MTBDD_MAXIMUM_RF        = (65LL<<40);
+static const uint64_t CACHE_MTBDD_EQUAL_NORM_RF     = (66LL<<40);
+static const uint64_t CACHE_MTBDD_EQUAL_NORM_REL_RF = (67LL<<40);
+
+static const uint64_t CACHE_MTBDD_ABSTRACT_REPRESENTATIVE = (68LL<<40);
     
 #ifdef __cplusplus
 }
@@ -112,4 +120,8 @@ extern llmsset_t nodes;
 #include <sylvan_mtbdd_int.h>
 #include <sylvan_ldd_int.h>
 
+#ifdef __cplusplus
+} /* namespace */
+#endif
+
 #endif
diff --git a/resources/3rdparty/sylvan/src/sylvan_ldd.c b/resources/3rdparty/sylvan/src/sylvan_ldd.c
index 6449e2f7d..0835933de 100755
--- a/resources/3rdparty/sylvan/src/sylvan_ldd.c
+++ b/resources/3rdparty/sylvan/src/sylvan_ldd.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,20 +15,12 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
+#include <sylvan_int.h>
 
-#include <assert.h>
 #include <inttypes.h>
 #include <math.h>
-#include <pthread.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
 #include <string.h>
 
-#include <sylvan.h>
-#include <sylvan_int.h>
-
 #include <avl.h>
 #include <sylvan_refs.h>
 #include <sha2.h>
@@ -54,13 +46,15 @@ VOID_TASK_IMPL_1(lddmc_gc_mark_rec, MDD, mdd)
  * External references
  */
 
-refs_table_t mdd_refs;
+refs_table_t lddmc_refs;
+refs_table_t lddmc_protected;
+static int lddmc_protected_created = 0;
 
 MDD
 lddmc_ref(MDD a)
 {
     if (a == lddmc_true || a == lddmc_false) return a;
-    refs_up(&mdd_refs, a);
+    refs_up(&lddmc_refs, a);
     return a;
 }
 
@@ -68,13 +62,36 @@ void
 lddmc_deref(MDD a)
 {
     if (a == lddmc_true || a == lddmc_false) return;
-    refs_down(&mdd_refs, a);
+    refs_down(&lddmc_refs, a);
 }
 
 size_t
 lddmc_count_refs()
 {
-    return refs_count(&mdd_refs);
+    return refs_count(&lddmc_refs);
+}
+
+void
+lddmc_protect(MDD *a)
+{
+    if (!lddmc_protected_created) {
+        // In C++, lddmc_protect is sometimes called before Sylvan is initialized; just create the table on demand.
+        protect_create(&lddmc_protected, 4096);
+        lddmc_protected_created = 1;
+    }
+    protect_up(&lddmc_protected, (size_t)a);
+}
+
+void
+lddmc_unprotect(MDD *a)
+{
+    if (lddmc_protected.refs_table != NULL) protect_down(&lddmc_protected, (size_t)a);
+}
+
+size_t
+lddmc_count_protected(void)
+{
+    return protect_count(&lddmc_protected);
 }
 
 /* Called during garbage collection */
@@ -82,9 +99,24 @@ VOID_TASK_0(lddmc_gc_mark_external_refs)
 {
     // iterate through refs hash table, mark all found
     size_t count=0;
-    uint64_t *it = refs_iter(&mdd_refs, 0, mdd_refs.refs_size);
+    uint64_t *it = refs_iter(&lddmc_refs, 0, lddmc_refs.refs_size);
     while (it != NULL) {
-        SPAWN(lddmc_gc_mark_rec, refs_next(&mdd_refs, &it, mdd_refs.refs_size));
+        SPAWN(lddmc_gc_mark_rec, refs_next(&lddmc_refs, &it, lddmc_refs.refs_size));
+        count++;
+    }
+    while (count--) {
+        SYNC(lddmc_gc_mark_rec);
+    }
+}
+
+VOID_TASK_0(lddmc_gc_mark_protected)
+{
+    // iterate through the protected-pointers table, mark all found
+    size_t count=0;
+    uint64_t *it = protect_iter(&lddmc_protected, 0, lddmc_protected.refs_size);
+    while (it != NULL) {
+        MDD *to_mark = (MDD*)protect_next(&lddmc_protected, &it, lddmc_protected.refs_size);
+        SPAWN(lddmc_gc_mark_rec, *to_mark);
         count++;
     }
     while (count--) {
@@ -93,33 +125,77 @@ VOID_TASK_0(lddmc_gc_mark_external_refs)
 }
 
 /* Infrastructure for internal markings */
+typedef struct lddmc_refs_task
+{
+    Task *t;
+    void *f;
+} *lddmc_refs_task_t;
+
+typedef struct lddmc_refs_internal
+{
+    const MDD **pbegin, **pend, **pcur;
+    MDD *rbegin, *rend, *rcur;
+    lddmc_refs_task_t sbegin, send, scur;
+} *lddmc_refs_internal_t;
+
 DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
 
-VOID_TASK_0(lddmc_refs_mark_task)
+VOID_TASK_2(lddmc_refs_mark_p_par, const MDD**, begin, size_t, count)
 {
-    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
-    size_t i, j=0;
-    for (i=0; i<lddmc_refs_key->r_count; i++) {
-        if (j >= 40) {
-            while (j--) SYNC(lddmc_gc_mark_rec);
-            j=0;
+    if (count < 32) {
+        while (count) {
+            lddmc_gc_mark_rec(**(begin++));
+            count--;
         }
-        SPAWN(lddmc_gc_mark_rec, lddmc_refs_key->results[i]);
-        j++;
-    }
-    for (i=0; i<lddmc_refs_key->s_count; i++) {
-        Task *t = lddmc_refs_key->spawns[i];
-        if (!TASK_IS_STOLEN(t)) break;
-        if (TASK_IS_COMPLETED(t)) {
-            if (j >= 40) {
-                while (j--) SYNC(lddmc_gc_mark_rec);
-                j=0;
+    } else {
+        SPAWN(lddmc_refs_mark_p_par, begin, count / 2);
+        CALL(lddmc_refs_mark_p_par, begin + (count / 2), count - count / 2);
+        SYNC(lddmc_refs_mark_p_par);
+    }
+}
+
+VOID_TASK_2(lddmc_refs_mark_r_par, MDD*, begin, size_t, count)
+{
+    if (count < 32) {
+        while (count) {
+            lddmc_gc_mark_rec(*begin++);
+            count--;
+        }
+    } else {
+        SPAWN(lddmc_refs_mark_r_par, begin, count / 2);
+        CALL(lddmc_refs_mark_r_par, begin + (count / 2), count - count / 2);
+        SYNC(lddmc_refs_mark_r_par);
+    }
+}
+
+VOID_TASK_2(lddmc_refs_mark_s_par, lddmc_refs_task_t, begin, size_t, count)
+{
+    if (count < 32) {
+        while (count) {
+            Task *t = begin->t;
+            if (!TASK_IS_STOLEN(t)) return;
+            if (t->f == begin->f && TASK_IS_COMPLETED(t)) {
+                lddmc_gc_mark_rec(*(BDD*)TASK_RESULT(t));
             }
-            SPAWN(lddmc_gc_mark_rec, *(BDD*)TASK_RESULT(t));
-            j++;
+            begin += 1;
+            count -= 1;
         }
+    } else {
+        if (!TASK_IS_STOLEN(begin->t)) return;
+        SPAWN(lddmc_refs_mark_s_par, begin, count / 2);
+        CALL(lddmc_refs_mark_s_par, begin + (count / 2), count - count / 2);
+        SYNC(lddmc_refs_mark_s_par);
     }
-    while (j--) SYNC(lddmc_gc_mark_rec);
+}
+
+VOID_TASK_0(lddmc_refs_mark_task)
+{
+    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
+    SPAWN(lddmc_refs_mark_p_par, lddmc_refs_key->pbegin, lddmc_refs_key->pcur-lddmc_refs_key->pbegin);
+    SPAWN(lddmc_refs_mark_r_par, lddmc_refs_key->rbegin, lddmc_refs_key->rcur-lddmc_refs_key->rbegin);
+    CALL(lddmc_refs_mark_s_par, lddmc_refs_key->sbegin, lddmc_refs_key->scur-lddmc_refs_key->sbegin);
+    SYNC(lddmc_refs_mark_r_par);
+    SYNC(lddmc_refs_mark_p_par);
 }
 
 VOID_TASK_0(lddmc_refs_mark)
@@ -130,12 +206,12 @@ VOID_TASK_0(lddmc_refs_mark)
 VOID_TASK_0(lddmc_refs_init_task)
 {
     lddmc_refs_internal_t s = (lddmc_refs_internal_t)malloc(sizeof(struct lddmc_refs_internal));
-    s->r_size = 128;
-    s->r_count = 0;
-    s->s_size = 128;
-    s->s_count = 0;
-    s->results = (BDD*)malloc(sizeof(BDD) * 128);
-    s->spawns = (Task**)malloc(sizeof(Task*) * 128);
+    s->pcur = s->pbegin = (const MDD**)malloc(sizeof(MDD*) * 1024);
+    s->pend = s->pbegin + 1024;
+    s->rcur = s->rbegin = (MDD*)malloc(sizeof(MDD) * 1024);
+    s->rend = s->rbegin + 1024;
+    s->scur = s->sbegin = (lddmc_refs_task_t)malloc(sizeof(struct lddmc_refs_task) * 1024);
+    s->send = s->sbegin + 1024;
     SET_THREAD_LOCAL(lddmc_refs_key, s);
 }
 
@@ -146,6 +222,83 @@ VOID_TASK_0(lddmc_refs_init)
     sylvan_gc_add_mark(TASK(lddmc_refs_mark));
 }
 
+void
+lddmc_refs_ptrs_up(lddmc_refs_internal_t lddmc_refs_key)
+{
+    size_t size = lddmc_refs_key->pend - lddmc_refs_key->pbegin;
+    lddmc_refs_key->pbegin = (const MDD**)realloc(lddmc_refs_key->pbegin, sizeof(MDD*) * size * 2);
+    lddmc_refs_key->pcur = lddmc_refs_key->pbegin + size;
+    lddmc_refs_key->pend = lddmc_refs_key->pbegin + (size * 2);
+}
+
+MDD __attribute__((noinline))
+lddmc_refs_refs_up(lddmc_refs_internal_t lddmc_refs_key, MDD res)
+{
+    long size = lddmc_refs_key->rend - lddmc_refs_key->rbegin;
+    lddmc_refs_key->rbegin = (MDD*)realloc(lddmc_refs_key->rbegin, sizeof(MDD) * size * 2);
+    lddmc_refs_key->rcur = lddmc_refs_key->rbegin + size;
+    lddmc_refs_key->rend = lddmc_refs_key->rbegin + (size * 2);
+    return res;
+}
+
+void __attribute__((noinline))
+lddmc_refs_tasks_up(lddmc_refs_internal_t lddmc_refs_key)
+{
+    long size = lddmc_refs_key->send - lddmc_refs_key->sbegin;
+    lddmc_refs_key->sbegin = (lddmc_refs_task_t)realloc(lddmc_refs_key->sbegin, sizeof(struct lddmc_refs_task) * size * 2);
+    lddmc_refs_key->scur = lddmc_refs_key->sbegin + size;
+    lddmc_refs_key->send = lddmc_refs_key->sbegin + (size * 2);
+}
+
+void __attribute__((unused))
+lddmc_refs_pushptr(const MDD *ptr)
+{
+    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
+    *lddmc_refs_key->pcur++ = ptr;
+    if (lddmc_refs_key->pcur == lddmc_refs_key->pend) lddmc_refs_ptrs_up(lddmc_refs_key);
+}
+
+void __attribute__((unused))
+lddmc_refs_popptr(size_t amount)
+{
+    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
+    lddmc_refs_key->pcur -= amount;
+}
+
+MDD __attribute__((unused))
+lddmc_refs_push(MDD lddmc)
+{
+    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
+    *(lddmc_refs_key->rcur++) = lddmc;
+    if (lddmc_refs_key->rcur == lddmc_refs_key->rend) return lddmc_refs_refs_up(lddmc_refs_key, lddmc);
+    else return lddmc;
+}
+
+void __attribute__((unused))
+lddmc_refs_pop(long amount)
+{
+    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
+    lddmc_refs_key->rcur -= amount;
+}
+
+void __attribute__((unused))
+lddmc_refs_spawn(Task *t)
+{
+    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
+    lddmc_refs_key->scur->t = t;
+    lddmc_refs_key->scur->f = t->f;
+    lddmc_refs_key->scur += 1;
+    if (lddmc_refs_key->scur == lddmc_refs_key->send) lddmc_refs_tasks_up(lddmc_refs_key);
+}
+
+MDD __attribute__((unused))
+lddmc_refs_sync(MDD result)
+{
+    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
+    lddmc_refs_key->scur -= 1;
+    return result;
+}
+
 VOID_TASK_DECL_0(lddmc_gc_mark_serialize);
 
 /**
@@ -155,7 +308,7 @@ VOID_TASK_DECL_0(lddmc_gc_mark_serialize);
 static void
 lddmc_quit()
 {
-    refs_free(&mdd_refs);
+    refs_free(&lddmc_refs);
 }
 
 void
@@ -163,9 +316,14 @@ sylvan_init_ldd()
 {
     sylvan_register_quit(lddmc_quit);
     sylvan_gc_add_mark(TASK(lddmc_gc_mark_external_refs));
+    sylvan_gc_add_mark(TASK(lddmc_gc_mark_protected));
     sylvan_gc_add_mark(TASK(lddmc_gc_mark_serialize));
 
-    refs_create(&mdd_refs, 1024);
+    refs_create(&lddmc_refs, 1024);
+    if (!lddmc_protected_created) {
+        protect_create(&lddmc_protected, 4096);
+        lddmc_protected_created = 1;
+    }
 
     LACE_ME;
     CALL(lddmc_refs_init);
@@ -2000,7 +2158,7 @@ VOID_TASK_3(lddmc_match_sat, struct lddmc_match_sat_info *, info, lddmc_enum_cb,
     ri->mdd = mddnode_getright(na);
     di->mdd = mddnode_getdown(na);
     ri->match = b;
-    di->match = mddnode_getdown(nb);
+    di->match = p_val == 1 ? mddnode_getdown(nb) : b;
     ri->proj = proj;
     di->proj = mddnode_getdown(p_node);
     ri->count = info->count;
diff --git a/resources/3rdparty/sylvan/src/sylvan_ldd.h b/resources/3rdparty/sylvan/src/sylvan_ldd.h
index 394e773ce..8ee19caa0 100755
--- a/resources/3rdparty/sylvan/src/sylvan_ldd.h
+++ b/resources/3rdparty/sylvan/src/sylvan_ldd.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -24,11 +24,10 @@
 extern "C" {
 #endif /* __cplusplus */
 
-
 typedef uint64_t MDD;       // Note: low 40 bits only
 
-#define lddmc_false         ((MDD)0)
-#define lddmc_true          ((MDD)1)
+static const MDD lddmc_false = 0;
+static const MDD lddmc_true = 1;
 
 /* Initialize LDD functionality */
 void sylvan_init_ldd(void);
@@ -53,19 +52,49 @@ MDD lddmc_make_copynode(MDD ifeq, MDD ifneq);
 int lddmc_iscopy(MDD mdd);
 MDD lddmc_followcopy(MDD mdd);
 
-/* Add or remove external reference to MDD */
-MDD lddmc_ref(MDD a);
-void lddmc_deref(MDD a);
+/**
+ * Infrastructure for external references, based on hash tables.
+ * Two hash tables store external references: a pointers table and a values table.
+ * The pointers table stores pointers to MDD variables, manipulated with protect and unprotect.
+ * The values table stores MDDs, manipulated with ref and deref.
+ * We strongly recommend using the pointers table whenever possible.
+ */
 
-/* For use in custom mark functions */
-VOID_TASK_DECL_1(lddmc_gc_mark_rec, MDD)
-#define lddmc_gc_mark_rec(mdd) CALL(lddmc_gc_mark_rec, mdd)
+/**
+ * Store the pointer <ptr> in the pointers table.
+ */
+void lddmc_protect(MDD* ptr);
+
+/**
+ * Delete the pointer <ptr> from the pointers table.
+ */
+void lddmc_unprotect(MDD* ptr);
+
+/**
+ * Compute the number of pointers in the pointers table.
+ */
+size_t lddmc_count_protected(void);
+
+/**
+ * Store the MDD <dd> in the values table.
+ */
+MDD lddmc_ref(MDD dd);
+
+/**
+ * Delete the MDD <dd> from the values table.
+ */
+void lddmc_deref(MDD dd);
 
-/* Return the number of external references */
+/**
+ * Compute the number of values in the values table.
+ */
 size_t lddmc_count_refs(void);
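+
+/*
+ * A minimal sketch (illustrative only); <compute> stands for any operation producing an MDD:
+ *
+ *   MDD set = lddmc_false;
+ *   lddmc_protect(&set);              // pointers table: whatever <set> holds survives GC
+ *   set = <compute>;
+ *   ...
+ *   lddmc_unprotect(&set);
+ *
+ *   MDD kept = lddmc_ref(<compute>);  // values table: this particular MDD survives GC
+ *   ...
+ *   lddmc_deref(kept);
+ */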
 
-/* Mark MDD for "notify on dead" */
-#define lddmc_notify_ondead(mdd) llmsset_notify_ondead(nodes, mdd)
+/**
+ * Call lddmc_gc_mark_rec for every MDD you want to keep in your custom mark functions.
+ */
+VOID_TASK_DECL_1(lddmc_gc_mark_rec, MDD)
+#define lddmc_gc_mark_rec(mdd) CALL(lddmc_gc_mark_rec, mdd)
 
 /* Sanity check - returns depth of MDD including 'true' terminal or 0 for empty set */
 #ifndef NDEBUG
@@ -233,54 +262,49 @@ void lddmc_serialize_totext(FILE *out);
 void lddmc_serialize_tofile(FILE *out);
 void lddmc_serialize_fromfile(FILE *in);
 
-/* Infrastructure for internal markings */
-typedef struct lddmc_refs_internal
-{
-    size_t r_size, r_count;
-    size_t s_size, s_count;
-    MDD *results;
-    Task **spawns;
-} *lddmc_refs_internal_t;
-
-extern DECLARE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
-
-static inline MDD
-lddmc_refs_push(MDD ldd)
-{
-    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
-    if (lddmc_refs_key->r_count >= lddmc_refs_key->r_size) {
-        lddmc_refs_key->r_size *= 2;
-        lddmc_refs_key->results = (MDD*)realloc(lddmc_refs_key->results, sizeof(MDD) * lddmc_refs_key->r_size);
-    }
-    lddmc_refs_key->results[lddmc_refs_key->r_count++] = ldd;
-    return ldd;
-}
+/**
+ * Infrastructure for internal references.
+ * Every thread has its own reference stacks. There are three stacks: a pointers stack, a values stack, and a tasks stack.
+ * The pointers stack stores pointers to LDD variables, manipulated with pushptr and popptr.
+ * The values stack stores LDDs, manipulated with push and pop.
+ * The tasks stack stores Lace tasks (that return LDD), manipulated with spawn and sync.
+ *
+ * It is recommended to use the pointers stack for local variables and the tasks stack for tasks.
+ */
 
-static inline void
-lddmc_refs_pop(int amount)
-{
-    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
-    lddmc_refs_key->r_count-=amount;
-}
+/**
+ * Push an LDD variable to the pointer reference stack.
+ * During garbage collection the variable will be inspected and the contents will be marked.
+ */
+void lddmc_refs_pushptr(const MDD *ptr);
 
-static inline void
-lddmc_refs_spawn(Task *t)
-{
-    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
-    if (lddmc_refs_key->s_count >= lddmc_refs_key->s_size) {
-        lddmc_refs_key->s_size *= 2;
-        lddmc_refs_key->spawns = (Task**)realloc(lddmc_refs_key->spawns, sizeof(Task*) * lddmc_refs_key->s_size);
-    }
-    lddmc_refs_key->spawns[lddmc_refs_key->s_count++] = t;
-}
+/**
+ * Pop the last <amount> LDD variables from the pointer reference stack.
+ */
+void lddmc_refs_popptr(size_t amount);
 
-static inline MDD
-lddmc_refs_sync(MDD result)
-{
-    LOCALIZE_THREAD_LOCAL(lddmc_refs_key, lddmc_refs_internal_t);
-    lddmc_refs_key->s_count--;
-    return result;
-}
+/**
+ * Push an LDD to the values reference stack.
+ * During garbage collection the referenced LDD will be marked.
+ */
+MDD lddmc_refs_push(MDD dd);
+
+/**
+ * Pop the last <amount> LDD from the values reference stack.
+ */
+void lddmc_refs_pop(long amount);
+
+/**
+ * Push a Task that returns an LDD to the tasks reference stack.
+ * Usage: lddmc_refs_spawn(SPAWN(function, ...));
+ */
+void lddmc_refs_spawn(Task *t);
+
+/**
+ * Pop a Task from the task reference stack.
+ * Usage: MDD result = lddmc_refs_sync(SYNC(function));
+ */
+MDD lddmc_refs_sync(MDD dd);
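+
+/*
+ * A minimal sketch (illustrative only) of the recommended pattern inside a recursive
+ * LDD task; <op> stands for any Lace task that returns an MDD:
+ *
+ *   MDD low = lddmc_false, high = lddmc_false;
+ *   lddmc_refs_pushptr(&low);
+ *   lddmc_refs_pushptr(&high);
+ *   lddmc_refs_spawn(SPAWN(<op>, ...));   // keep the pending task's result safe
+ *   low  = CALL(<op>, ...);
+ *   high = lddmc_refs_sync(SYNC(<op>));
+ *   MDD result = <combine low and high>;
+ *   lddmc_refs_popptr(2);
+ *   return result;
+ */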
 
 #ifdef __cplusplus
 }
diff --git a/resources/3rdparty/sylvan/src/sylvan_ldd_int.h b/resources/3rdparty/sylvan/src/sylvan_ldd_int.h
index d748c7291..7cf65e3ba 100755
--- a/resources/3rdparty/sylvan/src/sylvan_ldd_int.h
+++ b/resources/3rdparty/sylvan/src/sylvan_ldd_int.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,24 +15,7 @@
  * limitations under the License.
  */
 
-/*#include <sylvan_config.h>
-
-#include <assert.h>
-#include <inttypes.h>
-#include <math.h>
-#include <pthread.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <sylvan.h>
-#include <sylvan_int.h>
-
-#include <avl.h>
-#include <sylvan_refs.h>
-#include <sha2.h>
-*/
+/* Do not include this file directly. Instead, include sylvan_int.h */
 
 /**
  * Internals for LDDs
@@ -51,7 +34,11 @@ typedef struct __attribute__((packed)) mddnode {
     uint64_t a, b;
 } * mddnode_t; // 16 bytes
 
-#define LDD_GETNODE(mdd) ((mddnode_t)llmsset_index_to_ptr(nodes, mdd))
+static inline mddnode_t
+LDD_GETNODE(MDD mdd)
+{
+    return ((mddnode_t)llmsset_index_to_ptr(nodes, mdd));
+}
 
 static inline uint32_t __attribute__((unused))
 mddnode_getvalue(mddnode_t n)
diff --git a/resources/3rdparty/sylvan/src/sylvan_mt.c b/resources/3rdparty/sylvan/src/sylvan_mt.c
index 05f1443a2..543a81e98 100755
--- a/resources/3rdparty/sylvan/src/sylvan_mt.c
+++ b/resources/3rdparty/sylvan/src/sylvan_mt.c
@@ -15,16 +15,11 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
+#include <sylvan_int.h> // for llmsset*, nodes, sylvan_register_quit
 
-#include <assert.h>
 #include <inttypes.h>
-#include <stdlib.h>
 #include <string.h>
 
-#include <sylvan_mt.h>
-#include <sylvan_int.h> // for llmsset*, nodes, sylvan_register_quit
-
 /**
  * Handling of custom leaves "registry"
  */
diff --git a/resources/3rdparty/sylvan/src/sylvan_mt.h b/resources/3rdparty/sylvan/src/sylvan_mt.h
index 516b450af..280bc620b 100755
--- a/resources/3rdparty/sylvan/src/sylvan_mt.h
+++ b/resources/3rdparty/sylvan/src/sylvan_mt.h
@@ -19,13 +19,11 @@
  * This file contains declarations for custom Multi-Terminal support.
  */
 
+/* Do not include this file directly. Instead, include sylvan.h */
+
 #ifndef SYLVAN_MT_H
 #define SYLVAN_MT_H
 
-#include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd.c b/resources/3rdparty/sylvan/src/sylvan_mtbdd.c
index a1a39fd28..1eef5fa19 100755
--- a/resources/3rdparty/sylvan/src/sylvan_mtbdd.c
+++ b/resources/3rdparty/sylvan/src/sylvan_mtbdd.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,26 +15,16 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
+#include <sylvan_int.h>
 
-#include <assert.h>
 #include <inttypes.h>
 #include <math.h>
-#include <pthread.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
 #include <string.h>
 
-#include <sylvan.h>
-#include <sylvan_int.h>
-
 #include <sylvan_refs.h>
 #include <sylvan_sl.h>
 #include <sha2.h>
 
-#define BDD                     MTBDD
-
 /* Primitives */
 int
 mtbdd_isleaf(MTBDD bdd)
@@ -194,33 +184,77 @@ VOID_TASK_0(mtbdd_gc_mark_protected)
 }
 
 /* Infrastructure for internal markings */
+typedef struct mtbdd_refs_task
+{
+    Task *t;
+    void *f;
+} *mtbdd_refs_task_t;
+
+typedef struct mtbdd_refs_internal
+{
+    const MTBDD **pbegin, **pend, **pcur;
+    MTBDD *rbegin, *rend, *rcur;
+    mtbdd_refs_task_t sbegin, send, scur;
+} *mtbdd_refs_internal_t;
+
 DECLARE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
 
-VOID_TASK_0(mtbdd_refs_mark_task)
+VOID_TASK_2(mtbdd_refs_mark_p_par, const MTBDD**, begin, size_t, count)
 {
-    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
-    size_t i, j=0;
-    for (i=0; i<mtbdd_refs_key->r_count; i++) {
-        if (j >= 40) {
-            while (j--) SYNC(mtbdd_gc_mark_rec);
-            j=0;
+    if (count < 32) {
+        while (count) {
+            mtbdd_gc_mark_rec(**(begin++));
+            count--;
         }
-        SPAWN(mtbdd_gc_mark_rec, mtbdd_refs_key->results[i]);
-        j++;
-    }
-    for (i=0; i<mtbdd_refs_key->s_count; i++) {
-        Task *t = mtbdd_refs_key->spawns[i];
-        if (!TASK_IS_STOLEN(t)) break;
-        if (TASK_IS_COMPLETED(t)) {
-            if (j >= 40) {
-                while (j--) SYNC(mtbdd_gc_mark_rec);
-                j=0;
+    } else {
+        SPAWN(mtbdd_refs_mark_p_par, begin, count / 2);
+        CALL(mtbdd_refs_mark_p_par, begin + (count / 2), count - count / 2);
+        SYNC(mtbdd_refs_mark_p_par);
+    }
+}
+
+VOID_TASK_2(mtbdd_refs_mark_r_par, MTBDD*, begin, size_t, count)
+{
+    if (count < 32) {
+        while (count) {
+            mtbdd_gc_mark_rec(*begin++);
+            count--;
+        }
+    } else {
+        SPAWN(mtbdd_refs_mark_r_par, begin, count / 2);
+        CALL(mtbdd_refs_mark_r_par, begin + (count / 2), count - count / 2);
+        SYNC(mtbdd_refs_mark_r_par);
+    }
+}
+
+VOID_TASK_2(mtbdd_refs_mark_s_par, mtbdd_refs_task_t, begin, size_t, count)
+{
+    if (count < 32) {
+        while (count > 0) {
+            Task *t = begin->t;
+            if (!TASK_IS_STOLEN(t)) return;
+            if (t->f == begin->f && TASK_IS_COMPLETED(t)) {
+                mtbdd_gc_mark_rec(*(MTBDD*)TASK_RESULT(t));
             }
-            SPAWN(mtbdd_gc_mark_rec, *(BDD*)TASK_RESULT(t));
-            j++;
+            begin += 1;
+            count -= 1;
         }
+    } else {
+        if (!TASK_IS_STOLEN(begin->t)) return;
+        SPAWN(mtbdd_refs_mark_s_par, begin, count / 2);
+        CALL(mtbdd_refs_mark_s_par, begin + (count / 2), count - count / 2);
+        SYNC(mtbdd_refs_mark_s_par);
     }
-    while (j--) SYNC(mtbdd_gc_mark_rec);
+}
+
+VOID_TASK_0(mtbdd_refs_mark_task)
+{
+    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+    SPAWN(mtbdd_refs_mark_p_par, mtbdd_refs_key->pbegin, mtbdd_refs_key->pcur-mtbdd_refs_key->pbegin);
+    SPAWN(mtbdd_refs_mark_r_par, mtbdd_refs_key->rbegin, mtbdd_refs_key->rcur-mtbdd_refs_key->rbegin);
+    CALL(mtbdd_refs_mark_s_par, mtbdd_refs_key->sbegin, mtbdd_refs_key->scur-mtbdd_refs_key->sbegin);
+    SYNC(mtbdd_refs_mark_r_par);
+    SYNC(mtbdd_refs_mark_p_par);
 }
 
 VOID_TASK_0(mtbdd_refs_mark)
@@ -231,12 +265,12 @@ VOID_TASK_0(mtbdd_refs_mark)
 VOID_TASK_0(mtbdd_refs_init_task)
 {
     mtbdd_refs_internal_t s = (mtbdd_refs_internal_t)malloc(sizeof(struct mtbdd_refs_internal));
-    s->r_size = 128;
-    s->r_count = 0;
-    s->s_size = 128;
-    s->s_count = 0;
-    s->results = (BDD*)malloc(sizeof(BDD) * 128);
-    s->spawns = (Task**)malloc(sizeof(Task*) * 128);
+    s->pcur = s->pbegin = (const MTBDD**)malloc(sizeof(MTBDD*) * 1024);
+    s->pend = s->pbegin + 1024;
+    s->rcur = s->rbegin = (MTBDD*)malloc(sizeof(MTBDD) * 1024);
+    s->rend = s->rbegin + 1024;
+    s->scur = s->sbegin = (mtbdd_refs_task_t)malloc(sizeof(struct mtbdd_refs_task) * 1024);
+    s->send = s->sbegin + 1024;
     SET_THREAD_LOCAL(mtbdd_refs_key, s);
 }
 
@@ -247,6 +281,84 @@ VOID_TASK_0(mtbdd_refs_init)
     sylvan_gc_add_mark(TASK(mtbdd_refs_mark));
 }
 
+void
+mtbdd_refs_ptrs_up(mtbdd_refs_internal_t mtbdd_refs_key)
+{
+    size_t cur = mtbdd_refs_key->pcur - mtbdd_refs_key->pbegin;
+    size_t size = mtbdd_refs_key->pend - mtbdd_refs_key->pbegin;
+    mtbdd_refs_key->pbegin = (const MTBDD**)realloc(mtbdd_refs_key->pbegin, sizeof(MTBDD*) * size * 2);
+    mtbdd_refs_key->pcur = mtbdd_refs_key->pbegin + cur;
+    mtbdd_refs_key->pend = mtbdd_refs_key->pbegin + (size * 2);
+}
+
+MTBDD __attribute__((noinline))
+mtbdd_refs_refs_up(mtbdd_refs_internal_t mtbdd_refs_key, MTBDD res)
+{
+    long size = mtbdd_refs_key->rend - mtbdd_refs_key->rbegin;
+    mtbdd_refs_key->rbegin = (MTBDD*)realloc(mtbdd_refs_key->rbegin, sizeof(MTBDD) * size * 2);
+    mtbdd_refs_key->rcur = mtbdd_refs_key->rbegin + size;
+    mtbdd_refs_key->rend = mtbdd_refs_key->rbegin + (size * 2);
+    return res;
+}
+
+void __attribute__((noinline))
+mtbdd_refs_tasks_up(mtbdd_refs_internal_t mtbdd_refs_key)
+{
+    long size = mtbdd_refs_key->send - mtbdd_refs_key->sbegin;
+    mtbdd_refs_key->sbegin = (mtbdd_refs_task_t)realloc(mtbdd_refs_key->sbegin, sizeof(struct mtbdd_refs_task) * size * 2);
+    mtbdd_refs_key->scur = mtbdd_refs_key->sbegin + size;
+    mtbdd_refs_key->send = mtbdd_refs_key->sbegin + (size * 2);
+}
+
+void __attribute__((unused))
+mtbdd_refs_pushptr(const MTBDD *ptr)
+{
+    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+    *mtbdd_refs_key->pcur++ = ptr;
+    if (mtbdd_refs_key->pcur == mtbdd_refs_key->pend) mtbdd_refs_ptrs_up(mtbdd_refs_key);
+}
+
+void __attribute__((unused))
+mtbdd_refs_popptr(size_t amount)
+{
+    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+    mtbdd_refs_key->pcur -= amount;
+}
+
+MTBDD __attribute__((unused))
+mtbdd_refs_push(MTBDD mtbdd)
+{
+    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+    *(mtbdd_refs_key->rcur++) = mtbdd;
+    if (mtbdd_refs_key->rcur == mtbdd_refs_key->rend) return mtbdd_refs_refs_up(mtbdd_refs_key, mtbdd);
+    else return mtbdd;
+}
+
+void __attribute__((unused))
+mtbdd_refs_pop(long amount)
+{
+    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+    mtbdd_refs_key->rcur -= amount;
+}
+
+void
+mtbdd_refs_spawn(Task *t)
+{
+    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+    mtbdd_refs_key->scur->t = t;
+    mtbdd_refs_key->scur->f = t->f;
+    mtbdd_refs_key->scur += 1;
+    if (mtbdd_refs_key->scur == mtbdd_refs_key->send) mtbdd_refs_tasks_up(mtbdd_refs_key);
+}
+
+MTBDD
+mtbdd_refs_sync(MTBDD result)
+{
+    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+    mtbdd_refs_key->scur -= 1;
+    return result;
+}
+
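+/**
+ * Illustrative use of the pointer reference stack defined above (a sketch,
+ * not part of this patch): a local MTBDD variable can be protected once and
+ * then reassigned freely, since garbage collection marks whatever the pointer
+ * currently refers to:
+ *
+ *   MTBDD result = mtbdd_false;
+ *   mtbdd_refs_pushptr(&result);
+ *   // ... result may be reassigned several times here ...
+ *   mtbdd_refs_popptr(1);
+ */
+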
 /**
  * Initialize and quit functions
  */
@@ -394,6 +506,12 @@ mtbdd_makemapnode(uint32_t var, MTBDD low, MTBDD high)
     return index;
 }
 
+MTBDD
+mtbdd_ithvar(uint32_t var)
+{
+    return mtbdd_makenode(var, mtbdd_false, mtbdd_true);
+}
+
 /* Operations */
 
 /**
@@ -449,31 +567,6 @@ mtbdd_fraction(int64_t nom, uint64_t denom)
     return mtbdd_makeleaf(2, (nom<<32)|denom);
 }
 
-/**
- * Create the cube of variables in arr.
- */
-MTBDD
-mtbdd_fromarray(uint32_t* arr, size_t length)
-{
-    if (length == 0) return mtbdd_true;
-    else if (length == 1) return mtbdd_makenode(*arr, mtbdd_false, mtbdd_true);
-    else return mtbdd_makenode(*arr, mtbdd_false, mtbdd_fromarray(arr+1, length-1));
-}
-
-/**
- * Given a cube of variables, write each variable to arr.
- * WARNING: arr must be sufficiently long!
- */
-void
-mtbdd_toarray(MTBDD set, uint32_t *arr)
-{
-    while (set != mtbdd_true) {
-        mtbddnode_t n = MTBDD_GETNODE(set);
-        *arr++ = mtbddnode_getvariable(n);
-        set = node_gethigh(set, n);
-    }
-}
-
 /**
  * Create a MTBDD cube representing the conjunction of variables in their positive or negative
  * form depending on whether the cube[idx] equals 0 (negative), 1 (positive) or 2 (any).
@@ -538,10 +631,10 @@ TASK_IMPL_4(MTBDD, mtbdd_union_cube, MTBDD, mtbdd, MTBDD, vars, uint8_t*, cube,
     if (va < v) {
         MTBDD low = node_getlow(mtbdd, na);
         MTBDD high = node_gethigh(mtbdd, na);
-        SPAWN(mtbdd_union_cube, high, vars, cube, terminal);
+        mtbdd_refs_spawn(SPAWN(mtbdd_union_cube, high, vars, cube, terminal));
         BDD new_low = mtbdd_union_cube(low, vars, cube, terminal);
         mtbdd_refs_push(new_low);
-        BDD new_high = SYNC(mtbdd_union_cube);
+        BDD new_high = mtbdd_refs_sync(SYNC(mtbdd_union_cube));
         mtbdd_refs_pop(1);
         if (new_low != low || new_high != high) return mtbdd_makenode(va, new_low, new_high);
         else return mtbdd;
@@ -563,10 +656,10 @@ TASK_IMPL_4(MTBDD, mtbdd_union_cube, MTBDD, mtbdd, MTBDD, vars, uint8_t*, cube,
         }
         case 2:
         {
-            SPAWN(mtbdd_union_cube, high, node_gethigh(vars, nv), cube+1, terminal);
+            mtbdd_refs_spawn(SPAWN(mtbdd_union_cube, high, node_gethigh(vars, nv), cube+1, terminal));
             MTBDD new_low = mtbdd_union_cube(low, node_gethigh(vars, nv), cube+1, terminal);
             mtbdd_refs_push(new_low);
-            MTBDD new_high = SYNC(mtbdd_union_cube);
+            MTBDD new_high = mtbdd_refs_sync(SYNC(mtbdd_union_cube));
             mtbdd_refs_pop(1);
             if (new_low != low || new_high != high) return mtbdd_makenode(v, new_low, new_high);
             return mtbdd;
@@ -592,10 +685,10 @@ TASK_IMPL_4(MTBDD, mtbdd_union_cube, MTBDD, mtbdd, MTBDD, vars, uint8_t*, cube,
         }
         case 2:
         {
-            SPAWN(mtbdd_union_cube, mtbdd, node_gethigh(vars, nv), cube+1, terminal);
+            mtbdd_refs_spawn(SPAWN(mtbdd_union_cube, mtbdd, node_gethigh(vars, nv), cube+1, terminal));
             MTBDD new_low = mtbdd_union_cube(mtbdd, node_gethigh(vars, nv), cube+1, terminal);
             mtbdd_refs_push(new_low);
-            MTBDD new_high = SYNC(mtbdd_union_cube);
+            MTBDD new_high = mtbdd_refs_sync(SYNC(mtbdd_union_cube));
             mtbdd_refs_pop(1);
             return mtbdd_makenode(v, new_low, new_high);
         }
@@ -1272,6 +1365,36 @@ TASK_IMPL_2(MTBDD, mtbdd_op_max, MTBDD*, pa, MTBDD*, pb)
     return mtbdd_invalid;
 }
 
+TASK_IMPL_2(MTBDD, mtbdd_op_cmpl, MTBDD, a, size_t, k)
+{
+    // if a is false, then it is a partial function. Keep partial!
+    if (a == mtbdd_false) return mtbdd_false;
+
+    // a != constant
+    mtbddnode_t na = MTBDD_GETNODE(a);
+
+    if (mtbddnode_isleaf(na)) {
+        if (mtbddnode_gettype(na) == 0) {
+            int64_t v = mtbdd_getint64(a);
+            if (v == 0) return mtbdd_int64(1);
+            else return mtbdd_int64(0);
+        } else if (mtbddnode_gettype(na) == 1) {
+            double d = mtbdd_getdouble(a);
+            if (d == 0.0) return mtbdd_double(1.0);
+            else return mtbdd_double(0.0);
+        } else if (mtbddnode_gettype(na) == 2) {
+            uint64_t v = mtbddnode_getvalue(na);
+            if (v == 1) return mtbdd_fraction(1, 1);
+            else return mtbdd_fraction(0, 1);
+        } else {
+            assert(0); // failure
+        }
+    }
+
+    return mtbdd_invalid;
+    (void)k; // unused variable
+}
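+
+/**
+ * Note (illustrative, not part of this patch): this operator is normally
+ * applied through the mtbdd_cmpl(dd) wrapper declared in sylvan_mtbdd.h,
+ * i.e. mtbdd_uapply(dd, TASK(mtbdd_op_cmpl), 0), which yields an MTBDD that
+ * is 1 where <dd> is 0 and 0 where <dd> is non-zero (False leaves stay False).
+ */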
+
 TASK_IMPL_2(MTBDD, mtbdd_op_negate, MTBDD, a, size_t, k)
 {
     // if a is false, then it is a partial function. Keep partial!
@@ -2347,7 +2470,17 @@ TASK_IMPL_2(double, mtbdd_satcount, MTBDD, dd, size_t, nvars)
 {
     /* Trivial cases */
     if (dd == mtbdd_false) return 0.0;
-    if (mtbdd_isleaf(dd)) return powl(2.0L, nvars);
+
+    if (mtbdd_isleaf(dd)) {
+        // test if 0
+        mtbddnode_t dd_node = MTBDD_GETNODE(dd);
+        if (dd != mtbdd_true) {
+            if (mtbddnode_gettype(dd_node) == 0 && mtbdd_getint64(dd) == 0) return 0.0;
+            else if (mtbddnode_gettype(dd_node) == 1 && mtbdd_getdouble(dd) == 0.0) return 0.0;
+            else if (mtbddnode_gettype(dd_node) == 2 && mtbdd_getvalue(dd) == 1) return 0.0;
+        }
+        return powl(2.0L, nvars);
+    }
 
     /* Perhaps execute garbage collection */
     sylvan_gc_test();
@@ -2726,8 +2859,6 @@ mtbdd_leafcount_more(const MTBDD *mtbdds, size_t count)
 static size_t
 mtbdd_nodecount_mark(MTBDD mtbdd)
 {
-    if (mtbdd == mtbdd_true) return 0; // do not count true/false leaf
-    if (mtbdd == mtbdd_false) return 0; // do not count true/false leaf
     mtbddnode_t n = MTBDD_GETNODE(mtbdd);
     if (mtbddnode_getmark(n)) return 0;
     mtbddnode_setmark(n, 1);
@@ -3258,11 +3389,107 @@ TASK_IMPL_3(int, mtbdd_reader_frombinary, FILE*, in, MTBDD*, dds, int, count)
 }
 
 /**
- * Implementation of convenience functions for handling variable sets, i.e., cubes.
+ * Implementation of variable sets, i.e., cubes of (positive) variables.
  */
 
+/**
+ * Create a set of variables, represented as the conjunction of (positive) variables.
+ */
+MTBDD
+mtbdd_set_from_array(uint32_t* arr, size_t length)
+{
+    if (length == 0) return mtbdd_true;
+    else if (length == 1) return mtbdd_makenode(*arr, mtbdd_false, mtbdd_true);
+    else return mtbdd_set_add(mtbdd_set_from_array(arr+1, length-1), *arr);
+}
+
+/**
+ * Write all variables in a variable set to the given array.
+ * The array must be sufficiently large.
+ */
+void
+mtbdd_set_to_array(MTBDD set, uint32_t *arr)
+{
+    while (set != mtbdd_true) {
+        mtbddnode_t n = MTBDD_GETNODE(set);
+        *arr++ = mtbddnode_getvariable(n);
+        set = node_gethigh(set, n);
+    }
+}
+
+/**
+ * Add the variable <var> to <set>.
+ */
+MTBDD
+mtbdd_set_add(MTBDD set, uint32_t var)
+{
+    if (set == mtbdd_true) return mtbdd_makenode(var, mtbdd_false, mtbdd_true);
+
+    mtbddnode_t set_node = MTBDD_GETNODE(set);
+    uint32_t set_var = mtbddnode_getvariable(set_node);
+    if (var < set_var) return mtbdd_makenode(var, mtbdd_false, set);
+    else if (set_var == var) return set;
+    else {
+        MTBDD sub = mtbddnode_followhigh(set, set_node);
+        MTBDD res = mtbdd_set_add(sub, var);
+        res = sub == res ? set : mtbdd_makenode(set_var, mtbdd_false, res);
+        return res;
+    }
+}
+
+/**
+ * Remove the variable <var> from <set>.
+ */
+MTBDD
+mtbdd_set_remove(MTBDD set, uint32_t var)
+{
+    if (set == mtbdd_true) return mtbdd_true;
+
+    mtbddnode_t set_node = MTBDD_GETNODE(set);
+    uint32_t set_var = mtbddnode_getvariable(set_node);
+    if (var < set_var) return set;
+    else if (set_var == var) return mtbddnode_followhigh(set, set_node);
+    else {
+        MTBDD sub = mtbddnode_followhigh(set, set_node);
+        MTBDD res = mtbdd_set_remove(sub, var);
+        res = sub == res ? set : mtbdd_makenode(set_var, mtbdd_false, res);
+        return res;
+    }
+}
+
+/**
+ * Remove variables in <set2> from <set1>.
+ */
+TASK_IMPL_2(MTBDD, mtbdd_set_minus, MTBDD, set1, MTBDD, set2)
+{
+    if (set1 == mtbdd_true) return mtbdd_true;
+    if (set2 == mtbdd_true) return set1;
+    if (set1 == set2) return mtbdd_true;
+
+    mtbddnode_t set1_node = MTBDD_GETNODE(set1);
+    mtbddnode_t set2_node = MTBDD_GETNODE(set2);
+    uint32_t set1_var = mtbddnode_getvariable(set1_node);
+    uint32_t set2_var = mtbddnode_getvariable(set2_node);
+
+    if (set1_var == set2_var) {
+        return mtbdd_set_minus(mtbddnode_followhigh(set1, set1_node), mtbddnode_followhigh(set2, set2_node));
+    }
+
+    if (set1_var > set2_var) {
+        return mtbdd_set_minus(set1, mtbddnode_followhigh(set2, set2_node));
+    }
+
+    /* set1_var < set2_var */
+    MTBDD sub = mtbddnode_followhigh(set1, set1_node);
+    MTBDD res = mtbdd_set_minus(sub, set2);
+    return res == sub ? set1 : mtbdd_makenode(set1_var, mtbdd_false, res);
+}
+
+/**
+ * Return 1 if <set> contains <var>, 0 otherwise.
+ */
 int
-mtbdd_set_in(MTBDD set, uint32_t var)
+mtbdd_set_contains(MTBDD set, uint32_t var)
 {
     while (set != mtbdd_true) {
         mtbddnode_t n = MTBDD_GETNODE(set);
@@ -3274,6 +3501,9 @@ mtbdd_set_in(MTBDD set, uint32_t var)
     return 0;
 }
 
+/**
+ * Compute the number of variables in a given set of variables.
+ */
 size_t
 mtbdd_set_count(MTBDD set)
 {
@@ -3285,6 +3515,10 @@ mtbdd_set_count(MTBDD set)
     return result;
 }
 
+/**
+ * Sanity check if the given MTBDD is a conjunction of positive variables,
+ * and if all nodes are marked in the nodes table (detects violations after garbage collection).
+ */
 void
 mtbdd_test_isset(MTBDD set)
 {
@@ -3336,7 +3570,9 @@ mtbdd_map_count(MTBDDMAP map)
 MTBDDMAP
 mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value)
 {
-    if (mtbdd_map_isempty(map)) return mtbdd_makemapnode(key, mtbdd_map_empty(), value);
+    if (mtbdd_map_isempty(map)) {
+        return mtbdd_makemapnode(key, mtbdd_map_empty(), value);
+    }
 
     mtbddnode_t n = MTBDD_GETNODE(map);
     uint32_t k = mtbddnode_getvariable(n);
@@ -3357,7 +3593,7 @@ mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value)
  * Add all values from map2 to map1, overwrites if key already in map1.
  */
 MTBDDMAP
-mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2)
+mtbdd_map_update(MTBDDMAP map1, MTBDDMAP map2)
 {
     if (mtbdd_map_isempty(map1)) return map2;
     if (mtbdd_map_isempty(map2)) return map1;
@@ -3369,13 +3605,13 @@ mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2)
 
     MTBDDMAP result;
     if (k1 < k2) {
-        MTBDDMAP low = mtbdd_map_addall(node_getlow(map1, n1), map2);
+        MTBDDMAP low = mtbdd_map_update(node_getlow(map1, n1), map2);
         result = mtbdd_makemapnode(k1, low, node_gethigh(map1, n1));
     } else if (k1 > k2) {
-        MTBDDMAP low = mtbdd_map_addall(map1, node_getlow(map2, n2));
+        MTBDDMAP low = mtbdd_map_update(map1, node_getlow(map2, n2));
         result = mtbdd_makemapnode(k2, low, node_gethigh(map2, n2));
     } else {
-        MTBDDMAP low = mtbdd_map_addall(node_getlow(map1, n1), node_getlow(map2, n2));
+        MTBDDMAP low = mtbdd_map_update(node_getlow(map1, n1), node_getlow(map2, n2));
         result = mtbdd_makemapnode(k2, low, node_gethigh(map2, n2));
     }
 
diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd.h b/resources/3rdparty/sylvan/src/sylvan_mtbdd.h
index 6614fb870..41cb2528e 100755
--- a/resources/3rdparty/sylvan/src/sylvan_mtbdd.h
+++ b/resources/3rdparty/sylvan/src/sylvan_mtbdd.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -39,8 +39,6 @@
 #ifndef SYLVAN_MTBDD_H
 #define SYLVAN_MTBDD_H
 
-#include <sylvan_mt.h>
-
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
@@ -48,29 +46,38 @@ extern "C" {
 /**
  * An MTBDD is a 64-bit value. The low 40 bits are an index into the unique table.
  * The highest 1 bit is the complement edge, indicating negation.
- * For Boolean MTBDDs, this means "not X", for Integer and Real MTBDDs, this means "-X".
+ *
+ * Currently, negation using complement edges is only implemented for Boolean MTBDDs.
+ * For Integer/Real MTBDDs, negation is not well-defined, as "-0" = "0".
+ *
+ * An MTBDD node has 24 bits for the variable.
+ * A set of MTBDD variables is represented by the MTBDD of the conjunction of these variables.
+ * An MTBDDMAP uses special "MAP" nodes in the MTBDD nodes table.
  */
 typedef uint64_t MTBDD;
-typedef uint64_t BDD;
 typedef MTBDD MTBDDMAP;
 
 /**
- * mtbdd_true is only used in Boolean MTBDDs. mtbdd_false has multiple roles (see above).
+ * mtbdd_true and mtbdd_false are the Boolean leaves representing True and False.
+ * False is also used in Integer/Real/Fraction MTBDDs for partially defined functions.
  */
-#define mtbdd_complement    ((MTBDD)0x8000000000000000LL)
-#define mtbdd_false         ((MTBDD)0)
-#define mtbdd_true          (mtbdd_false|mtbdd_complement)
-#define mtbdd_invalid       ((MTBDD)0xffffffffffffffffLL)
+static const MTBDD mtbdd_complement = 0x8000000000000000LL;
+static const MTBDD mtbdd_false      = 0;
+static const MTBDD mtbdd_true       = 0x8000000000000000LL;
+static const MTBDD mtbdd_invalid    = 0xffffffffffffffffLL;
 
-/* Compatibility */
-// #define BDD                     MTBDD
-#define BDDMAP                  MTBDDMAP
-#define BDDSET                  MTBDD
-#define BDDVAR                  uint32_t
-#define sylvan_complement       mtbdd_complement
-#define sylvan_false            mtbdd_false
-#define sylvan_true             mtbdd_true
-#define sylvan_invalid          mtbdd_invalid
+/**
+ * Definitions for backward compatibility...
+ * We now consider BDDs to be a special case of MTBDDs.
+ */
+typedef MTBDD BDD;
+typedef MTBDDMAP BDDMAP;
+typedef MTBDD BDDSET;
+typedef uint32_t BDDVAR;
+static const MTBDD sylvan_complement = 0x8000000000000000LL;
+static const MTBDD sylvan_false      = 0;
+static const MTBDD sylvan_true       = 0x8000000000000000LL;
+static const MTBDD sylvan_invalid    = 0xffffffffffffffffLL;
 #define sylvan_init_bdd         sylvan_init_mtbdd
 #define sylvan_ref              mtbdd_ref
 #define sylvan_deref            mtbdd_deref
@@ -79,7 +86,9 @@ typedef MTBDD MTBDDMAP;
 #define sylvan_unprotect        mtbdd_unprotect
 #define sylvan_count_protected  mtbdd_count_protected
 #define sylvan_gc_mark_rec      mtbdd_gc_mark_rec
-#define sylvan_notify_ondead    mtbdd_notify_ondead
+#define sylvan_ithvar           mtbdd_ithvar
+#define bdd_refs_pushptr        mtbdd_refs_pushptr
+#define bdd_refs_popptr         mtbdd_refs_popptr
 #define bdd_refs_push           mtbdd_refs_push
 #define bdd_refs_pop            mtbdd_refs_pop
 #define bdd_refs_spawn          mtbdd_refs_spawn
@@ -147,59 +156,199 @@ static inline MTBDD mtbdd_makenode(uint32_t var, MTBDD low, MTBDD high)
 }
 
 /**
- * Returns 1 is the MTBDD is a terminal, or 0 otherwise.
+ * Return 1 if the MTBDD is a terminal, or 0 otherwise.
  */
 int mtbdd_isleaf(MTBDD mtbdd);
-#define mtbdd_isnode(mtbdd) (mtbdd_isleaf(mtbdd) ? 0 : 1)
 
 /**
- * For MTBDD terminals, returns <type> and <value>
+ * Return 1 if the MTBDD is an internal node, or 0 otherwise.
+ */
+static inline int mtbdd_isnode(MTBDD mtbdd) { return mtbdd_isleaf(mtbdd) ? 0 : 1; }
+
+/**
+ * Return the <type> field of the given leaf.
+ */
+uint32_t mtbdd_gettype(MTBDD leaf);
+
+/**
+ * Return the <value> field of the given leaf.
  */
-uint32_t mtbdd_gettype(MTBDD terminal);
-uint64_t mtbdd_getvalue(MTBDD terminal);
+uint64_t mtbdd_getvalue(MTBDD leaf);
 
 /**
- * For internal MTBDD nodes, returns <var>, <low> and <high>
+ * Return the variable field of the given internal node.
  */
 uint32_t mtbdd_getvar(MTBDD node);
+
+/**
+ * Follow the low/false edge of the given internal node.
+ * Also takes complement edges into account.
+ */
 MTBDD mtbdd_getlow(MTBDD node);
+
+/**
+ * Follow the high/true edge of the given internal node.
+ * Also takes complement edges into account.
+ */
 MTBDD mtbdd_gethigh(MTBDD node);
 
 /**
- * Compute the complement of the MTBDD.
- * For Boolean MTBDDs, this means "not X".
+ * Obtain the complement of the MTBDD.
+ * This is only valid for Boolean MTBDDs or custom implementations that support it.
  */
-#define mtbdd_hascomp(dd) ((dd & mtbdd_complement) ? 1 : 0)
-#define mtbdd_comp(dd) (dd ^ mtbdd_complement)
-#define mtbdd_not(dd) (dd ^ mtbdd_complement)
+
+static inline int
+mtbdd_hascomp(MTBDD dd)
+{
+    return (dd & mtbdd_complement) ? 1 : 0;
+}
+
+static inline MTBDD
+mtbdd_comp(MTBDD dd)
+{
+    return dd ^ mtbdd_complement;
+}
+
+static inline MTBDD
+mtbdd_not(MTBDD dd)
+{
+    return dd ^ mtbdd_complement;
+}
 
 /**
- * Create terminals representing int64_t (type 0), double (type 1), or fraction (type 2) values
+ * Create an Integer leaf with the given value.
  */
 MTBDD mtbdd_int64(int64_t value);
+
+/**
+ * Create a Real leaf with the given value.
+ */
 MTBDD mtbdd_double(double value);
+
+/**
+ * Create a Fraction leaf with the given numerator and denominator.
+ */
 MTBDD mtbdd_fraction(int64_t numer, uint64_t denom);
 
 /**
- * Get the value of a terminal (for Integer, Real and Fraction terminals, types 0, 1 and 2)
+ * Obtain the value of an Integer leaf.
  */
 int64_t mtbdd_getint64(MTBDD terminal);
+
+/**
+ * Obtain the value of a Real leaf.
+ */
 double mtbdd_getdouble(MTBDD terminal);
-#define mtbdd_getnumer(terminal) ((int32_t)(mtbdd_getvalue(terminal)>>32))
-#define mtbdd_getdenom(terminal) ((uint32_t)(mtbdd_getvalue(terminal)&0xffffffff))
 
 /**
- * Create the conjunction of variables in arr,
- * i.e. arr[0] \and arr[1] \and ... \and arr[length-1]
- * The variable in arr must be ordered.
+ * Obtain the numerator of a Fraction leaf.
+ */
+static inline int32_t
+mtbdd_getnumer(MTBDD terminal)
+{
+    return (int32_t)(mtbdd_getvalue(terminal)>>32);
+}
+
+/**
+ * Obtain the denominator of a Fraction leaf.
+ */
+static inline uint32_t
+mtbdd_getdenom(MTBDD terminal)
+{
+    return (uint32_t)(mtbdd_getvalue(terminal)&0xffffffff);
+}
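+
+/**
+ * Example (illustrative sketch, not part of this header): a Fraction leaf
+ * packs numerator and denominator into one 64-bit value, so for
+ *   MTBDD third = mtbdd_fraction(1, 3);
+ * one expects mtbdd_getnumer(third) == 1 and mtbdd_getdenom(third) == 3.
+ */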
+
+/**
+ * Create the Boolean MTBDD representing "if <var> then True else False"
+ */
+MTBDD mtbdd_ithvar(uint32_t var);
+
+/**
+ * Functions to manipulate sets of MTBDD variables.
+ *
+ * A set of variables is represented by a cube/conjunction of (positive) variables.
+ */
+static inline MTBDD
+mtbdd_set_empty()
+{
+    return mtbdd_true;
+}
+
+static inline int
+mtbdd_set_isempty(MTBDD set)
+{
+    return (set == mtbdd_true) ? 1 : 0;
+}
+
+static inline uint32_t
+mtbdd_set_first(MTBDD set)
+{
+    return mtbdd_getvar(set);
+}
+
+static inline MTBDD
+mtbdd_set_next(MTBDD set)
+{
+    return mtbdd_gethigh(set);
+}
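+
+/**
+ * Illustrative iteration over a variable set (a sketch, not part of this
+ * header):
+ *
+ *   MTBDD it = set;
+ *   while (!mtbdd_set_isempty(it)) {
+ *       uint32_t var = mtbdd_set_first(it);
+ *       // ... use <var> ...
+ *       it = mtbdd_set_next(it);
+ *   }
+ */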
+
+/**
+ * Create a set of variables, represented as the conjunction of (positive) variables.
+ */
+MTBDD mtbdd_set_from_array(uint32_t* arr, size_t length);
+
+/**
+ * Write all variables in a variable set to the given array.
+ * The array must be sufficiently large.
+ */
+void mtbdd_set_to_array(MTBDD set, uint32_t *arr);
+
+/**
+ * Compute the number of variables in a given set of variables.
+ */
+size_t mtbdd_set_count(MTBDD set);
+
+/**
+ * Compute the union of <set1> and <set2>
+ */
+#define mtbdd_set_union(set1, set2) sylvan_and(set1, set2)
+
+/**
+ * Remove variables in <set2> from <set1>
  */
-MTBDD mtbdd_fromarray(uint32_t* arr, size_t length);
+#define mtbdd_set_minus(set1, set2) CALL(mtbdd_set_minus, set1, set2)
+TASK_DECL_2(MTBDD, mtbdd_set_minus, MTBDD, MTBDD);
 
 /**
- * Given a cube of variables, write each variable to arr.
- * WARNING: arr must be sufficiently long!
+ * Return 1 if <set> contains <var>, 0 otherwise.
  */
-void mtbdd_toarray(MTBDD set, uint32_t *arr);
+int mtbdd_set_contains(MTBDD set, uint32_t var);
+
+/**
+ * Add the variable <var> to <set>.
+ */
+MTBDD mtbdd_set_add(MTBDD set, uint32_t var);
+
+/**
+ * Remove the variable <var> from <set>.
+ */
+MTBDD mtbdd_set_remove(MTBDD set, uint32_t var);
+
+/**
+ * Sanity check if the given MTBDD is a conjunction of positive variables,
+ * and if all nodes are marked in the nodes table (detects violations after garbage collection).
+ */
+void mtbdd_test_isset(MTBDD set);
+
+/**
+ * Definitions for backwards compatibility
+ */
+#define mtbdd_fromarray mtbdd_set_from_array
+#define mtbdd_set_fromarray mtbdd_set_from_array
+#define mtbdd_set_toarray mtbdd_set_to_array
+#define mtbdd_set_addall mtbdd_set_union
+#define mtbdd_set_removeall mtbdd_set_minus
+#define mtbdd_set_in mtbdd_set_contains
 
 /**
  * Create a MTBDD cube representing the conjunction of variables in their positive or negative
@@ -292,6 +441,12 @@ TASK_DECL_3(MTBDD, mtbdd_abstract, MTBDD, MTBDD, mtbdd_abstract_op);
  */
 TASK_DECL_2(MTBDD, mtbdd_op_negate, MTBDD, size_t);
 
+/**
+ * Unary operation Complement.
+ * Supported domains: Integer, Real, Fraction
+ */
+TASK_DECL_2(MTBDD, mtbdd_op_cmpl, MTBDD, size_t);
+
 /**
  * Binary operation Plus (for MTBDDs of same type)
  * Only for MTBDDs where either all leaves are Boolean, or Integer, or Double.
@@ -336,9 +491,17 @@ TASK_DECL_3(MTBDD, mtbdd_abstract_op_max, MTBDD, MTBDD, int);
 
 /**
  * Compute -a
+ * (negation, where 0 stays 0, and x is turned into -x)
  */
 #define mtbdd_negate(a) mtbdd_uapply(a, TASK(mtbdd_op_negate), 0)
 
+/**
+ * Compute ~a for partial MTBDDs.
+ * Does not negate Boolean True/False.
+ * (complement, where 0 is turned into 1, and non-0 into 0)
+ */
+#define mtbdd_cmpl(a) mtbdd_uapply(a, TASK(mtbdd_op_cmpl), 0)
+
 /**
  * Compute a + b
  */
@@ -776,34 +939,39 @@ MTBDD mtbdd_reader_get(uint64_t* arr, uint64_t identifier);
  */
 void mtbdd_reader_end(uint64_t *arr);
 
-/**
- * MTBDDSET
- * Just some convenience functions for handling sets of variables represented as a 
- * cube (conjunction) of positive literals
- */
-#define mtbdd_set_empty()                   mtbdd_true
-#define mtbdd_set_isempty(set)              (set == mtbdd_true)
-#define mtbdd_set_add(set, var)             sylvan_and(set, sylvan_ithvar(var))
-#define mtbdd_set_addall(set, set2)         sylvan_and(set, set2)
-#define mtbdd_set_remove(set, var)          sylvan_exists(set, var)
-#define mtbdd_set_removeall(set, set2)      sylvan_exists(set, set2)
-#define mtbdd_set_first(set)                sylvan_var(set)
-#define mtbdd_set_next(set)                 sylvan_high(set)
-#define mtbdd_set_fromarray(arr, count)     mtbdd_fromarray(arr, count)
-#define mtbdd_set_toarray(set, arr)         mtbdd_toarray(set, arr)
-int mtbdd_set_in(BDDSET set, BDDVAR var);
-size_t mtbdd_set_count(BDDSET set);
-void mtbdd_test_isset(BDDSET set);
-
 /**
  * MTBDDMAP, maps uint32_t variables to MTBDDs.
  * A MTBDDMAP node has variable level, low edge going to the next MTBDDMAP, high edge to the mapped MTBDD.
  */
-#define mtbdd_map_empty() mtbdd_false
-#define mtbdd_map_isempty(map) (map == mtbdd_false ? 1 : 0)
-#define mtbdd_map_key(map) mtbdd_getvar(map)
-#define mtbdd_map_value(map) mtbdd_gethigh(map)
-#define mtbdd_map_next(map) mtbdd_getlow(map)
+static inline MTBDD
+mtbdd_map_empty()
+{
+    return mtbdd_false;
+}
+
+static inline int
+mtbdd_map_isempty(MTBDD map)
+{
+    return (map == mtbdd_false) ? 1 : 0;
+}
+
+static inline uint32_t
+mtbdd_map_key(MTBDD map)
+{
+    return mtbdd_getvar(map);
+}
+
+static inline MTBDD
+mtbdd_map_value(MTBDD map)
+{
+    return mtbdd_gethigh(map);
+}
+
+static inline MTBDD
+mtbdd_map_next(MTBDD map)
+{
+    return mtbdd_getlow(map);
+}
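+
+/**
+ * Illustrative iteration over an MTBDDMAP (a sketch, not part of this
+ * header):
+ *
+ *   MTBDDMAP it = map;
+ *   while (!mtbdd_map_isempty(it)) {
+ *       uint32_t key = mtbdd_map_key(it);
+ *       MTBDD value  = mtbdd_map_value(it);
+ *       // ... use <key> and <value> ...
+ *       it = mtbdd_map_next(it);
+ *   }
+ */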
 
 /**
  * Return 1 if the map contains the key, 0 otherwise.
@@ -823,7 +991,8 @@ MTBDDMAP mtbdd_map_add(MTBDDMAP map, uint32_t key, MTBDD value);
 /**
  * Add all values from map2 to map1, overwrites if key already in map1.
  */
-MTBDDMAP mtbdd_map_addall(MTBDDMAP map1, MTBDDMAP map2);
+MTBDDMAP mtbdd_map_update(MTBDDMAP map1, MTBDDMAP map2);
+#define mtbdd_map_addall mtbdd_map_update
 
 /**
  * Remove the key <key> from the map and return the result
@@ -850,85 +1019,87 @@ VOID_TASK_DECL_1(mtbdd_gc_mark_rec, MTBDD);
 #define mtbdd_gc_mark_rec(mtbdd) CALL(mtbdd_gc_mark_rec, mtbdd)
 
 /**
- * Default external referencing. During garbage collection, MTBDDs marked with mtbdd_ref will
- * be kept in the forest.
- * It is recommended to prefer mtbdd_protect and mtbdd_unprotect.
+ * Infrastructure for external references using a hash table.
+ * Two hash tables store external references: a pointers table and a values table.
+ * The pointers table stores pointers to MTBDD variables, manipulated with protect and unprotect.
+ * The values table stores MTBDDs, manipulated with ref and deref.
+ * We strongly recommend using the pointers table whenever possible.
  */
-MTBDD mtbdd_ref(MTBDD a);
-void mtbdd_deref(MTBDD a);
-size_t mtbdd_count_refs(void);
 
 /**
- * Default external pointer referencing. During garbage collection, the pointers are followed and the MTBDD
- * that they refer to are kept in the forest.
+ * Store the pointer <ptr> in the pointers table.
  */
 void mtbdd_protect(MTBDD* ptr);
+
+/**
+ * Delete the pointer <ptr> from the pointers table.
+ */
 void mtbdd_unprotect(MTBDD* ptr);
+
+/**
+ * Compute the number of pointers in the pointers table.
+ */
 size_t mtbdd_count_protected(void);
 
 /**
- * If mtbdd_set_ondead is set to a callback, then this function marks MTBDDs (terminals).
- * When they are dead after the mark phase in garbage collection, the callback is called for marked MTBDDs.
- * The ondead callback can either perform cleanup or resurrect dead terminals.
+ * Store the MTBDD <dd> in the values table.
  */
-#define mtbdd_notify_ondead(dd) llmsset_notify_ondead(nodes, dd&~mtbdd_complement)
+MTBDD mtbdd_ref(MTBDD dd);
 
 /**
- * Infrastructure for internal references (per-thread, e.g. during MTBDD operations)
- * Use mtbdd_refs_push and mtbdd_refs_pop to put MTBDDs on a thread-local reference stack.
- * Use mtbdd_refs_spawn and mtbdd_refs_sync around SPAWN and SYNC operations when the result
- * of the spawned Task is a MTBDD that must be kept during garbage collection.
+ * Delete the MTBDD <dd> from the values table.
  */
-typedef struct mtbdd_refs_internal
-{
-    size_t r_size, r_count;
-    size_t s_size, s_count;
-    MTBDD *results;
-    Task **spawns;
-} *mtbdd_refs_internal_t;
+void mtbdd_deref(MTBDD dd);
 
-extern DECLARE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
+/**
+ * Compute the number of values in the values table.
+ */
+size_t mtbdd_count_refs(void);
 
-static inline MTBDD
-mtbdd_refs_push(MTBDD mtbdd)
-{
-    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
-    if (mtbdd_refs_key->r_count >= mtbdd_refs_key->r_size) {
-        mtbdd_refs_key->r_size *= 2;
-        mtbdd_refs_key->results = (MTBDD*)realloc(mtbdd_refs_key->results, sizeof(MTBDD) * mtbdd_refs_key->r_size);
-    }
-    mtbdd_refs_key->results[mtbdd_refs_key->r_count++] = mtbdd;
-    return mtbdd;
-}
+/**
+ * Infrastructure for internal references.
+ * Every thread has its own reference stacks. There are three stacks: the pointers stack, the values stack, and the tasks stack.
+ * The pointers stack stores pointers to MTBDD variables, manipulated with pushptr and popptr.
+ * The values stack stores MTBDDs, manipulated with push and pop.
+ * The tasks stack stores Lace tasks (that return MTBDDs), manipulated with spawn and sync.
+ *
+ * It is recommended to use the pointers stack for local variables and the tasks stack for tasks.
+ */
 
-static inline void
-mtbdd_refs_pop(int amount)
-{
-    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
-    mtbdd_refs_key->r_count-=amount;
-}
+/**
+ * Push a MTBDD variable to the pointer reference stack.
+ * During garbage collection the variable will be inspected and the contents will be marked.
+ */
+void mtbdd_refs_pushptr(const MTBDD *ptr);
 
-static inline void
-mtbdd_refs_spawn(Task *t)
-{
-    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
-    if (mtbdd_refs_key->s_count >= mtbdd_refs_key->s_size) {
-        mtbdd_refs_key->s_size *= 2;
-        mtbdd_refs_key->spawns = (Task**)realloc(mtbdd_refs_key->spawns, sizeof(Task*) * mtbdd_refs_key->s_size);
-    }
-    mtbdd_refs_key->spawns[mtbdd_refs_key->s_count++] = t;
-}
+/**
+ * Pop the last <amount> MTBDD variables from the pointer reference stack.
+ */
+void mtbdd_refs_popptr(size_t amount);
 
-static inline MTBDD
-mtbdd_refs_sync(MTBDD result)
-{
-    LOCALIZE_THREAD_LOCAL(mtbdd_refs_key, mtbdd_refs_internal_t);
-    mtbdd_refs_key->s_count--;
-    return result;
-}
+/**
+ * Push an MTBDD to the values reference stack.
+ * During garbage collection the referenced MTBDD will be marked.
+ */
+MTBDD mtbdd_refs_push(MTBDD mtbdd);
+
+/**
+ * Pop the last <amount> MTBDDs from the values reference stack.
+ */
+void mtbdd_refs_pop(long amount);
+
+/**
+ * Push a Task that returns an MTBDD to the tasks reference stack.
+ * Usage: mtbdd_refs_spawn(SPAWN(function, ...));
+ */
+void mtbdd_refs_spawn(Task *t);
+
+/**
+ * Pop a Task from the tasks reference stack.
+ * Usage: MTBDD result = mtbdd_refs_sync(SYNC(function));
+ */
+MTBDD mtbdd_refs_sync(MTBDD mtbdd);
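+
+/**
+ * Typical combination of the tasks stack with the values stack, mirroring the
+ * pattern used by mtbdd_union_cube in sylvan_mtbdd.c (illustrative sketch;
+ * <some_mtbdd_task> and its arguments are placeholders):
+ *
+ *   mtbdd_refs_spawn(SPAWN(some_mtbdd_task, high_child));
+ *   MTBDD low  = CALL(some_mtbdd_task, low_child);
+ *   mtbdd_refs_push(low);
+ *   MTBDD high = mtbdd_refs_sync(SYNC(some_mtbdd_task));
+ *   mtbdd_refs_pop(1);
+ */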
 
-#include "sylvan_mtbdd_storm.h"
-    
 #ifdef __cplusplus
 }
 #endif /* __cplusplus */
diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h b/resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h
index 8c16eca43..4fefc47e4 100755
--- a/resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h
+++ b/resources/3rdparty/sylvan/src/sylvan_mtbdd_int.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,6 +15,8 @@
  * limitations under the License.
  */
 
+/* Do not include this file directly. Instead, include sylvan_int.h */
+
 /**
  * Internals for MTBDDs
  */
@@ -29,17 +31,48 @@ typedef struct __attribute__((packed)) mtbddnode {
     uint64_t a, b;
 } * mtbddnode_t; // 16 bytes
 
-#define MTBDD_GETNODE(mtbdd) ((mtbddnode_t)llmsset_index_to_ptr(nodes, mtbdd&0x000000ffffffffff))
+static inline mtbddnode_t
+MTBDD_GETNODE(MTBDD dd)
+{
+    return (mtbddnode_t)llmsset_index_to_ptr(nodes, dd&0x000000ffffffffff);
+}
 
 /**
  * Complement handling macros
  */
-#define MTBDD_HASMARK(s)              (s&mtbdd_complement?1:0)
-#define MTBDD_TOGGLEMARK(s)           (s^mtbdd_complement)
-#define MTBDD_STRIPMARK(s)            (s&~mtbdd_complement)
-#define MTBDD_TRANSFERMARK(from, to)  (to ^ (from & mtbdd_complement))
-// Equal under mark
-#define MTBDD_EQUALM(a, b)            ((((a)^(b))&(~mtbdd_complement))==0)
+
+static inline int
+MTBDD_HASMARK(MTBDD dd)
+{
+    return (dd & mtbdd_complement) ? 1 : 0;
+}
+
+static inline MTBDD
+MTBDD_TOGGLEMARK(MTBDD dd)
+{
+    return dd ^ mtbdd_complement;
+}
+
+static inline MTBDD
+MTBDD_STRIPMARK(MTBDD dd)
+{
+    return dd & (~mtbdd_complement);
+}
+
+static inline MTBDD
+MTBDD_TRANSFERMARK(MTBDD from, MTBDD to)
+{
+    return (to ^ (from & mtbdd_complement));
+}
+
+/**
+ * Are two MTBDDs equal modulo mark?
+ */
+static inline int
+MTBDD_EQUALM(MTBDD a, MTBDD b)
+{
+    return ((a^b)&(~mtbdd_complement)) ? 0 : 1;
+}
 
 // Leaf: a = L=1, M, type; b = value
 // Node: a = L=0, C, M, high; b = variable, low
diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c b/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c
index 76792affe..39de0400b 100644
--- a/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c
+++ b/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c
@@ -1,6 +1,9 @@
-#include <sylvan_mtbdd_int.h>
+#include <stdint.h>
+#include <math.h>
+#include "sylvan_int.h"
 
 #include "storm_wrapper.h"
+#include "sylvan_mtbdd_storm.h"
 
 // Import the types created for rational numbers and functions.
 extern uint32_t srn_type;
@@ -554,11 +557,6 @@ int mtbdd_isnonzero(MTBDD dd) {
     return mtbdd_iszero(dd) ? 0 : 1;
 }
 
-MTBDD
-mtbdd_ithvar(uint32_t level) {
-    return mtbdd_makenode(level, mtbdd_false, mtbdd_true);
-}
-
 TASK_IMPL_2(MTBDD, mtbdd_op_complement, MTBDD, a, size_t, k)
 {
     // if a is false, then it is a partial function. Keep partial!
@@ -618,11 +616,18 @@ TASK_IMPL_3(BDD, mtbdd_min_abstract_representative, MTBDD, a, BDD, v, BDDVAR, pr
 		return res1;
     }
 	
+    /* Check cache */
+    MTBDD result;
+    if (cache_get3(CACHE_MTBDD_ABSTRACT_REPRESENTATIVE, a, v, (size_t)1, &result)) {
+        sylvan_stats_count(MTBDD_ABSTRACT_CACHED);
+        return result;
+    }
+    
 	mtbddnode_t na = MTBDD_GETNODE(a);
 	uint32_t va = mtbddnode_getvariable(na);
 	bddnode_t nv = MTBDD_GETNODE(v);
 	BDDVAR vv = bddnode_getvariable(nv);
-
+    
     /* Abstract a variable that does not appear in a. */
     if (va > vv) {
 		BDD _v = sylvan_set_next(v);
@@ -642,13 +647,6 @@ TASK_IMPL_3(BDD, mtbdd_min_abstract_representative, MTBDD, a, BDD, v, BDDVAR, pr
        	return res1;
     }
     
-    /* Check cache */
-    MTBDD result;
-    if (cache_get3(CACHE_MTBDD_ABSTRACT_REPRESENTATIVE, a, v, (size_t)1, &result)) {
-        sylvan_stats_count(MTBDD_ABSTRACT_CACHED);
-        return result;
-    }
-    
     MTBDD E = mtbdd_getlow(a);
     MTBDD T = mtbdd_gethigh(a);
     
@@ -798,12 +796,19 @@ TASK_IMPL_3(BDD, mtbdd_max_abstract_representative, MTBDD, a, MTBDD, v, uint32_t
         
 		return res1;
     }
-	
+
+    /* Check cache */
+    MTBDD result;
+    if (cache_get3(CACHE_MTBDD_ABSTRACT_REPRESENTATIVE, a, v, (size_t)0, &result)) {
+        sylvan_stats_count(MTBDD_ABSTRACT_CACHED);
+        return result;
+    }
+
 	mtbddnode_t na = MTBDD_GETNODE(a);
 	uint32_t va = mtbddnode_getvariable(na);
 	bddnode_t nv = MTBDD_GETNODE(v);
 	BDDVAR vv = bddnode_getvariable(nv);
-
+    
     /* Abstract a variable that does not appear in a. */
     if (vv < va) {
 		BDD _v = sylvan_set_next(v);
@@ -823,13 +828,6 @@ TASK_IMPL_3(BDD, mtbdd_max_abstract_representative, MTBDD, a, MTBDD, v, uint32_t
        	return res1;
     }
     
-    /* Check cache */
-    MTBDD result;
-    if (cache_get3(CACHE_MTBDD_ABSTRACT_REPRESENTATIVE, a, v, (size_t)0, &result)) {
-        sylvan_stats_count(MTBDD_ABSTRACT_CACHED);
-        return result;
-    }
-    
     MTBDD E = mtbdd_getlow(a);
     MTBDD T = mtbdd_gethigh(a);
     
diff --git a/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h b/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h
index 33bfc9e4f..27066fe19 100644
--- a/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h
+++ b/resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h
@@ -1,3 +1,7 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 /**
  * Binary operation Divide (for MTBDDs of same type)
  * Only for MTBDDs where all leaves are Integer or Double.
@@ -148,3 +152,7 @@ TASK_DECL_3(BDD, mtbdd_max_abstract_representative, MTBDD, MTBDD, uint32_t);
 
 TASK_DECL_3(MTBDD, mtbdd_uapply_nocache, MTBDD, mtbdd_uapply_op, size_t);
 #define mtbdd_uapply_nocache(dd, op, param) CALL(mtbdd_uapply_nocache, dd, op, param)
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp b/resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp
index 86d2afc85..5cbf27cc1 100644
--- a/resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp
+++ b/resources/3rdparty/sylvan/src/sylvan_obj_storm.cpp
@@ -1,4 +1,5 @@
 #include "storm_wrapper.h"
+#include "sylvan_mtbdd_storm.h"
 #include "sylvan_storm_rational_number.h"
 #include "sylvan_storm_rational_function.h"
 
diff --git a/resources/3rdparty/sylvan/src/sylvan_refs.c b/resources/3rdparty/sylvan/src/sylvan_refs.c
index 54495d480..968721370 100755
--- a/resources/3rdparty/sylvan/src/sylvan_refs.c
+++ b/resources/3rdparty/sylvan/src/sylvan_refs.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,18 +15,13 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
+#include <sylvan.h>
+#include <sylvan_refs.h>
 
-#include <assert.h> // for assert
 #include <errno.h>  // for errno
-#include <stdio.h>  // for fprintf
-#include <stdint.h> // for uint32_t etc
-#include <stdlib.h> // for exit
 #include <string.h> // for strerror
 #include <sys/mman.h> // for mmap
 
-#include <sylvan_refs.h>
-
 #ifndef compiler_barrier
 #define compiler_barrier() { asm volatile("" ::: "memory"); }
 #endif
diff --git a/resources/3rdparty/sylvan/src/sylvan_refs.h b/resources/3rdparty/sylvan/src/sylvan_refs.h
index fe02bb372..a0e20d90b 100755
--- a/resources/3rdparty/sylvan/src/sylvan_refs.h
+++ b/resources/3rdparty/sylvan/src/sylvan_refs.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,8 +15,7 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
-#include <stdint.h> // for uint32_t etc
+/* Do not include this file directly. Instead, include sylvan.h */
 
 #ifndef REFS_INLINE_H
 #define REFS_INLINE_H
diff --git a/resources/3rdparty/sylvan/src/sylvan_sl.c b/resources/3rdparty/sylvan/src/sylvan_sl.c
index 39218f7b0..00045dfd9 100755
--- a/resources/3rdparty/sylvan/src/sylvan_sl.c
+++ b/resources/3rdparty/sylvan/src/sylvan_sl.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,13 +14,11 @@
  * limitations under the License.
  */
 
-#include <assert.h>
-#include <stdio.h>
-#include <sys/mman.h> // for mmap, munmap, etc
-
 #include <sylvan.h>
 #include <sylvan_sl.h>
 
+#include <sys/mman.h> // for mmap, munmap, etc
+
 /* A SL_DEPTH of 6 means 32 bytes per bucket, of 14 means 64 bytes per bucket.
    However, there is a very large performance drop with only 6 levels. */
 #define SL_DEPTH 14
diff --git a/resources/3rdparty/sylvan/src/sylvan_sl.h b/resources/3rdparty/sylvan/src/sylvan_sl.h
index dac0ec461..ad09c92a3 100755
--- a/resources/3rdparty/sylvan/src/sylvan_sl.h
+++ b/resources/3rdparty/sylvan/src/sylvan_sl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/resources/3rdparty/sylvan/src/sylvan_stats.c b/resources/3rdparty/sylvan/src/sylvan_stats.c
index 1c9f52c9b..fb60d2be4 100755
--- a/resources/3rdparty/sylvan/src/sylvan_stats.c
+++ b/resources/3rdparty/sylvan/src/sylvan_stats.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,14 +15,13 @@
  * limitations under the License.
  */
 
+#include <sylvan_int.h>
+
 #include <errno.h>  // for errno
 #include <string.h> // memset
-#include <sylvan_stats.h>
 #include <sys/mman.h>
 #include <inttypes.h>
 
-#include <sylvan_int.h>
-
 #if SYLVAN_STATS
 
 #ifdef __ELF__
@@ -31,9 +30,6 @@ __thread sylvan_stats_t sylvan_stats;
 pthread_key_t sylvan_stats_key;
 #endif
 
-#include <hwloc.h>
-static hwloc_topology_t topo;
-
 /**
  * Instructions for sylvan_stats_report
  */
@@ -127,11 +123,8 @@ VOID_TASK_0(sylvan_stats_reset_perthread)
             fprintf(stderr, "sylvan_stats: Unable to allocate memory: %s!\n", strerror(errno));
             exit(1);
         }
-        // Ensure the stats object is on our pu
-        hwloc_obj_t pu = hwloc_get_obj_by_type(topo, HWLOC_OBJ_PU, LACE_WORKER_PU);
-        hwloc_set_area_membind(topo, sylvan_stats, sizeof(sylvan_stats_t), pu->cpuset, HWLOC_MEMBIND_BIND, 0);
-        pthread_setspecific(sylvan_stats_key, sylvan_stats);
     }
+    pthread_setspecific(sylvan_stats_key, sylvan_stats);
     for (int i=0; i<SYLVAN_COUNTER_COUNTER; i++) {
         sylvan_stats->counters[i] = 0;
     }
@@ -146,8 +139,6 @@ VOID_TASK_IMPL_0(sylvan_stats_init)
 #ifndef __ELF__
     pthread_key_create(&sylvan_stats_key, NULL);
 #endif
-    hwloc_topology_init(&topo);
-    hwloc_topology_load(topo);
     TOGETHER(sylvan_stats_reset_perthread);
 }
 
diff --git a/resources/3rdparty/sylvan/src/sylvan_stats.h b/resources/3rdparty/sylvan/src/sylvan_stats.h
index 792e67baa..16c227a6a 100755
--- a/resources/3rdparty/sylvan/src/sylvan_stats.h
+++ b/resources/3rdparty/sylvan/src/sylvan_stats.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,8 +15,7 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
-#include <lace.h>
+/* Do not include this file directly. Instead, include sylvan.h */
 
 #ifndef SYLVAN_STATS_H
 #define SYLVAN_STATS_H
@@ -93,6 +92,8 @@ typedef enum {
     SYLVAN_COUNTER_COUNTER
 } Sylvan_Counters;
 
+#undef OPCOUNTER
+
 typedef enum
 {
     SYLVAN_GC,
@@ -134,10 +135,8 @@ void sylvan_stats_report(FILE* target);
 #if SYLVAN_STATS
 
 #ifdef __MACH__
-#include <mach/mach_time.h>
 #define getabstime() mach_absolute_time()
 #else
-#include <time.h>
 static uint64_t
 getabstime(void)
 {
@@ -153,7 +152,6 @@ getabstime(void)
 #ifdef __ELF__
 extern __thread sylvan_stats_t sylvan_stats;
 #else
-#include <pthread.h>
 extern pthread_key_t sylvan_stats_key;
 #endif
 
diff --git a/resources/3rdparty/sylvan/src/sylvan_storm_rational_function.c b/resources/3rdparty/sylvan/src/sylvan_storm_rational_function.c
index dc9e97aa2..5f21b9d11 100644
--- a/resources/3rdparty/sylvan/src/sylvan_storm_rational_function.c
+++ b/resources/3rdparty/sylvan/src/sylvan_storm_rational_function.c
@@ -114,6 +114,8 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_function_op_plus, MTBDD*, pa, MTBDD*, p
     /* Check for partial functions */
     if (a == mtbdd_false) return b;
     if (b == mtbdd_false) return a;
+    
+    if (a == mtbdd_true || b == mtbdd_true) return mtbdd_true;
 
     /* If both leaves, compute plus */
     if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
@@ -163,6 +165,8 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_function_op_times, MTBDD*, pa, MTBDD*,
 
     /* Check for partial functions */
     if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false;
+    if (a == mtbdd_true) return b;
+    if (b == mtbdd_true) return a;
 
     /* If both leaves, compute multiplication */
     if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
@@ -550,7 +554,7 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_function_op_threshold, MTBDD, a, size_t
     
     if (mtbdd_isleaf(a)) {
         storm_rational_function_ptr ma = mtbdd_getstorm_rational_function_ptr(a);
-        return storm_rational_function_less_or_equal(ma, value) ? mtbdd_false : mtbdd_true;
+        return storm_rational_function_less(ma, value) ? mtbdd_false : mtbdd_true;
     }
     
     return mtbdd_invalid;
@@ -562,7 +566,7 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_function_op_strict_threshold, MTBDD, a,
     if (mtbdd_isleaf(a)) {
         storm_rational_function_ptr ma = mtbdd_getstorm_rational_function_ptr(a);
         
-        return storm_rational_function_less(ma, value) ? mtbdd_false : mtbdd_true;
+        return storm_rational_function_less_or_equal(ma, value) ? mtbdd_false : mtbdd_true;
     }
     
     return mtbdd_invalid;
@@ -679,7 +683,7 @@ TASK_4(MTBDD, sylvan_storm_rational_function_equal_norm_d2, MTBDD, a, MTBDD, b,
     
     /* Check cache */
     MTBDD result;
-    if (cache_get3(CACHE_MTBDD_EQUAL_NORM_RF, a, b, svalue, &result)) {
+    if (cache_get3(CACHE_MTBDD_EQUAL_NORM_RF, a, b, (uint64_t)svalue, &result)) {
         sylvan_stats_count(MTBDD_EQUAL_NORM_CACHED);
         return result;
     }
@@ -703,7 +707,7 @@ TASK_4(MTBDD, sylvan_storm_rational_function_equal_norm_d2, MTBDD, a, MTBDD, b,
     if (result == mtbdd_false) *shortcircuit = 1;
     
     /* Store in cache */
-    if (cache_put3(CACHE_MTBDD_EQUAL_NORM_RF, a, b, svalue, result)) {
+    if (cache_put3(CACHE_MTBDD_EQUAL_NORM_RF, a, b, (uint64_t)svalue, result)) {
         sylvan_stats_count(MTBDD_EQUAL_NORM_CACHEDPUT);
     }
     
@@ -752,7 +756,7 @@ TASK_4(MTBDD, sylvan_storm_rational_function_equal_norm_rel_d2, MTBDD, a, MTBDD,
     
     /* Check cache */
     MTBDD result;
-    if (cache_get3(CACHE_MTBDD_EQUAL_NORM_REL_RF, a, b, svalue, &result)) {
+    if (cache_get3(CACHE_MTBDD_EQUAL_NORM_REL_RF, a, b, (uint64_t)svalue, &result)) {
         sylvan_stats_count(MTBDD_EQUAL_NORM_REL_CACHED);
         return result;
     }
@@ -776,7 +780,7 @@ TASK_4(MTBDD, sylvan_storm_rational_function_equal_norm_rel_d2, MTBDD, a, MTBDD,
     if (result == mtbdd_false) *shortcircuit = 1;
     
     /* Store in cache */
-    if (cache_put3(CACHE_MTBDD_EQUAL_NORM_REL_RF, a, b, svalue, result)) {
+    if (cache_put3(CACHE_MTBDD_EQUAL_NORM_REL_RF, a, b, (uint64_t)svalue, result)) {
         sylvan_stats_count(MTBDD_EQUAL_NORM_REL_CACHEDPUT);
     }
     
diff --git a/resources/3rdparty/sylvan/src/sylvan_storm_rational_number.c b/resources/3rdparty/sylvan/src/sylvan_storm_rational_number.c
index d2f59450c..22f9ef053 100644
--- a/resources/3rdparty/sylvan/src/sylvan_storm_rational_number.c
+++ b/resources/3rdparty/sylvan/src/sylvan_storm_rational_number.c
@@ -114,6 +114,8 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_number_op_plus, MTBDD*, pa, MTBDD*, pb)
     /* Check for partial functions */
     if (a == mtbdd_false) return b;
     if (b == mtbdd_false) return a;
+    
+    if (a == mtbdd_true || b == mtbdd_true) return mtbdd_true;
 
     /* If both leaves, compute plus */
     if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
@@ -163,6 +165,8 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_number_op_times, MTBDD*, pa, MTBDD*, pb
 
     /* Check for partial functions */
     if (a == mtbdd_false || b == mtbdd_false) return mtbdd_false;
+    if (a == mtbdd_true) return b;
+    if (b == mtbdd_true) return a;
 
     /* If both leaves, compute multiplication */
     if (mtbdd_isleaf(a) && mtbdd_isleaf(b)) {
@@ -584,7 +588,7 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_number_op_threshold, MTBDD, a, size_t*,
     
     if (mtbdd_isleaf(a)) {
         storm_rational_number_ptr ma = mtbdd_getstorm_rational_number_ptr(a);
-        return storm_rational_number_less_or_equal(ma, value) ? mtbdd_false : mtbdd_true;
+        return storm_rational_number_less(ma, value) ? mtbdd_false : mtbdd_true;
     }
     
     return mtbdd_invalid;
@@ -596,7 +600,7 @@ TASK_IMPL_2(MTBDD, sylvan_storm_rational_number_op_strict_threshold, MTBDD, a, s
     if (mtbdd_isleaf(a)) {
         storm_rational_number_ptr ma = mtbdd_getstorm_rational_number_ptr(a);
         
-        return storm_rational_number_less(ma, value) ? mtbdd_false : mtbdd_true;
+        return storm_rational_number_less_or_equal(ma, value) ? mtbdd_false : mtbdd_true;
     }
     
     return mtbdd_invalid;
@@ -713,7 +717,7 @@ TASK_4(MTBDD, sylvan_storm_rational_number_equal_norm_d2, MTBDD, a, MTBDD, b, st
     
     /* Check cache */
     MTBDD result;
-    if (cache_get3(CACHE_MTBDD_EQUAL_NORM_RN, a, b, svalue, &result)) {
+    if (cache_get3(CACHE_MTBDD_EQUAL_NORM_RN, a, b, (uint64_t)svalue, &result)) {
         sylvan_stats_count(MTBDD_EQUAL_NORM_CACHED);
         return result;
     }
@@ -737,7 +741,7 @@ TASK_4(MTBDD, sylvan_storm_rational_number_equal_norm_d2, MTBDD, a, MTBDD, b, st
     if (result == mtbdd_false) *shortcircuit = 1;
     
     /* Store in cache */
-    if (cache_put3(CACHE_MTBDD_EQUAL_NORM_RN, a, b, svalue, result)) {
+    if (cache_put3(CACHE_MTBDD_EQUAL_NORM_RN, a, b, (uint64_t)svalue, result)) {
         sylvan_stats_count(MTBDD_EQUAL_NORM_CACHEDPUT);
     }
     
@@ -786,7 +790,7 @@ TASK_4(MTBDD, sylvan_storm_rational_number_equal_norm_rel_d2, MTBDD, a, MTBDD, b
     
     /* Check cache */
     MTBDD result;
-    if (cache_get3(CACHE_MTBDD_EQUAL_NORM_REL_RN, a, b, svalue, &result)) {
+    if (cache_get3(CACHE_MTBDD_EQUAL_NORM_REL_RN, a, b, (uint64_t)svalue, &result)) {
         sylvan_stats_count(MTBDD_EQUAL_NORM_REL_CACHED);
         return result;
     }
@@ -810,7 +814,7 @@ TASK_4(MTBDD, sylvan_storm_rational_number_equal_norm_rel_d2, MTBDD, a, MTBDD, b
     if (result == mtbdd_false) *shortcircuit = 1;
     
     /* Store in cache */
-    if (cache_put3(CACHE_MTBDD_EQUAL_NORM_REL_RN, a, b, svalue, result)) {
+    if (cache_put3(CACHE_MTBDD_EQUAL_NORM_REL_RN, a, b, (uint64_t)svalue, result)) {
         sylvan_stats_count(MTBDD_EQUAL_NORM_REL_CACHEDPUT);
     }
     
diff --git a/resources/3rdparty/sylvan/src/sylvan_table.c b/resources/3rdparty/sylvan/src/sylvan_table.c
index 83d30b842..bead5e6f8 100755
--- a/resources/3rdparty/sylvan/src/sylvan_table.c
+++ b/resources/3rdparty/sylvan/src/sylvan_table.c
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,23 +15,12 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
+#include <sylvan_int.h>
 
 #include <errno.h>  // for errno
-#include <stdint.h> // for uint64_t etc
-#include <stdio.h>  // for printf
-#include <stdlib.h>
 #include <string.h> // memset
 #include <sys/mman.h> // for mmap
 
-#include <sylvan_table.h>
-#include <sylvan_stats.h>
-#include <sylvan_tls.h>
-
-#include <hwloc.h>
-
-static hwloc_topology_t topo;
-
 #ifndef MAP_ANONYMOUS
 #define MAP_ANONYMOUS MAP_ANON
 #endif
@@ -120,15 +109,178 @@ is_custom_bucket(const llmsset_t dbs, uint64_t index)
     return (*ptr & mask) ? 1 : 0;
 }
 
+/**
+ * This tricks the compiler into generating the bit-wise rotation instruction
+ */
+static uint64_t __attribute__((unused))
+rotr64 (uint64_t n, unsigned int c)
+{
+    return (n >> c) | (n << (64-c));
+}
+
+/**
+ * Pseudo-RNG for initializing the hashtab tables.
+ * Implementation of xorshift128+ by Vigna 2016, which is
+ * based on "Xorshift RNGs", Marsaglia 2003
+ */
+static uint64_t __attribute__((unused))
+xor64(void)
+{
+    // For the initial state of s, we select two numbers:
+    // - the initializer of Marsaglia's original xorshift
+    // - the FNV-1a 64-bit offset basis
+    static uint64_t s[2] = {88172645463325252LLU, 14695981039346656037LLU};
+
+    uint64_t s1 = s[0];
+    const uint64_t s0 = s[1];
+    const uint64_t result = s0 + s1;
+    s[0] = s0;
+    s1 ^= s1 << 23; // a
+    s[1] = s1 ^ s0 ^ (s1 >> 18) ^ (s0 >> 5); // b, c
+    return result;
+}
+
+/**
+ * The table for tabulation hashing
+ */
+static uint64_t hashtab[256*16];
+
+/**
+ * Implementation of simple tabulation.
+ * Proposed by e.g. Thorup 2017 "Fast and Powerful Hashing using Tabulation"
+ */
+uint64_t
+llmsset_tabhash(uint64_t a, uint64_t b, uint64_t seed)
+{
+    // we use the seed as base
+    uint64_t *t = hashtab;
+    for (int i=0; i<8; i++) {
+        seed ^= t[(uint8_t)a];
+        t += 256; // next table
+        a >>= 8;
+    }
+    for (int i=0; i<8; i++) {
+        seed ^= t[(uint8_t)b];
+        t += 256; // next table
+        b >>= 8;
+    }
+    return seed;
+}
+
+/**
+ * Encoding of the prime 2^89-1 for CWhash
+ */
+static const uint64_t Prime89_0 = (((uint64_t)1)<<32)-1;
+static const uint64_t Prime89_1 = (((uint64_t)1)<<32)-1;
+static const uint64_t Prime89_2 = (((uint64_t)1)<<25)-1;
+static const uint64_t Prime89_21 = (((uint64_t)1)<<57)-1;
+
+typedef uint64_t INT96[3];
+
+/**
+ * Computes (r mod Prime89) mod 2ˆ64
+ * (for CWhash, implementation by Thorup et al.)
+ */
+static uint64_t
+Mod64Prime89(INT96 r)
+{
+    uint64_t r0, r1, r2;
+    r2 = r[2];
+    r1 = r[1];
+    r0 = r[0] + (r2>>25);
+    r2 &= Prime89_2;
+    return (r2 == Prime89_2 && r1 == Prime89_1 && r0 >= Prime89_0) ? (r0 - Prime89_0) : (r0 + (r1<<32));
+}
+
+/**
+ * Computes a 96-bit r such that r = ax+b (mod Prime89)
+ * (for CWhash, implementation by Thorup et al.)
+ */
+static void
+MultAddPrime89(INT96 r, uint64_t x, const INT96 a, const INT96 b)
+{
+#define LOW(x) ((x)&0xFFFFFFFF)
+#define HIGH(x) ((x)>>32)
+    uint64_t x1, x0, c21, c20, c11, c10, c01, c00;
+    uint64_t d0, d1, d2, d3;
+    uint64_t s0, s1, carry;
+    x1 = HIGH(x);
+    x0 = LOW(x);
+    c21 = a[2]*x1;
+    c11 = a[1]*x1;
+    c01 = a[0]*x1;
+    c20 = a[2]*x0;
+    c10 = a[1]*x0;
+    c00 = a[0]*x0;
+    d0 = (c20>>25)+(c11>>25)+(c10>>57)+(c01>>57);
+    d1 = (c21<<7);
+    d2 = (c10&Prime89_21) + (c01&Prime89_21);
+    d3 = (c20&Prime89_2) + (c11&Prime89_2) + (c21>>57);
+    s0 = b[0] + LOW(c00) + LOW(d0) + LOW(d1);
+    r[0] = LOW(s0);
+    carry = HIGH(s0);
+    s1 = b[1] + HIGH(c00) + HIGH(d0) + HIGH(d1) + LOW(d2) + carry;
+    r[1] = LOW(s1);
+    carry = HIGH(s1);
+    r[2] = b[2] + HIGH(d2) + d3 + carry;
+#undef LOW
+#undef HIGH
+}
+
+/**
+ * Compute Carter/Wegman k-independent hash
+ * Implementation by Thorup et al.
+ * - compute polynomial on prime field of 2^89-1 (10th Mersenne prime)
+ * - random coefficients from random.org
+ */
+static uint64_t
+CWhash(uint64_t x)
+{
+    INT96 A = {0xcf90094b0ab9939e, 0x817f998697604ff3, 0x1a6e6f08b65440ea};
+    INT96 B = {0xb989a05a5dcf57f1, 0x7c007611f28daee7, 0xd8bd809d68c26854};
+    INT96 C = {0x1041070633a92679, 0xba9379fd71cd939d, 0x271793709e1cd781};
+    INT96 D = {0x5c240a710b0c6beb, 0xc24ac3b68056ea1c, 0xd46c9c7f2adfaf71};
+    INT96 E = {0xa527cea74b053a87, 0x69ba4a5e23f90577, 0x707b6e053c7741e7};
+    INT96 F = {0xa6c0812cdbcdb982, 0x8cb0c8b73f701489, 0xee08c4dc1dbef243};
+    INT96 G = {0xcf3ab0ec9d538853, 0x982a8457b6db03a9, 0x8659cf6b636c9d37};
+    INT96 H = {0x905d5d14efefc0dd, 0x7e9870e018ead6a2, 0x47e2c9af0ea9325a};
+    INT96 I = {0xc59351a9bf283b09, 0x4a39e35dbc280c7f, 0xc5f160732996be4f};
+    INT96 J = {0x4d58e0b7a57ccddf, 0xc362a25c267d1db4, 0x7c79d2fcd89402b2};
+    INT96 K = {0x62ac342c4393930c, 0xdb2fd2740ebef2a0, 0xc672fd5e72921377};
+    INT96 L = {0xbdae267838862c6d, 0x0e0ee206fdbaf1d1, 0xc270e26fd8dfbae7};
+
+    INT96 r;
+    MultAddPrime89(r, x, A, B);
+    MultAddPrime89(r, x, r, C);
+    MultAddPrime89(r, x, r, D);
+    MultAddPrime89(r, x, r, E);
+    MultAddPrime89(r, x, r, F);
+    MultAddPrime89(r, x, r, G);
+    MultAddPrime89(r, x, r, H);
+    MultAddPrime89(r, x, r, I);
+    MultAddPrime89(r, x, r, J);
+    MultAddPrime89(r, x, r, K);
+    MultAddPrime89(r, x, r, L);
+    return Mod64Prime89(r);
+}
+
+/**
+ * The well-known FNV-1a hash for 64 bits.
+ * The typical seed value (FNV offset basis) is 14695981039346656037LLU.
+ *
+ * NOTE: this particular hash performs poorly for certain nodes, resulting in
+ * early garbage collection and failure. Xoring the hash with a shifted copy of
+ * itself suffices as a band-aid, but it is not an ideal solution.
+ */
 uint64_t
-llmsset_hash(const uint64_t a, const uint64_t b, const uint64_t seed)
+llmsset_fnvhash(const uint64_t a, const uint64_t b, const uint64_t seed)
 {
     // The FNV-1a hash for 64 bits
     const uint64_t prime = 1099511628211;
     uint64_t hash = seed;
     hash = (hash ^ a) * prime;
     hash = (hash ^ b) * prime;
-    return hash;
+    return hash ^ (hash>>32);
 }
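The xor-fold matters because the table index is taken from the low bits of the hash (hash & dbs->mask when LLMSSET_MASK is set), so the high half of the FNV state has to influence them. A small illustrative fragment, with a hypothetical mask parameter:

    /* Sketch only: the bucket index uses the low bits, so they must be well mixed. */
    static uint64_t bucket_of(uint64_t a, uint64_t b, uint64_t mask)
    {
        uint64_t h = llmsset_fnvhash(a, b, 14695981039346656037LLU);
        return h & mask;   /* without the fold, the high FNV bits never reach here */
    }
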
 
 /*
@@ -247,6 +399,7 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
     const int custom = is_custom_bucket(dbs, d_idx) ? 1 : 0;
     if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash);
     else hash_rehash = llmsset_hash(a, b, hash_rehash);
+    const uint64_t step = (((hash_rehash >> 20) | 1) << 3);
     const uint64_t new_v = (hash_rehash & MASK_HASH) | d_idx;
     int i=0;
 
@@ -271,8 +424,7 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
             }
 
             // go to next cache line in probe sequence
-            if (custom) hash_rehash = dbs->hash_cb(a, b, hash_rehash);
-            else hash_rehash = llmsset_hash(a, b, hash_rehash);
+            hash_rehash += step;
 
 #if LLMSSET_MASK
             last = idx = hash_rehash & dbs->mask;
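The probe sequence is now double hashing: the step is derived once from the initial hash and forced to be an odd multiple of 8, so every probe moves to a different 8-bucket (64-byte) cache line, and the odd factor gives the sequence of lines full period when the table size is a power of two. It also avoids re-invoking the hash (or the custom hash callback) on every probe, as the removed lines above did. A minimal sketch with hypothetical helper names:

    /* Sketch only: per-key step, odd multiple of 8; the probe bound is arbitrary. */
    static int find_line(const llmsset_t dbs, uint64_t a, uint64_t b, uint64_t seed)
    {
        uint64_t hash = llmsset_hash(a, b, seed);
        const uint64_t step = (((hash >> 20) | 1) << 3);
        for (int probe = 0; probe < 128; probe++) {
            uint64_t idx = hash & dbs->mask;
            if (line_has_room(dbs, idx)) return 1;   /* hypothetical check */
            hash += step;                            /* jump to another cache line */
        }
        return 0;                                    /* caller would trigger GC */
    }
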
@@ -286,9 +438,6 @@ llmsset_rehash_bucket(const llmsset_t dbs, uint64_t d_idx)
 llmsset_t
 llmsset_create(size_t initial_size, size_t max_size)
 {
-    hwloc_topology_init(&topo);
-    hwloc_topology_load(topo);
-
     llmsset_t dbs = NULL;
     if (posix_memalign((void**)&dbs, LINE_SIZE, sizeof(struct llmsset)) != 0) {
         fprintf(stderr, "llmsset_create: Unable to allocate memory!\n");
@@ -347,12 +496,6 @@ llmsset_create(size_t initial_size, size_t max_size)
     madvise(dbs->table, dbs->max_size * 8, MADV_RANDOM);
 #endif
 
-    hwloc_set_area_membind(topo, dbs->table, dbs->max_size * 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
-    hwloc_set_area_membind(topo, dbs->data, dbs->max_size * 16, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
-    hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
-    hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
-    hwloc_set_area_membind(topo, dbs->bitmapc, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
-
     // forbid first two positions (index 0 and 1)
     dbs->bitmap2[0] = 0xc000000000000000LL;
 
@@ -369,6 +512,9 @@ llmsset_create(size_t initial_size, size_t max_size)
     INIT_THREAD_LOCAL(my_region);
     TOGETHER(llmsset_reset_region);
 
+    // initialize hashtab
+    for (int i=0; i<256*16; i++) hashtab[i] = CWhash(i);
+
     return dbs;
 }
 
@@ -392,13 +538,11 @@ VOID_TASK_IMPL_1(llmsset_clear, llmsset_t, dbs)
 VOID_TASK_IMPL_1(llmsset_clear_data, llmsset_t, dbs)
 {
     if (mmap(dbs->bitmap1, dbs->max_size / (512*8), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
-        hwloc_set_area_membind(topo, dbs->bitmap1, dbs->max_size / (512*8), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
     } else {
         memset(dbs->bitmap1, 0, dbs->max_size / (512*8));
     }
 
     if (mmap(dbs->bitmap2, dbs->max_size / 8, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != (void*)-1) {
-        hwloc_set_area_membind(topo, dbs->bitmap2, dbs->max_size / 8, hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_FIRSTTOUCH, 0);
     } else {
         memset(dbs->bitmap2, 0, dbs->max_size / 8);
     }
@@ -416,7 +560,6 @@ VOID_TASK_IMPL_1(llmsset_clear_hashes, llmsset_t, dbs)
 #if defined(madvise) && defined(MADV_RANDOM)
         madvise(dbs->table, sizeof(uint64_t[dbs->max_size]), MADV_RANDOM);
 #endif
-        hwloc_set_area_membind(topo, dbs->table, sizeof(uint64_t[dbs->max_size]), hwloc_topology_get_allowed_cpuset(topo), HWLOC_MEMBIND_INTERLEAVE, 0);
     } else {
         // reallocate failed... expensive fallback
         memset(dbs->table, 0, dbs->max_size * 8);
diff --git a/resources/3rdparty/sylvan/src/sylvan_table.h b/resources/3rdparty/sylvan/src/sylvan_table.h
index 8f4c0642e..fe25d6de3 100755
--- a/resources/3rdparty/sylvan/src/sylvan_table.h
+++ b/resources/3rdparty/sylvan/src/sylvan_table.h
@@ -1,6 +1,6 @@
 /*
  * Copyright 2011-2016 Formal Methods and Tools, University of Twente
- * Copyright 2016 Tom van Dijk, Johannes Kepler University Linz
+ * Copyright 2016-2017 Tom van Dijk, Johannes Kepler University Linz
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -15,24 +15,15 @@
  * limitations under the License.
  */
 
-#include <sylvan_config.h>
+/* Do not include this file directly. Instead, include sylvan_int.h */
 
-#include <stdint.h>
-#include <unistd.h>
-
-#include <lace.h>
-
-#ifndef LLMSSET_H
-#define LLMSSET_H
+#ifndef SYLVAN_TABLE_H
+#define SYLVAN_TABLE_H
 
 #ifdef __cplusplus
 extern "C" {
 #endif /* __cplusplus */
 
-#ifndef LLMSSET_MASK
-#define LLMSSET_MASK 0 // set to 1 to use bit mask instead of modulo
-#endif
-
 /**
  * Lockless hash table (set) to store 16-byte keys.
  * Each unique key is associated with a 42-bit number.
@@ -210,9 +201,19 @@ VOID_TASK_DECL_1(llmsset_destroy_unmarked, llmsset_t);
 void llmsset_set_custom(const llmsset_t dbs, llmsset_hash_cb hash_cb, llmsset_equals_cb equals_cb, llmsset_create_cb create_cb, llmsset_destroy_cb destroy_cb);
 
 /**
- * Default hashing function
+ * Default hashing functions.
+ */
+#define llmsset_hash llmsset_tabhash
+
+/**
+ * FNV-1a hash
+ */
+uint64_t llmsset_fnvhash(uint64_t a, uint64_t b, uint64_t seed);
+
+/**
+ * Twisted tabulation hash
  */
-uint64_t llmsset_hash(const uint64_t a, const uint64_t b, const uint64_t seed);
+uint64_t llmsset_tabhash(uint64_t a, uint64_t b, uint64_t seed);
 
 #ifdef __cplusplus
 }
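With the new macro, existing callers of llmsset_hash transparently get the tabulation hash, while the FNV-1a variant stays available under its own name. A small illustrative fragment (the demo wrapper is not part of the API):

    #include "sylvan_int.h"   /* sylvan_table.h is not meant to be included directly */

    static uint64_t demo_hashes(uint64_t a, uint64_t b, uint64_t seed)
    {
        uint64_t h1 = llmsset_hash(a, b, seed);     /* macro: resolves to llmsset_tabhash */
        uint64_t h2 = llmsset_fnvhash(a, b, seed);  /* previous FNV-1a based hash */
        return h1 ^ h2;
    }
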
diff --git a/resources/3rdparty/sylvan/src/sylvan_tls.h b/resources/3rdparty/sylvan/src/sylvan_tls.h
index 80fdfe7e5..09958b2d3 100755
--- a/resources/3rdparty/sylvan/src/sylvan_tls.h
+++ b/resources/3rdparty/sylvan/src/sylvan_tls.h
@@ -5,7 +5,6 @@
  * A platform-independent wrapper around thread-local storage. On platforms that don't support
  * __thread variables (e.g. Mac OS X), we have to use the pthreads library for thread-local storage
  */
-#include <assert.h>
 
 #ifndef TLS_H
 #define TLS_H
@@ -18,8 +17,6 @@
 
 #else//!__ELF__
 
-#include <pthread.h>
-
 #define DECLARE_THREAD_LOCAL(name, type) pthread_key_t name##_KEY
 
 #define INIT_THREAD_LOCAL(name) \
diff --git a/resources/examples/testfiles/dtmc/die.pm b/resources/examples/testfiles/dtmc/die.pm
index af0797cff..e41139001 100644
--- a/resources/examples/testfiles/dtmc/die.pm
+++ b/resources/examples/testfiles/dtmc/die.pm
@@ -26,7 +26,4 @@ endrewards
 label "one" = s=7&d=1;
 label "two" = s=7&d=2;
 label "three" = s=7&d=3;
-label "four" = s=7&d=4;
-label "five" = s=7&d=5;
-label "six" = s=7&d=6;
 label "done" = s=7;
diff --git a/resources/examples/testfiles/mdp/leader3.nm b/resources/examples/testfiles/mdp/leader3.nm
index 96339dd96..b8f365984 100644
--- a/resources/examples/testfiles/mdp/leader3.nm
+++ b/resources/examples/testfiles/mdp/leader3.nm
@@ -84,6 +84,9 @@ module process2=process1[s1=s2,p1=p2,c1=c2,sent1=sent2,receive1=receive2,p12=p23
 module process3=process1[s1=s3,p1=p3,c1=c3,sent1=sent3,receive1=receive3,p12=p31,p31=p23,c12=c31,c31=c23,p3=p2,c3=c2] endmodule
 
 //----------------------------------------------------------------------------------------------------------------------------
+rewards "rounds"
+        [c12] true : 1;
+endrewards
 
 //----------------------------------------------------------------------------------------------------------------------------
 formula leaders = (s1=4?1:0)+(s2=4?1:0)+(s3=4?1:0);
diff --git a/src/storm-cli-utilities/cli.cpp b/src/storm-cli-utilities/cli.cpp
index 66d83ece7..cb33c3205 100644
--- a/src/storm-cli-utilities/cli.cpp
+++ b/src/storm-cli-utilities/cli.cpp
@@ -1,24 +1,5 @@
 #include "cli.h"
 
-#include "storm/storage/SymbolicModelDescription.h"
-
-#include "storm/models/ModelBase.h"
-
-#include "storm/exceptions/OptionParserException.h"
-
-#include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
-
-#include "storm/models/sparse/StandardRewardModel.h"
-#include "storm/models/symbolic/StandardRewardModel.h"
-
-#include "storm/settings/SettingsManager.h"
-#include "storm/settings/modules/ResourceSettings.h"
-#include "storm/settings/modules/JitBuilderSettings.h"
-#include "storm/settings/modules/DebugSettings.h"
-#include "storm/settings/modules/IOSettings.h"
-#include "storm/settings/modules/CoreSettings.h"
-#include "storm/settings/modules/ResourceSettings.h"
-#include "storm/settings/modules/JaniExportSettings.h"
 
 #include "storm/utility/resources.h"
 #include "storm/utility/file.h"
@@ -30,7 +11,7 @@
 
 #include <type_traits>
 
-#include "storm/api/storm.h"
+#include "storm-cli-utilities/model-handling.h"
 
 
 // Includes for the linked libraries and versions header.
@@ -231,595 +212,7 @@ namespace storm {
             setLogLevel();
             setFileLogging();
         }
-        
-        struct SymbolicInput {
-            // The symbolic model description.
-            boost::optional<storm::storage::SymbolicModelDescription> model;
-            
-            // The properties to check.
-            std::vector<storm::jani::Property> properties;
-        };
-        
-        void parseSymbolicModelDescription(storm::settings::modules::IOSettings const& ioSettings, SymbolicInput& input) {
-            if (ioSettings.isPrismOrJaniInputSet()) {
-                if (ioSettings.isPrismInputSet()) {
-                    input.model = storm::api::parseProgram(ioSettings.getPrismInputFilename());
-                } else {
-                    auto janiInput = storm::api::parseJaniModel(ioSettings.getJaniInputFilename());
-                    input.model = janiInput.first;
-                    auto const& janiPropertyInput = janiInput.second;
-                    
-                    if (ioSettings.isJaniPropertiesSet()) {
-                        for (auto const& propName : ioSettings.getJaniProperties()) {
-                            auto propertyIt = janiPropertyInput.find(propName);
-                            STORM_LOG_THROW(propertyIt != janiPropertyInput.end(), storm::exceptions::InvalidArgumentException, "No JANI property with name '" << propName << "' is known.");
-                            input.properties.emplace_back(propertyIt->second);
-                        }
-                    }
-                }
-            }
-        }
-        
-        void parseProperties(storm::settings::modules::IOSettings const& ioSettings, SymbolicInput& input, boost::optional<std::set<std::string>> const& propertyFilter) {
-            if (ioSettings.isPropertySet()) {
-                std::vector<storm::jani::Property> newProperties;
-                if (input.model) {
-                    newProperties = storm::api::parsePropertiesForSymbolicModelDescription(ioSettings.getProperty(), input.model.get(), propertyFilter);
-                } else {
-                    newProperties = storm::api::parseProperties(ioSettings.getProperty(), propertyFilter);
-                }
-                
-                input.properties.insert(input.properties.end(), newProperties.begin(), newProperties.end());
-            }
-        }
-        
-        SymbolicInput parseSymbolicInput() {
-            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
-
-            // Parse the property filter, if any is given.
-            boost::optional<std::set<std::string>> propertyFilter = storm::api::parsePropertyFilter(ioSettings.getPropertyFilter());
-            
-            SymbolicInput input;
-            parseSymbolicModelDescription(ioSettings, input);
-            parseProperties(ioSettings, input, propertyFilter);
-            
-            return input;
-        }
-        
-        SymbolicInput preprocessSymbolicInput(SymbolicInput const& input) {
-            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
-            auto coreSettings = storm::settings::getModule<storm::settings::modules::CoreSettings>();
-
-            SymbolicInput output = input;
-            
-            // Substitute constant definitions in symbolic input.
-            std::string constantDefinitionString = ioSettings.getConstantDefinitionString();
-            std::map<storm::expressions::Variable, storm::expressions::Expression> constantDefinitions;
-            if (output.model) {
-                constantDefinitions = output.model.get().parseConstantDefinitions(constantDefinitionString);
-                output.model = output.model.get().preprocess(constantDefinitions);
-            }
-            if (!output.properties.empty()) {
-                output.properties = storm::api::substituteConstantsInProperties(output.properties, constantDefinitions);
-            }
-            
-            // Check whether conversion for PRISM to JANI is requested or necessary.
-            if (input.model && input.model.get().isPrismProgram()) {
-                bool transformToJani = ioSettings.isPrismToJaniSet();
-                bool transformToJaniForJit = coreSettings.getEngine() == storm::settings::modules::CoreSettings::Engine::Sparse && ioSettings.isJitSet();
-                STORM_LOG_WARN_COND(transformToJani || !transformToJaniForJit, "The JIT-based model builder is only available for JANI models, automatically converting the PRISM input model.");
-                transformToJani |= transformToJaniForJit;
-                
-                if (transformToJani) {
-                    storm::prism::Program const& model = output.model.get().asPrismProgram();
-                    auto modelAndRenaming = model.toJaniWithLabelRenaming(true);
-                    output.model = modelAndRenaming.first;
-                    
-                    if (!modelAndRenaming.second.empty()) {
-                        std::map<std::string, std::string> const& labelRenaming = modelAndRenaming.second;
-                        std::vector<storm::jani::Property> amendedProperties;
-                        for (auto const& property : output.properties) {
-                            amendedProperties.emplace_back(property.substituteLabels(labelRenaming));
-                        }
-                        output.properties = std::move(amendedProperties);
-                    }
-                }
-            }
-            
-            return output;
-        }
-        
-        void exportSymbolicInput(SymbolicInput const& input) {
-            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
-            if (input.model && input.model.get().isJaniModel()) {
-                storm::storage::SymbolicModelDescription const& model = input.model.get();
-                if (ioSettings.isExportJaniDotSet()) {
-                    storm::api::exportJaniModelAsDot(model.asJaniModel(), ioSettings.getExportJaniDotFilename());
-                }
-                
-                if (model.isJaniModel() && storm::settings::getModule<storm::settings::modules::JaniExportSettings>().isJaniFileSet()) {
-                    storm::api::exportJaniModel(model.asJaniModel(), input.properties, storm::settings::getModule<storm::settings::modules::JaniExportSettings>().getJaniFilename());
-                }
-            }
-        }
-        
-        SymbolicInput parseAndPreprocessSymbolicInput() {
-            SymbolicInput input = parseSymbolicInput();
-            input = preprocessSymbolicInput(input);
-            exportSymbolicInput(input);
-            return input;
-        }
-        
-        std::vector<std::shared_ptr<storm::logic::Formula const>> createFormulasToRespect(std::vector<storm::jani::Property> const& properties) {
-            std::vector<std::shared_ptr<storm::logic::Formula const>> result = storm::api::extractFormulasFromProperties(properties);
-            
-            for (auto const& property : properties) {
-                if (!property.getFilter().getStatesFormula()->isInitialFormula()) {
-                    result.push_back(property.getFilter().getStatesFormula());
-                }
-            }
-
-            return result;
-        }
-        
-        template <storm::dd::DdType DdType, typename ValueType>
-        std::shared_ptr<storm::models::ModelBase> buildModelDd(SymbolicInput const& input) {
-            return storm::api::buildSymbolicModel<DdType, ValueType>(input.model.get(), createFormulasToRespect(input.properties));
-        }
-
-        template <typename ValueType>
-        std::shared_ptr<storm::models::ModelBase> buildModelSparse(SymbolicInput const& input, storm::settings::modules::IOSettings const& ioSettings) {
-            auto counterexampleGeneratorSettings = storm::settings::getModule<storm::settings::modules::CounterexampleGeneratorSettings>();
-            storm::builder::BuilderOptions options(createFormulasToRespect(input.properties));
-            options.setBuildChoiceLabels(ioSettings.isBuildChoiceLabelsSet());
-            options.setBuildChoiceOrigins(counterexampleGeneratorSettings.isMinimalCommandSetGenerationSet());
-            options.setBuildAllLabels(ioSettings.isBuildFullModelSet());
-            options.setBuildAllRewardModels(ioSettings.isBuildFullModelSet());
-            if (ioSettings.isBuildFullModelSet()) {
-                options.clearTerminalStates();
-            }
-            return storm::api::buildSparseModel<ValueType>(input.model.get(), options, ioSettings.isJitSet(), storm::settings::getModule<storm::settings::modules::JitBuilderSettings>().isDoctorSet());
-        }
-
-        template <typename ValueType>
-        std::shared_ptr<storm::models::ModelBase> buildModelExplicit(storm::settings::modules::IOSettings const& ioSettings) {
-            std::shared_ptr<storm::models::ModelBase> result;
-            if (ioSettings.isExplicitSet()) {
-                result = storm::api::buildExplicitModel<ValueType>(ioSettings.getTransitionFilename(), ioSettings.getLabelingFilename(), ioSettings.isStateRewardsSet() ? boost::optional<std::string>(ioSettings.getStateRewardsFilename()) : boost::none, ioSettings.isTransitionRewardsSet() ? boost::optional<std::string>(ioSettings.getTransitionRewardsFilename()) : boost::none, ioSettings.isChoiceLabelingSet() ? boost::optional<std::string>(ioSettings.getChoiceLabelingFilename()) : boost::none);
-            } else if (ioSettings.isExplicitDRNSet()) {
-                result = storm::api::buildExplicitDRNModel<ValueType>(ioSettings.getExplicitDRNFilename());
-            } else {
-                STORM_LOG_THROW(ioSettings.isExplicitIMCASet(), storm::exceptions::InvalidSettingsException, "Unexpected explicit model input type.");
-                result = storm::api::buildExplicitIMCAModel<ValueType>(ioSettings.getExplicitIMCAFilename());
-            }
-            return result;
-        }
-
-        template <storm::dd::DdType DdType, typename ValueType>
-        std::shared_ptr<storm::models::ModelBase> buildModel(storm::settings::modules::CoreSettings::Engine const& engine, SymbolicInput const& input, storm::settings::modules::IOSettings const& ioSettings) {
-            storm::utility::Stopwatch modelBuildingWatch(true);
-
-            std::shared_ptr<storm::models::ModelBase> result;
-            if (input.model) {
-                if (engine == storm::settings::modules::CoreSettings::Engine::Dd || engine == storm::settings::modules::CoreSettings::Engine::Hybrid) {
-                    result = buildModelDd<DdType, ValueType>(input);
-                } else if (engine == storm::settings::modules::CoreSettings::Engine::Sparse) {
-                    result = buildModelSparse<ValueType>(input, ioSettings);
-                }
-            } else if (ioSettings.isExplicitSet() || ioSettings.isExplicitDRNSet() || ioSettings.isExplicitIMCASet()) {
-                STORM_LOG_THROW(engine == storm::settings::modules::CoreSettings::Engine::Sparse, storm::exceptions::InvalidSettingsException, "Can only use sparse engine with explicit input.");
-                result = buildModelExplicit<ValueType>(ioSettings);
-            }
-            
-            modelBuildingWatch.stop();
-            if (result) {
-                STORM_PRINT_AND_LOG("Time for model construction: " << modelBuildingWatch << "." << std::endl << std::endl);
-            }
-
-            return result;
-        }
-        
-        template <typename ValueType>
-        std::shared_ptr<storm::models::sparse::Model<ValueType>> preprocessSparseMarkovAutomaton(std::shared_ptr<storm::models::sparse::MarkovAutomaton<ValueType>> const& model) {
-            std::shared_ptr<storm::models::sparse::Model<ValueType>> result = model;
-            model->close();
-            if (model->hasOnlyTrivialNondeterminism()) {
-                result = model->convertToCTMC();
-            }
-            return result;
-        }
 
-        template <typename ValueType>
-        std::shared_ptr<storm::models::sparse::Model<ValueType>> preprocessSparseModelBisimulation(std::shared_ptr<storm::models::sparse::Model<ValueType>> const& model, SymbolicInput const& input, storm::settings::modules::BisimulationSettings const& bisimulationSettings) {
-            storm::storage::BisimulationType bisimType = storm::storage::BisimulationType::Strong;
-            if (bisimulationSettings.isWeakBisimulationSet()) {
-                bisimType = storm::storage::BisimulationType::Weak;
-            }
-            
-            STORM_LOG_INFO("Performing bisimulation minimization...");
-            return storm::api::performBisimulationMinimization<ValueType>(model, createFormulasToRespect(input.properties), bisimType);
-        }
-        
-        template <typename ValueType>
-        std::pair<std::shared_ptr<storm::models::sparse::Model<ValueType>>, bool> preprocessSparseModel(std::shared_ptr<storm::models::sparse::Model<ValueType>> const& model, SymbolicInput const& input) {
-            auto generalSettings = storm::settings::getModule<storm::settings::modules::GeneralSettings>();
-            auto bisimulationSettings = storm::settings::getModule<storm::settings::modules::BisimulationSettings>();
-            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
-            
-            std::pair<std::shared_ptr<storm::models::sparse::Model<ValueType>>, bool> result = std::make_pair(model, false);
-            
-            if (result.first->isOfType(storm::models::ModelType::MarkovAutomaton)) {
-                result.first = preprocessSparseMarkovAutomaton(result.first->template as<storm::models::sparse::MarkovAutomaton<ValueType>>());
-                result.second = true;
-            }
-            
-            if (generalSettings.isBisimulationSet()) {
-                result.first = preprocessSparseModelBisimulation(result.first, input, bisimulationSettings);
-                result.second = true;
-            }
-            
-            return result;
-        }
-        
-        template <typename ValueType>
-        void exportSparseModel(std::shared_ptr<storm::models::sparse::Model<ValueType>> const& model, SymbolicInput const& input) {
-            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
-            
-            if (ioSettings.isExportExplicitSet()) {
-                storm::api::exportSparseModelAsDrn(model, ioSettings.getExportExplicitFilename(), input.model ? input.model.get().getParameterNames() : std::vector<std::string>());
-            }
-            
-            if (ioSettings.isExportDotSet()) {
-                storm::api::exportSparseModelAsDot(model, ioSettings.getExportDotFilename());
-            }
-        }
-
-        template <storm::dd::DdType DdType, typename ValueType>
-        void exportDdModel(std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> const& model, SymbolicInput const& input) {
-            // Intentionally left empty.
-        }
-        
-        template <storm::dd::DdType DdType, typename ValueType>
-        void exportModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
-            if (model->isSparseModel()) {
-                exportSparseModel<ValueType>(model->as<storm::models::sparse::Model<ValueType>>(), input);
-            } else {
-                exportDdModel<DdType, ValueType>(model->as<storm::models::symbolic::Model<DdType, ValueType>>(), input);
-            }
-        }
-        
-        template <storm::dd::DdType DdType, typename ValueType>
-        std::pair<std::shared_ptr<storm::models::ModelBase>, bool> preprocessDdModel(std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> const& model, SymbolicInput const& input) {
-            return std::make_pair(model, false);
-        }
-
-        template <storm::dd::DdType DdType, typename ValueType>
-        std::pair<std::shared_ptr<storm::models::ModelBase>, bool> preprocessModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
-            storm::utility::Stopwatch preprocessingWatch(true);
-            
-            std::pair<std::shared_ptr<storm::models::ModelBase>, bool> result = std::make_pair(model, false);
-            if (model->isSparseModel()) {
-                result = preprocessSparseModel<ValueType>(result.first->as<storm::models::sparse::Model<ValueType>>(), input);
-            } else {
-                STORM_LOG_ASSERT(model->isSymbolicModel(), "Unexpected model type.");
-                result = preprocessDdModel<DdType, ValueType>(result.first->as<storm::models::symbolic::Model<DdType, ValueType>>(), input);
-            }
-            
-            if (result.second) {
-                STORM_PRINT_AND_LOG(std::endl << "Time for model preprocessing: " << preprocessingWatch << "." << std::endl << std::endl);
-            }
-            return result;
-        }
-        
-        void printComputingCounterexample(storm::jani::Property const& property) {
-            STORM_PRINT_AND_LOG("Computing counterexample for property " << *property.getRawFormula() << " ..." << std::endl);
-        }
-        
-        void printCounterexample(std::shared_ptr<storm::counterexamples::Counterexample> const& counterexample, storm::utility::Stopwatch* watch = nullptr) {
-            if (counterexample) {
-                STORM_PRINT_AND_LOG(*counterexample << std::endl);
-                if (watch) {
-                    STORM_PRINT_AND_LOG("Time for computation: " << *watch << "." << std::endl);
-                }
-            } else {
-                STORM_PRINT_AND_LOG(" failed." << std::endl);
-            }
-        }
-
-        template <typename ValueType>
-        void generateCounterexamples(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
-            STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Counterexample generation is not supported for this data-type.");
-        }
-        
-        template <>
-        void generateCounterexamples<double>(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
-            typedef double ValueType;
-            
-            STORM_LOG_THROW(model->isSparseModel(), storm::exceptions::NotSupportedException, "Counterexample generation is currently only supported for sparse models.");
-            auto sparseModel = model->as<storm::models::sparse::Model<ValueType>>();
-            
-            STORM_LOG_THROW(sparseModel->isOfType(storm::models::ModelType::Mdp), storm::exceptions::NotSupportedException, "Counterexample is currently only supported for MDPs.");
-            auto mdp = sparseModel->template as<storm::models::sparse::Mdp<ValueType>>();
-            
-            auto counterexampleSettings = storm::settings::getModule<storm::settings::modules::CounterexampleGeneratorSettings>();
-            if (counterexampleSettings.isMinimalCommandSetGenerationSet()) {
-                STORM_LOG_THROW(input.model && input.model.get().isPrismProgram(), storm::exceptions::NotSupportedException, "Minimal command set counterexamples are only supported for PRISM model input.");
-                storm::prism::Program const& program = input.model.get().asPrismProgram();
-
-                bool useMilp = counterexampleSettings.isUseMilpBasedMinimalCommandSetGenerationSet();
-                for (auto const& property : input.properties) {
-                    std::shared_ptr<storm::counterexamples::Counterexample> counterexample;
-                    printComputingCounterexample(property);
-                    storm::utility::Stopwatch watch(true);
-                    if (useMilp) {
-                        counterexample = storm::api::computePrismHighLevelCounterexampleMilp(program, mdp, property.getRawFormula());
-                    } else {
-                        counterexample = storm::api::computePrismHighLevelCounterexampleMaxSmt(program, mdp, property.getRawFormula());
-                    }
-                    watch.stop();
-                    printCounterexample(counterexample, &watch);
-                }
-            } else {
-                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The selected counterexample formalism is unsupported.");
-            }
-        }
-        
-        template<typename ValueType>
-        void printFilteredResult(std::unique_ptr<storm::modelchecker::CheckResult> const& result, storm::modelchecker::FilterType ft) {
-            if (result->isQuantitative()) {
-                switch (ft) {
-                    case storm::modelchecker::FilterType::VALUES:
-                        STORM_PRINT_AND_LOG(*result);
-                        break;
-                    case storm::modelchecker::FilterType::SUM:
-                        STORM_PRINT_AND_LOG(result->asQuantitativeCheckResult<ValueType>().sum());
-                        break;
-                    case storm::modelchecker::FilterType::AVG:
-                        STORM_PRINT_AND_LOG(result->asQuantitativeCheckResult<ValueType>().average());
-                        break;
-                    case storm::modelchecker::FilterType::MIN:
-                        STORM_PRINT_AND_LOG(result->asQuantitativeCheckResult<ValueType>().getMin());
-                        break;
-                    case storm::modelchecker::FilterType::MAX:
-                        STORM_PRINT_AND_LOG(result->asQuantitativeCheckResult<ValueType>().getMax());
-                        break;
-                    case storm::modelchecker::FilterType::ARGMIN:
-                    case storm::modelchecker::FilterType::ARGMAX:
-                        STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Outputting states is not supported.");
-                    case storm::modelchecker::FilterType::EXISTS:
-                    case storm::modelchecker::FilterType::FORALL:
-                    case storm::modelchecker::FilterType::COUNT:
-                        STORM_LOG_THROW(false, storm::exceptions::InvalidArgumentException, "Filter type only defined for qualitative results.");
-                }
-            } else {
-                switch (ft) {
-                    case storm::modelchecker::FilterType::VALUES:
-                        STORM_PRINT_AND_LOG(*result << std::endl);
-                        break;
-                    case storm::modelchecker::FilterType::EXISTS:
-                        STORM_PRINT_AND_LOG(result->asQualitativeCheckResult().existsTrue());
-                        break;
-                    case storm::modelchecker::FilterType::FORALL:
-                        STORM_PRINT_AND_LOG(result->asQualitativeCheckResult().forallTrue());
-                        break;
-                    case storm::modelchecker::FilterType::COUNT:
-                        STORM_PRINT_AND_LOG(result->asQualitativeCheckResult().count());
-                        break;
-                    case storm::modelchecker::FilterType::ARGMIN:
-                    case storm::modelchecker::FilterType::ARGMAX:
-                        STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Outputting states is not supported.");
-                    case storm::modelchecker::FilterType::SUM:
-                    case storm::modelchecker::FilterType::AVG:
-                    case storm::modelchecker::FilterType::MIN:
-                    case storm::modelchecker::FilterType::MAX:
-                        STORM_LOG_THROW(false, storm::exceptions::InvalidArgumentException, "Filter type only defined for quantitative results.");
-                }
-            }
-            STORM_PRINT_AND_LOG(std::endl);
-        }
-        
-        void printModelCheckingProperty(storm::jani::Property const& property) {
-            STORM_PRINT_AND_LOG(std::endl << "Model checking property " << *property.getRawFormula() << " ..." << std::endl);
-        }
-
-        template<typename ValueType>
-        void printResult(std::unique_ptr<storm::modelchecker::CheckResult> const& result, storm::jani::Property const& property, storm::utility::Stopwatch* watch = nullptr) {
-            if (result) {
-                std::stringstream ss;
-                ss << "'" << *property.getFilter().getStatesFormula() << "'";
-                STORM_PRINT_AND_LOG("Result (for " << (property.getFilter().getStatesFormula()->isInitialFormula() ? "initial" : ss.str()) << " states): ");
-                printFilteredResult<ValueType>(result, property.getFilter().getFilterType());
-                if (watch) {
-                    STORM_PRINT_AND_LOG("Time for model checking: " << *watch << "." << std::endl);
-                }
-            } else {
-                STORM_PRINT_AND_LOG(" failed, property is unsupported by selected engine/settings." << std::endl);
-            }
-        }
-        
-        struct PostprocessingIdentity {
-            void operator()(std::unique_ptr<storm::modelchecker::CheckResult> const&) {
-                // Intentionally left empty.
-            }
-        };
-        
-        template<typename ValueType>
-        void verifyProperties(std::vector<storm::jani::Property> const& properties, std::function<std::unique_ptr<storm::modelchecker::CheckResult>(std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states)> const& verificationCallback, std::function<void(std::unique_ptr<storm::modelchecker::CheckResult> const&)> const& postprocessingCallback = PostprocessingIdentity()) {
-            for (auto const& property : properties) {
-                printModelCheckingProperty(property);
-                storm::utility::Stopwatch watch(true);
-                std::unique_ptr<storm::modelchecker::CheckResult> result = verificationCallback(property.getRawFormula(), property.getFilter().getStatesFormula());
-                watch.stop();
-                postprocessingCallback(result);
-                printResult<ValueType>(result, property, &watch);
-            }
-        }
-        
-        template <storm::dd::DdType DdType, typename ValueType>
-        void verifyWithAbstractionRefinementEngine(SymbolicInput const& input) {
-            STORM_LOG_ASSERT(input.model, "Expected symbolic model description.");
-            verifyProperties<ValueType>(input.properties, [&input] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
-                STORM_LOG_THROW(states->isInitialFormula(), storm::exceptions::NotSupportedException, "Abstraction-refinement can only filter initial states.");
-                return storm::api::verifyWithAbstractionRefinementEngine<DdType, ValueType>(input.model.get(), storm::api::createTask<ValueType>(formula, true));
-            });
-        }
-
-        template <typename ValueType>
-        void verifyWithExplorationEngine(SymbolicInput const& input) {
-            STORM_LOG_ASSERT(input.model, "Expected symbolic model description.");
-            STORM_LOG_THROW((std::is_same<ValueType, double>::value), storm::exceptions::NotSupportedException, "Exploration does not support other data-types than floating points.");
-            verifyProperties<ValueType>(input.properties, [&input] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
-                STORM_LOG_THROW(states->isInitialFormula(), storm::exceptions::NotSupportedException, "Exploration can only filter initial states.");
-                return storm::api::verifyWithExplorationEngine<ValueType>(input.model.get(), storm::api::createTask<ValueType>(formula, true));
-            });
-        }
-        
-        template <typename ValueType>
-        void verifyWithSparseEngine(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
-            auto sparseModel = model->as<storm::models::sparse::Model<ValueType>>();
-            verifyProperties<ValueType>(input.properties,
-                                        [&sparseModel] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
-                                            bool filterForInitialStates = states->isInitialFormula();
-                                            auto task = storm::api::createTask<ValueType>(formula, filterForInitialStates);
-                                            std::unique_ptr<storm::modelchecker::CheckResult> result = storm::api::verifyWithSparseEngine<ValueType>(sparseModel, task);
-                                            
-                                            std::unique_ptr<storm::modelchecker::CheckResult> filter;
-                                            if (filterForInitialStates) {
-                                                filter = std::make_unique<storm::modelchecker::ExplicitQualitativeCheckResult>(sparseModel->getInitialStates());
-                                            } else {
-                                                filter = storm::api::verifyWithSparseEngine<ValueType>(sparseModel, storm::api::createTask<ValueType>(states, false));
-                                            }
-                                            if (result && filter) {
-                                                result->filter(filter->asQualitativeCheckResult());
-                                            }
-                                            return result;
-                                        });
-        }
-
-        template <storm::dd::DdType DdType, typename ValueType>
-        void verifyWithHybridEngine(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
-            verifyProperties<ValueType>(input.properties, [&model] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
-                bool filterForInitialStates = states->isInitialFormula();
-                auto task = storm::api::createTask<ValueType>(formula, filterForInitialStates);
-                
-                auto symbolicModel = model->as<storm::models::symbolic::Model<DdType, ValueType>>();
-                std::unique_ptr<storm::modelchecker::CheckResult> result = storm::api::verifyWithHybridEngine<DdType, ValueType>(symbolicModel, task);
-                
-                std::unique_ptr<storm::modelchecker::CheckResult> filter;
-                if (filterForInitialStates) {
-                    filter = std::make_unique<storm::modelchecker::SymbolicQualitativeCheckResult<DdType>>(symbolicModel->getReachableStates(), symbolicModel->getInitialStates());
-                } else {
-                    filter = storm::api::verifyWithHybridEngine<DdType, ValueType>(symbolicModel, storm::api::createTask<ValueType>(states, false));
-                }
-                if (result && filter) {
-                    result->filter(filter->asQualitativeCheckResult());
-                }
-                return result;
-            });
-        }
-
-        template <storm::dd::DdType DdType, typename ValueType>
-        void verifyWithDdEngine(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
-            verifyProperties<ValueType>(input.properties, [&model] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
-                bool filterForInitialStates = states->isInitialFormula();
-                auto task = storm::api::createTask<ValueType>(formula, filterForInitialStates);
-
-                auto symbolicModel = model->as<storm::models::symbolic::Model<DdType, ValueType>>();
-                std::unique_ptr<storm::modelchecker::CheckResult> result = storm::api::verifyWithDdEngine<DdType, ValueType>(model->as<storm::models::symbolic::Model<DdType, ValueType>>(), storm::api::createTask<ValueType>(formula, true));
-
-                std::unique_ptr<storm::modelchecker::CheckResult> filter;
-                if (filterForInitialStates) {
-                    filter = std::make_unique<storm::modelchecker::SymbolicQualitativeCheckResult<DdType>>(symbolicModel->getReachableStates(), symbolicModel->getInitialStates());
-                } else {
-                    filter = storm::api::verifyWithDdEngine<DdType, ValueType>(symbolicModel, storm::api::createTask<ValueType>(states, false));
-                }
-                if (result && filter) {
-                    result->filter(filter->asQualitativeCheckResult());
-                }
-                return result;
-            });
-        }
-
-        template <storm::dd::DdType DdType, typename ValueType>
-        typename std::enable_if<DdType != storm::dd::DdType::CUDD || std::is_same<ValueType, double>::value, void>::type verifySymbolicModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input, storm::settings::modules::CoreSettings const& coreSettings) {
-            bool hybrid = coreSettings.getEngine() == storm::settings::modules::CoreSettings::Engine::Hybrid;
-            if (hybrid) {
-                verifyWithHybridEngine<DdType, ValueType>(model, input);
-            } else {
-                verifyWithDdEngine<DdType, ValueType>(model, input);
-            }
-        }
-
-        template <storm::dd::DdType DdType, typename ValueType>
-        typename std::enable_if<DdType == storm::dd::DdType::CUDD && !std::is_same<ValueType, double>::value, void>::type verifySymbolicModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input, storm::settings::modules::CoreSettings const& coreSettings) {
-            STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "CUDD does not support the selected data-type.");
-        }
-
-        template <storm::dd::DdType DdType, typename ValueType>
-        void verifyModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input, storm::settings::modules::CoreSettings const& coreSettings) {
-            if (model->isSparseModel()) {
-                verifyWithSparseEngine<ValueType>(model, input);
-            } else {
-                STORM_LOG_ASSERT(model->isSymbolicModel(), "Unexpected model type.");
-                verifySymbolicModel<DdType, ValueType>(model, input, coreSettings);
-            }
-        }
-        
-        template <storm::dd::DdType DdType, typename ValueType>
-        void processInputWithValueTypeAndDdlib(SymbolicInput const& input) {
-            auto coreSettings = storm::settings::getModule<storm::settings::modules::CoreSettings>();
-            
-            // For several engines, no model building step is performed, but the verification is started right away.
-            storm::settings::modules::CoreSettings::Engine engine = coreSettings.getEngine();
-            if (engine == storm::settings::modules::CoreSettings::Engine::AbstractionRefinement) {
-                verifyWithAbstractionRefinementEngine<DdType, ValueType>(input);
-            } else if (engine == storm::settings::modules::CoreSettings::Engine::Exploration) {
-                verifyWithExplorationEngine<ValueType>(input);
-            } else {
-                auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
-                
-                std::shared_ptr<storm::models::ModelBase> model;
-                if (!ioSettings.isNoBuildModelSet()) {
-                    model = buildModel<DdType, ValueType>(engine, input, ioSettings);
-                }
-                
-                if (model) {
-                    model->printModelInformationToStream(std::cout);
-                }
-                
-                STORM_LOG_THROW(model || input.properties.empty(), storm::exceptions::InvalidSettingsException, "No input model.");
-                
-                if (model) {
-                    auto preprocessingResult = preprocessModel<DdType, ValueType>(model, input);
-                    if (preprocessingResult.second) {
-                        model = preprocessingResult.first;
-                        model->printModelInformationToStream(std::cout);
-                    }
-                }
-                
-                if (model) {
-                    exportModel<DdType, ValueType>(model, input);
-                    
-                    if (coreSettings.isCounterexampleSet()) {
-                        generateCounterexamples<ValueType>(model, input);
-                    } else {
-                        verifyModel<DdType, ValueType>(model, input, coreSettings);
-                    }
-                }
-            }
-        }
-        
-        template <typename ValueType>
-        void processInputWithValueType(SymbolicInput const& input) {
-            auto coreSettings = storm::settings::getModule<storm::settings::modules::CoreSettings>();
-            
-            if (coreSettings.getDdLibraryType() == storm::dd::DdType::CUDD) {
-                processInputWithValueTypeAndDdlib<storm::dd::DdType::CUDD, ValueType>(input);
-            } else {
-                STORM_LOG_ASSERT(coreSettings.getDdLibraryType() == storm::dd::DdType::Sylvan, "Unknown DD library.");
-                processInputWithValueTypeAndDdlib<storm::dd::DdType::Sylvan, ValueType>(input);
-            }
-        }
         
         void processOptions() {
             // Start by setting some urgent options (log levels, resources, etc.)
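The pipeline removed from cli.cpp above reappears in the new header src/storm-cli-utilities/model-handling.h below. A minimal sketch (not part of the patch) of how a frontend can combine the relocated functions, assuming the settings modules have already been initialized and parsed; the choice of Sylvan and double as template arguments is illustrative only:

    #include "storm-cli-utilities/model-handling.h"
    #include <iostream>

    void sketchModelHandling() {
        auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
        auto coreSettings = storm::settings::getModule<storm::settings::modules::CoreSettings>();

        // Parse model and properties, substitute constants, convert PRISM to JANI if requested.
        storm::cli::SymbolicInput input = storm::cli::parseAndPreprocessSymbolicInput();

        // Build a model with the engine selected on the command line.
        auto model = storm::cli::buildModel<storm::dd::DdType::Sylvan, double>(
            coreSettings.getEngine(), input, ioSettings);
        if (model) {
            model->printModelInformationToStream(std::cout);
        }
    }
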
diff --git a/src/storm-cli-utilities/model-handling.h b/src/storm-cli-utilities/model-handling.h
new file mode 100644
index 000000000..588d0d330
--- /dev/null
+++ b/src/storm-cli-utilities/model-handling.h
@@ -0,0 +1,657 @@
+#pragma once
+
+#include "storm/api/storm.h"
+
+#include "storm/utility/resources.h"
+#include "storm/utility/file.h"
+#include "storm/utility/storm-version.h"
+#include "storm/utility/macros.h"
+
+#include "storm/utility/initialize.h"
+#include "storm/utility/Stopwatch.h"
+
+#include <type_traits>
+
+
+#include "storm/storage/SymbolicModelDescription.h"
+
+#include "storm/models/ModelBase.h"
+
+#include "storm/exceptions/OptionParserException.h"
+
+#include "storm/modelchecker/results/SymbolicQualitativeCheckResult.h"
+
+#include "storm/models/sparse/StandardRewardModel.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/settings/SettingsManager.h"
+#include "storm/settings/modules/ResourceSettings.h"
+#include "storm/settings/modules/JitBuilderSettings.h"
+#include "storm/settings/modules/DebugSettings.h"
+#include "storm/settings/modules/IOSettings.h"
+#include "storm/settings/modules/CoreSettings.h"
+#include "storm/settings/modules/ResourceSettings.h"
+#include "storm/settings/modules/JaniExportSettings.h"
+
+#include "storm/utility/Stopwatch.h"
+
+namespace storm {
+    namespace cli {
+
+
+        struct SymbolicInput {
+            // The symbolic model description.
+            boost::optional<storm::storage::SymbolicModelDescription> model;
+
+            // The properties to check.
+            std::vector<storm::jani::Property> properties;
+        };
+
+        void parseSymbolicModelDescription(storm::settings::modules::IOSettings const& ioSettings, SymbolicInput& input) {
+            if (ioSettings.isPrismOrJaniInputSet()) {
+                if (ioSettings.isPrismInputSet()) {
+                    input.model = storm::api::parseProgram(ioSettings.getPrismInputFilename());
+                } else {
+                    auto janiInput = storm::api::parseJaniModel(ioSettings.getJaniInputFilename());
+                    input.model = janiInput.first;
+                    auto const& janiPropertyInput = janiInput.second;
+
+                    if (ioSettings.isJaniPropertiesSet()) {
+                        for (auto const& propName : ioSettings.getJaniProperties()) {
+                            auto propertyIt = janiPropertyInput.find(propName);
+                            STORM_LOG_THROW(propertyIt != janiPropertyInput.end(), storm::exceptions::InvalidArgumentException, "No JANI property with name '" << propName << "' is known.");
+                            input.properties.emplace_back(propertyIt->second);
+                        }
+                    }
+                }
+            }
+        }
+
+        void parseProperties(storm::settings::modules::IOSettings const& ioSettings, SymbolicInput& input, boost::optional<std::set<std::string>> const& propertyFilter) {
+            if (ioSettings.isPropertySet()) {
+                std::vector<storm::jani::Property> newProperties;
+                if (input.model) {
+                    newProperties = storm::api::parsePropertiesForSymbolicModelDescription(ioSettings.getProperty(), input.model.get(), propertyFilter);
+                } else {
+                    newProperties = storm::api::parseProperties(ioSettings.getProperty(), propertyFilter);
+                }
+
+                input.properties.insert(input.properties.end(), newProperties.begin(), newProperties.end());
+            }
+        }
+
+        SymbolicInput parseSymbolicInput() {
+            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
+
+            // Parse the property filter, if any is given.
+            boost::optional<std::set<std::string>> propertyFilter = storm::api::parsePropertyFilter(ioSettings.getPropertyFilter());
+
+            SymbolicInput input;
+            parseSymbolicModelDescription(ioSettings, input);
+            parseProperties(ioSettings, input, propertyFilter);
+
+            return input;
+        }
+
+        SymbolicInput preprocessSymbolicInput(SymbolicInput const& input) {
+            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
+            auto coreSettings = storm::settings::getModule<storm::settings::modules::CoreSettings>();
+
+            SymbolicInput output = input;
+
+            // Substitute constant definitions in symbolic input.
+            std::string constantDefinitionString = ioSettings.getConstantDefinitionString();
+            std::map<storm::expressions::Variable, storm::expressions::Expression> constantDefinitions;
+            if (output.model) {
+                constantDefinitions = output.model.get().parseConstantDefinitions(constantDefinitionString);
+                output.model = output.model.get().preprocess(constantDefinitions);
+            }
+            if (!output.properties.empty()) {
+                output.properties = storm::api::substituteConstantsInProperties(output.properties, constantDefinitions);
+            }
+
+            // Check whether conversion for PRISM to JANI is requested or necessary.
+            if (input.model && input.model.get().isPrismProgram()) {
+                bool transformToJani = ioSettings.isPrismToJaniSet();
+                bool transformToJaniForJit = coreSettings.getEngine() == storm::settings::modules::CoreSettings::Engine::Sparse && ioSettings.isJitSet();
+                STORM_LOG_WARN_COND(transformToJani || !transformToJaniForJit, "The JIT-based model builder is only available for JANI models, automatically converting the PRISM input model.");
+                transformToJani |= transformToJaniForJit;
+
+                if (transformToJani) {
+                    storm::prism::Program const& model = output.model.get().asPrismProgram();
+                    auto modelAndRenaming = model.toJaniWithLabelRenaming(true);
+                    output.model = modelAndRenaming.first;
+
+                    if (!modelAndRenaming.second.empty()) {
+                        std::map<std::string, std::string> const& labelRenaming = modelAndRenaming.second;
+                        std::vector<storm::jani::Property> amendedProperties;
+                        for (auto const& property : output.properties) {
+                            amendedProperties.emplace_back(property.substituteLabels(labelRenaming));
+                        }
+                        output.properties = std::move(amendedProperties);
+                    }
+                }
+            }
+
+            return output;
+        }
+
+        void exportSymbolicInput(SymbolicInput const& input) {
+            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
+            if (input.model && input.model.get().isJaniModel()) {
+                storm::storage::SymbolicModelDescription const& model = input.model.get();
+                if (ioSettings.isExportJaniDotSet()) {
+                    storm::api::exportJaniModelAsDot(model.asJaniModel(), ioSettings.getExportJaniDotFilename());
+                }
+
+                if (model.isJaniModel() && storm::settings::getModule<storm::settings::modules::JaniExportSettings>().isJaniFileSet()) {
+                    storm::api::exportJaniModel(model.asJaniModel(), input.properties, storm::settings::getModule<storm::settings::modules::JaniExportSettings>().getJaniFilename());
+                }
+            }
+        }
+
+        SymbolicInput parseAndPreprocessSymbolicInput() {
+            SymbolicInput input = parseSymbolicInput();
+            input = preprocessSymbolicInput(input);
+            exportSymbolicInput(input);
+            return input;
+        }
+
+        std::vector<std::shared_ptr<storm::logic::Formula const>> createFormulasToRespect(std::vector<storm::jani::Property> const& properties) {
+            std::vector<std::shared_ptr<storm::logic::Formula const>> result = storm::api::extractFormulasFromProperties(properties);
+
+            for (auto const& property : properties) {
+                if (!property.getFilter().getStatesFormula()->isInitialFormula()) {
+                    result.push_back(property.getFilter().getStatesFormula());
+                }
+            }
+
+            return result;
+        }
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        std::shared_ptr<storm::models::ModelBase> buildModelDd(SymbolicInput const& input) {
+            return storm::api::buildSymbolicModel<DdType, ValueType>(input.model.get(), createFormulasToRespect(input.properties), storm::settings::getModule<storm::settings::modules::IOSettings>().isBuildFullModelSet());
+        }
+
+        template <typename ValueType>
+        std::shared_ptr<storm::models::ModelBase> buildModelSparse(SymbolicInput const& input, storm::settings::modules::IOSettings const& ioSettings) {
+            auto counterexampleGeneratorSettings = storm::settings::getModule<storm::settings::modules::CounterexampleGeneratorSettings>();
+            storm::builder::BuilderOptions options(createFormulasToRespect(input.properties));
+            options.setBuildChoiceLabels(ioSettings.isBuildChoiceLabelsSet());
+            options.setBuildChoiceOrigins(counterexampleGeneratorSettings.isMinimalCommandSetGenerationSet());
+            options.setBuildAllLabels(ioSettings.isBuildFullModelSet());
+            options.setBuildAllRewardModels(ioSettings.isBuildFullModelSet());
+            if (ioSettings.isBuildFullModelSet()) {
+                options.clearTerminalStates();
+            }
+            return storm::api::buildSparseModel<ValueType>(input.model.get(), options, ioSettings.isJitSet(), storm::settings::getModule<storm::settings::modules::JitBuilderSettings>().isDoctorSet());
+        }
+
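+        // Builds a sparse model from explicit input: either explicit transition/labeling (and optional reward/choice labeling) files, a DRN file, or an IMCA file.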
+        template <typename ValueType>
+        std::shared_ptr<storm::models::ModelBase> buildModelExplicit(storm::settings::modules::IOSettings const& ioSettings) {
+            std::shared_ptr<storm::models::ModelBase> result;
+            if (ioSettings.isExplicitSet()) {
+                result = storm::api::buildExplicitModel<ValueType>(ioSettings.getTransitionFilename(), ioSettings.getLabelingFilename(), ioSettings.isStateRewardsSet() ? boost::optional<std::string>(ioSettings.getStateRewardsFilename()) : boost::none, ioSettings.isTransitionRewardsSet() ? boost::optional<std::string>(ioSettings.getTransitionRewardsFilename()) : boost::none, ioSettings.isChoiceLabelingSet() ? boost::optional<std::string>(ioSettings.getChoiceLabelingFilename()) : boost::none);
+            } else if (ioSettings.isExplicitDRNSet()) {
+                result = storm::api::buildExplicitDRNModel<ValueType>(ioSettings.getExplicitDRNFilename());
+            } else {
+                STORM_LOG_THROW(ioSettings.isExplicitIMCASet(), storm::exceptions::InvalidSettingsException, "Unexpected explicit model input type.");
+                result = storm::api::buildExplicitIMCAModel<ValueType>(ioSettings.getExplicitIMCAFilename());
+            }
+            return result;
+        }
+
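+        // Dispatches to the symbolic, sparse or explicit model builder depending on the selected engine and the kind of input, and prints the time spent on model construction.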
+        template <storm::dd::DdType DdType, typename ValueType>
+        std::shared_ptr<storm::models::ModelBase> buildModel(storm::settings::modules::CoreSettings::Engine const& engine, SymbolicInput const& input, storm::settings::modules::IOSettings const& ioSettings) {
+            storm::utility::Stopwatch modelBuildingWatch(true);
+
+            std::shared_ptr<storm::models::ModelBase> result;
+            if (input.model) {
+                if (engine == storm::settings::modules::CoreSettings::Engine::Dd || engine == storm::settings::modules::CoreSettings::Engine::Hybrid) {
+                    result = buildModelDd<DdType, ValueType>(input);
+                } else if (engine == storm::settings::modules::CoreSettings::Engine::Sparse) {
+                    result = buildModelSparse<ValueType>(input, ioSettings);
+                }
+            } else if (ioSettings.isExplicitSet() || ioSettings.isExplicitDRNSet() || ioSettings.isExplicitIMCASet()) {
+                STORM_LOG_THROW(engine == storm::settings::modules::CoreSettings::Engine::Sparse, storm::exceptions::InvalidSettingsException, "Can only use sparse engine with explicit input.");
+                result = buildModelExplicit<ValueType>(ioSettings);
+            }
+
+            modelBuildingWatch.stop();
+            if (result) {
+                STORM_PRINT_AND_LOG("Time for model construction: " << modelBuildingWatch << "." << std::endl << std::endl);
+            }
+
+            return result;
+        }
+
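+        // Closes the Markov automaton and, if it only has trivial nondeterminism, converts it to a CTMC.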
+        template <typename ValueType>
+        std::shared_ptr<storm::models::sparse::Model<ValueType>> preprocessSparseMarkovAutomaton(std::shared_ptr<storm::models::sparse::MarkovAutomaton<ValueType>> const& model) {
+            std::shared_ptr<storm::models::sparse::Model<ValueType>> result = model;
+            model->close();
+            if (model->hasOnlyTrivialNondeterminism()) {
+                result = model->convertToCTMC();
+            }
+            return result;
+        }
+
+        template <typename ValueType>
+        std::shared_ptr<storm::models::sparse::Model<ValueType>> preprocessSparseModelBisimulation(std::shared_ptr<storm::models::sparse::Model<ValueType>> const& model, SymbolicInput const& input, storm::settings::modules::BisimulationSettings const& bisimulationSettings) {
+            storm::storage::BisimulationType bisimType = storm::storage::BisimulationType::Strong;
+            if (bisimulationSettings.isWeakBisimulationSet()) {
+                bisimType = storm::storage::BisimulationType::Weak;
+            }
+
+            STORM_LOG_INFO("Performing bisimulation minimization...");
+            return storm::api::performBisimulationMinimization<ValueType>(model, createFormulasToRespect(input.properties), bisimType);
+        }
+
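+        // Preprocesses a sparse model (Markov automaton closing/conversion and optional bisimulation minimization); the boolean indicates whether the model was modified.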
+        template <typename ValueType>
+        std::pair<std::shared_ptr<storm::models::sparse::Model<ValueType>>, bool> preprocessSparseModel(std::shared_ptr<storm::models::sparse::Model<ValueType>> const& model, SymbolicInput const& input) {
+            auto generalSettings = storm::settings::getModule<storm::settings::modules::GeneralSettings>();
+            auto bisimulationSettings = storm::settings::getModule<storm::settings::modules::BisimulationSettings>();
+            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
+
+            std::pair<std::shared_ptr<storm::models::sparse::Model<ValueType>>, bool> result = std::make_pair(model, false);
+
+            if (result.first->isOfType(storm::models::ModelType::MarkovAutomaton)) {
+                result.first = preprocessSparseMarkovAutomaton(result.first->template as<storm::models::sparse::MarkovAutomaton<ValueType>>());
+                result.second = true;
+            }
+
+            if (generalSettings.isBisimulationSet()) {
+                result.first = preprocessSparseModelBisimulation(result.first, input, bisimulationSettings);
+                result.second = true;
+            }
+
+            return result;
+        }
+
+        template <typename ValueType>
+        void exportSparseModel(std::shared_ptr<storm::models::sparse::Model<ValueType>> const& model, SymbolicInput const& input) {
+            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
+
+            if (ioSettings.isExportExplicitSet()) {
+                storm::api::exportSparseModelAsDrn(model, ioSettings.getExportExplicitFilename(), input.model ? input.model.get().getParameterNames() : std::vector<std::string>());
+            }
+
+            if (ioSettings.isExportDotSet()) {
+                storm::api::exportSparseModelAsDot(model, ioSettings.getExportDotFilename());
+            }
+        }
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        void exportDdModel(std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> const& model, SymbolicInput const& input) {
+            // Intentionally left empty.
+        }
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        void exportModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
+            if (model->isSparseModel()) {
+                exportSparseModel<ValueType>(model->as<storm::models::sparse::Model<ValueType>>(), input);
+            } else {
+                exportDdModel<DdType, ValueType>(model->as<storm::models::symbolic::Model<DdType, ValueType>>(), input);
+            }
+        }
+
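+        // Performs strong symbolic bisimulation minimization; weak bisimulation is not supported on DDs and triggers a warning.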
+        template <storm::dd::DdType DdType, typename ValueType>
+        std::shared_ptr<storm::models::Model<ValueType>> preprocessDdModelBisimulation(std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> const& model, SymbolicInput const& input, storm::settings::modules::BisimulationSettings const& bisimulationSettings) {
+            STORM_LOG_WARN_COND(!bisimulationSettings.isWeakBisimulationSet(), "Weak bisimulation is currently not supported on DDs. Falling back to strong bisimulation.");
+            
+            STORM_LOG_INFO("Performing bisimulation minimization...");
+            return storm::api::performBisimulationMinimization<DdType, ValueType>(model, createFormulasToRespect(input.properties), storm::storage::BisimulationType::Strong, bisimulationSettings.getSignatureMode());
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        std::pair<std::shared_ptr<storm::models::ModelBase>, bool> preprocessDdModel(std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> const& model, SymbolicInput const& input) {
+            auto bisimulationSettings = storm::settings::getModule<storm::settings::modules::BisimulationSettings>();
+            auto generalSettings = storm::settings::getModule<storm::settings::modules::GeneralSettings>();
+            std::pair<std::shared_ptr<storm::models::Model<ValueType>>, bool> result = std::make_pair(model, false);
+            
+            if (generalSettings.isBisimulationSet()) {
+                result.first = preprocessDdModelBisimulation(model, input, bisimulationSettings);
+                result.second = true;
+            }
+            
+            return result;
+        }
+
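+        // Dispatches preprocessing to the sparse or symbolic routine and prints the preprocessing time if the model was modified.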
+        template <storm::dd::DdType DdType, typename ValueType>
+        std::pair<std::shared_ptr<storm::models::ModelBase>, bool> preprocessModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
+            storm::utility::Stopwatch preprocessingWatch(true);
+
+            std::pair<std::shared_ptr<storm::models::ModelBase>, bool> result = std::make_pair(model, false);
+            if (model->isSparseModel()) {
+                result = preprocessSparseModel<ValueType>(result.first->as<storm::models::sparse::Model<ValueType>>(), input);
+            } else {
+                STORM_LOG_ASSERT(model->isSymbolicModel(), "Unexpected model type.");
+                result = preprocessDdModel<DdType, ValueType>(result.first->as<storm::models::symbolic::Model<DdType, ValueType>>(), input);
+            }
+            
+            preprocessingWatch.stop();
+
+            if (result.second) {
+                STORM_PRINT_AND_LOG(std::endl << "Time for model preprocessing: " << preprocessingWatch << "." << std::endl << std::endl);
+            }
+            return result;
+        }
+
+        void printComputingCounterexample(storm::jani::Property const& property) {
+            STORM_PRINT_AND_LOG("Computing counterexample for property " << *property.getRawFormula() << " ..." << std::endl);
+        }
+
+        void printCounterexample(std::shared_ptr<storm::counterexamples::Counterexample> const& counterexample, storm::utility::Stopwatch* watch = nullptr) {
+            if (counterexample) {
+                STORM_PRINT_AND_LOG(*counterexample << std::endl);
+                if (watch) {
+                    STORM_PRINT_AND_LOG("Time for computation: " << *watch << "." << std::endl);
+                }
+            } else {
+                STORM_PRINT_AND_LOG(" failed." << std::endl);
+            }
+        }
+
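+        // Counterexample generation is only implemented for double; all other value types are rejected (see the specialization below).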
+        template <typename ValueType>
+        void generateCounterexamples(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
+            STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Counterexample generation is not supported for this data-type.");
+        }
+
+        template <>
+        void generateCounterexamples<double>(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
+            typedef double ValueType;
+
+            STORM_LOG_THROW(model->isSparseModel(), storm::exceptions::NotSupportedException, "Counterexample generation is currently only supported for sparse models.");
+            auto sparseModel = model->as<storm::models::sparse::Model<ValueType>>();
+
+            STORM_LOG_THROW(sparseModel->isOfType(storm::models::ModelType::Mdp), storm::exceptions::NotSupportedException, "Counterexample generation is currently only supported for MDPs.");
+            auto mdp = sparseModel->template as<storm::models::sparse::Mdp<ValueType>>();
+
+            auto counterexampleSettings = storm::settings::getModule<storm::settings::modules::CounterexampleGeneratorSettings>();
+            if (counterexampleSettings.isMinimalCommandSetGenerationSet()) {
+                STORM_LOG_THROW(input.model && input.model.get().isPrismProgram(), storm::exceptions::NotSupportedException, "Minimal command set counterexamples are only supported for PRISM model input.");
+                storm::prism::Program const& program = input.model.get().asPrismProgram();
+
+                bool useMilp = counterexampleSettings.isUseMilpBasedMinimalCommandSetGenerationSet();
+                for (auto const& property : input.properties) {
+                    std::shared_ptr<storm::counterexamples::Counterexample> counterexample;
+                    printComputingCounterexample(property);
+                    storm::utility::Stopwatch watch(true);
+                    if (useMilp) {
+                        counterexample = storm::api::computePrismHighLevelCounterexampleMilp(program, mdp, property.getRawFormula());
+                    } else {
+                        counterexample = storm::api::computePrismHighLevelCounterexampleMaxSmt(program, mdp, property.getRawFormula());
+                    }
+                    watch.stop();
+                    printCounterexample(counterexample, &watch);
+                }
+            } else {
+                STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "The selected counterexample formalism is unsupported.");
+            }
+        }
+
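+        // Prints a check result according to the property's filter type; filter types that do not match the kind of result (quantitative vs. qualitative) are rejected.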
+        template<typename ValueType>
+        void printFilteredResult(std::unique_ptr<storm::modelchecker::CheckResult> const& result, storm::modelchecker::FilterType ft) {
+            if (result->isQuantitative()) {
+                switch (ft) {
+                    case storm::modelchecker::FilterType::VALUES:
+                        STORM_PRINT_AND_LOG(*result);
+                        break;
+                    case storm::modelchecker::FilterType::SUM:
+                        STORM_PRINT_AND_LOG(result->asQuantitativeCheckResult<ValueType>().sum());
+                        break;
+                    case storm::modelchecker::FilterType::AVG:
+                        STORM_PRINT_AND_LOG(result->asQuantitativeCheckResult<ValueType>().average());
+                        break;
+                    case storm::modelchecker::FilterType::MIN:
+                        STORM_PRINT_AND_LOG(result->asQuantitativeCheckResult<ValueType>().getMin());
+                        break;
+                    case storm::modelchecker::FilterType::MAX:
+                        STORM_PRINT_AND_LOG(result->asQuantitativeCheckResult<ValueType>().getMax());
+                        break;
+                    case storm::modelchecker::FilterType::ARGMIN:
+                    case storm::modelchecker::FilterType::ARGMAX:
+                        STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Outputting states is not supported.");
+                    case storm::modelchecker::FilterType::EXISTS:
+                    case storm::modelchecker::FilterType::FORALL:
+                    case storm::modelchecker::FilterType::COUNT:
+                        STORM_LOG_THROW(false, storm::exceptions::InvalidArgumentException, "Filter type only defined for qualitative results.");
+                }
+            } else {
+                switch (ft) {
+                    case storm::modelchecker::FilterType::VALUES:
+                        STORM_PRINT_AND_LOG(*result << std::endl);
+                        break;
+                    case storm::modelchecker::FilterType::EXISTS:
+                        STORM_PRINT_AND_LOG(result->asQualitativeCheckResult().existsTrue());
+                        break;
+                    case storm::modelchecker::FilterType::FORALL:
+                        STORM_PRINT_AND_LOG(result->asQualitativeCheckResult().forallTrue());
+                        break;
+                    case storm::modelchecker::FilterType::COUNT:
+                        STORM_PRINT_AND_LOG(result->asQualitativeCheckResult().count());
+                        break;
+                    case storm::modelchecker::FilterType::ARGMIN:
+                    case storm::modelchecker::FilterType::ARGMAX:
+                        STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Outputting states is not supported.");
+                    case storm::modelchecker::FilterType::SUM:
+                    case storm::modelchecker::FilterType::AVG:
+                    case storm::modelchecker::FilterType::MIN:
+                    case storm::modelchecker::FilterType::MAX:
+                        STORM_LOG_THROW(false, storm::exceptions::InvalidArgumentException, "Filter type only defined for quantitative results.");
+                }
+            }
+            STORM_PRINT_AND_LOG(std::endl);
+        }
+
+        void printModelCheckingProperty(storm::jani::Property const& property) {
+            STORM_PRINT_AND_LOG(std::endl << "Model checking property " << *property.getRawFormula() << " ..." << std::endl);
+        }
+
+        template<typename ValueType>
+        void printResult(std::unique_ptr<storm::modelchecker::CheckResult> const& result, storm::jani::Property const& property, storm::utility::Stopwatch* watch = nullptr) {
+            if (result) {
+                std::stringstream ss;
+                ss << "'" << *property.getFilter().getStatesFormula() << "'";
+                STORM_PRINT_AND_LOG("Result (for " << (property.getFilter().getStatesFormula()->isInitialFormula() ? "initial" : ss.str()) << " states): ");
+                printFilteredResult<ValueType>(result, property.getFilter().getFilterType());
+                if (watch) {
+                    STORM_PRINT_AND_LOG("Time for model checking: " << *watch << "." << std::endl);
+                }
+            } else {
+                STORM_PRINT_AND_LOG(" failed, property is unsupported by selected engine/settings." << std::endl);
+            }
+        }
+
+        struct PostprocessingIdentity {
+            void operator()(std::unique_ptr<storm::modelchecker::CheckResult> const&) {
+                // Intentionally left empty.
+            }
+        };
+
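+        // Checks each property via the given verification callback, applies the optional postprocessing callback and prints the result together with the model checking time.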
+        template<typename ValueType>
+        void verifyProperties(std::vector<storm::jani::Property> const& properties, std::function<std::unique_ptr<storm::modelchecker::CheckResult>(std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states)> const& verificationCallback, std::function<void(std::unique_ptr<storm::modelchecker::CheckResult> const&)> const& postprocessingCallback = PostprocessingIdentity()) {
+            for (auto const& property : properties) {
+                printModelCheckingProperty(property);
+                storm::utility::Stopwatch watch(true);
+                std::unique_ptr<storm::modelchecker::CheckResult> result = verificationCallback(property.getRawFormula(), property.getFilter().getStatesFormula());
+                watch.stop();
+                postprocessingCallback(result);
+                printResult<ValueType>(result, property, &watch);
+            }
+        }
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        void verifyWithAbstractionRefinementEngine(SymbolicInput const& input) {
+            STORM_LOG_ASSERT(input.model, "Expected symbolic model description.");
+            verifyProperties<ValueType>(input.properties, [&input] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
+                STORM_LOG_THROW(states->isInitialFormula(), storm::exceptions::NotSupportedException, "Abstraction-refinement can only filter initial states.");
+                return storm::api::verifyWithAbstractionRefinementEngine<DdType, ValueType>(input.model.get(), storm::api::createTask<ValueType>(formula, true));
+            });
+        }
+
+        template <typename ValueType>
+        void verifyWithExplorationEngine(SymbolicInput const& input) {
+            STORM_LOG_ASSERT(input.model, "Expected symbolic model description.");
+            STORM_LOG_THROW((std::is_same<ValueType, double>::value), storm::exceptions::NotSupportedException, "Exploration does not support other data-types than floating points.");
+            verifyProperties<ValueType>(input.properties, [&input] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
+                STORM_LOG_THROW(states->isInitialFormula(), storm::exceptions::NotSupportedException, "Exploration can only filter initial states.");
+                return storm::api::verifyWithExplorationEngine<ValueType>(input.model.get(), storm::api::createTask<ValueType>(formula, true));
+            });
+        }
+
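+        // Verifies all properties on a sparse model; each result is filtered either to the initial states or to the states given by the property's filter formula.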
+        template <typename ValueType>
+        void verifyWithSparseEngine(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
+            auto sparseModel = model->as<storm::models::sparse::Model<ValueType>>();
+            verifyProperties<ValueType>(input.properties,
+                                        [&sparseModel] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
+                                            bool filterForInitialStates = states->isInitialFormula();
+                                            auto task = storm::api::createTask<ValueType>(formula, filterForInitialStates);
+                                            std::unique_ptr<storm::modelchecker::CheckResult> result = storm::api::verifyWithSparseEngine<ValueType>(sparseModel, task);
+
+                                            std::unique_ptr<storm::modelchecker::CheckResult> filter;
+                                            if (filterForInitialStates) {
+                                                filter = std::make_unique<storm::modelchecker::ExplicitQualitativeCheckResult>(sparseModel->getInitialStates());
+                                            } else {
+                                                filter = storm::api::verifyWithSparseEngine<ValueType>(sparseModel, storm::api::createTask<ValueType>(states, false));
+                                            }
+                                            if (result && filter) {
+                                                result->filter(filter->asQualitativeCheckResult());
+                                            }
+                                            return result;
+                                        });
+        }
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        void verifyWithHybridEngine(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
+            verifyProperties<ValueType>(input.properties, [&model] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
+                bool filterForInitialStates = states->isInitialFormula();
+                auto task = storm::api::createTask<ValueType>(formula, filterForInitialStates);
+
+                auto symbolicModel = model->as<storm::models::symbolic::Model<DdType, ValueType>>();
+                std::unique_ptr<storm::modelchecker::CheckResult> result = storm::api::verifyWithHybridEngine<DdType, ValueType>(symbolicModel, task);
+
+                std::unique_ptr<storm::modelchecker::CheckResult> filter;
+                if (filterForInitialStates) {
+                    filter = std::make_unique<storm::modelchecker::SymbolicQualitativeCheckResult<DdType>>(symbolicModel->getReachableStates(), symbolicModel->getInitialStates());
+                } else {
+                    filter = storm::api::verifyWithHybridEngine<DdType, ValueType>(symbolicModel, storm::api::createTask<ValueType>(states, false));
+                }
+                if (result && filter) {
+                    result->filter(filter->asQualitativeCheckResult());
+                }
+                return result;
+            });
+        }
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        void verifyWithDdEngine(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input) {
+            verifyProperties<ValueType>(input.properties, [&model] (std::shared_ptr<storm::logic::Formula const> const& formula, std::shared_ptr<storm::logic::Formula const> const& states) {
+                bool filterForInitialStates = states->isInitialFormula();
+                auto task = storm::api::createTask<ValueType>(formula, filterForInitialStates);
+
+                auto symbolicModel = model->as<storm::models::symbolic::Model<DdType, ValueType>>();
+                std::unique_ptr<storm::modelchecker::CheckResult> result = storm::api::verifyWithDdEngine<DdType, ValueType>(symbolicModel, task);
+
+                std::unique_ptr<storm::modelchecker::CheckResult> filter;
+                if (filterForInitialStates) {
+                    filter = std::make_unique<storm::modelchecker::SymbolicQualitativeCheckResult<DdType>>(symbolicModel->getReachableStates(), symbolicModel->getInitialStates());
+                } else {
+                    filter = storm::api::verifyWithDdEngine<DdType, ValueType>(symbolicModel, storm::api::createTask<ValueType>(states, false));
+                }
+                if (result && filter) {
+                    result->filter(filter->asQualitativeCheckResult());
+                }
+                return result;
+            });
+        }
+
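+        // CUDD only supports double, so this overload handles all admissible combinations and selects the hybrid or DD engine; the overload below rejects the remaining combinations.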
+        template <storm::dd::DdType DdType, typename ValueType>
+        typename std::enable_if<DdType != storm::dd::DdType::CUDD || std::is_same<ValueType, double>::value, void>::type verifySymbolicModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input, storm::settings::modules::CoreSettings const& coreSettings) {
+            bool hybrid = coreSettings.getEngine() == storm::settings::modules::CoreSettings::Engine::Hybrid;
+            if (hybrid) {
+                verifyWithHybridEngine<DdType, ValueType>(model, input);
+            } else {
+                verifyWithDdEngine<DdType, ValueType>(model, input);
+            }
+        }
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        typename std::enable_if<DdType == storm::dd::DdType::CUDD && !std::is_same<ValueType, double>::value, void>::type verifySymbolicModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input, storm::settings::modules::CoreSettings const& coreSettings) {
+            STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "CUDD does not support the selected data-type.");
+        }
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        void verifyModel(std::shared_ptr<storm::models::ModelBase> const& model, SymbolicInput const& input, storm::settings::modules::CoreSettings const& coreSettings) {
+            if (model->isSparseModel()) {
+                verifyWithSparseEngine<ValueType>(model, input);
+            } else {
+                STORM_LOG_ASSERT(model->isSymbolicModel(), "Unexpected model type.");
+                verifySymbolicModel<DdType, ValueType>(model, input, coreSettings);
+            }
+        }
+
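+        // Builds the model (unless building is disabled), prints its statistics, preprocesses it and exports it as requested by the IO settings.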
+        template <storm::dd::DdType DdType, typename ValueType>
+        std::shared_ptr<storm::models::ModelBase> buildPreprocessExportModelWithValueTypeAndDdlib(SymbolicInput const& input, storm::settings::modules::CoreSettings::Engine engine) {
+            auto ioSettings = storm::settings::getModule<storm::settings::modules::IOSettings>();
+            std::shared_ptr<storm::models::ModelBase> model;
+            if (!ioSettings.isNoBuildModelSet()) {
+                model = buildModel<DdType, ValueType>(engine, input, ioSettings);
+            }
+
+            if (model) {
+                model->printModelInformationToStream(std::cout);
+            }
+
+            STORM_LOG_THROW(model || input.properties.empty(), storm::exceptions::InvalidSettingsException, "No input model.");
+
+            if (model) {
+                auto preprocessingResult = preprocessModel<DdType, ValueType>(model, input);
+                if (preprocessingResult.second) {
+                    model = preprocessingResult.first;
+                    model->printModelInformationToStream(std::cout);
+                }
+                exportModel<DdType, ValueType>(model, input);
+            }
+            return model;
+        }
+
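+        // Engines without an explicit model building step (abstraction refinement, exploration) verify the input directly; otherwise the model is built, preprocessed and then used for counterexample generation or verification.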
+        template <storm::dd::DdType DdType, typename ValueType>
+        void processInputWithValueTypeAndDdlib(SymbolicInput const& input) {
+            auto coreSettings = storm::settings::getModule<storm::settings::modules::CoreSettings>();
+
+            // For several engines, no model building step is performed, but the verification is started right away.
+            storm::settings::modules::CoreSettings::Engine engine = coreSettings.getEngine();
+            if (engine == storm::settings::modules::CoreSettings::Engine::AbstractionRefinement) {
+                verifyWithAbstractionRefinementEngine<DdType, ValueType>(input);
+            } else if (engine == storm::settings::modules::CoreSettings::Engine::Exploration) {
+                verifyWithExplorationEngine<ValueType>(input);
+            } else {
+                std::shared_ptr<storm::models::ModelBase> model = buildPreprocessExportModelWithValueTypeAndDdlib<DdType, ValueType>(input, engine);
+
+                if (model) {
+                    if (coreSettings.isCounterexampleSet()) {
+                        generateCounterexamples<ValueType>(model, input);
+                    } else {
+                        verifyModel<DdType, ValueType>(model, input, coreSettings);
+                    }
+                    }
+                }
+            }
+        }
+
+        template <typename ValueType>
+        void processInputWithValueType(SymbolicInput const& input) {
+            auto coreSettings = storm::settings::getModule<storm::settings::modules::CoreSettings>();
+
+            if (coreSettings.getDdLibraryType() == storm::dd::DdType::CUDD) {
+                processInputWithValueTypeAndDdlib<storm::dd::DdType::CUDD, ValueType>(input);
+            } else {
+                STORM_LOG_ASSERT(coreSettings.getDdLibraryType() == storm::dd::DdType::Sylvan, "Unknown DD library.");
+                processInputWithValueTypeAndDdlib<storm::dd::DdType::Sylvan, ValueType>(input);
+            }
+        }
+
+}
+}
diff --git a/src/storm-pars-cli/storm-pars.cpp b/src/storm-pars-cli/storm-pars.cpp
index 1ff2fabb6..5fa744c59 100644
--- a/src/storm-pars-cli/storm-pars.cpp
+++ b/src/storm-pars-cli/storm-pars.cpp
@@ -306,15 +306,8 @@ namespace storm {
 
             if (parSettings.onlyObtainConstraints()) {
                 STORM_LOG_THROW(parSettings.exportResultToFile(), storm::exceptions::InvalidSettingsException, "When computing constraints, export path has to be specified.");
-                if (model->isOfType(storm::models::ModelType::Dtmc)) {
-                    auto dtmc = model->template as<storm::models::sparse::Dtmc<ValueType>>();
-                    storm::api::exportParametricResultToFile<ValueType>(boost::none, storm::analysis::ConstraintCollector<ValueType>(*dtmc),parSettings.exportResultPath());
-                    return;
-                } else {
-                    STORM_LOG_THROW(parSettings.exportResultToFile(), storm::exceptions::NotImplementedException, "Constraints for MDPs and CTMCs not implemented.");
-
-                }
-
+                storm::api::exportParametricResultToFile<ValueType>(boost::none, storm::analysis::ConstraintCollector<ValueType>(*(model->as<storm::models::sparse::Model<ValueType>>())), parSettings.exportResultPath());
+                return;
             }
 
             if (model) {
diff --git a/src/storm/abstraction/AbstractionInformation.cpp b/src/storm/abstraction/AbstractionInformation.cpp
index c74cb60cb..5e25b5530 100644
--- a/src/storm/abstraction/AbstractionInformation.cpp
+++ b/src/storm/abstraction/AbstractionInformation.cpp
@@ -499,7 +499,7 @@ namespace storm {
             allSuccessorLocationVariables.insert(newMetaVariable.second);
             successorVariables.insert(newMetaVariable.second);
             extendedPredicateDdVariables.emplace_back(newMetaVariable);
-            allLocationIdentities &= ddManager->template getIdentity<uint64_t>(newMetaVariable.first).equals(ddManager->template getIdentity<uint64_t>(newMetaVariable.second)) && ddManager->getRange(newMetaVariable.first) && ddManager->getRange(newMetaVariable.second);
+            allLocationIdentities &= ddManager->getIdentity(newMetaVariable.first, newMetaVariable.second);
             return std::make_pair(locationVariablePairs.back(), locationVariablePairs.size() - 1);
         }
         
diff --git a/src/storm/abstraction/MenuGame.cpp b/src/storm/abstraction/MenuGame.cpp
index a62a5d769..6252697bc 100644
--- a/src/storm/abstraction/MenuGame.cpp
+++ b/src/storm/abstraction/MenuGame.cpp
@@ -29,7 +29,7 @@ namespace storm {
                                             std::set<storm::expressions::Variable> const& player2Variables,
                                             std::set<storm::expressions::Variable> const& allNondeterminismVariables,
                                             std::set<storm::expressions::Variable> const& probabilisticBranchingVariables,
-                                            std::map<storm::expressions::Expression, storm::dd::Bdd<Type>> const& expressionToBddMap) : storm::models::symbolic::StochasticTwoPlayerGame<Type, ValueType>(manager, reachableStates, initialStates, deadlockStates, transitionMatrix.sumAbstract(probabilisticBranchingVariables), rowVariables, nullptr, columnVariables, nullptr, rowColumnMetaVariablePairs, player1Variables, player2Variables, allNondeterminismVariables), extendedTransitionMatrix(transitionMatrix), probabilisticBranchingVariables(probabilisticBranchingVariables), expressionToBddMap(expressionToBddMap), bottomStates(bottomStates) {
+                                            std::map<storm::expressions::Expression, storm::dd::Bdd<Type>> const& expressionToBddMap) : storm::models::symbolic::StochasticTwoPlayerGame<Type, ValueType>(manager, reachableStates, initialStates, deadlockStates, transitionMatrix.sumAbstract(probabilisticBranchingVariables), rowVariables, nullptr, columnVariables, rowColumnMetaVariablePairs, player1Variables, player2Variables, allNondeterminismVariables), extendedTransitionMatrix(transitionMatrix), probabilisticBranchingVariables(probabilisticBranchingVariables), expressionToBddMap(expressionToBddMap), bottomStates(bottomStates) {
             // Intentionally left empty.
         }
         
diff --git a/src/storm/adapters/RationalFunctionAdapter.h b/src/storm/adapters/RationalFunctionAdapter.h
index ba9858fe0..f403103f5 100644
--- a/src/storm/adapters/RationalFunctionAdapter.h
+++ b/src/storm/adapters/RationalFunctionAdapter.h
@@ -7,7 +7,6 @@
 #include <carl/core/VariablePool.h>
 #include <carl/core/FactorizedPolynomial.h>
 #include <carl/core/Relation.h>
-#include <carl/core/SimpleConstraint.h>
 #include <carl/util/stringparser.h>
 
 namespace carl {
@@ -58,5 +57,5 @@ namespace storm {
     
     typedef carl::RationalFunction<Polynomial, true> RationalFunction;
     typedef carl::Interval<double> Interval;
-    template<typename T> using ArithConstraint = carl::SimpleConstraint<T>;
 }
+
diff --git a/src/storm/analysis/GraphConditions.cpp b/src/storm/analysis/GraphConditions.cpp
index bf3beeb0c..be8073bf7 100644
--- a/src/storm/analysis/GraphConditions.cpp
+++ b/src/storm/analysis/GraphConditions.cpp
@@ -1,4 +1,6 @@
 
+#include "storm/models/sparse/MarkovAutomaton.h"
+#include "storm/models/sparse/Ctmc.h"
 #include "GraphConditions.h"
 #include "storm/utility/constants.h"
 #include "storm/exceptions/NotImplementedException.h"
@@ -9,8 +11,8 @@ namespace storm {
 
 
         template <typename ValueType>
-        ConstraintCollector<ValueType>::ConstraintCollector(storm::models::sparse::Dtmc<ValueType> const& dtmc) {
-            process(dtmc);
+        ConstraintCollector<ValueType>::ConstraintCollector(storm::models::sparse::Model<ValueType> const& model) {
+            process(model);
         }
 
         template <typename ValueType>
@@ -50,10 +52,12 @@ namespace storm {
         }
 
         template <typename ValueType>
-        void ConstraintCollector<ValueType>::process(storm::models::sparse::Dtmc<ValueType> const& dtmc) {
-            for(uint_fast64_t state = 0; state < dtmc.getNumberOfStates(); ++state) {
+        void ConstraintCollector<ValueType>::process(storm::models::sparse::Model<ValueType> const& model) {
+            for(uint_fast64_t action = 0; action < model.getTransitionMatrix().getRowCount(); ++action) {
                 ValueType sum = storm::utility::zero<ValueType>();
-                for (auto const& transition : dtmc.getRows(state)) {
+
+                for (auto transitionIt = model.getTransitionMatrix().begin(action); transitionIt != model.getTransitionMatrix().end(action); ++transitionIt) {
+                    auto const& transition = *transitionIt;
                     sum += transition.getValue();
                     if (!storm::utility::isConstant(transition.getValue())) {
                         auto const& transitionVars = transition.getValue().gatherVariables();
@@ -90,9 +94,17 @@ namespace storm {
                     // Assert: sum == 1
                     wellformedConstraintSet.emplace((sum.nominator() - sum.denominator()).polynomialWithCoefficient(), storm::CompareRelation::EQ);
                 }
+            }
 
+            if (model.getType() == storm::models::ModelType::Ctmc) {
+                auto const& exitRateVector = static_cast<storm::models::sparse::Ctmc<ValueType> const&>(model).getExitRateVector();
+                wellformedRequiresNonNegativeEntries(exitRateVector);
+            } else if (model.getType() == storm::models::ModelType::MarkovAutomaton) {
+                auto const& exitRateVector = static_cast<storm::models::sparse::MarkovAutomaton<ValueType> const&>(model).getExitRates();
+                wellformedRequiresNonNegativeEntries(exitRateVector);
             }
-            for(auto const& rewModelEntry : dtmc.getRewardModels()) {
+
+            for(auto const& rewModelEntry : model.getRewardModels()) {
                 if (rewModelEntry.second.hasStateRewards()) {
                     wellformedRequiresNonNegativeEntries(rewModelEntry.second.getStateRewardVector());
                 }
@@ -117,13 +129,13 @@ namespace storm {
                         }
                     }
                 }
-
             }
+
         }
 
         template <typename ValueType>
-        void ConstraintCollector<ValueType>::operator()(storm::models::sparse::Dtmc<ValueType> const& dtmc) {
-            process(dtmc);
+        void ConstraintCollector<ValueType>::operator()(storm::models::sparse::Model<ValueType> const& model) {
+            process(model);
         }
 
         template class ConstraintCollector<storm::RationalFunction>;
diff --git a/src/storm/analysis/GraphConditions.h b/src/storm/analysis/GraphConditions.h
index 394f11b31..a901dfa68 100644
--- a/src/storm/analysis/GraphConditions.h
+++ b/src/storm/analysis/GraphConditions.h
@@ -38,12 +38,12 @@ namespace storm {
             void wellformedRequiresNonNegativeEntries(std::vector<ValueType> const&);
         public:
             /*!
-             * Constructs a constraint collector for the given DTMC. The constraints are built and ready for
+             * Constructs a constraint collector for the given Model. The constraints are built and ready for
              * retrieval after the construction.
              *
-             * @param dtmc The DTMC for which to create the constraints.
+             * @param model The Model for which to create the constraints.
              */
-            ConstraintCollector(storm::models::sparse::Dtmc<ValueType> const& dtmc);
+            ConstraintCollector(storm::models::sparse::Model<ValueType> const& model);
             
             /*!
              * Returns the set of wellformed-ness constraints.
@@ -66,18 +66,18 @@ namespace storm {
             std::set<storm::RationalFunctionVariable> const& getVariables() const;
             
             /*!
-             * Constructs the constraints for the given DTMC.
+             * Constructs the constraints for the given Model.
              *
-             * @param dtmc The DTMC for which to create the constraints.
+             * @param model The Model for which to create the constraints.
              */
-            void process(storm::models::sparse::Dtmc<ValueType> const& dtmc);
+            void process(storm::models::sparse::Model<ValueType> const& model);
             
             /*!
-             * Constructs the constraints for the given DTMC by calling the process method.
+             * Constructs the constraints for the given Model by calling the process method.
              *
-             * @param dtmc The DTMC for which to create the constraints.
+             * @param model The Model for which to create the constraints.
              */
-            void operator()(storm::models::sparse::Dtmc<ValueType> const& dtmc);
+            void operator()(storm::models::sparse::Model<ValueType> const& model);
             
         };
         
diff --git a/src/storm/api/bisimulation.h b/src/storm/api/bisimulation.h
index be95e335d..aaaa67618 100644
--- a/src/storm/api/bisimulation.h
+++ b/src/storm/api/bisimulation.h
@@ -3,6 +3,9 @@
 #include "storm/storage/bisimulation/DeterministicModelBisimulationDecomposition.h"
 #include "storm/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h"
 
+#include "storm/storage/dd/DdType.h"
+#include "storm/storage/dd/BisimulationDecomposition.h"
+
 #include "storm/utility/macros.h"
 #include "storm/exceptions/NotSupportedException.h"
 
@@ -40,6 +43,7 @@ namespace storm {
             
             STORM_LOG_THROW(model->isOfType(storm::models::ModelType::Dtmc) || model->isOfType(storm::models::ModelType::Ctmc) || model->isOfType(storm::models::ModelType::Mdp), storm::exceptions::NotSupportedException, "Bisimulation minimization is currently only available for DTMCs, CTMCs and MDPs.");
 
+            // Try to get rid of non-state rewards to ease the bisimulation computation.
             model->reduceToStateBasedRewards();
 
             if (model->isOfType(storm::models::ModelType::Dtmc)) {
@@ -51,5 +55,25 @@ namespace storm {
             }
         }
         
+        template <storm::dd::DdType DdType, typename ValueType>
+        typename std::enable_if<DdType == storm::dd::DdType::Sylvan || std::is_same<ValueType, double>::value, std::shared_ptr<storm::models::Model<ValueType>>>::type performBisimulationMinimization(std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, storm::storage::BisimulationType const& bisimulationType = storm::storage::BisimulationType::Strong, storm::dd::bisimulation::SignatureMode const& mode = storm::dd::bisimulation::SignatureMode::Eager) {
+            
+            STORM_LOG_THROW(model->isOfType(storm::models::ModelType::Dtmc) || model->isOfType(storm::models::ModelType::Ctmc) || model->isOfType(storm::models::ModelType::Mdp), storm::exceptions::NotSupportedException, "Symbolic bisimulation minimization is currently only available for DTMCs, CTMCs and MDPs.");
+            STORM_LOG_THROW(bisimulationType == storm::storage::BisimulationType::Strong, storm::exceptions::NotSupportedException, "Currently only strong bisimulation is supported.");
+
+            // Try to get rid of non-state rewards to ease the bisimulation computation.
+            model->reduceToStateBasedRewards();
+            
+            storm::dd::BisimulationDecomposition<DdType, ValueType> decomposition(*model, formulas, bisimulationType);
+            decomposition.compute(mode);
+            return decomposition.getQuotient();
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        typename std::enable_if<DdType != storm::dd::DdType::Sylvan && !std::is_same<ValueType, double>::value, std::shared_ptr<storm::models::Model<ValueType>>>::type performBisimulationMinimization(std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, storm::storage::BisimulationType const& bisimulationType = storm::storage::BisimulationType::Strong, storm::dd::bisimulation::SignatureMode const& mode = storm::dd::bisimulation::SignatureMode::Eager) {
+            STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Symbolic bisimulation minimization is not supported for this combination of DD library and value type.");
+            return nullptr;
+        }
+
     }
 }
diff --git a/src/storm/api/builder.h b/src/storm/api/builder.h
index ca21f99fc..715e6e1ac 100644
--- a/src/storm/api/builder.h
+++ b/src/storm/api/builder.h
@@ -30,11 +30,16 @@ namespace storm {
     namespace api {
         
         template<storm::dd::DdType LibraryType, typename ValueType>
-        std::shared_ptr<storm::models::symbolic::Model<LibraryType, ValueType>> buildSymbolicModel(storm::storage::SymbolicModelDescription const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas) {
+        std::shared_ptr<storm::models::symbolic::Model<LibraryType, ValueType>> buildSymbolicModel(storm::storage::SymbolicModelDescription const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, bool buildFullModel = false) {
             if (model.isPrismProgram()) {
                 typename storm::builder::DdPrismModelBuilder<LibraryType, ValueType>::Options options;
                 options = typename storm::builder::DdPrismModelBuilder<LibraryType, ValueType>::Options(formulas);
                 
+                if (buildFullModel) {
+                    options.buildAllLabels = true;
+                    options.buildAllRewardModels = true;
+                }
+                
                 storm::builder::DdPrismModelBuilder<LibraryType, ValueType> builder;
                 return builder.build(model.asPrismProgram(), options);
             } else {
@@ -42,18 +47,23 @@ namespace storm {
                 typename storm::builder::DdJaniModelBuilder<LibraryType, ValueType>::Options options;
                 options = typename storm::builder::DdJaniModelBuilder<LibraryType, ValueType>::Options(formulas);
                 
+                if (buildFullModel) {
+                    options.buildAllLabels = true;
+                    options.buildAllRewardModels = true;
+                }
+                
                 storm::builder::DdJaniModelBuilder<LibraryType, ValueType> builder;
                 return builder.build(model.asJaniModel(), options);
             }
         }
         
         template<>
-        inline std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, storm::RationalNumber>> buildSymbolicModel(storm::storage::SymbolicModelDescription const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas) {
+        inline std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, storm::RationalNumber>> buildSymbolicModel(storm::storage::SymbolicModelDescription const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, bool buildFullModel) {
             STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "CUDD does not support rational numbers.");
         }
 
         template<>
-        inline std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, storm::RationalFunction>> buildSymbolicModel(storm::storage::SymbolicModelDescription const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas) {
+        inline std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, storm::RationalFunction>> buildSymbolicModel(storm::storage::SymbolicModelDescription const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, bool buildFullModel) {
             STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "CUDD does not support rational functions.");
         }
 
@@ -89,8 +99,6 @@ namespace storm {
         std::shared_ptr<storm::models::sparse::Model<ValueType>> buildSparseModel(storm::storage::SymbolicModelDescription const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, bool jit = false, bool doctor = false) {
             storm::builder::BuilderOptions options(formulas);
             return buildSparseModel<ValueType>(model, options, jit, doctor);
-
-
         }
         
         template<typename ValueType, typename RewardModelType = storm::models::sparse::StandardRewardModel<ValueType>>
@@ -140,6 +148,5 @@ namespace storm {
             return storm::parser::ImcaMarkovAutomatonParser<double>::parseImcaFile(imcaFile);
         }
 
-
     }
 }
diff --git a/src/storm/builder/DdJaniModelBuilder.cpp b/src/storm/builder/DdJaniModelBuilder.cpp
index 6e56e9c68..f5ae8e13a 100644
--- a/src/storm/builder/DdJaniModelBuilder.cpp
+++ b/src/storm/builder/DdJaniModelBuilder.cpp
@@ -141,7 +141,7 @@ namespace storm {
         template <storm::dd::DdType Type, typename ValueType>
         class ParameterCreator {
         public:
-            void create(storm::jani::Model const& model, storm::adapters::AddExpressionAdapter<Type, ValueType>& rowExpressionAdapter, storm::adapters::AddExpressionAdapter<Type, ValueType>& columnExpressionAdapter) {
+            void create(storm::jani::Model const& model, storm::adapters::AddExpressionAdapter<Type, ValueType>& rowExpressionAdapter) {
                 // Intentionally left empty: no support for parameters for this data type.
             }
             
@@ -160,14 +160,13 @@ namespace storm {
                 // Intentionally left empty.
             }
             
-            void create(storm::jani::Model const& model, storm::adapters::AddExpressionAdapter<Type, storm::RationalFunction>& rowExpressionAdapter, storm::adapters::AddExpressionAdapter<Type, storm::RationalFunction>& columnExpressionAdapter) {
+            void create(storm::jani::Model const& model, storm::adapters::AddExpressionAdapter<Type, storm::RationalFunction>& rowExpressionAdapter) {
                 for (auto const& constant : model.getConstants()) {
                     if (!constant.isDefined()) {
                         carl::Variable carlVariable = carl::freshRealVariable(constant.getExpressionVariable().getName());
                         parameters.insert(carlVariable);
                         auto rf = convertVariableToPolynomial(carlVariable);
                         rowExpressionAdapter.setValue(constant.getExpressionVariable(), rf);
-                        columnExpressionAdapter.setValue(constant.getExpressionVariable(), rf);
                     }
                 }
             }
@@ -202,8 +201,7 @@ namespace storm {
             CompositionVariables() : manager(std::make_shared<storm::dd::DdManager<Type>>()),
             variableToRowMetaVariableMap(std::make_shared<std::map<storm::expressions::Variable, storm::expressions::Variable>>()),
             rowExpressionAdapter(std::make_shared<storm::adapters::AddExpressionAdapter<Type, ValueType>>(manager, variableToRowMetaVariableMap)),
-            variableToColumnMetaVariableMap(std::make_shared<std::map<storm::expressions::Variable, storm::expressions::Variable>>()),
-            columnExpressionAdapter(std::make_shared<storm::adapters::AddExpressionAdapter<Type, ValueType>>(manager, variableToColumnMetaVariableMap)) {
+            variableToColumnMetaVariableMap(std::make_shared<std::map<storm::expressions::Variable, storm::expressions::Variable>>()) {
                 // Intentionally left empty.
             }
             
@@ -217,7 +215,6 @@ namespace storm {
             // The meta variables for the column encoding.
             std::set<storm::expressions::Variable> columnMetaVariables;
             std::shared_ptr<std::map<storm::expressions::Variable, storm::expressions::Variable>> variableToColumnMetaVariableMap;
-            std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter;
             
             // All pairs of row/column meta variables.
             std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> rowColumnMetaVariablePairs;
@@ -371,8 +368,8 @@ namespace storm {
                     
                     // Add the identity and ranges of the location variables to the ones of the automaton.
                     std::pair<storm::expressions::Variable, storm::expressions::Variable> const& locationVariables = result.automatonToLocationDdVariableMap[automaton.getName()];
-                    storm::dd::Add<Type, ValueType> variableIdentity = result.manager->template getIdentity<ValueType>(locationVariables.first).equals(result.manager->template getIdentity<ValueType>(locationVariables.second)).template toAdd<ValueType>() * result.manager->getRange(locationVariables.first).template toAdd<ValueType>() * result.manager->getRange(locationVariables.second).template toAdd<ValueType>();
-                    identity &= variableIdentity.toBdd();
+                    storm::dd::Bdd<Type> variableIdentity = result.manager->getIdentity(locationVariables.first, locationVariables.second);
+                    identity &= variableIdentity;
                     range &= result.manager->getRange(locationVariables.first);
                     
                     // Then create variables for the variables of the automaton.
@@ -392,7 +389,7 @@ namespace storm {
                 }
                 
                 ParameterCreator<Type, ValueType> parameterCreator;
-                parameterCreator.create(model, *result.rowExpressionAdapter, *result.columnExpressionAdapter);
+                parameterCreator.create(model, *result.rowExpressionAdapter);
                 if (std::is_same<ValueType, storm::RationalFunction>::value) {
                     result.parameters = parameterCreator.getParameters();
                 }
@@ -423,8 +420,8 @@ namespace storm {
                 result.columnMetaVariables.insert(variablePair.second);
                 result.variableToColumnMetaVariableMap->emplace(variable.getExpressionVariable(), variablePair.second);
                 
-                storm::dd::Add<Type, ValueType> variableIdentity = result.manager->template getIdentity<ValueType>(variablePair.first).equals(result.manager->template getIdentity<ValueType>(variablePair.second)).template toAdd<ValueType>() * result.manager->getRange(variablePair.first).template toAdd<ValueType>() * result.manager->getRange(variablePair.second).template toAdd<ValueType>();
-                result.variableToIdentityMap.emplace(variable.getExpressionVariable(), variableIdentity);
+                storm::dd::Bdd<Type> variableIdentity = result.manager->getIdentity(variablePair.first, variablePair.second);
+                result.variableToIdentityMap.emplace(variable.getExpressionVariable(), variableIdentity.template toAdd<ValueType>());
                 result.rowColumnMetaVariablePairs.push_back(variablePair);
                 result.variableToRangeMap.emplace(variablePair.first, result.manager->getRange(variablePair.first));
                 result.variableToRangeMap.emplace(variablePair.second, result.manager->getRange(variablePair.second));
@@ -443,8 +440,8 @@ namespace storm {
                 result.columnMetaVariables.insert(variablePair.second);
                 result.variableToColumnMetaVariableMap->emplace(variable.getExpressionVariable(), variablePair.second);
                 
-                storm::dd::Add<Type, ValueType> variableIdentity = result.manager->template getIdentity<ValueType>(variablePair.first).equals(result.manager->template getIdentity<ValueType>(variablePair.second)).template toAdd<ValueType>();
-                result.variableToIdentityMap.emplace(variable.getExpressionVariable(), variableIdentity);
+                storm::dd::Bdd<Type> variableIdentity = result.manager->getIdentity(variablePair.first, variablePair.second);
+                result.variableToIdentityMap.emplace(variable.getExpressionVariable(), variableIdentity.template toAdd<ValueType>());
                 
                 result.variableToRangeMap.emplace(variablePair.first, result.manager->getRange(variablePair.first));
                 result.variableToRangeMap.emplace(variablePair.second, result.manager->getRange(variablePair.second));
@@ -1706,11 +1703,11 @@ namespace storm {
         std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>> createModel(storm::jani::ModelType const& modelType, CompositionVariables<Type, ValueType> const& variables, ModelComponents<Type, ValueType> const& modelComponents) {
             std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>> result;
             if (modelType == storm::jani::ModelType::DTMC) {
-                result = std::make_shared<storm::models::symbolic::Dtmc<Type, ValueType>>(variables.manager, modelComponents.reachableStates, modelComponents.initialStates, modelComponents.deadlockStates, modelComponents.transitionMatrix, variables.rowMetaVariables, variables.rowExpressionAdapter, variables.columnMetaVariables, variables.columnExpressionAdapter, variables.rowColumnMetaVariablePairs, modelComponents.labelToExpressionMap, modelComponents.rewardModels);
+                result = std::make_shared<storm::models::symbolic::Dtmc<Type, ValueType>>(variables.manager, modelComponents.reachableStates, modelComponents.initialStates, modelComponents.deadlockStates, modelComponents.transitionMatrix, variables.rowMetaVariables, variables.rowExpressionAdapter, variables.columnMetaVariables, variables.rowColumnMetaVariablePairs, modelComponents.labelToExpressionMap, modelComponents.rewardModels);
             } else if (modelType == storm::jani::ModelType::CTMC) {
-                result = std::make_shared<storm::models::symbolic::Ctmc<Type, ValueType>>(variables.manager, modelComponents.reachableStates, modelComponents.initialStates, modelComponents.deadlockStates, modelComponents.transitionMatrix, variables.rowMetaVariables, variables.rowExpressionAdapter, variables.columnMetaVariables, variables.columnExpressionAdapter, variables.rowColumnMetaVariablePairs, modelComponents.labelToExpressionMap, modelComponents.rewardModels);
+                result = std::make_shared<storm::models::symbolic::Ctmc<Type, ValueType>>(variables.manager, modelComponents.reachableStates, modelComponents.initialStates, modelComponents.deadlockStates, modelComponents.transitionMatrix, variables.rowMetaVariables, variables.rowExpressionAdapter, variables.columnMetaVariables, variables.rowColumnMetaVariablePairs, modelComponents.labelToExpressionMap, modelComponents.rewardModels);
             } else if (modelType == storm::jani::ModelType::MDP || modelType == storm::jani::ModelType::LTS) {
-                result = std::make_shared<storm::models::symbolic::Mdp<Type, ValueType>>(variables.manager, modelComponents.reachableStates, modelComponents.initialStates, modelComponents.deadlockStates, modelComponents.transitionMatrix, variables.rowMetaVariables, variables.rowExpressionAdapter, variables.columnMetaVariables, variables.columnExpressionAdapter, variables.rowColumnMetaVariablePairs, variables.allNondeterminismVariables, modelComponents.labelToExpressionMap, modelComponents.rewardModels);
+                result = std::make_shared<storm::models::symbolic::Mdp<Type, ValueType>>(variables.manager, modelComponents.reachableStates, modelComponents.initialStates, modelComponents.deadlockStates, modelComponents.transitionMatrix, variables.rowMetaVariables, variables.rowExpressionAdapter, variables.columnMetaVariables, variables.rowColumnMetaVariablePairs, variables.allNondeterminismVariables, modelComponents.labelToExpressionMap, modelComponents.rewardModels);
             } else {
                 STORM_LOG_THROW(false, storm::exceptions::WrongFormatException, "Model type '" << modelType << "' not supported.");
             }
@@ -1857,6 +1854,7 @@ namespace storm {
                 }
             } else {
                 auto const& globalVariables = model.getGlobalVariables();
+                
                 for (auto const& rewardModelName : options.getRewardModelNames()) {
                     if (globalVariables.hasVariable(rewardModelName)) {
                         result.push_back(globalVariables.getVariable(rewardModelName).getExpressionVariable());
@@ -1869,7 +1867,12 @@ namespace storm {
                 // If no reward model was yet added, but there was one that was given in the options, we try to build the
                 // standard reward model.
                 if (result.empty() && !options.getRewardModelNames().empty()) {
-                    result.push_back(globalVariables.getTransientVariables().front().getExpressionVariable());
+                    for (auto const& variable : globalVariables.getTransientVariables()) {
+                        if (variable.isRealVariable() || variable.isUnboundedIntegerVariable()) {
+                            result.push_back(variable.getExpressionVariable());
+                            break;
+                        }
+                    }
                 }
             }
             
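Note on the identity construction used throughout this patch: the new DdManager::getIdentity(rowVariable, columnVariable) replaces the hand-rolled identity that the removed call sites (above and in DdPrismModelBuilder.cpp below) built from equals() and the two variable ranges, either directly as a BDD or as a 0/1 ADD. A minimal sketch of what the replaced expression computed, using only the DD API visible in those removed lines (rowVariable/columnVariable stand for variablePair.first/variablePair.second; presumably the new call is equivalent):

    #include "storm/storage/dd/Add.h"
    #include "storm/storage/dd/Bdd.h"
    #include "storm/storage/dd/DdManager.h"
    #include "storm/storage/expressions/Variable.h"

    // Sketch, not part of the patch: the identity relation (row == column), restricted to the
    // legal ranges of both meta variables, built the way the removed call sites built it.
    template<storm::dd::DdType Type, typename ValueType>
    storm::dd::Bdd<Type> identityBdd(storm::dd::DdManager<Type>& manager,
                                     storm::expressions::Variable const& rowVariable,
                                     storm::expressions::Variable const& columnVariable) {
        return manager.template getIdentity<ValueType>(rowVariable)
                   .equals(manager.template getIdentity<ValueType>(columnVariable))
               && manager.getRange(rowVariable) && manager.getRange(columnVariable);
    }

Converting this BDD with .template toAdd<ValueType>() yields the 0/1 matrix stored in the variableToIdentityMap entries, which is exactly what the new call sites do.
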
diff --git a/src/storm/builder/DdPrismModelBuilder.cpp b/src/storm/builder/DdPrismModelBuilder.cpp
index 410472354..eea14d863 100644
--- a/src/storm/builder/DdPrismModelBuilder.cpp
+++ b/src/storm/builder/DdPrismModelBuilder.cpp
@@ -34,7 +34,7 @@ namespace storm {
         template <storm::dd::DdType Type, typename ValueType>
         class ParameterCreator {
         public:
-            void create(storm::prism::Program const& program, storm::adapters::AddExpressionAdapter<Type, ValueType>& rowExpressionAdapter, storm::adapters::AddExpressionAdapter<Type, ValueType>& columnExpressionAdapter) {
+            void create(storm::prism::Program const& program, storm::adapters::AddExpressionAdapter<Type, ValueType>& rowExpressionAdapter) {
                 // Intentionally left empty: no support for parameters for this data type.
             }
             
@@ -53,14 +53,13 @@ namespace storm {
                 // Intentionally left empty.
             }
             
-            void create(storm::prism::Program const& program, storm::adapters::AddExpressionAdapter<Type, storm::RationalFunction>& rowExpressionAdapter, storm::adapters::AddExpressionAdapter<Type, storm::RationalFunction>& columnExpressionAdapter) {
+            void create(storm::prism::Program const& program, storm::adapters::AddExpressionAdapter<Type, storm::RationalFunction>& rowExpressionAdapter) {
                 for (auto const& constant : program.getConstants()) {
                     if (!constant.isDefined()) {
                         carl::Variable carlVariable = carl::freshRealVariable(constant.getExpressionVariable().getName());
                         parameters.insert(carlVariable);
                         auto rf = convertVariableToPolynomial(carlVariable);
                         rowExpressionAdapter.setValue(constant.getExpressionVariable(), rf);
-                        columnExpressionAdapter.setValue(constant.getExpressionVariable(), rf);
                     }
                 }
             }
@@ -93,14 +92,14 @@ namespace storm {
         template <storm::dd::DdType Type, typename ValueType>
         class DdPrismModelBuilder<Type, ValueType>::GenerationInformation {
         public:
-            GenerationInformation(storm::prism::Program const& program) : program(program), manager(std::make_shared<storm::dd::DdManager<Type>>()), rowMetaVariables(), variableToRowMetaVariableMap(std::make_shared<std::map<storm::expressions::Variable, storm::expressions::Variable>>()), rowExpressionAdapter(std::make_shared<storm::adapters::AddExpressionAdapter<Type, ValueType>>(manager, variableToRowMetaVariableMap)), columnMetaVariables(), variableToColumnMetaVariableMap((std::make_shared<std::map<storm::expressions::Variable, storm::expressions::Variable>>())), columnExpressionAdapter(std::make_shared<storm::adapters::AddExpressionAdapter<Type, ValueType>>(manager, variableToColumnMetaVariableMap)), rowColumnMetaVariablePairs(), nondeterminismMetaVariables(), variableToIdentityMap(), allGlobalVariables(), moduleToIdentityMap(), parameters() {
+            GenerationInformation(storm::prism::Program const& program) : program(program), manager(std::make_shared<storm::dd::DdManager<Type>>()), rowMetaVariables(), variableToRowMetaVariableMap(std::make_shared<std::map<storm::expressions::Variable, storm::expressions::Variable>>()), rowExpressionAdapter(std::make_shared<storm::adapters::AddExpressionAdapter<Type, ValueType>>(manager, variableToRowMetaVariableMap)), columnMetaVariables(), variableToColumnMetaVariableMap((std::make_shared<std::map<storm::expressions::Variable, storm::expressions::Variable>>())), rowColumnMetaVariablePairs(), nondeterminismMetaVariables(), variableToIdentityMap(), allGlobalVariables(), moduleToIdentityMap(), parameters() {
                 
                 // Initializes variables and identity DDs.
                 createMetaVariablesAndIdentities();
                 
                 // Initialize the parameters (if any).
                 ParameterCreator<Type, ValueType> parameterCreator;
-                parameterCreator.create(this->program, *this->rowExpressionAdapter, *this->columnExpressionAdapter);
+                parameterCreator.create(this->program, *this->rowExpressionAdapter);
                 if (std::is_same<ValueType, storm::RationalFunction>::value) {
                     this->parameters = parameterCreator.getParameters();
                 }
@@ -120,7 +119,6 @@ namespace storm {
             // The meta variables for the column encoding.
             std::set<storm::expressions::Variable> columnMetaVariables;
             std::shared_ptr<std::map<storm::expressions::Variable, storm::expressions::Variable>> variableToColumnMetaVariableMap;
-            std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter;
             
             // All pairs of row/column meta variables.
             std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> rowColumnMetaVariablePairs;
@@ -191,8 +189,8 @@ namespace storm {
                     columnMetaVariables.insert(variablePair.second);
                     variableToColumnMetaVariableMap->emplace(integerVariable.getExpressionVariable(), variablePair.second);
                     
-                    storm::dd::Add<Type, ValueType> variableIdentity = manager->template getIdentity<ValueType>(variablePair.first).equals(manager->template getIdentity<ValueType>(variablePair.second)).template toAdd<ValueType>() * manager->getRange(variablePair.first).template toAdd<ValueType>() * manager->getRange(variablePair.second).template toAdd<ValueType>();
-                    variableToIdentityMap.emplace(integerVariable.getExpressionVariable(), variableIdentity);
+                    storm::dd::Bdd<Type> variableIdentity = manager->getIdentity(variablePair.first, variablePair.second);
+                    variableToIdentityMap.emplace(integerVariable.getExpressionVariable(), variableIdentity.template toAdd<ValueType>());
                     rowColumnMetaVariablePairs.push_back(variablePair);
                     
                     allGlobalVariables.insert(integerVariable.getExpressionVariable());
@@ -208,8 +206,8 @@ namespace storm {
                     columnMetaVariables.insert(variablePair.second);
                     variableToColumnMetaVariableMap->emplace(booleanVariable.getExpressionVariable(), variablePair.second);
                     
-                    storm::dd::Add<Type, ValueType> variableIdentity = manager->template getIdentity<ValueType>(variablePair.first).equals(manager->template getIdentity<ValueType>(variablePair.second)).template toAdd<ValueType>();
-                    variableToIdentityMap.emplace(booleanVariable.getExpressionVariable(), variableIdentity);
+                    storm::dd::Bdd<Type> variableIdentity = manager->getIdentity(variablePair.first, variablePair.second);
+                    variableToIdentityMap.emplace(booleanVariable.getExpressionVariable(), variableIdentity.template toAdd<ValueType>());
                     
                     rowColumnMetaVariablePairs.push_back(variablePair);
                     allGlobalVariables.insert(booleanVariable.getExpressionVariable());
@@ -232,7 +230,7 @@ namespace storm {
                         columnMetaVariables.insert(variablePair.second);
                         variableToColumnMetaVariableMap->emplace(integerVariable.getExpressionVariable(), variablePair.second);
                         
-                        storm::dd::Bdd<Type> variableIdentity = manager->template getIdentity<ValueType>(variablePair.first).equals(manager->template getIdentity<ValueType>(variablePair.second)) && manager->getRange(variablePair.first) && manager->getRange(variablePair.second);
+                        storm::dd::Bdd<Type> variableIdentity = manager->getIdentity(variablePair.first, variablePair.second);
                         variableToIdentityMap.emplace(integerVariable.getExpressionVariable(), variableIdentity.template toAdd<ValueType>());
                         moduleIdentity &= variableIdentity;
                         moduleRange &= manager->getRange(variablePair.first);
@@ -249,7 +247,7 @@ namespace storm {
                         columnMetaVariables.insert(variablePair.second);
                         variableToColumnMetaVariableMap->emplace(booleanVariable.getExpressionVariable(), variablePair.second);
                         
-                        storm::dd::Bdd<Type> variableIdentity = manager->template getIdentity<ValueType>(variablePair.first).equals(manager->template getIdentity<ValueType>(variablePair.second)) && manager->getRange(variablePair.first) && manager->getRange(variablePair.second);
+                        storm::dd::Bdd<Type> variableIdentity = manager->getIdentity(variablePair.first, variablePair.second);
                         variableToIdentityMap.emplace(booleanVariable.getExpressionVariable(), variableIdentity.template toAdd<ValueType>());
                         moduleIdentity &= variableIdentity;
                         moduleRange &= manager->getRange(variablePair.first);
@@ -1059,10 +1057,18 @@
         }
         
         template <storm::dd::DdType Type, typename ValueType>
-        storm::dd::Add<Type, ValueType> DdPrismModelBuilder<Type, ValueType>::createSystemFromModule(GenerationInformation& generationInfo, ModuleDecisionDiagram const& module) {
+        storm::dd::Add<Type, ValueType> DdPrismModelBuilder<Type, ValueType>::createSystemFromModule(GenerationInformation& generationInfo, ModuleDecisionDiagram& module) {
+            storm::dd::Add<Type, ValueType> result;
+            
+            // Make sure all actions contain all necessary meta variables.
+            module.independentAction.ensureContainsVariables(generationInfo.rowMetaVariables, generationInfo.columnMetaVariables);
+            for (auto& synchronizingAction : module.synchronizingActionToDecisionDiagramMap) {
+                synchronizingAction.second.ensureContainsVariables(generationInfo.rowMetaVariables, generationInfo.columnMetaVariables);
+            }
+
             // If the model is an MDP, we need to encode the nondeterminism using additional variables.
             if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::MDP) {
-                storm::dd::Add<Type, ValueType> result = generationInfo.manager->template getAddZero<ValueType>();
+                result = generationInfo.manager->template getAddZero<ValueType>();
                 
                 // First, determine the highest number of nondeterminism variables that is used in any action and make
                 // all actions use the same amount of nondeterminism variables.
@@ -1082,6 +1091,7 @@ namespace storm {
                 for (uint_fast64_t i = module.independentAction.numberOfUsedNondeterminismVariables; i < numberOfUsedNondeterminismVariables; ++i) {
                     nondeterminismEncoding *= generationInfo.manager->getEncoding(generationInfo.nondeterminismMetaVariables[i], 0).template toAdd<ValueType>();
                 }
+
                 result = identityEncoding * module.independentAction.transitionsDd * nondeterminismEncoding;
                 
                 // Add variables to synchronized action DDs.
@@ -1114,8 +1124,6 @@ namespace storm {
                 for (auto const& synchronizingAction : synchronizingActionToDdMap) {
                     result += synchronizingAction.second;
                 }
-                
-                return result;
             } else if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::DTMC || generationInfo.program.getModelType() == storm::prism::Program::ModelType::CTMC) {
                 // Simply add all actions, but make sure to include the missing global variable identities.
                 
@@ -1128,8 +1136,7 @@ namespace storm {
                     identityEncoding *= generationInfo.variableToIdentityMap.at(variable);
                 }
 
-                storm::dd::Add<Type, ValueType> result = identityEncoding * module.independentAction.transitionsDd;
-                
+                result = identityEncoding * module.independentAction.transitionsDd;
                 for (auto const& synchronizingAction : module.synchronizingActionToDecisionDiagramMap) {
                     // Compute missing global variable identities in synchronizing actions.
                     missingIdentities = std::set<storm::expressions::Variable>();
@@ -1142,10 +1149,10 @@ namespace storm {
                     
                     result += identityEncoding * synchronizingAction.second.transitionsDd;
                 }
-                return result;
             } else {
                 STORM_LOG_THROW(false, storm::exceptions::InvalidArgumentException, "Illegal model type.");
             }
+            return result;
         }
         
         template <storm::dd::DdType Type, typename ValueType>
@@ -1241,8 +1248,8 @@ namespace storm {
                             stateActionDd = transitionMatrix.notZero().existsAbstract(generationInfo.columnMetaVariables).template toAdd<ValueType>();
                         }
                         stateActionRewardDd *= stateActionDd.get();
-                    } else if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::CTMC) {
-                        // For CTMCs, we need to multiply the entries with the exit rate of the corresponding action.
+                    } else if (generationInfo.program.getModelType() == storm::prism::Program::ModelType::DTMC || generationInfo.program.getModelType() == storm::prism::Program::ModelType::CTMC) {
+                        // For DTMCs and CTMCs, we need to multiply the entries by the multiplicity/exit rate of the corresponding action.
                         stateActionRewardDd *= actionDd.transitionsDd.sumAbstract(generationInfo.columnMetaVariables);
                     }
                     
@@ -1358,7 +1365,7 @@ namespace storm {
                         } else {
                             STORM_LOG_THROW(labelName == "init" || labelName == "deadlock", storm::exceptions::InvalidArgumentException, "Terminal states refer to illegal label '" << labelName << "'.");
                         }
-                        }
+                    }
                     
                     if (terminalExpression.isInitialized()) {
                         // If the expression refers to constants of the model, we need to substitute them.
@@ -1484,11 +1491,11 @@ namespace storm {
             
             std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>> result;
             if (program.getModelType() == storm::prism::Program::ModelType::DTMC) {
-                result = std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>>(new storm::models::symbolic::Dtmc<Type, ValueType>(generationInfo.manager, reachableStates, initialStates, deadlockStates, transitionMatrix, generationInfo.rowMetaVariables, generationInfo.rowExpressionAdapter, generationInfo.columnMetaVariables, generationInfo.columnExpressionAdapter, generationInfo.rowColumnMetaVariablePairs, labelToExpressionMapping, rewardModels));
+                result = std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>>(new storm::models::symbolic::Dtmc<Type, ValueType>(generationInfo.manager, reachableStates, initialStates, deadlockStates, transitionMatrix, generationInfo.rowMetaVariables, generationInfo.rowExpressionAdapter, generationInfo.columnMetaVariables, generationInfo.rowColumnMetaVariablePairs, labelToExpressionMapping, rewardModels));
             } else if (program.getModelType() == storm::prism::Program::ModelType::CTMC) {
-                result = std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>>(new storm::models::symbolic::Ctmc<Type, ValueType>(generationInfo.manager, reachableStates, initialStates, deadlockStates, transitionMatrix, system.stateActionDd, generationInfo.rowMetaVariables, generationInfo.rowExpressionAdapter, generationInfo.columnMetaVariables, generationInfo.columnExpressionAdapter, generationInfo.rowColumnMetaVariablePairs, labelToExpressionMapping, rewardModels));
+                result = std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>>(new storm::models::symbolic::Ctmc<Type, ValueType>(generationInfo.manager, reachableStates, initialStates, deadlockStates, transitionMatrix, system.stateActionDd, generationInfo.rowMetaVariables, generationInfo.rowExpressionAdapter, generationInfo.columnMetaVariables, generationInfo.rowColumnMetaVariablePairs, labelToExpressionMapping, rewardModels));
             } else if (program.getModelType() == storm::prism::Program::ModelType::MDP) {
-                result = std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>>(new storm::models::symbolic::Mdp<Type, ValueType>(generationInfo.manager, reachableStates, initialStates, deadlockStates, transitionMatrix, generationInfo.rowMetaVariables, generationInfo.rowExpressionAdapter, generationInfo.columnMetaVariables, generationInfo.columnExpressionAdapter, generationInfo.rowColumnMetaVariablePairs, generationInfo.allNondeterminismVariables, labelToExpressionMapping, rewardModels));
+                result = std::shared_ptr<storm::models::symbolic::Model<Type, ValueType>>(new storm::models::symbolic::Mdp<Type, ValueType>(generationInfo.manager, reachableStates, initialStates, deadlockStates, transitionMatrix, generationInfo.rowMetaVariables, generationInfo.rowExpressionAdapter, generationInfo.columnMetaVariables, generationInfo.rowColumnMetaVariablePairs, generationInfo.allNondeterminismVariables, labelToExpressionMapping, rewardModels));
             } else {
                 STORM_LOG_THROW(false, storm::exceptions::InvalidArgumentException, "Invalid model type.");
             }
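
For the reward hunk above (@@ -1241,8 +1248,8 @@): the new DTMC/CTMC branch scales every state-action reward by the row sum of the action's unnormalized transition ADD. As a plain sketch of the semantics (T denotes the action's transitionsDd):

    weight(s)             = sum over all column valuations s' of T(s, s')
                            (the action's multiplicity for a DTMC, its exit rate for a CTMC)
    stateActionReward'(s) = weight(s) * stateActionReward(s)

which is what stateActionRewardDd *= actionDd.transitionsDd.sumAbstract(generationInfo.columnMetaVariables) computes symbolically.
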
diff --git a/src/storm/builder/DdPrismModelBuilder.h b/src/storm/builder/DdPrismModelBuilder.h
index f3b7adc31..57d990b23 100644
--- a/src/storm/builder/DdPrismModelBuilder.h
+++ b/src/storm/builder/DdPrismModelBuilder.h
@@ -130,6 +130,12 @@ namespace storm {
                     // Intentionally left empty.
                 }
                 
+                void ensureContainsVariables(std::set<storm::expressions::Variable> const& rowMetaVariables, std::set<storm::expressions::Variable> const& columnMetaVariables) {
+                    guardDd.addMetaVariables(rowMetaVariables);
+                    transitionsDd.addMetaVariables(rowMetaVariables);
+                    transitionsDd.addMetaVariables(columnMetaVariables);
+                }
+                
                 ActionDecisionDiagram(ActionDecisionDiagram const& other) = default;
                 ActionDecisionDiagram& operator=(ActionDecisionDiagram const& other) = default;
                 
@@ -228,7 +234,7 @@ namespace storm {
 
             static storm::dd::Add<Type, ValueType> getSynchronizationDecisionDiagram(GenerationInformation& generationInfo, uint_fast64_t actionIndex = 0);
             
-            static storm::dd::Add<Type, ValueType> createSystemFromModule(GenerationInformation& generationInfo, ModuleDecisionDiagram const& module);
+            static storm::dd::Add<Type, ValueType> createSystemFromModule(GenerationInformation& generationInfo, ModuleDecisionDiagram& module);
             
             static std::unordered_map<std::string, storm::models::symbolic::StandardRewardModel<Type, ValueType>> createRewardModelDecisionDiagrams(std::vector<std::reference_wrapper<storm::prism::RewardModel const>> const& selectedRewardModels, SystemResult& system, GenerationInformation& generationInfo, ModuleDecisionDiagram const& globalModule, storm::dd::Add<Type, ValueType> const& reachableStatesAdd, storm::dd::Add<Type, ValueType> const& transitionMatrix);
 
diff --git a/src/storm/builder/ExplicitModelBuilder.cpp b/src/storm/builder/ExplicitModelBuilder.cpp
index 9de544e7b..665190e6c 100644
--- a/src/storm/builder/ExplicitModelBuilder.cpp
+++ b/src/storm/builder/ExplicitModelBuilder.cpp
@@ -306,7 +306,7 @@ namespace storm {
             
             // initialize the model components with the obtained information.
             storm::storage::sparse::ModelComponents<ValueType, RewardModelType> modelComponents(transitionMatrixBuilder.build(), buildStateLabeling(), std::unordered_map<std::string, RewardModelType>(), !generator->isDiscreteTimeModel(), std::move(markovianStates));
-
+            
             // Now finalize all reward models.
             for (auto& rewardModelBuilder : rewardModelBuilders) {
                 modelComponents.rewardModels.emplace(rewardModelBuilder.getName(), rewardModelBuilder.build(modelComponents.transitionMatrix.getRowCount(), modelComponents.transitionMatrix.getColumnCount(), modelComponents.transitionMatrix.getRowGroupCount()));
diff --git a/src/storm/builder/jit/ExplicitJitJaniModelBuilder.cpp b/src/storm/builder/jit/ExplicitJitJaniModelBuilder.cpp
index 67ff3be5d..8f23d14ce 100644
--- a/src/storm/builder/jit/ExplicitJitJaniModelBuilder.cpp
+++ b/src/storm/builder/jit/ExplicitJitJaniModelBuilder.cpp
@@ -99,6 +99,7 @@ namespace storm {
                 } else {
                     carlIncludeDirectory = STORM_CARL_INCLUDE_DIR;
                 }
+                sparseppIncludeDirectory = STORM_BUILD_DIR "/include/resources/3rdparty/sparsepp/";
                 
                 // Register all transient variables as transient.
                 for (auto const& variable : this->model.getGlobalVariables().getTransientVariables()) {
@@ -1675,7 +1676,7 @@ namespace storm {
 #include "storm/adapters/RationalFunctionAdapter.h"
 {% endif %}
                 
-#include "resources/3rdparty/sparsepp/sparsepp.h"
+#include <sparsepp/spp.h>
                 
 #include "storm/builder/jit/StateSet.h"
 #include "storm/builder/jit/JitModelBuilderInterface.h"
@@ -2462,7 +2463,7 @@ namespace storm {
                 dynamicLibraryPath += DYLIB_EXTENSION;
                 std::string dynamicLibraryFilename = boost::filesystem::absolute(dynamicLibraryPath).string();
                 
-                std::string command = compiler + " " + sourceFilename + " " + compilerFlags + " -I" + stormIncludeDirectory + " -I" + boostIncludeDirectory + " -I" + carlIncludeDirectory + " -o " + dynamicLibraryFilename;
+                std::string command = compiler + " " + sourceFilename + " " + compilerFlags + " -I" + stormIncludeDirectory + " -I" + sparseppIncludeDirectory + " -I" + boostIncludeDirectory + " -I" + carlIncludeDirectory + " -o " + dynamicLibraryFilename;
                 boost::optional<std::string> error = execute(command);
                 
                 if (error) {
diff --git a/src/storm/builder/jit/ExplicitJitJaniModelBuilder.h b/src/storm/builder/jit/ExplicitJitJaniModelBuilder.h
index 9e4cea5f5..7d4f1365a 100644
--- a/src/storm/builder/jit/ExplicitJitJaniModelBuilder.h
+++ b/src/storm/builder/jit/ExplicitJitJaniModelBuilder.h
@@ -192,6 +192,9 @@ namespace storm {
                 /// The include directory of carl.
                 std::string carlIncludeDirectory;
                 
+                /// The include directory of sparsepp.
+                std::string sparseppIncludeDirectory;
+                
                 /// A cache that is used by carl.
                 std::shared_ptr<carl::Cache<carl::PolynomialFactorizationPair<RawPolynomial>>> cache;
             };
diff --git a/src/storm/generator/PrismNextStateGenerator.cpp b/src/storm/generator/PrismNextStateGenerator.cpp
index 6b80e8c52..bf9d73876 100644
--- a/src/storm/generator/PrismNextStateGenerator.cpp
+++ b/src/storm/generator/PrismNextStateGenerator.cpp
@@ -216,7 +216,6 @@ namespace storm {
             for (auto& choice : allLabeledChoices) {
                 allChoices.push_back(std::move(choice));
             }
-            
             std::size_t totalNumberOfChoices = allChoices.size();
             
             // If there is not a single choice, we return immediately, because the state has no behavior (other than
@@ -283,9 +282,9 @@ namespace storm {
             for (auto& choice : allChoices) {
                 result.addChoice(std::move(choice));
             }
-            
+
             this->postprocess(result);
-                        
+            
             return result;
         }
         
@@ -394,7 +393,7 @@ namespace storm {
                     if (!this->evaluator->asBool(command.getGuardExpression())) {
                         continue;
                     }
-                                        
+                    
                     result.push_back(Choice<ValueType>(command.getActionIndex(), command.isMarkovian()));
                     Choice<ValueType>& choice = result.back();
                     
diff --git a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
index e0cd132ee..b574cabd1 100644
--- a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
+++ b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
@@ -264,7 +264,7 @@ namespace storm {
                 storm::storage::SparseMatrix<ValueType> uniformizedMatrix = computeUniformizedMatrix(rateMatrix, storm::storage::BitVector(numberOfStates, true), uniformizationRate, exitRateVector);
                 
                 // Compute the total state reward vector.
-                std::vector<ValueType> totalRewardVector = rewardModel.getTotalRewardVector(rateMatrix, exitRateVector, false);
+                std::vector<ValueType> totalRewardVector = rewardModel.getTotalRewardVector(rateMatrix, exitRateVector);
                 
                 // Finally, compute the transient probabilities.
                 return computeTransientProbabilities<ValueType, true>(uniformizedMatrix, nullptr, timeBound, uniformizationRate, totalRewardVector, linearEquationSolverFactory);
@@ -359,7 +359,7 @@ namespace storm {
                 // Only compute the result if the model has a state-based reward model.
                 STORM_LOG_THROW(!rewardModel.empty(), storm::exceptions::InvalidPropertyException, "Missing reward model for formula. Skipping formula.");
 
-                return computeLongRunAverageRewards(probabilityMatrix, rewardModel.getTotalRewardVector(probabilityMatrix, *exitRateVector, true), exitRateVector, linearEquationSolverFactory);
+                return computeLongRunAverageRewards(probabilityMatrix, rewardModel.getTotalRewardVector(probabilityMatrix, *exitRateVector), exitRateVector, linearEquationSolverFactory);
             }
             
             template <typename ValueType>
diff --git a/src/storm/modelchecker/exploration/SparseExplorationModelChecker.cpp b/src/storm/modelchecker/exploration/SparseExplorationModelChecker.cpp
index de099d655..e13192034 100644
--- a/src/storm/modelchecker/exploration/SparseExplorationModelChecker.cpp
+++ b/src/storm/modelchecker/exploration/SparseExplorationModelChecker.cpp
@@ -440,8 +440,9 @@ namespace storm {
                 // duplicate work is the following. Optimally, we would only do the MEC decomposition, because we need
                 // it anyway. However, when only detecting (accepting) MECs, we do not infer which of the other states
                 // (not contained in MECs) also have probability 0/1.
-                statesWithProbability0 = storm::utility::graph::performProb0A(transposedMatrix, allStates, targetStates);
                 targetStates.set(sink, true);
+                statesWithProbability0 = storm::utility::graph::performProb0A(transposedMatrix, allStates, targetStates);
+                targetStates.set(sink, false);
                 statesWithProbability1 = storm::utility::graph::performProb1E(relevantStatesMatrix, relevantStatesMatrix.getRowGroupIndices(), transposedMatrix, allStates, targetStates);
                 
                 storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(relevantStatesMatrix, relevantStatesMatrix.transpose(true));
@@ -449,7 +450,8 @@ namespace storm {
                 STORM_LOG_TRACE("Successfully computed MEC decomposition. Found " << (mecDecomposition.size() > 1 ? (mecDecomposition.size() - 1) : 0) << " MEC(s).");
                 
                 // If the decomposition contains only the MEC consisting of the sink state, we count it as 'failed'.
-                if (mecDecomposition.size() > 1) {
+                STORM_LOG_ASSERT(mecDecomposition.size() > 0, "Expected at least one MEC (the trivial sink MEC).");
+                if (mecDecomposition.size() == 1) {
                     ++stats.failedEcDetections;
                 } else {
                     stats.totalNumberOfEcDetected += mecDecomposition.size() - 1;
@@ -476,12 +478,23 @@ namespace storm {
             }
             
             // Set the bounds of the identified states.
+            STORM_LOG_ASSERT((statesWithProbability0 & statesWithProbability1).empty(), "States with probability 0 and 1 overlap.");
             for (auto state : statesWithProbability0) {
+                // Skip the sink state as it is not contained in the original system.
+                if (state == sink) {
+                    continue;
+                }
+                
                 StateType originalState = relevantStates[state];
                 bounds.setUpperBoundForState(originalState, explorationInformation, storm::utility::zero<ValueType>());
                 explorationInformation.addTerminalState(originalState);
             }
             for (auto state : statesWithProbability1) {
+                // Skip the sink state as it is not contained in the original system.
+                if (state == sink) {
+                    continue;
+                }
+
                 StateType originalState = relevantStates[state];
                 bounds.setLowerBoundForState(originalState, explorationInformation, storm::utility::one<ValueType>());
                 explorationInformation.addTerminalState(originalState);
diff --git a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
index 2b186fdca..93c1674b0 100644
--- a/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
+++ b/src/storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.cpp
@@ -394,10 +394,8 @@ namespace storm {
             MDPSparseModelCheckingHelperReturnType<ValueType> SparseMdpPrctlHelper<ValueType>::computeReachabilityRewardsHelper(storm::solver::SolveGoal const& goal, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::function<std::vector<ValueType>(uint_fast64_t, storm::storage::SparseMatrix<ValueType> const&, storm::storage::BitVector const&)> const& totalStateRewardVectorGetter, storm::storage::BitVector const& targetStates, bool qualitative, bool produceScheduler, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory, ModelCheckerHint const& hint) {
                 
                 std::vector<ValueType> result(transitionMatrix.getRowGroupCount(), storm::utility::zero<ValueType>());
-
                 std::vector<uint_fast64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
                 
-                
                 // Determine which states have a reward that is infinity or less than infinity.
                 storm::storage::BitVector maybeStates, infinityStates;
                 if (hint.isExplicitModelCheckerHint() && hint.template asExplicitModelCheckerHint<ValueType>().getComputeOnlyMaybeStates()) {
@@ -503,7 +501,6 @@ namespace storm {
                 }
                 STORM_LOG_ASSERT((!produceScheduler && !scheduler) || (!scheduler->isPartialScheduler() && scheduler->isDeterministicScheduler() && scheduler->isMemorylessScheduler()), "Unexpected format of obtained scheduler.");
 
-                
                 return MDPSparseModelCheckingHelperReturnType<ValueType>(std::move(result), std::move(scheduler));
             }
             
diff --git a/src/storm/models/Model.h b/src/storm/models/Model.h
new file mode 100644
index 000000000..4f17fdf24
--- /dev/null
+++ b/src/storm/models/Model.h
@@ -0,0 +1,23 @@
+#pragma once
+
+#include "storm/models/ModelBase.h"
+
+namespace storm {
+    namespace models {
+        
+        template<typename ValueType>
+        class Model : public ModelBase {
+        public:
+            /*!
+             * Constructs a model of the given type.
+             *
+             * @param modelType The type of the model.
+             */
+            Model(ModelType const& modelType) : ModelBase(modelType) {
+                // Intentionally left empty.
+            }
+        };
+
+        
+    }
+}
diff --git a/src/storm/models/ModelBase.h b/src/storm/models/ModelBase.h
index cbef9a9dc..614ebdcd5 100644
--- a/src/storm/models/ModelBase.h
+++ b/src/storm/models/ModelBase.h
@@ -38,6 +38,17 @@ namespace storm {
                 return std::dynamic_pointer_cast<ModelType>(this->shared_from_this());
             }
             
+            /*!
+             * Casts the model into the model type given by the template parameter.
+             *
+             * @return A shared pointer of the requested type that points to the model if the cast succeeded and a null
+             * pointer otherwise.
+             */
+            template <typename ModelType>
+            std::shared_ptr<ModelType const> as() const {
+                return std::dynamic_pointer_cast<ModelType const>(this->shared_from_this());
+            }
+            
             /*!
              *	@brief Return the actual type of the model.
              *
@@ -111,6 +122,14 @@ namespace storm {
              */
             virtual bool isExact() const;
             
+            /*!
+             * Converts the transition rewards of all reward models to state-based rewards. For deterministic models,
+             * this reduces the rewards to state rewards only. For nondeterministic models, the reward models will
+             * contain state rewards and state-action rewards. Note that this transformation does not preserve all
+             * properties, but it preserves expected rewards.
+             */
+            virtual void reduceToStateBasedRewards() = 0;
+            
         private:
             // The type of the model.
             ModelType modelType;
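
A small usage sketch for the new const overload of as<>() added above (illustrative only; the header paths and getNumberOfStates() are assumed from the rest of the code base):

    #include <iostream>
    #include <memory>

    #include "storm/models/ModelBase.h"
    #include "storm/models/sparse/Dtmc.h"

    // Sketch: downcast a const model without const_cast. as<>() returns a shared_ptr to a
    // const Dtmc, or a null pointer if the dynamic cast fails.
    void printIfDtmc(std::shared_ptr<storm::models::ModelBase const> const& model) {
        auto dtmc = model->as<storm::models::sparse::Dtmc<double>>();
        if (dtmc) {
            std::cout << "DTMC with " << dtmc->getNumberOfStates() << " states" << std::endl;
        }
    }
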
diff --git a/src/storm/models/sparse/Ctmc.cpp b/src/storm/models/sparse/Ctmc.cpp
index bdfb406bb..9b36ded21 100644
--- a/src/storm/models/sparse/Ctmc.cpp
+++ b/src/storm/models/sparse/Ctmc.cpp
@@ -72,6 +72,13 @@ namespace storm {
                 return exitRates;
             }
             
+            template<typename ValueType, typename RewardModelType>
+            void Ctmc<ValueType, RewardModelType>::reduceToStateBasedRewards() {
+                for (auto& rewardModel : this->getRewardModels()) {
+                    rewardModel.second.reduceToStateBasedRewards(this->getTransitionMatrix(), true, &exitRates);
+                }
+            }
+            
             template class Ctmc<double>;
 
 #ifdef STORM_HAVE_CARL
diff --git a/src/storm/models/sparse/Ctmc.h b/src/storm/models/sparse/Ctmc.h
index c6afaac16..ed5a46329 100644
--- a/src/storm/models/sparse/Ctmc.h
+++ b/src/storm/models/sparse/Ctmc.h
@@ -62,6 +62,8 @@ namespace storm {
                  */
                 std::vector<ValueType>& getExitRateVector();
 
+                virtual void reduceToStateBasedRewards() override;
+                
             private:
                 /*!
                  * Computes the exit rate vector based on the given rate matrix.
diff --git a/src/storm/models/sparse/DeterministicModel.cpp b/src/storm/models/sparse/DeterministicModel.cpp
index 27c923bfa..5e2b85fe4 100644
--- a/src/storm/models/sparse/DeterministicModel.cpp
+++ b/src/storm/models/sparse/DeterministicModel.cpp
@@ -59,13 +59,6 @@ namespace storm {
                 }
             }
             
-            template<typename ValueType, typename RewardModelType>
-            void DeterministicModel<ValueType, RewardModelType>::reduceToStateBasedRewards() {
-                for (auto& rewardModel : this->getRewardModels()) {
-                    rewardModel.second.reduceToStateBasedRewards(this->getTransitionMatrix(), true);
-                }
-            }
-            
             template class DeterministicModel<double>;
 #ifdef STORM_HAVE_CARL
             template class DeterministicModel<storm::RationalNumber>;
diff --git a/src/storm/models/sparse/DeterministicModel.h b/src/storm/models/sparse/DeterministicModel.h
index f2dd82e12..13486ef5c 100644
--- a/src/storm/models/sparse/DeterministicModel.h
+++ b/src/storm/models/sparse/DeterministicModel.h
@@ -29,8 +29,6 @@ namespace storm {
 
                 DeterministicModel(DeterministicModel<ValueType, RewardModelType>&& other) = default;
                 DeterministicModel<ValueType, RewardModelType>& operator=(DeterministicModel<ValueType, RewardModelType>&& model) = default;
-
-                virtual void reduceToStateBasedRewards() override;
                 
                 virtual void writeDotToStream(std::ostream& outStream, bool includeLabeling = true, storm::storage::BitVector const* subsystem = nullptr, std::vector<ValueType> const* firstValue = nullptr, std::vector<ValueType> const* secondValue = nullptr, std::vector<uint_fast64_t> const* stateColoring = nullptr, std::vector<std::string> const* colors = nullptr, std::vector<uint_fast64_t>* scheduler = nullptr, bool finalizeOutput = true) const override;
             };
diff --git a/src/storm/models/sparse/Dtmc.cpp b/src/storm/models/sparse/Dtmc.cpp
index 2fa945420..29b31babe 100644
--- a/src/storm/models/sparse/Dtmc.cpp
+++ b/src/storm/models/sparse/Dtmc.cpp
@@ -35,6 +35,12 @@ namespace storm {
                 // Intentionally left empty
             }
    
+            template<typename ValueType, typename RewardModelType>
+            void Dtmc<ValueType, RewardModelType>::reduceToStateBasedRewards() {
+                for (auto& rewardModel : this->getRewardModels()) {
+                    rewardModel.second.reduceToStateBasedRewards(this->getTransitionMatrix(), true);
+                }
+            }
             
             template class Dtmc<double>;
 
diff --git a/src/storm/models/sparse/Dtmc.h b/src/storm/models/sparse/Dtmc.h
index ecc3ecba8..6809b686c 100644
--- a/src/storm/models/sparse/Dtmc.h
+++ b/src/storm/models/sparse/Dtmc.h
@@ -50,6 +50,7 @@ namespace storm {
                 Dtmc(Dtmc<ValueType, RewardModelType>&& dtmc) = default;
                 Dtmc& operator=(Dtmc<ValueType, RewardModelType>&& dtmc) = default;
 
+                virtual void reduceToStateBasedRewards() override;
 
             };
             
diff --git a/src/storm/models/sparse/Mdp.cpp b/src/storm/models/sparse/Mdp.cpp
index 38226702d..081940600 100644
--- a/src/storm/models/sparse/Mdp.cpp
+++ b/src/storm/models/sparse/Mdp.cpp
@@ -37,27 +37,7 @@ namespace storm {
                 // Intentionally left empty
             }
    
-            template <typename ValueType, typename RewardModelType>
-            Mdp<ValueType, RewardModelType> Mdp<ValueType, RewardModelType>::restrictChoices(storm::storage::BitVector const& enabledChoices) const {
-                storm::storage::sparse::ModelComponents<ValueType, RewardModelType> newComponents(this->getTransitionMatrix().restrictRows(enabledChoices));
-                newComponents.stateLabeling = this->getStateLabeling();
-                for (auto const& rewardModel : this->getRewardModels()) {
-                    newComponents.rewardModels.emplace(rewardModel.first, rewardModel.second.restrictActions(enabledChoices));
-                }
-                if (this->hasChoiceLabeling()) {
-                    newComponents.choiceLabeling = this->getChoiceLabeling().getSubLabeling(enabledChoices);
-                }
-                newComponents.stateValuations = this->getOptionalStateValuations();
-                if (this->hasChoiceOrigins()) {
-                    newComponents.choiceOrigins = this->getChoiceOrigins()->selectChoices(enabledChoices);
-                }
-                return Mdp<ValueType, RewardModelType>(std::move(newComponents));
-            }
 
-            template<typename ValueType, typename RewardModelType>
-            uint_least64_t Mdp<ValueType, RewardModelType>::getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const {
-                return this->getNondeterministicChoiceIndices()[stateactPair.getState()]+stateactPair.getAction();
-            }
 
             template class Mdp<double>;
 
diff --git a/src/storm/models/sparse/Mdp.h b/src/storm/models/sparse/Mdp.h
index e65770275..9e5ea7509 100644
--- a/src/storm/models/sparse/Mdp.h
+++ b/src/storm/models/sparse/Mdp.h
@@ -1,9 +1,7 @@
 #ifndef STORM_MODELS_SPARSE_MDP_H_
 #define STORM_MODELS_SPARSE_MDP_H_
 
-#include "storm/storage/StateActionPair.h"
 #include "storm/models/sparse/NondeterministicModel.h"
-#include "storm/utility/OsDetection.h"
 
 namespace storm {
     namespace models {
@@ -50,19 +48,6 @@ namespace storm {
                 
                 Mdp(Mdp<ValueType, RewardModelType>&& other) = default;
                 Mdp& operator=(Mdp<ValueType, RewardModelType>&& other) = default;
-
-                /*!
-                 * Constructs an MDP by copying the current MDP and restricting the choices of each state to the ones given by the bitvector.
-                 * 
-                 * @param enabledActions A BitVector of lenght numberOfChoices(), which is one iff the action should be kept.
-                 * @return A subMDP.
-                 */
-                Mdp<ValueType, RewardModelType> restrictChoices(storm::storage::BitVector const& enabledActions) const;
-
-                /*!
-                 *  For a state/action pair, get the choice index referring to the state-action pair.
-                 */
-                uint_fast64_t getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const;
             };
             
         } // namespace sparse
diff --git a/src/storm/models/sparse/Model.cpp b/src/storm/models/sparse/Model.cpp
index a1ca205a7..062a7fca6 100644
--- a/src/storm/models/sparse/Model.cpp
+++ b/src/storm/models/sparse/Model.cpp
@@ -14,17 +14,17 @@
 namespace storm {
     namespace models {
         namespace sparse {
-            
+
             template <typename ValueType, typename RewardModelType>
             Model<ValueType, RewardModelType>::Model(ModelType modelType, storm::storage::sparse::ModelComponents<ValueType, RewardModelType> const& components)
-                    : ModelBase(modelType), transitionMatrix(components.transitionMatrix), stateLabeling(components.stateLabeling), rewardModels(components.rewardModels),
+            : storm::models::Model<ValueType>(modelType), transitionMatrix(components.transitionMatrix), stateLabeling(components.stateLabeling), rewardModels(components.rewardModels),
                       choiceLabeling(components.choiceLabeling), stateValuations(components.stateValuations), choiceOrigins(components.choiceOrigins) {
                 assertValidityOfComponents(components);
             }
             
             template <typename ValueType, typename RewardModelType>
             Model<ValueType, RewardModelType>::Model(ModelType modelType, storm::storage::sparse::ModelComponents<ValueType, RewardModelType>&& components)
-                    : ModelBase(modelType), transitionMatrix(std::move(components.transitionMatrix)), stateLabeling(std::move(components.stateLabeling)), rewardModels(std::move(components.rewardModels)),
+            : storm::models::Model<ValueType>(modelType), transitionMatrix(std::move(components.transitionMatrix)), stateLabeling(std::move(components.stateLabeling)), rewardModels(std::move(components.rewardModels)),
                       choiceLabeling(std::move(components.choiceLabeling)), stateValuations(std::move(components.stateValuations)), choiceOrigins(std::move(components.choiceOrigins)) {
                 assertValidityOfComponents(components);
             }
diff --git a/src/storm/models/sparse/Model.h b/src/storm/models/sparse/Model.h
index 2659d5a46..5e52a9e12 100644
--- a/src/storm/models/sparse/Model.h
+++ b/src/storm/models/sparse/Model.h
@@ -5,7 +5,7 @@
 #include <unordered_map>
 #include <boost/optional.hpp>
 
-#include "storm/models/ModelBase.h"
+#include "storm/models/Model.h"
 #include "storm/models/sparse/StateLabeling.h"
 #include "storm/models/sparse/ChoiceLabeling.h"
 #include "storm/storage/sparse/ModelComponents.h"
@@ -30,8 +30,7 @@ namespace storm {
              * Base class for all sparse models.
              */
             template<class CValueType, class CRewardModelType = StandardRewardModel<CValueType>>
-            class Model : public storm::models::ModelBase {
-            
+            class Model : public storm::models::Model<CValueType> {
             public:
                 typedef CValueType ValueType;
                 typedef CRewardModelType RewardModelType;
@@ -293,16 +292,7 @@ namespace storm {
                  * @return The choice origins, if they're saved.
                  */
                 boost::optional<std::shared_ptr<storm::storage::sparse::ChoiceOrigins>>&  getOptionalChoiceOrigins();
-                
-                
-                /*!
-                 * Converts the transition rewards of all reward models to state-based rewards. For deterministic models,
-                 * this reduces the rewards to state rewards only. For nondeterminstic models, the reward models will
-                 * contain state rewards and state-action rewards. Note that this transformation does not preserve all
-                 * properties, but it preserves expected rewards.
-                 */
-                virtual void reduceToStateBasedRewards() = 0;
-                                
+                                                
                 /*!
                  * Prints information about the model to the specified stream.
                  *
diff --git a/src/storm/models/sparse/NondeterministicModel.cpp b/src/storm/models/sparse/NondeterministicModel.cpp
index 3382bfd95..acdbd3885 100644
--- a/src/storm/models/sparse/NondeterministicModel.cpp
+++ b/src/storm/models/sparse/NondeterministicModel.cpp
@@ -185,7 +185,12 @@ namespace storm {
                     outStream << "}" << std::endl;
                 }
             }
-            
+
+            template<typename ValueType, typename RewardModelType>
+            uint_least64_t NondeterministicModel<ValueType, RewardModelType>::getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const {
+                return this->getNondeterministicChoiceIndices()[stateactPair.getState()]+stateactPair.getAction();
+            }
+
             template class NondeterministicModel<double>;
 
 #ifdef STORM_HAVE_CARL
diff --git a/src/storm/models/sparse/NondeterministicModel.h b/src/storm/models/sparse/NondeterministicModel.h
index ab0a21a04..dd1335718 100644
--- a/src/storm/models/sparse/NondeterministicModel.h
+++ b/src/storm/models/sparse/NondeterministicModel.h
@@ -2,7 +2,7 @@
 #define STORM_MODELS_SPARSE_NONDETERMINISTICMODEL_H_
 
 #include "storm/models/sparse/Model.h"
-#include "storm/utility/OsDetection.h"
+#include "storm/storage/StateActionPair.h"
 
 namespace storm {
     
@@ -54,7 +54,11 @@ namespace storm {
                 uint_fast64_t getNumberOfChoices(uint_fast64_t state) const;
                 
                 virtual void reduceToStateBasedRewards() override;
-                
+
+                /*!
+                 *  For a state/action pair, get the choice index referring to the state-action pair.
+                 */
+                uint_fast64_t getChoiceIndex(storm::storage::StateActionPair const& stateactPair) const;
                 /*!
                  * Applies the given scheduler to this model.
                  * @param scheduler the considered scheduler.
diff --git a/src/storm/models/sparse/StandardRewardModel.cpp b/src/storm/models/sparse/StandardRewardModel.cpp
index 721608047..4385c46e1 100644
--- a/src/storm/models/sparse/StandardRewardModel.cpp
+++ b/src/storm/models/sparse/StandardRewardModel.cpp
@@ -165,7 +165,7 @@ namespace storm {
             
             template<typename ValueType>
             template<typename MatrixValueType>
-            void StandardRewardModel<ValueType>::reduceToStateBasedRewards(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, bool reduceToStateRewards) {
+            void StandardRewardModel<ValueType>::reduceToStateBasedRewards(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, bool reduceToStateRewards, std::vector<MatrixValueType> const* weights) {
                 if (this->hasTransitionRewards()) {
                     if (this->hasStateActionRewards()) {
                         storm::utility::vector::addVectors<ValueType>(this->getStateActionRewardVector(), transitionMatrix.getPointwiseProductRowSumVector(this->getTransitionRewardMatrix()), this->getStateActionRewardVector());
@@ -177,10 +177,21 @@ namespace storm {
                 
                 if (reduceToStateRewards && this->hasStateActionRewards()) {
                     STORM_LOG_THROW(transitionMatrix.getRowGroupCount() == this->getStateActionRewardVector().size(), storm::exceptions::InvalidOperationException, "The reduction to state rewards is only possible if the size of the action reward vector equals the number of states.");
-                    if (this->hasStateRewards()) {
-                        storm::utility::vector::addVectors<ValueType>(this->getStateActionRewardVector(), this->getStateRewardVector(), this->getStateRewardVector());
+                    if (weights) {
+                        if (this->hasStateRewards()) {
+                            storm::utility::vector::applyPointwise<ValueType, MatrixValueType, ValueType>(this->getStateActionRewardVector(), *weights, this->getStateRewardVector(),
+                                                                   [] (ValueType const& sar, MatrixValueType const& w, ValueType const& sr) -> ValueType {
+                                                                       return sr + w * sar; });
+                        } else {
+                            this->optionalStateRewardVector = std::move(this->optionalStateActionRewardVector);
+                            storm::utility::vector::applyPointwise<ValueType, MatrixValueType, ValueType>(this->optionalStateRewardVector.get(), *weights, this->optionalStateRewardVector.get(), [] (ValueType const& r, MatrixValueType const& w) { return w * r; } );
+                        }
                     } else {
-                        this->optionalStateRewardVector = std::move(this->optionalStateActionRewardVector);
+                        if (this->hasStateRewards()) {
+                            storm::utility::vector::addVectors<ValueType>(this->getStateActionRewardVector(), this->getStateRewardVector(), this->getStateRewardVector());
+                        } else {
+                            this->optionalStateRewardVector = std::move(this->optionalStateActionRewardVector);
+                        }
                     }
                     this->optionalStateActionRewardVector = boost::none;
                 }
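
Read together with the Ctmc change above, which passes its exit-rate vector as the new weights argument, the weighted branch amounts to the following per-state update, with E(s) the weight of state s (a sketch of the semantics, not code from the patch):

    with state rewards present:   stateReward'(s) = stateReward(s) + E(s) * stateActionReward(s)
    without state rewards:        stateReward'(s) = E(s) * stateActionReward(s)
    without weights (old path):   stateReward'(s) = stateReward(s) + stateActionReward(s)

In every case the state-action reward vector is cleared afterwards, so the model ends up with state rewards only while expected rewards are preserved, as the documentation of reduceToStateBasedRewards states.
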
@@ -201,7 +212,7 @@ namespace storm {
             
             template<typename ValueType>
             template<typename MatrixValueType>
-            std::vector<ValueType> StandardRewardModel<ValueType>::getTotalRewardVector(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, std::vector<MatrixValueType> const& weights, bool scaleTransAndActions) const {
+            std::vector<ValueType> StandardRewardModel<ValueType>::getTotalRewardVector(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, std::vector<MatrixValueType> const& weights) const {
                 std::vector<ValueType> result;
                 if (this->hasTransitionRewards()) {
                     result = transitionMatrix.getPointwiseProductRowSumVector(this->getTransitionRewardMatrix());
@@ -382,13 +393,13 @@ namespace storm {
             // Explicitly instantiate the class.
             template std::vector<double> StandardRewardModel<double>::getTotalRewardVector(storm::storage::SparseMatrix<double> const& transitionMatrix) const;
             template std::vector<double> StandardRewardModel<double>::getTotalRewardVector(uint_fast64_t numberOfRows, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::BitVector const& filter) const;
-            template std::vector<double> StandardRewardModel<double>::getTotalRewardVector(storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& weights, bool scaleTransAndActions) const;
+            template std::vector<double> StandardRewardModel<double>::getTotalRewardVector(storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& weights) const;
             template std::vector<double> StandardRewardModel<double>::getTotalActionRewardVector(storm::storage::SparseMatrix<double> const& transitionMatrix,  std::vector<double> const& stateRewardWeights) const;
             template storm::storage::BitVector StandardRewardModel<double>::getStatesWithZeroReward(storm::storage::SparseMatrix<double> const& transitionMatrix) const;
             template storm::storage::BitVector StandardRewardModel<double>::getChoicesWithZeroReward(storm::storage::SparseMatrix<double> const& transitionMatrix) const;
             template double StandardRewardModel<double>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<double> const& transitionMatrix, double const& stateRewardWeight, double const& actionRewardWeight) const;
 
-            template void StandardRewardModel<double>::reduceToStateBasedRewards(storm::storage::SparseMatrix<double> const& transitionMatrix, bool reduceToStateRewards);
+            template void StandardRewardModel<double>::reduceToStateBasedRewards(storm::storage::SparseMatrix<double> const& transitionMatrix, bool reduceToStateRewards, std::vector<double> const* weights);
             template void StandardRewardModel<double>::setStateActionReward(uint_fast64_t choiceIndex, double const & newValue);
             template void StandardRewardModel<double>::setStateReward(uint_fast64_t state, double const & newValue);
             template class StandardRewardModel<double>;
@@ -396,9 +407,9 @@ namespace storm {
             
             template std::vector<float> StandardRewardModel<float>::getTotalRewardVector(uint_fast64_t numberOfRows, storm::storage::SparseMatrix<float> const& transitionMatrix, storm::storage::BitVector const& filter) const;
             template std::vector<float> StandardRewardModel<float>::getTotalRewardVector(storm::storage::SparseMatrix<float> const& transitionMatrix) const;
-            template std::vector<float> StandardRewardModel<float>::getTotalRewardVector(storm::storage::SparseMatrix<float> const& transitionMatrix, std::vector<float> const& weights, bool scaleTransAndActions) const;
+            template std::vector<float> StandardRewardModel<float>::getTotalRewardVector(storm::storage::SparseMatrix<float> const& transitionMatrix, std::vector<float> const& weights) const;
             template std::vector<float> StandardRewardModel<float>::getTotalActionRewardVector(storm::storage::SparseMatrix<float> const& transitionMatrix,  std::vector<float> const& stateRewardWeights) const;
-            template void StandardRewardModel<float>::reduceToStateBasedRewards(storm::storage::SparseMatrix<float> const& transitionMatrix, bool reduceToStateRewards);
+            template void StandardRewardModel<float>::reduceToStateBasedRewards(storm::storage::SparseMatrix<float> const& transitionMatrix, bool reduceToStateRewards, std::vector<float> const* weights);
             template void StandardRewardModel<float>::setStateActionReward(uint_fast64_t choiceIndex, float const & newValue);
             template void StandardRewardModel<float>::setStateReward(uint_fast64_t state, float const & newValue);
             template class StandardRewardModel<float>;
@@ -407,12 +418,12 @@ namespace storm {
 #ifdef STORM_HAVE_CARL
             template std::vector<storm::RationalNumber> StandardRewardModel<storm::RationalNumber>::getTotalRewardVector(uint_fast64_t numberOfRows, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::BitVector const& filter) const;
             template std::vector<storm::RationalNumber> StandardRewardModel<storm::RationalNumber>::getTotalRewardVector(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix) const;
-            template std::vector<storm::RationalNumber> StandardRewardModel<storm::RationalNumber>::getTotalRewardVector(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& weights, bool scaleTransAndActions) const;
+            template std::vector<storm::RationalNumber> StandardRewardModel<storm::RationalNumber>::getTotalRewardVector(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& weights) const;
             template std::vector<storm::RationalNumber> StandardRewardModel<storm::RationalNumber>::getTotalActionRewardVector(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix,  std::vector<storm::RationalNumber> const& stateRewardWeights) const;
             template storm::storage::BitVector StandardRewardModel<storm::RationalNumber>::getStatesWithZeroReward(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix) const;
             template storm::storage::BitVector StandardRewardModel<storm::RationalNumber>::getChoicesWithZeroReward(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix) const;
             template storm::RationalNumber StandardRewardModel<storm::RationalNumber>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::RationalNumber const& stateRewardWeight, storm::RationalNumber const& actionRewardWeight) const;
-            template void StandardRewardModel<storm::RationalNumber>::reduceToStateBasedRewards(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, bool reduceToStateRewards);
+            template void StandardRewardModel<storm::RationalNumber>::reduceToStateBasedRewards(storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, bool reduceToStateRewards, std::vector<storm::RationalNumber> const* weights);
             template void StandardRewardModel<storm::RationalNumber>::setStateActionReward(uint_fast64_t choiceIndex, storm::RationalNumber const & newValue);
             template void StandardRewardModel<storm::RationalNumber>::setStateReward(uint_fast64_t state, storm::RationalNumber const & newValue);
             template class StandardRewardModel<storm::RationalNumber>;
@@ -420,13 +431,13 @@ namespace storm {
 
             template std::vector<storm::RationalFunction> StandardRewardModel<storm::RationalFunction>::getTotalRewardVector(uint_fast64_t numberOfRows, storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, storm::storage::BitVector const& filter) const;
             template std::vector<storm::RationalFunction> StandardRewardModel<storm::RationalFunction>::getTotalRewardVector(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix) const;
-            template std::vector<storm::RationalFunction> StandardRewardModel<storm::RationalFunction>::getTotalRewardVector(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, std::vector<storm::RationalFunction> const& weights, bool scaleTransAndActions) const;
+            template std::vector<storm::RationalFunction> StandardRewardModel<storm::RationalFunction>::getTotalRewardVector(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, std::vector<storm::RationalFunction> const& weights) const;
             template storm::storage::BitVector StandardRewardModel<storm::RationalFunction>::getStatesWithZeroReward(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix) const;
             template storm::storage::BitVector StandardRewardModel<storm::RationalFunction>::getChoicesWithZeroReward(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix) const;
 
             template std::vector<storm::RationalFunction> StandardRewardModel<storm::RationalFunction>::getTotalActionRewardVector(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix,  std::vector<storm::RationalFunction> const& stateRewardWeights) const;
             template storm::RationalFunction StandardRewardModel<storm::RationalFunction>::getTotalStateActionReward(uint_fast64_t stateIndex, uint_fast64_t choiceIndex, storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, storm::RationalFunction const& stateRewardWeight, storm::RationalFunction const& actionRewardWeight) const;
-            template void StandardRewardModel<storm::RationalFunction>::reduceToStateBasedRewards(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, bool reduceToStateRewards);
+            template void StandardRewardModel<storm::RationalFunction>::reduceToStateBasedRewards(storm::storage::SparseMatrix<storm::RationalFunction> const& transitionMatrix, bool reduceToStateRewards, std::vector<storm::RationalFunction> const* weights);
             template void StandardRewardModel<storm::RationalFunction>::setStateActionReward(uint_fast64_t choiceIndex, storm::RationalFunction const & newValue);
             template void StandardRewardModel<storm::RationalFunction>::setStateReward(uint_fast64_t state, storm::RationalFunction const & newValue);
             template class StandardRewardModel<storm::RationalFunction>;
@@ -434,13 +445,13 @@ namespace storm {
 
             template std::vector<storm::Interval> StandardRewardModel<storm::Interval>::getTotalRewardVector(uint_fast64_t numberOfRows, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::BitVector const& filter) const;
             template std::vector<storm::Interval> StandardRewardModel<storm::Interval>::getTotalRewardVector(storm::storage::SparseMatrix<double> const& transitionMatrix) const;
-            template std::vector<storm::Interval> StandardRewardModel<storm::Interval>::getTotalRewardVector(storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& weights, bool scaleTransAndActions) const;
+            template std::vector<storm::Interval> StandardRewardModel<storm::Interval>::getTotalRewardVector(storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& weights) const;
             template std::vector<storm::Interval> StandardRewardModel<storm::Interval>::getTotalActionRewardVector(storm::storage::SparseMatrix<double> const& transitionMatrix,  std::vector<double> const& stateRewardWeights) const;
             template void StandardRewardModel<storm::Interval>::setStateActionReward(uint_fast64_t choiceIndex, double const & newValue);
             template void StandardRewardModel<storm::Interval>::setStateActionReward(uint_fast64_t choiceIndex, storm::Interval const & newValue);
             template void StandardRewardModel<storm::Interval>::setStateReward(uint_fast64_t state, double const & newValue);
             template void StandardRewardModel<storm::Interval>::setStateReward(uint_fast64_t state, storm::Interval const & newValue);
-            template void StandardRewardModel<storm::Interval>::reduceToStateBasedRewards(storm::storage::SparseMatrix<double> const& transitionMatrix, bool reduceToStateRewards);
+            template void StandardRewardModel<storm::Interval>::reduceToStateBasedRewards(storm::storage::SparseMatrix<double> const& transitionMatrix, bool reduceToStateRewards, std::vector<double> const* weights);
             template class StandardRewardModel<storm::Interval>;
             template std::ostream& operator<<<storm::Interval>(std::ostream& out, StandardRewardModel<storm::Interval> const& rewardModel);
 #endif
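
The weighted variant added above computes, for every state i, the reduced state reward sr_i + w_i * sar_i when a state reward vector is present, and w_i * sar_i otherwise; without a weight vector the previous behaviour (plain summation, respectively moving the state-action rewards) is unchanged. As a small invented example: for state-action rewards sar = (2, 3), weights w = (1/2, 1/4), and no state rewards, the resulting state reward vector is (1, 3/4).
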
diff --git a/src/storm/models/sparse/StandardRewardModel.h b/src/storm/models/sparse/StandardRewardModel.h
index c58f38312..33303f380 100644
--- a/src/storm/models/sparse/StandardRewardModel.h
+++ b/src/storm/models/sparse/StandardRewardModel.h
@@ -23,9 +23,9 @@ namespace storm {
                  * @param optionalStateActionRewardVector The reward values associated with state-action pairs.
                  * @param optionalTransitionRewardMatrix The reward values associated with the transitions of the model.
                  */
-                StandardRewardModel(boost::optional<std::vector<ValueType>> const& optionalStateRewardVector = boost::optional<std::vector<ValueType>>(),
-                                    boost::optional<std::vector<ValueType>> const& optionalStateActionRewardVector = boost::optional<std::vector<ValueType>>(),
-                                    boost::optional<storm::storage::SparseMatrix<ValueType>> const& optionalTransitionRewardMatrix = boost::optional<storm::storage::SparseMatrix<ValueType>>());
+                StandardRewardModel(boost::optional<std::vector<ValueType>> const& optionalStateRewardVector = boost::none,
+                                    boost::optional<std::vector<ValueType>> const& optionalStateActionRewardVector = boost::none,
+                                    boost::optional<storm::storage::SparseMatrix<ValueType>> const& optionalTransitionRewardMatrix = boost::none);
                 
                 /*!
                  * Constructs a reward model by moving the given data.
@@ -34,9 +34,9 @@ namespace storm {
                  * @param optionalStateActionRewardVector The reward values associated with state-action pairs.
                  * @param optionalTransitionRewardMatrix The reward values associated with the transitions of the model.
                  */
-                StandardRewardModel(boost::optional<std::vector<ValueType>>&& optionalStateRewardVector = boost::optional<std::vector<ValueType>>(),
-                            boost::optional<std::vector<ValueType>>&& optionalStateActionRewardVector = boost::optional<std::vector<ValueType>>(),
-                            boost::optional<storm::storage::SparseMatrix<ValueType>>&& optionalTransitionRewardMatrix = boost::optional<storm::storage::SparseMatrix<ValueType>>());
+                StandardRewardModel(boost::optional<std::vector<ValueType>>&& optionalStateRewardVector = boost::none,
+                                    boost::optional<std::vector<ValueType>>&& optionalStateActionRewardVector = boost::none,
+                                    boost::optional<storm::storage::SparseMatrix<ValueType>>&& optionalTransitionRewardMatrix = boost::none);
                 
                 StandardRewardModel(StandardRewardModel<ValueType> const& dtmc) = default;
                 StandardRewardModel& operator=(StandardRewardModel<ValueType> const& dtmc) = default;
@@ -191,9 +191,13 @@ namespace storm {
                  * but not all reward-based properties.
                  *
                  * @param transitionMatrix The transition matrix that is used to weight the rewards in the reward matrix.
+                 * @param reduceToStateRewards If set, the state-action rewards and the state rewards are summed up so
+                 * that only a state reward vector remains.
+                 * @param weights If given and the reduction to state rewards is enabled, this vector is used to
+                 * weight the state-action and transition rewards.
                  */
                 template<typename MatrixValueType>
-                void reduceToStateBasedRewards(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, bool reduceToStateRewards = false);
+                void reduceToStateBasedRewards(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, bool reduceToStateRewards = false, std::vector<MatrixValueType> const* weights = nullptr);
                 
                 /*!
                  * Creates a vector representing the complete reward vector based on the state-, state-action- and
@@ -211,12 +215,10 @@ namespace storm {
                  *
                  * @param transitionMatrix The matrix that is used to weight the values of the transition reward matrix.
                  * @param weights A vector used for scaling the entries of transition and/or state-action rewards (if present).
-                 * @param scaleTransAndActions If true both transition rewards and state-action rewards are scaled by the
-                 * weights. Otherwise, only the state-action rewards are scaled.
                  * @return The full state-action reward vector.
                  */
                 template<typename MatrixValueType>
-                std::vector<ValueType> getTotalRewardVector(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, std::vector<MatrixValueType> const& weights, bool scaleTransAndActions) const;
+                std::vector<ValueType> getTotalRewardVector(storm::storage::SparseMatrix<MatrixValueType> const& transitionMatrix, std::vector<MatrixValueType> const& weights) const;
                 
                 /*!
                  * Creates a vector representing the complete reward vector based on the state-, state-action- and
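
A minimal, hypothetical usage sketch of the changed sparse StandardRewardModel interface follows. Everything concrete here (the two-state matrix, the reward values, the weights) is invented for illustration; only the two member functions and their new parameter lists are taken from this patch.

#include <vector>
#include <boost/optional.hpp>

#include "storm/models/sparse/StandardRewardModel.h"
#include "storm/storage/SparseMatrix.h"

int main() {
    // Two-state chain: 0 -> 1 and 1 -> 1, each with probability one.
    storm::storage::SparseMatrixBuilder<double> builder(2, 2, 2);
    builder.addNextValue(0, 1, 1.0);
    builder.addNextValue(1, 1, 1.0);
    storm::storage::SparseMatrix<double> transitionMatrix = builder.build();

    // A reward model that only has state-action rewards.
    storm::models::sparse::StandardRewardModel<double> rewardModel(boost::none, std::vector<double>{2.0, 3.0});

    // New optional third argument: the state-action rewards are weighted entry-wise
    // while being turned into state rewards.
    std::vector<double> weights{0.5, 0.25};
    rewardModel.reduceToStateBasedRewards(transitionMatrix, true, &weights);

    // The 'scaleTransAndActions' flag is gone; the given weights are always applied.
    std::vector<double> total = rewardModel.getTotalRewardVector(transitionMatrix, weights);
    return 0;
}

With the numbers above, the reduction moves the state-action rewards (2, 3) into the state reward vector and scales them entry-wise by the weights, yielding (1, 0.75).
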
diff --git a/src/storm/models/symbolic/Ctmc.cpp b/src/storm/models/symbolic/Ctmc.cpp
index 409a1fcd2..e94079efc 100644
--- a/src/storm/models/symbolic/Ctmc.cpp
+++ b/src/storm/models/symbolic/Ctmc.cpp
@@ -21,11 +21,10 @@ namespace storm {
                                         std::set<storm::expressions::Variable> const& rowVariables,
                                         std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                         std::set<storm::expressions::Variable> const& columnVariables,
-                                        std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                         std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                         std::map<std::string, storm::expressions::Expression> labelToExpressionMap,
                                         std::unordered_map<std::string, RewardModelType> const& rewardModels)
-            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Ctmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, columnExpressionAdapter, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels) {
+            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Ctmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels) {
                 // Intentionally left empty.
             }
 
@@ -39,11 +38,41 @@ namespace storm {
                                         std::set<storm::expressions::Variable> const& rowVariables,
                                         std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                         std::set<storm::expressions::Variable> const& columnVariables,
-                                        std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                         std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                         std::map<std::string, storm::expressions::Expression> labelToExpressionMap,
                                         std::unordered_map<std::string, RewardModelType> const& rewardModels)
-            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Ctmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, columnExpressionAdapter, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels), exitRates(exitRateVector) {
+            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Ctmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels), exitRates(exitRateVector) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            Ctmc<Type, ValueType>::Ctmc(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                        storm::dd::Bdd<Type> reachableStates,
+                                        storm::dd::Bdd<Type> initialStates,
+                                        storm::dd::Bdd<Type> deadlockStates,
+                                        storm::dd::Add<Type, ValueType> transitionMatrix,
+                                        std::set<storm::expressions::Variable> const& rowVariables,
+                                        std::set<storm::expressions::Variable> const& columnVariables,
+                                        std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                        std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap,
+                                        std::unordered_map<std::string, RewardModelType> const& rewardModels)
+            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Ctmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, columnVariables, rowColumnMetaVariablePairs, labelToBddMap, rewardModels) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            Ctmc<Type, ValueType>::Ctmc(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                        storm::dd::Bdd<Type> reachableStates,
+                                        storm::dd::Bdd<Type> initialStates,
+                                        storm::dd::Bdd<Type> deadlockStates,
+                                        storm::dd::Add<Type, ValueType> transitionMatrix,
+                                        boost::optional<storm::dd::Add<Type, ValueType>> exitRateVector,
+                                        std::set<storm::expressions::Variable> const& rowVariables,
+                                        std::set<storm::expressions::Variable> const& columnVariables,
+                                        std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                        std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap,
+                                        std::unordered_map<std::string, RewardModelType> const& rewardModels)
+            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Ctmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, columnVariables, rowColumnMetaVariablePairs, labelToBddMap, rewardModels), exitRates(exitRateVector) {
                 // Intentionally left empty.
             }
 
diff --git a/src/storm/models/symbolic/Ctmc.h b/src/storm/models/symbolic/Ctmc.h
index c0c4054a3..68c4e7b35 100644
--- a/src/storm/models/symbolic/Ctmc.h
+++ b/src/storm/models/symbolic/Ctmc.h
@@ -36,8 +36,6 @@ namespace storm {
                  * @param rowExpressionAdapter An object that can be used to translate expressions in terms of the row
                  * meta variables.
                  * @param columVariables The set of column meta variables used in the DDs.
-                 * @param columnExpressionAdapter An object that can be used to translate expressions in terms of the
-                 * column meta variables.
                  * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
                  * @param labelToExpressionMap A mapping from label names to their defining expressions.
                  * @param rewardModels The reward models associated with the model.
@@ -50,7 +48,6 @@ namespace storm {
                      std::set<storm::expressions::Variable> const& rowVariables,
                      std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                      std::set<storm::expressions::Variable> const& columnVariables,
-                     std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                      std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                      std::map<std::string, storm::expressions::Expression> labelToExpressionMap = std::map<std::string, storm::expressions::Expression>(),
                      std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
@@ -68,8 +65,6 @@ namespace storm {
                  * @param rowExpressionAdapter An object that can be used to translate expressions in terms of the row
                  * meta variables.
                  * @param columVariables The set of column meta variables used in the DDs.
-                 * @param columnExpressionAdapter An object that can be used to translate expressions in terms of the
-                 * column meta variables.
                  * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
                  * @param labelToExpressionMap A mapping from label names to their defining expressions.
                  * @param rewardModels The reward models associated with the model.
@@ -83,11 +78,62 @@ namespace storm {
                      std::set<storm::expressions::Variable> const& rowVariables,
                      std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                      std::set<storm::expressions::Variable> const& columnVariables,
-                     std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                      std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                      std::map<std::string, storm::expressions::Expression> labelToExpressionMap = std::map<std::string, storm::expressions::Expression>(),
                      std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
 
+                /*!
+                 * Constructs a model from the given data.
+                 *
+                 * @param manager The manager responsible for the decision diagrams.
+                 * @param reachableStates A DD representing the reachable states.
+                 * @param initialStates A DD representing the initial states of the model.
+                 * @param deadlockStates A DD representing the deadlock states of the model.
+                 * @param transitionMatrix The matrix representing the transitions in the model.
+                 * @param rowVariables The set of row meta variables used in the DDs.
+                 * @param columnVariables The set of column meta variables used in the DDs.
+                 * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
+                 * @param labelToBddMap A mapping from label names to their defining BDDs.
+                 * @param rewardModels The reward models associated with the model.
+                 */
+                Ctmc(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                     storm::dd::Bdd<Type> reachableStates,
+                     storm::dd::Bdd<Type> initialStates,
+                     storm::dd::Bdd<Type> deadlockStates,
+                     storm::dd::Add<Type, ValueType> transitionMatrix,
+                     std::set<storm::expressions::Variable> const& rowVariables,
+                     std::set<storm::expressions::Variable> const& columnVariables,
+                     std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                     std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap = std::map<std::string, storm::dd::Bdd<Type>>(),
+                     std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
+                /*!
+                 * Constructs a model from the given data.
+                 *
+                 * @param manager The manager responsible for the decision diagrams.
+                 * @param reachableStates A DD representing the reachable states.
+                 * @param initialStates A DD representing the initial states of the model.
+                 * @param deadlockStates A DD representing the deadlock states of the model.
+                 * @param transitionMatrix The matrix representing the transitions in the model.
+                 * @param exitRateVector The vector specifying the exit rates for the states.
+                 * @param rowVariables The set of row meta variables used in the DDs.
+                 * @param columnVariables The set of column meta variables used in the DDs.
+                 * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
+                 * @param labelToBddMap A mapping from label names to their defining BDDs.
+                 * @param rewardModels The reward models associated with the model.
+                 */
+                Ctmc(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                     storm::dd::Bdd<Type> reachableStates,
+                     storm::dd::Bdd<Type> initialStates,
+                     storm::dd::Bdd<Type> deadlockStates,
+                     storm::dd::Add<Type, ValueType> transitionMatrix,
+                     boost::optional<storm::dd::Add<Type, ValueType>> exitRateVector,
+                     std::set<storm::expressions::Variable> const& rowVariables,
+                     std::set<storm::expressions::Variable> const& columnVariables,
+                     std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                     std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap = std::map<std::string, storm::dd::Bdd<Type>>(),
+                     std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
                 /*!
                  * Retrieves the exit rate vector of the CTMC.
                  *
diff --git a/src/storm/models/symbolic/DeterministicModel.cpp b/src/storm/models/symbolic/DeterministicModel.cpp
index 4a2f629f8..5eba1bbc9 100644
--- a/src/storm/models/symbolic/DeterministicModel.cpp
+++ b/src/storm/models/symbolic/DeterministicModel.cpp
@@ -22,14 +22,36 @@ namespace storm {
                                                                     std::set<storm::expressions::Variable> const& rowVariables,
                                                                     std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                                                     std::set<storm::expressions::Variable> const& columnVariables,
-                                                                    std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                                                     std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                                                     std::map<std::string, storm::expressions::Expression> labelToExpressionMap,
                                                                     std::unordered_map<std::string, RewardModelType> const& rewardModels)
-            : Model<Type, ValueType>(modelType, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, columnExpressionAdapter, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels) {
+            : Model<Type, ValueType>(modelType, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels) {
                 // Intentionally left empty.
             }
             
+            template<storm::dd::DdType Type, typename ValueType>
+            DeterministicModel<Type, ValueType>::DeterministicModel(storm::models::ModelType const& modelType,
+                                                                    std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                                                    storm::dd::Bdd<Type> reachableStates,
+                                                                    storm::dd::Bdd<Type> initialStates,
+                                                                    storm::dd::Bdd<Type> deadlockStates,
+                                                                    storm::dd::Add<Type, ValueType> transitionMatrix,
+                                                                    std::set<storm::expressions::Variable> const& rowVariables,
+                                                                    std::set<storm::expressions::Variable> const& columnVariables,
+                                                                    std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                                                    std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap,
+                                                                    std::unordered_map<std::string, RewardModelType> const& rewardModels)
+            : Model<Type, ValueType>(modelType, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, columnVariables, rowColumnMetaVariablePairs, labelToBddMap, rewardModels) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            void DeterministicModel<Type, ValueType>::reduceToStateBasedRewards() {
+                for (auto& rewardModel : this->getRewardModels()) {
+                    rewardModel.second.reduceToStateBasedRewards(this->getTransitionMatrix(), this->getRowVariables(), this->getColumnVariables(), true);
+                }
+            }
+            
             // Explicitly instantiate the template class.
             template class DeterministicModel<storm::dd::DdType::CUDD>;
             template class DeterministicModel<storm::dd::DdType::Sylvan>;
diff --git a/src/storm/models/symbolic/DeterministicModel.h b/src/storm/models/symbolic/DeterministicModel.h
index af8bc1fc1..a8fde85b4 100644
--- a/src/storm/models/symbolic/DeterministicModel.h
+++ b/src/storm/models/symbolic/DeterministicModel.h
@@ -37,8 +37,6 @@ namespace storm {
                  * @param rowExpressionAdapter An object that can be used to translate expressions in terms of the row
                  * meta variables.
                  * @param columVariables The set of column meta variables used in the DDs.
-                 * @param columnExpressionAdapter An object that can be used to translate expressions in terms of the
-                 * column meta variables.
                  * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
                  * @param labelToExpressionMap A mapping from label names to their defining expressions.
                  * @param rewardModels The reward models associated with the model.
@@ -52,10 +50,38 @@ namespace storm {
                                    std::set<storm::expressions::Variable> const& rowVariables,
                                    std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                    std::set<storm::expressions::Variable> const& columnVariables,
-                                   std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                    std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                    std::map<std::string, storm::expressions::Expression> labelToExpressionMap = std::map<std::string, storm::expressions::Expression>(),
                                    std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
+                /*!
+                 * Constructs a model from the given data.
+                 *
+                 * @param modelType The type of the model.
+                 * @param manager The manager responsible for the decision diagrams.
+                 * @param reachableStates A DD representing the reachable states.
+                 * @param initialStates A DD representing the initial states of the model.
+                 * @param deadlockStates A DD representing the deadlock states of the model.
+                 * @param transitionMatrix The matrix representing the transitions in the model.
+                 * @param rowVariables The set of row meta variables used in the DDs.
+                 * @param columnVariables The set of column meta variables used in the DDs.
+                 * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
+                 * @param labelToBddMap A mapping from label names to their defining BDDs.
+                 * @param rewardModels The reward models associated with the model.
+                 */
+                DeterministicModel(storm::models::ModelType const& modelType,
+                                   std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                   storm::dd::Bdd<Type> reachableStates,
+                                   storm::dd::Bdd<Type> initialStates,
+                                   storm::dd::Bdd<Type> deadlockStates,
+                                   storm::dd::Add<Type, ValueType> transitionMatrix,
+                                   std::set<storm::expressions::Variable> const& rowVariables,
+                                   std::set<storm::expressions::Variable> const& columnVariables,
+                                   std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                   std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap = std::map<std::string, storm::dd::Bdd<Type>>(),
+                                   std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
+                virtual void reduceToStateBasedRewards() override;
             };
             
         } // namespace symbolic
diff --git a/src/storm/models/symbolic/Dtmc.cpp b/src/storm/models/symbolic/Dtmc.cpp
index 08ade71f9..fdd478b71 100644
--- a/src/storm/models/symbolic/Dtmc.cpp
+++ b/src/storm/models/symbolic/Dtmc.cpp
@@ -21,11 +21,25 @@ namespace storm {
                                         std::set<storm::expressions::Variable> const& rowVariables,
                                         std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                         std::set<storm::expressions::Variable> const& columnVariables,
-                                        std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                         std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                         std::map<std::string, storm::expressions::Expression> labelToExpressionMap,
                                         std::unordered_map<std::string, RewardModelType> const& rewardModels)
-            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Dtmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, columnExpressionAdapter, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels) {
+            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Dtmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            Dtmc<Type, ValueType>::Dtmc(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                        storm::dd::Bdd<Type> reachableStates,
+                                        storm::dd::Bdd<Type> initialStates,
+                                        storm::dd::Bdd<Type> deadlockStates,
+                                        storm::dd::Add<Type, ValueType> transitionMatrix,
+                                        std::set<storm::expressions::Variable> const& rowVariables,
+                                        std::set<storm::expressions::Variable> const& columnVariables,
+                                        std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                        std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap,
+                                        std::unordered_map<std::string, RewardModelType> const& rewardModels)
+            : DeterministicModel<Type, ValueType>(storm::models::ModelType::Dtmc, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, columnVariables, rowColumnMetaVariablePairs, labelToBddMap, rewardModels) {
                 // Intentionally left empty.
             }
             
diff --git a/src/storm/models/symbolic/Dtmc.h b/src/storm/models/symbolic/Dtmc.h
index 538ff4977..ebe837e1c 100644
--- a/src/storm/models/symbolic/Dtmc.h
+++ b/src/storm/models/symbolic/Dtmc.h
@@ -36,8 +36,6 @@ namespace storm {
                  * @param rowExpressionAdapter An object that can be used to translate expressions in terms of the row
                  * meta variables.
                  * @param columVariables The set of column meta variables used in the DDs.
-                 * @param columnExpressionAdapter An object that can be used to translate expressions in terms of the
-                 * column meta variables.
                  * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
                  * @param labelToExpressionMap A mapping from label names to their defining expressions.
                  * @param rewardModels The reward models associated with the model.
@@ -50,10 +48,34 @@ namespace storm {
                      std::set<storm::expressions::Variable> const& rowVariables,
                      std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                      std::set<storm::expressions::Variable> const& columnVariables,
-                     std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                      std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                      std::map<std::string, storm::expressions::Expression> labelToExpressionMap = std::map<std::string, storm::expressions::Expression>(),
                      std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
+                /*!
+                 * Constructs a model from the given data.
+                 *
+                 * @param manager The manager responsible for the decision diagrams.
+                 * @param reachableStates A DD representing the reachable states.
+                 * @param initialStates A DD representing the initial states of the model.
+                 * @param deadlockStates A DD representing the deadlock states of the model.
+                 * @param transitionMatrix The matrix representing the transitions in the model.
+                 * @param rowVariables The set of row meta variables used in the DDs.
+                 * @param columnVariables The set of column meta variables used in the DDs.
+                 * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
+                 * @param labelToBddMap A mapping from label names to their defining BDDs.
+                 * @param rewardModels The reward models associated with the model.
+                 */
+                Dtmc(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                     storm::dd::Bdd<Type> reachableStates,
+                     storm::dd::Bdd<Type> initialStates,
+                     storm::dd::Bdd<Type> deadlockStates,
+                     storm::dd::Add<Type, ValueType> transitionMatrix,
+                     std::set<storm::expressions::Variable> const& rowVariables,
+                     std::set<storm::expressions::Variable> const& columnVariables,
+                     std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                     std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap = std::map<std::string, storm::dd::Bdd<Type>>(),
+                     std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
             };
             
         } // namespace symbolic
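
Below is a small, hypothetical construction sketch for the new BDD-label based constructors (not taken from the patch: the meta variable, the two-state chain, and the "goal" label are invented, and it assumes Storm's settings and DD libraries have been initialized as in a regular Storm run).

#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "storm/models/symbolic/Dtmc.h"
#include "storm/models/symbolic/StandardRewardModel.h"
#include "storm/storage/dd/Add.h"
#include "storm/storage/dd/Bdd.h"
#include "storm/storage/dd/DdManager.h"
#include "storm/storage/expressions/Variable.h"

int main() {
    constexpr storm::dd::DdType Lib = storm::dd::DdType::Sylvan;

    // One meta variable with domain {0, 1} encodes the two states of a tiny chain.
    auto manager = std::make_shared<storm::dd::DdManager<Lib>>();
    std::pair<storm::expressions::Variable, storm::expressions::Variable> s = manager->addMetaVariable("s", 0, 1);

    std::set<storm::expressions::Variable> rowVars = {s.first};
    std::set<storm::expressions::Variable> colVars = {s.second};
    std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> pairs = {s};

    // Transitions 0 -> 1 and 1 -> 1, each taken with probability one.
    storm::dd::Add<Lib, double> transitions =
        ((manager->getEncoding(s.first, 0) && manager->getEncoding(s.second, 1)) ||
         (manager->getEncoding(s.first, 1) && manager->getEncoding(s.second, 1))).toAdd<double>();

    storm::dd::Bdd<Lib> reachable = manager->getEncoding(s.first, 0) || manager->getEncoding(s.first, 1);
    storm::dd::Bdd<Lib> initial = manager->getEncoding(s.first, 0);
    storm::dd::Bdd<Lib> deadlocks = manager->getBddZero();

    // Custom labels are now handed over as BDDs; "init" and "deadlock" stay reserved
    // and are derived from the initial and deadlock state BDDs by the base class.
    std::map<std::string, storm::dd::Bdd<Lib>> labels;
    labels.emplace("goal", manager->getEncoding(s.first, 1));

    storm::models::symbolic::Dtmc<Lib, double> dtmc(manager, reachable, initial, deadlocks, transitions,
                                                    rowVars, colVars, pairs, labels);
    return 0;
}

The Ctmc and Mdp constructors added in this patch follow the same pattern (with an optional exit rate vector and the nondeterminism variables, respectively). Supplying a label map that already contains "init" or "deadlock" is rejected with a WrongFormatException, as enforced in the Model.cpp hunk further below.
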
diff --git a/src/storm/models/symbolic/Mdp.cpp b/src/storm/models/symbolic/Mdp.cpp
index d9df2263b..ee72d085e 100644
--- a/src/storm/models/symbolic/Mdp.cpp
+++ b/src/storm/models/symbolic/Mdp.cpp
@@ -21,12 +21,27 @@ namespace storm {
                                       std::set<storm::expressions::Variable> const& rowVariables,
                                       std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                       std::set<storm::expressions::Variable> const& columnVariables,
-                                      std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                       std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                       std::set<storm::expressions::Variable> const& nondeterminismVariables,
                                       std::map<std::string, storm::expressions::Expression> labelToExpressionMap,
                                       std::unordered_map<std::string, RewardModelType> const& rewardModels)
-            : NondeterministicModel<Type, ValueType>(storm::models::ModelType::Mdp, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, columnExpressionAdapter, rowColumnMetaVariablePairs, nondeterminismVariables, labelToExpressionMap, rewardModels) {
+            : NondeterministicModel<Type, ValueType>(storm::models::ModelType::Mdp, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, rowColumnMetaVariablePairs, nondeterminismVariables, labelToExpressionMap, rewardModels) {
+                // Intentionally left empty.
+            }
+
+            template<storm::dd::DdType Type, typename ValueType>
+            Mdp<Type, ValueType>::Mdp(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                      storm::dd::Bdd<Type> reachableStates,
+                                      storm::dd::Bdd<Type> initialStates,
+                                      storm::dd::Bdd<Type> deadlockStates,
+                                      storm::dd::Add<Type, ValueType> transitionMatrix,
+                                      std::set<storm::expressions::Variable> const& rowVariables,
+                                      std::set<storm::expressions::Variable> const& columnVariables,
+                                      std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                      std::set<storm::expressions::Variable> const& nondeterminismVariables,
+                                      std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap,
+                                      std::unordered_map<std::string, RewardModelType> const& rewardModels)
+            : NondeterministicModel<Type, ValueType>(storm::models::ModelType::Mdp, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, columnVariables, rowColumnMetaVariablePairs, nondeterminismVariables, labelToBddMap, rewardModels) {
                 // Intentionally left empty.
             }
             
diff --git a/src/storm/models/symbolic/Mdp.h b/src/storm/models/symbolic/Mdp.h
index ab5b59b99..771f0bd05 100644
--- a/src/storm/models/symbolic/Mdp.h
+++ b/src/storm/models/symbolic/Mdp.h
@@ -37,8 +37,6 @@ namespace storm {
                  * @param rowExpressionAdapter An object that can be used to translate expressions in terms of the row
                  * meta variables.
                  * @param columVariables The set of column meta variables used in the DDs.
-                 * @param columnExpressionAdapter An object that can be used to translate expressions in terms of the
-                 * column meta variables.
                  * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
                  * @param nondeterminismVariables The meta variables used to encode the nondeterminism in the model.
                  * @param labelToExpressionMap A mapping from label names to their defining expressions.
@@ -52,12 +50,39 @@ namespace storm {
                     std::set<storm::expressions::Variable> const& rowVariables,
                     std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                     std::set<storm::expressions::Variable> const& columnVariables,
-                    std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                     std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                     std::set<storm::expressions::Variable> const& nondeterminismVariables,
                     std::map<std::string, storm::expressions::Expression> labelToExpressionMap = std::map<std::string, storm::expressions::Expression>(),
                     std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
                 
+                /*!
+                 * Constructs a model from the given data.
+                 *
+                 * @param manager The manager responsible for the decision diagrams.
+                 * @param reachableStates A DD representing the reachable states.
+                 * @param initialStates A DD representing the initial states of the model.
+                 * @param deadlockStates A DD representing the deadlock states of the model.
+                 * @param transitionMatrix The matrix representing the transitions in the model.
+                 * @param rowVariables The set of row meta variables used in the DDs.
+                 * @param columnVariables The set of column meta variables used in the DDs.
+                 * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
+                 * @param nondeterminismVariables The meta variables used to encode the nondeterminism in the model.
+                 * @param labelToBddMap A mapping from label names to their defining BDDs.
+                 * @param rewardModels The reward models associated with the model.
+                 */
+                Mdp(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                    storm::dd::Bdd<Type> reachableStates,
+                    storm::dd::Bdd<Type> initialStates,
+                    storm::dd::Bdd<Type> deadlockStates,
+                    storm::dd::Add<Type, ValueType> transitionMatrix,
+                    std::set<storm::expressions::Variable> const& rowVariables,
+                    std::set<storm::expressions::Variable> const& columnVariables,
+                    std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                    std::set<storm::expressions::Variable> const& nondeterminismVariables,
+                    std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap = std::map<std::string, storm::dd::Bdd<Type>>(),
+                    std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
             };
             
         } // namespace symbolic
diff --git a/src/storm/models/symbolic/Model.cpp b/src/storm/models/symbolic/Model.cpp
index c2ca3650e..f468cc377 100644
--- a/src/storm/models/symbolic/Model.cpp
+++ b/src/storm/models/symbolic/Model.cpp
@@ -7,16 +7,13 @@
 
 #include "storm/adapters/AddExpressionAdapter.h"
 
-#include "storm/storage/dd/DdManager.h"
-#include "storm/storage/dd/Add.h"
-#include "storm/storage/dd/Bdd.h"
-
 #include "storm/models/symbolic/StandardRewardModel.h"
 
 #include "storm/utility/macros.h"
 #include "storm/utility/dd.h"
 
 #include "storm/exceptions/NotSupportedException.h"
+#include "storm/exceptions/WrongFormatException.h"
 
 namespace storm {
     namespace models {
@@ -31,12 +28,31 @@ namespace storm {
                                           std::set<storm::expressions::Variable> const& rowVariables,
                                           std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                           std::set<storm::expressions::Variable> const& columnVariables,
-                                          std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                           std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                           std::map<std::string, storm::expressions::Expression> labelToExpressionMap,
                                           std::unordered_map<std::string, RewardModelType> const& rewardModels)
-            : ModelBase(modelType), manager(manager), reachableStates(reachableStates), initialStates(initialStates), deadlockStates(deadlockStates), transitionMatrix(transitionMatrix), rowVariables(rowVariables), rowExpressionAdapter(rowExpressionAdapter), columnVariables(columnVariables), columnExpressionAdapter(columnExpressionAdapter), rowColumnMetaVariablePairs(rowColumnMetaVariablePairs), labelToExpressionMap(labelToExpressionMap), rewardModels(rewardModels) {
-                // Intentionally left empty.
+            : storm::models::Model<ValueType>(modelType), manager(manager), reachableStates(reachableStates), transitionMatrix(transitionMatrix), rowVariables(rowVariables), rowExpressionAdapter(rowExpressionAdapter), columnVariables(columnVariables), rowColumnMetaVariablePairs(rowColumnMetaVariablePairs), labelToExpressionMap(labelToExpressionMap), rewardModels(rewardModels) {
+                this->labelToBddMap.emplace("init", initialStates);
+                this->labelToBddMap.emplace("deadlock", deadlockStates);
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            Model<Type, ValueType>::Model(storm::models::ModelType const& modelType,
+                                          std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                          storm::dd::Bdd<Type> reachableStates,
+                                          storm::dd::Bdd<Type> initialStates,
+                                          storm::dd::Bdd<Type> deadlockStates,
+                                          storm::dd::Add<Type, ValueType> transitionMatrix,
+                                          std::set<storm::expressions::Variable> const& rowVariables,
+                                          std::set<storm::expressions::Variable> const& columnVariables,
+                                          std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                          std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap,
+                                          std::unordered_map<std::string, RewardModelType> const& rewardModels)
+            : storm::models::Model<ValueType>(modelType), manager(manager), reachableStates(reachableStates), transitionMatrix(transitionMatrix), rowVariables(rowVariables), rowExpressionAdapter(nullptr), columnVariables(columnVariables), rowColumnMetaVariablePairs(rowColumnMetaVariablePairs), labelToBddMap(labelToBddMap), rewardModels(rewardModels) {
+                STORM_LOG_THROW(this->labelToBddMap.find("init") == this->labelToBddMap.end(), storm::exceptions::WrongFormatException, "Illegal custom label 'init'.");
+                STORM_LOG_THROW(this->labelToBddMap.find("deadlock") == this->labelToBddMap.end(), storm::exceptions::WrongFormatException, "Illegal custom label 'deadlock'.");
+                this->labelToBddMap.emplace("init", initialStates);
+                this->labelToBddMap.emplace("deadlock", deadlockStates);
             }
             
             template<storm::dd::DdType Type, typename ValueType>
@@ -50,12 +66,7 @@ namespace storm {
             }
             
             template<storm::dd::DdType Type, typename ValueType>
-            storm::dd::DdManager<Type> const& Model<Type, ValueType>::getManager() const {
-                return *manager;
-            }
-            
-            template<storm::dd::DdType Type, typename ValueType>
-            storm::dd::DdManager<Type>& Model<Type, ValueType>::getManager() {
+            storm::dd::DdManager<Type>& Model<Type, ValueType>::getManager() const {
                 return *manager;
             }
             
@@ -71,18 +82,33 @@ namespace storm {
             
             template<storm::dd::DdType Type, typename ValueType>
             storm::dd::Bdd<Type> const& Model<Type, ValueType>::getInitialStates() const {
-                return initialStates;
+                return labelToBddMap.at("init");
             }
-
+            
             template<storm::dd::DdType Type, typename ValueType>
             storm::dd::Bdd<Type> const& Model<Type, ValueType>::getDeadlockStates() const {
-                return deadlockStates;
+                return labelToBddMap.at("deadlock");
             }
             
             template<storm::dd::DdType Type, typename ValueType>
             storm::dd::Bdd<Type> Model<Type, ValueType>::getStates(std::string const& label) const {
-                STORM_LOG_THROW(labelToExpressionMap.find(label) != labelToExpressionMap.end(), storm::exceptions::IllegalArgumentException, "The label " << label << " is invalid for the labeling of the model.");
-                return this->getStates(labelToExpressionMap.at(label));
+                // First check whether we have a BDD for this label.
+                auto bddIt = labelToBddMap.find(label);
+                if (bddIt != labelToBddMap.end()) {
+                    return bddIt->second;
+                } else {
+                    // If not, check for an expression we can translate.
+                    auto expressionIt = labelToExpressionMap.find(label);
+                    STORM_LOG_THROW(expressionIt != labelToExpressionMap.end(), storm::exceptions::IllegalArgumentException, "The label " << label << " is invalid for the labeling of the model.");
+                    return this->getStates(expressionIt->second);
+                }
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            storm::expressions::Expression Model<Type, ValueType>::getExpression(std::string const& label) const {
+                auto expressionIt = labelToExpressionMap.find(label);
+                STORM_LOG_THROW(expressionIt != labelToExpressionMap.end(), storm::exceptions::IllegalArgumentException, "Cannot retrieve the expression for the label " << label << ".");
+                return expressionIt->second;
             }
             
             template<storm::dd::DdType Type, typename ValueType>
@@ -92,17 +118,32 @@ namespace storm {
                 } else if (expression.isFalse()) {
                     return manager->getBddZero();
                 }
+                
+                // Look up the string equivalent of the expression.
+                std::stringstream stream;
+                stream << expression;
+                auto bddIt = labelToBddMap.find(stream.str());
+                if (bddIt != labelToBddMap.end()) {
+                    return bddIt->second;
+                }
+                
+                // Finally try to translate the expression with an adapter.
                 STORM_LOG_THROW(rowExpressionAdapter != nullptr, storm::exceptions::InvalidOperationException, "Cannot create BDD for expression without expression adapter.");
                 return rowExpressionAdapter->translateExpression(expression).toBdd() && this->reachableStates;
             }
             
             template<storm::dd::DdType Type, typename ValueType>
             bool Model<Type, ValueType>::hasLabel(std::string const& label) const {
-                auto labelIt = labelToExpressionMap.find(label);
-                if (labelIt != labelToExpressionMap.end()) {
+                auto bddIt = labelToBddMap.find(label);
+                if (bddIt != labelToBddMap.end()) {
+                    return true;
+                }
+                
+                auto expressionIt = labelToExpressionMap.find(label);
+                if (expressionIt != labelToExpressionMap.end()) {
                     return true;
                 } else {
-                    return label == "init" || label == "deadlock";
+                    return false;
                 }
             }
             
@@ -117,7 +158,7 @@ namespace storm {
             }
             
             template<storm::dd::DdType Type, typename ValueType>
-            storm::dd::Bdd<Type> Model<Type, ValueType>::getQualitativeTransitionMatrix() const {
+            storm::dd::Bdd<Type> Model<Type, ValueType>::getQualitativeTransitionMatrix(bool) const {
                 return this->getTransitionMatrix().notZero();
             }
             
@@ -131,6 +172,25 @@ namespace storm {
                 return columnVariables;
             }
             
+            template<storm::dd::DdType Type, typename ValueType>
+            std::set<storm::expressions::Variable> Model<Type, ValueType>::getRowAndNondeterminismVariables() const {
+                std::set<storm::expressions::Variable> result;
+                std::set_union(this->getRowVariables().begin(), this->getRowVariables().end(), this->getNondeterminismVariables().begin(), this->getNondeterminismVariables().end(), std::inserter(result, result.begin()));
+                return result;
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            std::set<storm::expressions::Variable> Model<Type, ValueType>::getColumnAndNondeterminismVariables() const {
+                std::set<storm::expressions::Variable> result;
+                std::set_union(this->getColumnVariables().begin(), this->getColumnVariables().end(), this->getNondeterminismVariables().begin(), this->getNondeterminismVariables().end(), std::inserter(result, result.begin()));
+                return result;
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            std::set<storm::expressions::Variable> const& Model<Type, ValueType>::getNondeterminismVariables() const {
+                return emptyVariableSet;
+            }
+            
             template<storm::dd::DdType Type, typename ValueType>
             std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& Model<Type, ValueType>::getRowColumnMetaVariablePairs() const {
                 return rowColumnMetaVariablePairs;
@@ -148,7 +208,7 @@ namespace storm {
             
             template<storm::dd::DdType Type, typename ValueType>
             storm::dd::Add<Type, ValueType> Model<Type, ValueType>::getRowColumnIdentity() const {
-                return storm::utility::dd::getRowColumnDiagonal<Type, ValueType>(this->getManager(), this->getRowColumnMetaVariablePairs());
+                return (storm::utility::dd::getRowColumnDiagonal<Type>(this->getManager(), this->getRowColumnMetaVariablePairs()) && this->getReachableStates()).template toAdd<ValueType>();
             }
             
             template<storm::dd::DdType Type, typename ValueType>
@@ -178,13 +238,19 @@ namespace storm {
                 STORM_LOG_THROW(this->hasUniqueRewardModel(), storm::exceptions::InvalidOperationException, "Cannot retrieve unique reward model, because there is no unique one.");
                 return this->rewardModels.cbegin()->second;
             }
-
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            std::string const& Model<Type, ValueType>::getUniqueRewardModelName() const {
+                STORM_LOG_THROW(this->hasUniqueRewardModel(), storm::exceptions::InvalidOperationException, "Cannot retrieve name of unique reward model, because there is no unique one.");
+                return this->rewardModels.cbegin()->first;
+            }
+            
             template<storm::dd::DdType Type, typename ValueType>
             typename Model<Type, ValueType>::RewardModelType& Model<Type, ValueType>::getUniqueRewardModel() {
                 STORM_LOG_THROW(this->hasUniqueRewardModel(), storm::exceptions::InvalidOperationException, "Cannot retrieve unique reward model, because there is no unique one.");
                 return this->rewardModels.begin()->second;
             }
-
+            
             template<storm::dd::DdType Type, typename ValueType>
             bool Model<Type, ValueType>::hasUniqueRewardModel() const {
                 return this->rewardModels.size() == 1;
@@ -194,18 +260,23 @@ namespace storm {
             bool Model<Type, ValueType>::hasRewardModel() const {
                 return !this->rewardModels.empty();
             }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            std::unordered_map<std::string, typename Model<Type, ValueType>::RewardModelType>& Model<Type, ValueType>::getRewardModels() {
+                return this->rewardModels;
+            }
 
             template<storm::dd::DdType Type, typename ValueType>
             std::unordered_map<std::string, typename Model<Type, ValueType>::RewardModelType> const& Model<Type, ValueType>::getRewardModels() const {
                 return this->rewardModels;
             }
-            
+
             template<storm::dd::DdType Type, typename ValueType>
             void Model<Type, ValueType>::printModelInformationToStream(std::ostream& out) const {
                 this->printModelInformationHeaderToStream(out);
                 this->printModelInformationFooterToStream(out);
             }
-
+            
             template<storm::dd::DdType Type, typename ValueType>
             std::vector<std::string> Model<Type, ValueType>::getLabels() const {
                 std::vector<std::string> labels;
@@ -228,7 +299,10 @@ namespace storm {
                 this->printRewardModelsInformationToStream(out);
                 this->printDdVariableInformationToStream(out);
                 out << std::endl;
-                out << "Labels: \t" << this->labelToExpressionMap.size() << std::endl;
+                out << "Labels: \t" << (this->labelToExpressionMap.size() + this->labelToBddMap.size()) << std::endl;
+                for (auto const& label : labelToBddMap) {
+                    out << "   * " << label.first << " -> " << label.second.getNonZeroCount() << " state(s) (" << label.second.getNodeCount() << " nodes)" << std::endl;
+                }
                 for (auto const& label : labelToExpressionMap) {
                     out << "   * " << label.first << std::endl;
                 }
@@ -277,7 +351,7 @@ namespace storm {
             std::set<storm::RationalFunctionVariable> const& Model<Type, ValueType>::getParameters() const {
                 STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "This value type does not support parameters.");
             }
-
+            
             template<>
             void Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::addParameters(std::set<storm::RationalFunctionVariable> const& parameters) {
                 this->parameters.insert(parameters.begin(), parameters.end());
@@ -287,13 +361,13 @@ namespace storm {
             std::set<storm::RationalFunctionVariable> const& Model<storm::dd::DdType::Sylvan, storm::RationalFunction>::getParameters() const {
                 return parameters;
             }
-
+            
             // Explicitly instantiate the template class.
             template class Model<storm::dd::DdType::CUDD, double>;
             template class Model<storm::dd::DdType::Sylvan, double>;
             
             template class Model<storm::dd::DdType::Sylvan, storm::RationalNumber>;
-			template class Model<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+            template class Model<storm::dd::DdType::Sylvan, storm::RationalFunction>;
         } // namespace symbolic
     } // namespace models
 } // namespace storm
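
A minimal sketch of the resulting lookup semantics (the helper name is ours): BDD-backed labels, which now include the map-based "init" and "deadlock", are resolved directly; only afterwards does getStates fall back to translating a defining expression via the row expression adapter.

    #include <stdexcept>
    #include <string>

    #include "storm/models/symbolic/Model.h"
    #include "storm/storage/dd/Bdd.h"
    #include "storm/storage/dd/DdType.h"

    // Hypothetical helper, not part of Storm: resolve a label on a symbolic model.
    // BDD-backed labels are found in labelToBddMap; anything else is answered via
    // its defining expression.
    template<storm::dd::DdType Type, typename ValueType>
    storm::dd::Bdd<Type> statesOf(storm::models::symbolic::Model<Type, ValueType> const& model, std::string const& label) {
        if (!model.hasLabel(label)) {
            // hasLabel no longer special-cases "init"/"deadlock"; they are ordinary map entries.
            throw std::invalid_argument("Unknown label: " + label);
        }
        return model.getStates(label);
    }
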
diff --git a/src/storm/models/symbolic/Model.h b/src/storm/models/symbolic/Model.h
index 0d2dac286..1bebd7fe5 100644
--- a/src/storm/models/symbolic/Model.h
+++ b/src/storm/models/symbolic/Model.h
@@ -9,7 +9,9 @@
 #include "storm/storage/expressions/Expression.h"
 #include "storm/storage/expressions/Variable.h"
 #include "storm/storage/dd/DdType.h"
-#include "storm/models/ModelBase.h"
+#include "storm/storage/dd/Add.h"
+#include "storm/storage/dd/Bdd.h"
+#include "storm/models/Model.h"
 #include "storm/utility/OsDetection.h"
 
 #include "storm-config.h"
@@ -21,12 +23,6 @@ namespace storm {
         template<storm::dd::DdType Type>
         class Dd;
         
-        template<storm::dd::DdType Type, typename ValueType>
-        class Add;
-        
-        template<storm::dd::DdType Type>
-        class Bdd;
-        
         template<storm::dd::DdType Type>
         class DdManager;
         
@@ -47,7 +43,7 @@ namespace storm {
              * Base class for all symbolic models.
              */
             template<storm::dd::DdType Type, typename CValueType = double>
-            class Model : public storm::models::ModelBase {
+            class Model : public storm::models::Model<CValueType> {
             public:
                 typedef CValueType ValueType;
                 
@@ -75,8 +71,6 @@ namespace storm {
                  * @param rowExpressionAdapter An object that can be used to translate expressions in terms of the row
                  * meta variables.
                  * @param columVariables The set of column meta variables used in the DDs.
-                 * @param columnExpressionAdapter An object that can be used to translate expressions in terms of the
-                 * column meta variables.
                  * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
                  * @param labelToExpressionMap A mapping from label names to their defining expressions.
                  * @param rewardModels The reward models associated with the model.
@@ -90,11 +84,37 @@ namespace storm {
                       std::set<storm::expressions::Variable> const& rowVariables,
                       std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                       std::set<storm::expressions::Variable> const& columnVariables,
-                      std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                       std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                       std::map<std::string, storm::expressions::Expression> labelToExpressionMap = std::map<std::string, storm::expressions::Expression>(),
                       std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
                 
+                /*!
+                 * Constructs a model from the given data.
+                 *
+                 * @param modelType The type of the model.
+                 * @param manager The manager responsible for the decision diagrams.
+                 * @param reachableStates A DD representing the reachable states.
+                 * @param initialStates A DD representing the initial states of the model.
+                 * @param deadlockStates A DD representing the deadlock states of the model.
+                 * @param transitionMatrix The matrix representing the transitions in the model.
+                 * @param rowVariables The set of row meta variables used in the DDs.
+                 * @param columnVariables The set of column meta variables used in the DDs.
+                 * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
+                 * @param labelToBddMap A mapping from label names to their defining BDDs.
+                 * @param rewardModels The reward models associated with the model.
+                 */
+                Model(storm::models::ModelType const& modelType,
+                      std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                      storm::dd::Bdd<Type> reachableStates,
+                      storm::dd::Bdd<Type> initialStates,
+                      storm::dd::Bdd<Type> deadlockStates,
+                      storm::dd::Add<Type, ValueType> transitionMatrix,
+                      std::set<storm::expressions::Variable> const& rowVariables,
+                      std::set<storm::expressions::Variable> const& columnVariables,
+                      std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                      std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap = std::map<std::string, storm::dd::Bdd<Type>>(),
+                      std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
                 virtual uint_fast64_t getNumberOfStates() const override;
                 
                 virtual uint_fast64_t getNumberOfTransitions() const override;
@@ -104,14 +124,7 @@ namespace storm {
                  *
                  * @return The manager responsible for the DDs that represent this model.
                  */
-                storm::dd::DdManager<Type> const& getManager() const;
-
-                /*!
-                 * Retrieves the manager responsible for the DDs that represent this model.
-                 *
-                 * @return The manager responsible for the DDs that represent this model.
-                 */
-                storm::dd::DdManager<Type>& getManager();
+                storm::dd::DdManager<Type>& getManager() const;
 
                 /*!
                  * Retrieves the manager responsible for the DDs that represent this model.
@@ -143,10 +156,18 @@ namespace storm {
                  * Returns the sets of states labeled with the given label.
                  *
                  * @param label The label for which to get the labeled states.
-                 * @return The set of states labeled with the requested label in the form of a bit vector.
+                 * @return The set of states labeled with the requested label.
                  */
                 virtual storm::dd::Bdd<Type> getStates(std::string const& label) const;
                 
+                /*!
+                 * Returns the expression for the given label.
+                 *
+                 * @param label The label for which to get the expression.
+                 * @return The expression characterizing the requested label.
+                 */
+                virtual storm::expressions::Expression getExpression(std::string const& label) const;
+                
                 /*!
                  * Returns the set of states labeled satisfying the given expression (that must be of boolean type).
                  *
@@ -181,9 +202,10 @@ namespace storm {
                  * Retrieves the matrix qualitatively (i.e. without probabilities) representing the transitions of the
                  * model.
                  *
+                 * @param keepNondeterminism If false, the matrix will abstract from the nondeterminism variables.
                  * @return A matrix representing the qualitative transitions of the model.
                  */
-                storm::dd::Bdd<Type> getQualitativeTransitionMatrix() const;
+                virtual storm::dd::Bdd<Type> getQualitativeTransitionMatrix(bool keepNondeterminism = true) const;
                 
                 /*!
                  * Retrieves the meta variables used to encode the rows of the transition matrix and the vector indices.
@@ -199,6 +221,27 @@ namespace storm {
                  */
                 std::set<storm::expressions::Variable> const& getColumnVariables() const;
                 
+                /*!
+                 * Retrieves all meta variables used to encode rows and nondeterminism.
+                 *
+                 * @return All meta variables used to encode rows and nondeterminism.
+                 */
+                std::set<storm::expressions::Variable> getRowAndNondeterminismVariables() const;
+
+                /*!
+                 * Retrieves all meta variables used to encode columns and nondeterminism.
+                 *
+                 * @return All meta variables used to encode columns and nondeterminism.
+                 */
+                std::set<storm::expressions::Variable> getColumnAndNondeterminismVariables() const;
+
+                /*!
+                 * Retrieves all meta variables used to encode the nondeterminism.
+                 *
+                 * @return All meta variables used to encode the nondeterminism.
+                 */
+                virtual std::set<storm::expressions::Variable> const& getNondeterminismVariables() const;
+
                 /*!
                  * Retrieves the pairs of row and column meta variables.
                  *
@@ -234,6 +277,13 @@ namespace storm {
                  */
                 RewardModelType const& getUniqueRewardModel() const;
 
+                /*!
+                 * Retrieves the name of the unique reward model, if there exists exactly one. Otherwise, an exception is thrown.
+                 *
+                 * @return The name of the unique reward model.
+                 */
+                std::string const& getUniqueRewardModelName() const;
+
                 /*!
                  * Retrieves the unique reward model, if there exists exactly one. Otherwise, an exception is thrown.
                  *
@@ -255,6 +305,7 @@ namespace storm {
                  */
                 bool hasRewardModel() const;
 
+                std::unordered_map<std::string, RewardModelType>& getRewardModels();
                 std::unordered_map<std::string, RewardModelType> const& getRewardModels() const;
                 
                 /*!
@@ -275,7 +326,6 @@ namespace storm {
                 std::set<storm::RationalFunctionVariable> const& getParameters() const;
                 
             protected:
-                
                 /*!
                  * Sets the transition matrix of the model.
                  *
@@ -325,13 +375,7 @@ namespace storm {
                 
                 // A vector representing the reachable states of the model.
                 storm::dd::Bdd<Type> reachableStates;
-                
-                // A vector representing the initial states of the model.
-                storm::dd::Bdd<Type> initialStates;
-                
-                // A vector representing the deadlock states of the model.
-                storm::dd::Bdd<Type> deadlockStates;
-                
+                                
                 // A matrix representing transition relation.
                 storm::dd::Add<Type, ValueType> transitionMatrix;
                 
@@ -344,9 +388,6 @@ namespace storm {
                 // The meta variables used to encode the columns of the transition matrix.
                 std::set<storm::expressions::Variable> columnVariables;
                 
-                // An adapter that can translate expressions to DDs over the column meta variables.
-                std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter;
-                
                 // A vector holding all pairs of row and column meta variable pairs. This is used to swap the variables
                 // in the DDs from row to column variables and vice versa.
                 std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> rowColumnMetaVariablePairs;
@@ -354,11 +395,17 @@ namespace storm {
                 // A mapping from labels to expressions defining them.
                 std::map<std::string, storm::expressions::Expression> labelToExpressionMap;
                 
+                // A mapping from labels to BDDs characterizing the labeled states.
+                std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap;
+                
                 // The reward models associated with the model.
                 std::unordered_map<std::string, RewardModelType> rewardModels;
                 
                 // The parameters. Only meaningful for models over rational functions.
                 std::set<storm::RationalFunctionVariable> parameters;
+                
+                // An empty variable set that can be used when references to non-existing sets need to be returned.
+                std::set<storm::expressions::Variable> emptyVariableSet;
             };
             
         } // namespace symbolic
diff --git a/src/storm/models/symbolic/NondeterministicModel.cpp b/src/storm/models/symbolic/NondeterministicModel.cpp
index 7856a942e..25cea032d 100644
--- a/src/storm/models/symbolic/NondeterministicModel.cpp
+++ b/src/storm/models/symbolic/NondeterministicModel.cpp
@@ -23,15 +23,29 @@ namespace storm {
                                                                           std::set<storm::expressions::Variable> const& rowVariables,
                                                                           std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                                                           std::set<storm::expressions::Variable> const& columnVariables,
-                                                                          std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                                                           std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                                                           std::set<storm::expressions::Variable> const& nondeterminismVariables,
                                                                           std::map<std::string, storm::expressions::Expression> labelToExpressionMap,
                                                                           std::unordered_map<std::string, RewardModelType> const& rewardModels)
-            : Model<Type, ValueType>(modelType, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, columnExpressionAdapter, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels), nondeterminismVariables(nondeterminismVariables) {
-                
-                // Prepare the mask of illegal nondeterministic choices.
-                illegalMask = !(transitionMatrix.notZero().existsAbstract(this->getColumnVariables())) && reachableStates;
+            : Model<Type, ValueType>(modelType, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, rowColumnMetaVariablePairs, labelToExpressionMap, rewardModels), nondeterminismVariables(nondeterminismVariables) {
+                createIllegalMask();
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            NondeterministicModel<Type, ValueType>::NondeterministicModel(storm::models::ModelType const& modelType,
+                                  std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                  storm::dd::Bdd<Type> reachableStates,
+                                  storm::dd::Bdd<Type> initialStates,
+                                  storm::dd::Bdd<Type> deadlockStates,
+                                  storm::dd::Add<Type, ValueType> transitionMatrix,
+                                  std::set<storm::expressions::Variable> const& rowVariables,
+                                  std::set<storm::expressions::Variable> const& columnVariables,
+                                  std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                  std::set<storm::expressions::Variable> const& nondeterminismVariables,
+                                  std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap,
+                                  std::unordered_map<std::string, RewardModelType> const& rewardModels)
+            : Model<Type, ValueType>(modelType, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, columnVariables, rowColumnMetaVariablePairs, labelToBddMap, rewardModels), nondeterminismVariables(nondeterminismVariables) {
+                createIllegalMask();
             }
             
             template<storm::dd::DdType Type, typename ValueType>
@@ -76,6 +90,28 @@ namespace storm {
                 out << ", nondeterminism: " << this->getNondeterminismVariables().size() << " meta variables (" << nondeterminismVariableCount << " DD variables)";
             }
             
+            template<storm::dd::DdType Type, typename ValueType>
+            void NondeterministicModel<Type, ValueType>::reduceToStateBasedRewards() {
+                for (auto& rewardModel : this->getRewardModels()) {
+                    rewardModel.second.reduceToStateBasedRewards(this->getTransitionMatrix(), this->getRowVariables(), this->getColumnVariables(), false);
+                }
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            void NondeterministicModel<Type, ValueType>::createIllegalMask() {
+                // Prepare the mask of illegal nondeterministic choices.
+                illegalMask = !(this->getTransitionMatrix().notZero().existsAbstract(this->getColumnVariables())) && this->getReachableStates();
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            storm::dd::Bdd<Type> NondeterministicModel<Type, ValueType>::getQualitativeTransitionMatrix(bool keepNondeterminism) const {
+                if (!keepNondeterminism) {
+                    return this->getTransitionMatrix().notZero().existsAbstract(this->getNondeterminismVariables());
+                } else {
+                    return Model<Type, ValueType>::getQualitativeTransitionMatrix(keepNondeterminism);
+                }
+            }
+            
             // Explicitly instantiate the template class.
             template class NondeterministicModel<storm::dd::DdType::CUDD, double>;
             template class NondeterministicModel<storm::dd::DdType::Sylvan, double>;
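
A short sketch of what the new keepNondeterminism flag buys (the helper name is ours): with keepNondeterminism == false the 0/1 transition relation is already abstracted over the nondeterminism variables, so a plain successor computation only involves row and column meta variables.

    #include "storm/models/symbolic/NondeterministicModel.h"
    #include "storm/storage/dd/Bdd.h"
    #include "storm/storage/dd/DdType.h"

    // Hypothetical helper: one-step successors of a state set, ignoring which
    // nondeterministic choice is taken. The abstracted 0/1 transition relation
    // only ranges over row and column meta variables.
    template<storm::dd::DdType Type, typename ValueType>
    storm::dd::Bdd<Type> successors(storm::models::symbolic::NondeterministicModel<Type, ValueType> const& model,
                                    storm::dd::Bdd<Type> const& states) {
        storm::dd::Bdd<Type> transitions = model.getQualitativeTransitionMatrix(false);
        return (states && transitions).existsAbstract(model.getRowVariables())
                                      .swapVariables(model.getRowColumnMetaVariablePairs());
    }
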
diff --git a/src/storm/models/symbolic/NondeterministicModel.h b/src/storm/models/symbolic/NondeterministicModel.h
index cec5b0cb8..c75874644 100644
--- a/src/storm/models/symbolic/NondeterministicModel.h
+++ b/src/storm/models/symbolic/NondeterministicModel.h
@@ -37,8 +37,6 @@ namespace storm {
                  * @param rowExpressionAdapter An object that can be used to translate expressions in terms of the row
                  * meta variables.
                  * @param columVariables The set of column meta variables used in the DDs.
-                 * @param columnExpressionAdapter An object that can be used to translate expressions in terms of the
-                 * column meta variables.
                  * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
                  * @param nondeterminismVariables The meta variables used to encode the nondeterminism in the model.
                  * @param labelToExpressionMap A mapping from label names to their defining expressions.
@@ -53,12 +51,40 @@ namespace storm {
                                       std::set<storm::expressions::Variable> const& rowVariables,
                                       std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                       std::set<storm::expressions::Variable> const& columnVariables,
-                                      std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                       std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                       std::set<storm::expressions::Variable> const& nondeterminismVariables,
                                       std::map<std::string, storm::expressions::Expression> labelToExpressionMap = std::map<std::string, storm::expressions::Expression>(),
                                       std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
                 
+                /*!
+                 * Constructs a model from the given data.
+                 *
+                 * @param modelType The type of the model.
+                 * @param manager The manager responsible for the decision diagrams.
+                 * @param reachableStates A DD representing the reachable states.
+                 * @param initialStates A DD representing the initial states of the model.
+                 * @param deadlockStates A DD representing the deadlock states of the model.
+                 * @param transitionMatrix The matrix representing the transitions in the model.
+                 * @param rowVariables The set of row meta variables used in the DDs.
+                 * @param columnVariables The set of column meta variables used in the DDs.
+                 * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
+                 * @param nondeterminismVariables The meta variables used to encode the nondeterminism in the model.
+                 * @param labelToBddMap A mapping from label names to their defining BDDs.
+                 * @param rewardModels The reward models associated with the model.
+                 */
+                NondeterministicModel(storm::models::ModelType const& modelType,
+                                      std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                      storm::dd::Bdd<Type> reachableStates,
+                                      storm::dd::Bdd<Type> initialStates,
+                                      storm::dd::Bdd<Type> deadlockStates,
+                                      storm::dd::Add<Type, ValueType> transitionMatrix,
+                                      std::set<storm::expressions::Variable> const& rowVariables,
+                                      std::set<storm::expressions::Variable> const& columnVariables,
+                                      std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                      std::set<storm::expressions::Variable> const& nondeterminismVariables,
+                                      std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap = std::map<std::string, storm::dd::Bdd<Type>>(),
+                                      std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
                 /*!
                  * Retrieves the number of nondeterministic choices in the model.
                  *
@@ -71,8 +97,8 @@ namespace storm {
                  *
                  * @return The meta variables used to encode the nondeterminism in the model.
                  */
-                std::set<storm::expressions::Variable> const& getNondeterminismVariables() const;
-                
+                virtual std::set<storm::expressions::Variable> const& getNondeterminismVariables() const override;
+
                 /*!
                  * Retrieves a BDD characterizing all illegal nondeterminism encodings in the model.
                  *
@@ -87,8 +113,19 @@ namespace storm {
                  */
                 storm::dd::Bdd<Type> getIllegalSuccessorMask() const;
                 
+                /*!
+                 * Retrieves the matrix qualitatively (i.e. without probabilities) representing the transitions of the
+                 * model.
+                 *
+                 * @param keepNondeterminism If false, the matrix will abstract from the nondeterminism variables.
+                 * @return A matrix representing the qualitative transitions of the model.
+                 */
+                virtual storm::dd::Bdd<Type> getQualitativeTransitionMatrix(bool keepNondeterminism = true) const override;
+                
                 virtual void printModelInformationToStream(std::ostream& out) const override;
                 
+                virtual void reduceToStateBasedRewards() override;
+                
             protected:
             
                 virtual void printDdVariableInformationToStream(std::ostream& out) const override;
@@ -97,6 +134,7 @@ namespace storm {
                 storm::dd::Bdd<Type> illegalMask;
                 
             private:
+                void createIllegalMask();
                 
                 // The meta variables encoding the nondeterminism in the model.
                 std::set<storm::expressions::Variable> nondeterminismVariables;
diff --git a/src/storm/models/symbolic/StandardRewardModel.cpp b/src/storm/models/symbolic/StandardRewardModel.cpp
index a91040069..8fe296308 100644
--- a/src/storm/models/symbolic/StandardRewardModel.cpp
+++ b/src/storm/models/symbolic/StandardRewardModel.cpp
@@ -6,6 +6,8 @@
 
 #include "storm/adapters/RationalFunctionAdapter.h"
 
+#include "storm/exceptions/InvalidOperationException.h"
+
 namespace storm {
     namespace models {
         namespace symbolic {
@@ -157,6 +159,28 @@ namespace storm {
                 return StandardRewardModel<Type, ValueType>(modifiedStateRewardVector, this->optionalStateActionRewardVector, this->optionalTransitionRewardMatrix);
             }
             
+            template <storm::dd::DdType Type, typename ValueType>
+            void StandardRewardModel<Type, ValueType>::reduceToStateBasedRewards(storm::dd::Add<Type, ValueType> const& transitionMatrix, std::set<storm::expressions::Variable> const& rowVariables, std::set<storm::expressions::Variable> const& columnVariables, bool reduceToStateRewards) {
+                if (this->hasTransitionRewards()) {
+                    if (this->hasStateActionRewards()) {
+                        this->optionalStateActionRewardVector.get() += transitionMatrix.multiplyMatrix(this->getTransitionRewardMatrix(), columnVariables);
+                        this->optionalTransitionRewardMatrix = boost::none;
+                    } else {
+                        this->optionalStateActionRewardVector = transitionMatrix.multiplyMatrix(this->getTransitionRewardMatrix(), columnVariables);
+                    }
+                }
+                
+                if (reduceToStateRewards && this->hasStateActionRewards()) {
+                    STORM_LOG_THROW(this->getStateActionRewardVector().getContainedMetaVariables() == rowVariables, storm::exceptions::InvalidOperationException, "The reduction to state rewards is only possible if the state-action rewards do not depend on nondeterminism variables.");
+                    if (this->hasStateRewards()) {
+                        this->optionalStateRewardVector = this->optionalStateRewardVector.get() + this->getStateActionRewardVector();
+                    } else {
+                        this->optionalStateRewardVector = this->getStateActionRewardVector();
+                    }
+                    this->optionalStateActionRewardVector = boost::none;
+                }
+            }
+            
             template class StandardRewardModel<storm::dd::DdType::CUDD, double>;
             template class StandardRewardModel<storm::dd::DdType::Sylvan, double>;
 
diff --git a/src/storm/models/symbolic/StandardRewardModel.h b/src/storm/models/symbolic/StandardRewardModel.h
index a4e9fb573..a7ab321f4 100644
--- a/src/storm/models/symbolic/StandardRewardModel.h
+++ b/src/storm/models/symbolic/StandardRewardModel.h
@@ -189,6 +189,16 @@ namespace storm {
                  */
                 StandardRewardModel<Type, ValueType> divideStateRewardVector(storm::dd::Add<Type, ValueType> const& divisor) const;
                 
+                /*!
+                 * Reduces the transition-based rewards to state-action rewards by taking the transition-probability-weighted
+                 * average of each row. If the corresponding flag is set, the state-action rewards and the state rewards are
+                 * summed so that the model only has a state reward vector left. Note that this transformation only preserves
+                 * expected rewards, but not all reward-based properties.
+                 *
+                 * @param transitionMatrix The transition matrix that is used to weight the rewards in the reward matrix.
+                 */
+                void reduceToStateBasedRewards(storm::dd::Add<Type, ValueType> const& transitionMatrix, std::set<storm::expressions::Variable> const& rowVariables, std::set<storm::expressions::Variable> const& columnVariables, bool reduceToStateRewards);
+                
             private:
                 // The state reward vector.
                 boost::optional<storm::dd::Add<Type, ValueType>> optionalStateRewardVector;
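
As a plain, explicit-state analogue of the symbolic reduction declared above (illustrative only, not Storm code): folding transition rewards into state-action rewards amounts to adding, per choice, the transition-probability-weighted sum of the transition rewards.

    #include <cstddef>
    #include <vector>

    // Explicit-state analogue of the symbolic reduction. For a single state, probs[c][s']
    // is the probability of reaching s' under choice c and transRew[c][s'] the corresponding
    // transition reward; the expected transition reward of each choice is added to the
    // state-action reward of that choice.
    void foldTransitionRewards(std::vector<std::vector<double>> const& probs,
                               std::vector<std::vector<double>> const& transRew,
                               std::vector<double>& stateActionRew) {
        for (std::size_t choice = 0; choice < probs.size(); ++choice) {
            double expected = 0.0;
            for (std::size_t succ = 0; succ < probs[choice].size(); ++succ) {
                expected += probs[choice][succ] * transRew[choice][succ];
            }
            stateActionRew[choice] += expected;
        }
    }
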
diff --git a/src/storm/models/symbolic/StochasticTwoPlayerGame.cpp b/src/storm/models/symbolic/StochasticTwoPlayerGame.cpp
index 346a6a396..c8d932b5c 100644
--- a/src/storm/models/symbolic/StochasticTwoPlayerGame.cpp
+++ b/src/storm/models/symbolic/StochasticTwoPlayerGame.cpp
@@ -22,23 +22,44 @@ namespace storm {
                                                                               std::set<storm::expressions::Variable> const& rowVariables,
                                                                               std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                                                               std::set<storm::expressions::Variable> const& columnVariables,
-                                                                              std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                                                               std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                                                               std::set<storm::expressions::Variable> const& player1Variables,
                                                                               std::set<storm::expressions::Variable> const& player2Variables,
                                                                               std::set<storm::expressions::Variable> const& nondeterminismVariables,
                                                                               std::map<std::string, storm::expressions::Expression> labelToExpressionMap,
                                                                               std::unordered_map<std::string, RewardModelType> const& rewardModels)
-            : NondeterministicModel<Type, ValueType>(storm::models::ModelType::S2pg, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, columnExpressionAdapter, rowColumnMetaVariablePairs, nondeterminismVariables, labelToExpressionMap, rewardModels), player1Variables(player1Variables), player2Variables(player2Variables) {
-                
+            : NondeterministicModel<Type, ValueType>(storm::models::ModelType::S2pg, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, rowExpressionAdapter, columnVariables, rowColumnMetaVariablePairs, nondeterminismVariables, labelToExpressionMap, rewardModels), player1Variables(player1Variables), player2Variables(player2Variables) {
+                createIllegalMasks();
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            StochasticTwoPlayerGame<Type, ValueType>::StochasticTwoPlayerGame(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                                                              storm::dd::Bdd<Type> reachableStates,
+                                                                              storm::dd::Bdd<Type> initialStates,
+                                                                              storm::dd::Bdd<Type> deadlockStates,
+                                                                              storm::dd::Add<Type, ValueType> transitionMatrix,
+                                                                              std::set<storm::expressions::Variable> const& rowVariables,
+                                                                              std::set<storm::expressions::Variable> const& columnVariables,
+                                                                              std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                                                              std::set<storm::expressions::Variable> const& player1Variables,
+                                                                              std::set<storm::expressions::Variable> const& player2Variables,
+                                                                              std::set<storm::expressions::Variable> const& nondeterminismVariables,
+                                                                              std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap,
+                                                                              std::unordered_map<std::string, RewardModelType> const& rewardModels)
+            : NondeterministicModel<Type, ValueType>(storm::models::ModelType::S2pg, manager, reachableStates, initialStates, deadlockStates, transitionMatrix, rowVariables, columnVariables, rowColumnMetaVariablePairs, nondeterminismVariables, labelToBddMap, rewardModels), player1Variables(player1Variables), player2Variables(player2Variables) {
+                createIllegalMasks();
+            }
+            
+            template<storm::dd::DdType Type, typename ValueType>
+            void StochasticTwoPlayerGame<Type, ValueType>::createIllegalMasks() {
                 // Compute legal player 1 mask.
-                illegalPlayer1Mask = transitionMatrix.notZero().existsAbstract(this->getColumnVariables()).existsAbstract(this->getPlayer2Variables());
+                this->illegalPlayer1Mask = this->getTransitionMatrix().notZero().existsAbstract(this->getColumnVariables()).existsAbstract(this->getPlayer2Variables());
                 
                 // Correct the mask for player 2. This is necessary, because it is not yet restricted to the legal choices of player 1.
-                illegalPlayer2Mask = this->getIllegalMask() && illegalPlayer1Mask;
+                illegalPlayer2Mask = this->getIllegalMask() && this->illegalPlayer1Mask;
                 
                 // Then set the illegal mask for player 1 correctly.
-                illegalPlayer1Mask = !illegalPlayer1Mask && reachableStates;
+                this->illegalPlayer1Mask = !illegalPlayer1Mask && this->getReachableStates();
             }
             
             template<storm::dd::DdType Type, typename ValueType>
diff --git a/src/storm/models/symbolic/StochasticTwoPlayerGame.h b/src/storm/models/symbolic/StochasticTwoPlayerGame.h
index 15383b056..d3f009593 100644
--- a/src/storm/models/symbolic/StochasticTwoPlayerGame.h
+++ b/src/storm/models/symbolic/StochasticTwoPlayerGame.h
@@ -36,8 +36,6 @@ namespace storm {
                  * @param rowExpressionAdapter An object that can be used to translate expressions in terms of the row
                  * meta variables.
                  * @param columVariables The set of column meta variables used in the DDs.
-                 * @param columnExpressionAdapter An object that can be used to translate expressions in terms of the
-                 * column meta variables.
                  * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
                  * @param player1Variables The meta variables used to encode the nondeterministic choices of player 1.
                  * @param player2Variables The meta variables used to encode the nondeterministic choices of player 2.
@@ -53,7 +51,6 @@ namespace storm {
                                         std::set<storm::expressions::Variable> const& rowVariables,
                                         std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> rowExpressionAdapter,
                                         std::set<storm::expressions::Variable> const& columnVariables,
-                                        std::shared_ptr<storm::adapters::AddExpressionAdapter<Type, ValueType>> columnExpressionAdapter,
                                         std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
                                         std::set<storm::expressions::Variable> const& player1Variables,
                                         std::set<storm::expressions::Variable> const& player2Variables,
@@ -61,6 +58,37 @@ namespace storm {
                                         std::map<std::string, storm::expressions::Expression> labelToExpressionMap = std::map<std::string, storm::expressions::Expression>(),
                                         std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
                 
+                /*!
+                 * Constructs a model from the given data.
+                 *
+                 * @param manager The manager responsible for the decision diagrams.
+                 * @param reachableStates A DD representing the reachable states.
+                 * @param initialStates A DD representing the initial states of the model.
+                 * @param deadlockStates A DD representing the deadlock states of the model.
+                 * @param transitionMatrix The matrix representing the transitions in the model.
+                 * @param rowVariables The set of row meta variables used in the DDs.
+                 * @param columnVariables The set of column meta variables used in the DDs.
+                 * @param rowColumnMetaVariablePairs All pairs of row/column meta variables.
+                 * @param player1Variables The meta variables used to encode the nondeterministic choices of player 1.
+                 * @param player2Variables The meta variables used to encode the nondeterministic choices of player 2.
+                 * @param allNondeterminismVariables The meta variables used to encode the nondeterminism in the model.
+                 * @param labelToBddMap A mapping from label names to their defining BDDs.
+                 * @param rewardModels The reward models associated with the model.
+                 */
+                StochasticTwoPlayerGame(std::shared_ptr<storm::dd::DdManager<Type>> manager,
+                                        storm::dd::Bdd<Type> reachableStates,
+                                        storm::dd::Bdd<Type> initialStates,
+                                        storm::dd::Bdd<Type> deadlockStates,
+                                        storm::dd::Add<Type, ValueType> transitionMatrix,
+                                        std::set<storm::expressions::Variable> const& rowVariables,
+                                        std::set<storm::expressions::Variable> const& columnVariables,
+                                        std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs,
+                                        std::set<storm::expressions::Variable> const& player1Variables,
+                                        std::set<storm::expressions::Variable> const& player2Variables,
+                                        std::set<storm::expressions::Variable> const& allNondeterminismVariables,
+                                        std::map<std::string, storm::dd::Bdd<Type>> labelToBddMap = std::map<std::string, storm::dd::Bdd<Type>>(),
+                                        std::unordered_map<std::string, RewardModelType> const& rewardModels = std::unordered_map<std::string, RewardModelType>());
+                
                 /*!
                  * Retrieeves the set of meta variables used to encode the nondeterministic choices of player 1.
                  *
@@ -90,6 +118,11 @@ namespace storm {
                 storm::dd::Bdd<Type> getIllegalPlayer2Mask() const;
                 
             private:
+                /*!
+                 * Prepares all illegal masks.
+                 */
+                void createIllegalMasks();
+                
                 // A mask that characterizes all illegal player 1 choices.
                 storm::dd::Bdd<Type> illegalPlayer1Mask;
 
diff --git a/src/storm/permissivesched/PermissiveSchedulers.h b/src/storm/permissivesched/PermissiveSchedulers.h
index eb09d4b24..cc29f7113 100644
--- a/src/storm/permissivesched/PermissiveSchedulers.h
+++ b/src/storm/permissivesched/PermissiveSchedulers.h
@@ -2,6 +2,7 @@
 #ifndef PERMISSIVESCHEDULERS_H
 #define	PERMISSIVESCHEDULERS_H
 
+#include <storm/transformer/ChoiceSelector.h>
 #include "../logic/ProbabilityOperatorFormula.h"
 #include "../models/sparse/Mdp.h"
 #include "../models/sparse/StandardRewardModel.h"
@@ -38,7 +39,8 @@ namespace storm {
 
 
             storm::models::sparse::Mdp<double, RM> apply() const {
-                return mdp.restrictChoices(enabledChoices);
+                storm::transformer::ChoiceSelector<double, RM> cs(mdp);
+                return *(cs.transform(enabledChoices)->template as<storm::models::sparse::Mdp<double, RM>>());
             }
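The same transformation can be used outside the permissive scheduler to restrict an arbitrary MDP to a set of enabled choices. A minimal sketch, assuming an mdp of type storm::models::sparse::Mdp<double> (with the default reward-model template argument) and a storm::storage::BitVector enabledChoices over its choice indices are available:

    storm::transformer::ChoiceSelector<double> selector(mdp);
    auto restricted = selector.transform(enabledChoices)
                              ->as<storm::models::sparse::Mdp<double>>();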
 
             template<typename T>
diff --git a/src/storm/settings/Argument.cpp b/src/storm/settings/Argument.cpp
index ddb39c0ab..555bcdb9d 100644
--- a/src/storm/settings/Argument.cpp
+++ b/src/storm/settings/Argument.cpp
@@ -12,12 +12,12 @@ namespace storm {
     namespace settings {
         
         template<typename T>
-        Argument<T>::Argument(std::string const& name, std::string const& description, std::vector<std::shared_ptr<ArgumentValidator<T>>> const& validators): ArgumentBase(name, description), argumentValue(), argumentType(inferToEnumType<T>()), validators(validators), isOptional(false), defaultValue(), hasDefaultValue(false) {
+        Argument<T>::Argument(std::string const& name, std::string const& description, std::vector<std::shared_ptr<ArgumentValidator<T>>> const& validators): ArgumentBase(name, description), argumentValue(), argumentType(inferToEnumType<T>()), validators(validators), isOptional(false), defaultValue(), hasDefaultValue(false), wasSetFromDefaultValueFlag(false) {
             // Intentionally left empty.
         }
         
         template<typename T>
-        Argument<T>::Argument(std::string const& name, std::string const& description, std::vector<std::shared_ptr<ArgumentValidator<T>>> const& validators, bool isOptional, T defaultValue): ArgumentBase(name, description), argumentValue(), argumentType(inferToEnumType<T>()), validators(validators), isOptional(isOptional), defaultValue(), hasDefaultValue(true) {
+        Argument<T>::Argument(std::string const& name, std::string const& description, std::vector<std::shared_ptr<ArgumentValidator<T>>> const& validators, bool isOptional, T defaultValue): ArgumentBase(name, description), argumentValue(), argumentType(inferToEnumType<T>()), validators(validators), isOptional(isOptional), defaultValue(), hasDefaultValue(true), wasSetFromDefaultValueFlag(false) {
             this->setDefaultValue(defaultValue);
         }
         
@@ -71,6 +71,12 @@ namespace storm {
             STORM_LOG_THROW(this->hasDefaultValue, storm::exceptions::IllegalFunctionCallException, "Unable to set value from default value, because the argument " << name << " has none.");
             bool result = this->setFromTypeValue(this->defaultValue, false);
             STORM_LOG_THROW(result, storm::exceptions::IllegalArgumentValueException, "Unable to assign default value to argument " << name << ", because it was rejected.");
+            this->wasSetFromDefaultValueFlag = true;
+        }
+        
+        template<typename T>
+        bool Argument<T>::wasSetFromDefaultValue() const {
+            return wasSetFromDefaultValueFlag;
         }
         
         template<typename T>
diff --git a/src/storm/settings/Argument.h b/src/storm/settings/Argument.h
index 080d06409..2074557cf 100644
--- a/src/storm/settings/Argument.h
+++ b/src/storm/settings/Argument.h
@@ -85,6 +85,8 @@ namespace storm {
             
             void setFromDefaultValue() override;
             
+            virtual bool wasSetFromDefaultValue() const override;
+
             virtual std::string getValueAsString() const override;
             
             virtual int_fast64_t getValueAsInteger() const override;
@@ -116,6 +118,9 @@ namespace storm {
             // A flag indicating whether a default value has been provided.
             bool hasDefaultValue;
             
+            // A flag indicating whether the argument was set from the default value.
+            bool wasSetFromDefaultValueFlag;
+            
             /*!
              * Sets the default value of the argument to the provided value.
              *
diff --git a/src/storm/settings/ArgumentBase.h b/src/storm/settings/ArgumentBase.h
index 6808bd815..b48a46183 100644
--- a/src/storm/settings/ArgumentBase.h
+++ b/src/storm/settings/ArgumentBase.h
@@ -80,6 +80,8 @@ namespace storm {
              */
 			virtual void setFromDefaultValue() = 0;
 
+            virtual bool wasSetFromDefaultValue() const = 0;
+            
             /*!
              * Tries to set the value of the argument from the given string.
              *
@@ -134,7 +136,7 @@ namespace storm {
             
             friend std::ostream& operator<<(std::ostream& out, ArgumentBase const& argument);
             
-		protected:            
+		protected:
             // A flag indicating whether the argument has been set.
 			bool hasBeenSet;
 
diff --git a/src/storm/settings/SettingsManager.cpp b/src/storm/settings/SettingsManager.cpp
index 493ea71a2..e38c5daf2 100644
--- a/src/storm/settings/SettingsManager.cpp
+++ b/src/storm/settings/SettingsManager.cpp
@@ -271,7 +271,7 @@ namespace storm {
             return moduleIterator->second->getPrintLengthOfLongestOption();
         }
         
-        void SettingsManager::addModule(std::unique_ptr<modules::ModuleSettings>&& moduleSettings) {
+        void SettingsManager::addModule(std::unique_ptr<modules::ModuleSettings>&& moduleSettings, bool doRegister) {
             auto moduleIterator = this->modules.find(moduleSettings->getModuleName());
             STORM_LOG_THROW(moduleIterator == this->modules.end(), storm::exceptions::IllegalFunctionCallException, "Unable to register module '" << moduleSettings->getModuleName() << "' because a module with the same name already exists.");
             
@@ -281,12 +281,15 @@ namespace storm {
             this->modules.emplace(moduleSettings->getModuleName(), std::move(moduleSettings));
             auto iterator = this->modules.find(moduleName);
             std::unique_ptr<modules::ModuleSettings> const& settings = iterator->second;
-            
-            // Now register the options of the module.
-            this->moduleOptions.emplace(moduleName, std::vector<std::shared_ptr<Option>>());
-            for (auto const& option : settings->getOptions()) {
-                this->addOption(option);
+
+            if (doRegister) {
+                // Now register the options of the module.
+                this->moduleOptions.emplace(moduleName, std::vector<std::shared_ptr<Option>>());
+                for (auto const& option : settings->getOptions()) {
+                    this->addOption(option);
+                }
             }
+
         }
         
         void SettingsManager::addOption(std::shared_ptr<Option> const& option) {
diff --git a/src/storm/settings/SettingsManager.h b/src/storm/settings/SettingsManager.h
index 201e7f896..72ee1bd37 100644
--- a/src/storm/settings/SettingsManager.h
+++ b/src/storm/settings/SettingsManager.h
@@ -101,7 +101,7 @@ namespace storm {
              *
              * @param moduleSettings The settings of the module to add.
              */
-            void addModule(std::unique_ptr<modules::ModuleSettings>&& moduleSettings);
+            void addModule(std::unique_ptr<modules::ModuleSettings>&& moduleSettings, bool doRegister = true);
             
             /*!
              * Retrieves the settings of the module with the given name.
@@ -238,9 +238,9 @@ namespace storm {
          * Add new module to use for the settings. The new module is given as a template argument.
          */
         template<typename SettingsType>
-        void addModule() {
+        void addModule(bool doRegister = true) {
             static_assert(std::is_base_of<storm::settings::modules::ModuleSettings, SettingsType>::value, "Template argument must be derived from ModuleSettings");
-            mutableManager().addModule(std::unique_ptr<modules::ModuleSettings>(new SettingsType()));
+            mutableManager().addModule(std::unique_ptr<modules::ModuleSettings>(new SettingsType()), doRegister);
         }
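A hedged sketch of what the new doRegister flag enables: a module's settings object can be stored in the manager without registering its options, e.g. when the options are only exposed by a separate binary. MyToolSettings is a hypothetical ModuleSettings subclass, not part of this patch.

    // Store the module, but do not register its options with the settings manager.
    storm::settings::addModule<MyToolSettings>(false);

    // The default behaviour is unchanged: options are registered as before.
    storm::settings::addModule<storm::settings::modules::CoreSettings>();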
         
         /*!
diff --git a/src/storm/settings/modules/BisimulationSettings.cpp b/src/storm/settings/modules/BisimulationSettings.cpp
index de0a6ad2e..679daa54d 100644
--- a/src/storm/settings/modules/BisimulationSettings.cpp
+++ b/src/storm/settings/modules/BisimulationSettings.cpp
@@ -6,16 +6,29 @@
 #include "storm/settings/Argument.h"
 #include "storm/settings/SettingsManager.h"
 
+#include "storm/exceptions/InvalidSettingsException.h"
+
 namespace storm {
     namespace settings {
         namespace modules {
             
             const std::string BisimulationSettings::moduleName = "bisimulation";
             const std::string BisimulationSettings::typeOptionName = "type";
+            const std::string BisimulationSettings::representativeOptionName = "repr";
+            const std::string BisimulationSettings::quotientFormatOptionName = "quot";
+            const std::string BisimulationSettings::signatureModeOptionName = "sigmode";
             
             BisimulationSettings::BisimulationSettings() : ModuleSettings(moduleName) {
                 std::vector<std::string> types = { "strong", "weak" };
                 this->addOption(storm::settings::OptionBuilder(moduleName, typeOptionName, true, "Sets the kind of bisimulation quotienting used.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("name", "The name of the type to use.").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(types)).setDefaultValueString("strong").build()).build());
+                
+                std::vector<std::string> quotTypes = { "sparse", "dd" };
+                this->addOption(storm::settings::OptionBuilder(moduleName, quotientFormatOptionName, true, "Sets the format in which the quotient is extracted (only applies to DD-based bisimulation).").addArgument(storm::settings::ArgumentBuilder::createStringArgument("format", "The format of the quotient.").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(quotTypes)).setDefaultValueString("dd").build()).build());
+                
+                this->addOption(storm::settings::OptionBuilder(moduleName, representativeOptionName, false, "Sets whether to use representatives in the quotient rather than block numbers.").build());
+
+                std::vector<std::string> signatureModes = { "eager", "lazy" };
+                this->addOption(storm::settings::OptionBuilder(moduleName, signatureModeOptionName, false, "Sets the signature computation mode.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("mode", "The mode to use.").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(signatureModes)).setDefaultValueString("eager").build()).build());
             }
             
             bool BisimulationSettings::isStrongBisimulationSet() const {
@@ -32,6 +45,28 @@ namespace storm {
                 return false;
             }
             
+            BisimulationSettings::QuotientFormat BisimulationSettings::getQuotientFormat() const {
+                std::string quotientFormatAsString = this->getOption(quotientFormatOptionName).getArgumentByName("format").getValueAsString();
+                if (quotientFormatAsString == "sparse") {
+                    return BisimulationSettings::QuotientFormat::Sparse;
+                }
+                return BisimulationSettings::QuotientFormat::Dd;
+            }
+            
+            bool BisimulationSettings::isUseRepresentativesSet() const {
+                return this->getOption(representativeOptionName).getHasOptionBeenSet();
+            }
+            
+            storm::dd::bisimulation::SignatureMode BisimulationSettings::getSignatureMode() const {
+                std::string modeAsString = this->getOption(signatureModeOptionName).getArgumentByName("mode").getValueAsString();
+                if (modeAsString == "eager") {
+                    return storm::dd::bisimulation::SignatureMode::Eager;
+                } else if (modeAsString == "lazy") {
+                    return storm::dd::bisimulation::SignatureMode::Lazy;
+                }
+                STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unknown signature mode '" << modeAsString << "'.");
+            }
+            
             bool BisimulationSettings::check() const {
                 bool optionsSet = this->getOption(typeOptionName).getHasOptionBeenSet();
                 STORM_LOG_WARN_COND(storm::settings::getModule<storm::settings::modules::GeneralSettings>().isBisimulationSet() || !optionsSet, "Bisimulation minimization is not selected, so setting options for bisimulation has no effect.");
diff --git a/src/storm/settings/modules/BisimulationSettings.h b/src/storm/settings/modules/BisimulationSettings.h
index 8c644cd9c..0d71baa86 100644
--- a/src/storm/settings/modules/BisimulationSettings.h
+++ b/src/storm/settings/modules/BisimulationSettings.h
@@ -3,6 +3,8 @@
 
 #include "storm/settings/modules/ModuleSettings.h"
 
+#include "storm/storage/dd/bisimulation/SignatureMode.h"
+
 namespace storm {
     namespace settings {
         namespace modules {
@@ -15,6 +17,8 @@ namespace storm {
                 // An enumeration of all available bisimulation types.
                 enum class BisimulationType { Strong, Weak };
                 
+                enum class QuotientFormat { Sparse, Dd };
+                
                 /*!
                  * Creates a new set of bisimulation settings.
                  */
@@ -34,6 +38,23 @@ namespace storm {
                  */
                 bool isWeakBisimulationSet() const;
 
+                /*!
+                 * Retrieves the format in which the quotient is to be extracted.
+                 * NOTE: only applies to DD-based bisimulation.
+                 */
+                QuotientFormat getQuotientFormat() const;
+                
+                /*!
+                 * Retrieves whether representatives for blocks are to be used instead of the block numbers.
+                 * NOTE: only applies to DD-based bisimulation.
+                 */
+                bool isUseRepresentativesSet() const;
+                
+                /*!
+                 * Retrieves the mode to compute signatures.
+                 */
+                storm::dd::bisimulation::SignatureMode getSignatureMode() const;
+                
                 virtual bool check() const override;
                 
                 // The name of the module.
@@ -42,6 +63,9 @@ namespace storm {
             private:
                 // Define the string names of the options as constants.
                 static const std::string typeOptionName;
+                static const std::string representativeOptionName;
+                static const std::string quotientFormatOptionName;
+                static const std::string signatureModeOptionName;
             };
         } // namespace modules
     } // namespace settings
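A short usage sketch of the new accessors; it only relies on the interface declared above and on storm::settings::getModule, which is already used elsewhere in this patch.

    auto const& bisim = storm::settings::getModule<storm::settings::modules::BisimulationSettings>();
    if (bisim.getQuotientFormat() == storm::settings::modules::BisimulationSettings::QuotientFormat::Sparse) {
        // extract the quotient as a sparse model instead of a DD-based one
    }
    bool useRepresentatives = bisim.isUseRepresentativesSet();
    storm::dd::bisimulation::SignatureMode mode = bisim.getSignatureMode();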
diff --git a/src/storm/settings/modules/CoreSettings.cpp b/src/storm/settings/modules/CoreSettings.cpp
index 964518b64..234a1b9a1 100644
--- a/src/storm/settings/modules/CoreSettings.cpp
+++ b/src/storm/settings/modules/CoreSettings.cpp
@@ -89,6 +89,10 @@ namespace storm {
                 return this->getOption(eqSolverOptionName).getHasOptionBeenSet();
             }
             
+            bool CoreSettings::isEquationSolverSetFromDefaultValue() const {
+                return !this->getOption(eqSolverOptionName).getHasOptionBeenSet() || this->getOption(eqSolverOptionName).getArgumentByName("name").wasSetFromDefaultValue();
+            }
+            
             storm::solver::LpSolverType CoreSettings::getLpSolver() const {
                 std::string lpSolverName = this->getOption(lpSolverOptionName).getArgumentByName("name").getValueAsString();
                 if (lpSolverName == "gurobi") {
diff --git a/src/storm/settings/modules/CoreSettings.h b/src/storm/settings/modules/CoreSettings.h
index 4f275b3d0..b08c01028 100644
--- a/src/storm/settings/modules/CoreSettings.h
+++ b/src/storm/settings/modules/CoreSettings.h
@@ -81,6 +81,13 @@ namespace storm {
                  */
                 bool isEquationSolverSet() const;
 
+                /*!
+                 * Retrieves whether the equation solver has been set from its default value.
+                 *
+                 * @return True iff it has been set from its default value.
+                 */
+                bool isEquationSolverSetFromDefaultValue() const;
+                
                 /*!
                  * Retrieves the selected LP solver.
                  *
diff --git a/src/storm/settings/modules/CuddSettings.cpp b/src/storm/settings/modules/CuddSettings.cpp
index a10ec1bbc..633123893 100644
--- a/src/storm/settings/modules/CuddSettings.cpp
+++ b/src/storm/settings/modules/CuddSettings.cpp
@@ -17,12 +17,15 @@ namespace storm {
             const std::string CuddSettings::moduleName = "cudd";
             const std::string CuddSettings::precisionOptionName = "precision";
             const std::string CuddSettings::maximalMemoryOptionName = "maxmem";
-            const std::string CuddSettings::reorderOptionName = "reorder";
+            const std::string CuddSettings::reorderOptionName = "dynreorder";
+            const std::string CuddSettings::reorderTechniqueOptionName = "reordertechnique";
             
             CuddSettings::CuddSettings() : ModuleSettings(moduleName) {
                 this->addOption(storm::settings::OptionBuilder(moduleName, precisionOptionName, true, "Sets the precision used by Cudd.").addArgument(storm::settings::ArgumentBuilder::createDoubleArgument("value", "The precision up to which two constants are considered to be different.").setDefaultValueDouble(1e-15).addValidatorDouble(ArgumentValidatorFactory::createDoubleRangeValidatorIncluding(0.0, 1.0)).build()).build());
                 
                 this->addOption(storm::settings::OptionBuilder(moduleName, maximalMemoryOptionName, true, "Sets the upper bound of memory available to Cudd in MB.").addArgument(storm::settings::ArgumentBuilder::createUnsignedIntegerArgument("value", "The memory available to Cudd (0 means unlimited).").setDefaultValueUnsignedInteger(4096).build()).build());
+
+                this->addOption(storm::settings::OptionBuilder(moduleName, reorderOptionName, false, "Sets whether dynamic reordering is allowed.").build());
                 
                 std::vector<std::string> reorderingTechniques;
                 reorderingTechniques.push_back("none");
@@ -43,7 +46,7 @@ namespace storm {
                 reorderingTechniques.push_back("annealing");
                 reorderingTechniques.push_back("genetic");
                 reorderingTechniques.push_back("exact");
-                this->addOption(storm::settings::OptionBuilder(moduleName, reorderOptionName, true, "Sets the reordering technique used by Cudd.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("method", "Sets which technique is used by Cudd's reordering routines.").setDefaultValueString("gsift").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(reorderingTechniques)).build()).build());
+                this->addOption(storm::settings::OptionBuilder(moduleName, reorderTechniqueOptionName, true, "Sets the reordering technique used by Cudd.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("method", "Sets which technique is used by Cudd's reordering routines.").setDefaultValueString("gsift").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(reorderingTechniques)).build()).build());
             }
             
             double CuddSettings::getConstantPrecision() const {
@@ -54,8 +57,12 @@ namespace storm {
                 return this->getOption(maximalMemoryOptionName).getArgumentByName("value").getValueAsUnsignedInteger();
             }
             
+            bool CuddSettings::isReorderingEnabled() const {
+                return this->getOption(reorderOptionName).getHasOptionBeenSet();
+            }
+            
             CuddSettings::ReorderingTechnique CuddSettings::getReorderingTechnique() const {
-                std::string reorderingTechniqueAsString = this->getOption(reorderOptionName).getArgumentByName("method").getValueAsString();
+                std::string reorderingTechniqueAsString = this->getOption(reorderTechniqueOptionName).getArgumentByName("method").getValueAsString();
                 if (reorderingTechniqueAsString == "none") {
                     return CuddSettings::ReorderingTechnique::None;
                 } else if (reorderingTechniqueAsString == "random") {
diff --git a/src/storm/settings/modules/CuddSettings.h b/src/storm/settings/modules/CuddSettings.h
index 01a81389c..5c4e22da2 100644
--- a/src/storm/settings/modules/CuddSettings.h
+++ b/src/storm/settings/modules/CuddSettings.h
@@ -34,6 +34,13 @@ namespace storm {
                  */
                 uint_fast64_t getMaximalMemory() const;
                 
+                /*!
+                 * Retrieves whether dynamic reordering is enabled.
+                 *
+                 * @return True iff dynamic reordering is enabled.
+                 */
+                bool isReorderingEnabled() const;
+                
                 /*!
                  * Retrieves the reordering technique that CUDD is supposed to use.
                  *
@@ -49,6 +56,7 @@ namespace storm {
                 static const std::string precisionOptionName;
                 static const std::string maximalMemoryOptionName;
                 static const std::string reorderOptionName;
+                static const std::string reorderTechniqueOptionName;
             };
             
         } // namespace modules
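For clarity, a sketch of how a DD manager setup could consume the split options; only the accessors added above are used, the surrounding logic is illustrative.

    auto const& cudd = storm::settings::getModule<storm::settings::modules::CuddSettings>();
    if (cudd.isReorderingEnabled()) {
        // Dynamic reordering was requested (new 'dynreorder' option); the technique
        // is now chosen independently via the renamed 'reordertechnique' option.
        storm::settings::modules::CuddSettings::ReorderingTechnique technique = cudd.getReorderingTechnique();
        // ... enable automatic reordering with 'technique' in the CUDD manager ...
    }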
diff --git a/src/storm/settings/modules/EigenEquationSolverSettings.cpp b/src/storm/settings/modules/EigenEquationSolverSettings.cpp
index 937f36026..cc3589937 100644
--- a/src/storm/settings/modules/EigenEquationSolverSettings.cpp
+++ b/src/storm/settings/modules/EigenEquationSolverSettings.cpp
@@ -26,7 +26,7 @@ namespace storm {
             
             EigenEquationSolverSettings::EigenEquationSolverSettings() : ModuleSettings(moduleName) {
                 std::vector<std::string> methods = {"sparselu", "bicgstab", "dgmres", "gmres"};
-                this->addOption(storm::settings::OptionBuilder(moduleName, techniqueOptionName, true, "The method to be used for solving linear equation systems with the eigen solver.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("name", "The name of the method to use.").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(methods)).setDefaultValueString("sparselu").build()).build());
+                this->addOption(storm::settings::OptionBuilder(moduleName, techniqueOptionName, true, "The method to be used for solving linear equation systems with the eigen solver.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("name", "The name of the method to use.").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(methods)).setDefaultValueString("gmres").build()).build());
                 
                 // Register available preconditioners.
                 std::vector<std::string> preconditioner = {"ilu", "diagonal", "none"};
diff --git a/src/storm/settings/modules/JitBuilderSettings.cpp b/src/storm/settings/modules/JitBuilderSettings.cpp
index b779f23f5..7dc6808e9 100644
--- a/src/storm/settings/modules/JitBuilderSettings.cpp
+++ b/src/storm/settings/modules/JitBuilderSettings.cpp
@@ -34,8 +34,8 @@ namespace storm {
                                 .addArgument(storm::settings::ArgumentBuilder::createStringArgument("dir", "The directory containing the carl headers.").build()).build());
                 this->addOption(storm::settings::OptionBuilder(moduleName, compilerFlagsOptionName, false, "The flags passed to the compiler.")
                                 .addArgument(storm::settings::ArgumentBuilder::createStringArgument("flags", "The compiler flags.").build()).build());
-                this->addOption(storm::settings::OptionBuilder(moduleName, optimizationLevelOptionName, false, "The optimization level to use.")
-                                .addArgument(storm::settings::ArgumentBuilder::createUnsignedIntegerArgument("level", "The compiler flags.").setDefaultValueUnsignedInteger(3).build()).build());
+                this->addOption(storm::settings::OptionBuilder(moduleName, optimizationLevelOptionName, false, "Sets the optimization level.")
+                                .addArgument(storm::settings::ArgumentBuilder::createUnsignedIntegerArgument("level", "The level to use.").setDefaultValueUnsignedInteger(3).build()).build());
             }
             
             bool JitBuilderSettings::isCompilerSet() const {
diff --git a/src/storm/settings/modules/MinMaxEquationSolverSettings.cpp b/src/storm/settings/modules/MinMaxEquationSolverSettings.cpp
index 398eaaeac..de9ce1746 100644
--- a/src/storm/settings/modules/MinMaxEquationSolverSettings.cpp
+++ b/src/storm/settings/modules/MinMaxEquationSolverSettings.cpp
@@ -50,6 +50,10 @@ namespace storm {
                 STORM_LOG_THROW(false, storm::exceptions::IllegalArgumentValueException, "Unknown min/max equation solving technique '" << minMaxEquationSolvingTechnique << "'.");
             }
             
+            bool MinMaxEquationSolverSettings::isMinMaxEquationSolvingMethodSetFromDefaultValue() const {
+                return !this->getOption(solvingMethodOptionName).getArgumentByName("name").getHasBeenSet() || this->getOption(solvingMethodOptionName).getArgumentByName("name").wasSetFromDefaultValue();
+            }
+            
             bool MinMaxEquationSolverSettings::isMinMaxEquationSolvingMethodSet() const {
                 return this->getOption(solvingMethodOptionName).getHasOptionBeenSet();
             }
diff --git a/src/storm/settings/modules/MinMaxEquationSolverSettings.h b/src/storm/settings/modules/MinMaxEquationSolverSettings.h
index 35bee18ac..1ce4a9db7 100644
--- a/src/storm/settings/modules/MinMaxEquationSolverSettings.h
+++ b/src/storm/settings/modules/MinMaxEquationSolverSettings.h
@@ -27,12 +27,19 @@ namespace storm {
                 bool isMinMaxEquationSolvingMethodSet() const;
                 
                 /*!
-                 * Retrieves the selected min/max equation solving technique.
+                 * Retrieves the selected min/max equation solving method.
                  *
-                 * @return The selected min/max equation solving technique.
+                 * @return The selected min/max equation solving method.
                  */
                 storm::solver::MinMaxMethod getMinMaxEquationSolvingMethod() const;
                 
+                /*!
+                 * Retrieves whether the min/max equation solving method is set from its default value.
+                 *
+                 * @return True iff it is set from its default value.
+                 */
+                bool isMinMaxEquationSolvingMethodSetFromDefaultValue() const;
+                
                 /*!
                  * Retrieves whether the maximal iteration count has been set.
                  *
diff --git a/src/storm/settings/modules/NativeEquationSolverSettings.cpp b/src/storm/settings/modules/NativeEquationSolverSettings.cpp
index 7803939bf..4d5c8ce59 100644
--- a/src/storm/settings/modules/NativeEquationSolverSettings.cpp
+++ b/src/storm/settings/modules/NativeEquationSolverSettings.cpp
@@ -40,6 +40,10 @@ namespace storm {
                 return this->getOption(techniqueOptionName).getHasOptionBeenSet();
             }
             
+            bool NativeEquationSolverSettings::isLinearEquationSystemTechniqueSetFromDefaultValue() const {
+                return !this->getOption(techniqueOptionName).getHasOptionBeenSet() || this->getOption(techniqueOptionName).getArgumentByName("name").wasSetFromDefaultValue();
+            }
+            
             NativeEquationSolverSettings::LinearEquationMethod NativeEquationSolverSettings::getLinearEquationSystemMethod() const {
                 std::string linearEquationSystemTechniqueAsString = this->getOption(techniqueOptionName).getArgumentByName("name").getValueAsString();
                 if (linearEquationSystemTechniqueAsString == "jacobi") {
diff --git a/src/storm/settings/modules/NativeEquationSolverSettings.h b/src/storm/settings/modules/NativeEquationSolverSettings.h
index e02d0395f..2699d3f94 100644
--- a/src/storm/settings/modules/NativeEquationSolverSettings.h
+++ b/src/storm/settings/modules/NativeEquationSolverSettings.h
@@ -30,6 +30,13 @@ namespace storm {
                  */
                 bool isLinearEquationSystemTechniqueSet() const;
                 
+                /*!
+                 * Retrieves whether the linear equation system technique is set from its default value.
+                 *
+                 * @return True iff it was set from its default value.
+                 */
+                bool isLinearEquationSystemTechniqueSetFromDefaultValue() const;
+                
                 /*!
                  * Retrieves the method that is to be used for solving systems of linear equations.
                  *
diff --git a/src/storm/solver/GmmxxLinearEquationSolver.cpp b/src/storm/solver/GmmxxLinearEquationSolver.cpp
index 7503584c8..8d541bdc8 100644
--- a/src/storm/solver/GmmxxLinearEquationSolver.cpp
+++ b/src/storm/solver/GmmxxLinearEquationSolver.cpp
@@ -224,7 +224,7 @@ namespace storm {
         
         template<typename ValueType>
         void GmmxxLinearEquationSolver<ValueType>::multiply(std::vector<ValueType>& x, std::vector<ValueType> const* b, std::vector<ValueType>& result) const {
-            if(!gmmxxA) {
+            if (!gmmxxA) {
                 gmmxxA = storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix<ValueType>(*A);
             }
             if (b) {
@@ -242,18 +242,17 @@ namespace storm {
         uint_fast64_t GmmxxLinearEquationSolver<ValueType>::solveLinearEquationSystemWithJacobi(std::vector<ValueType>& x, std::vector<ValueType> const& b) const {
             
             // Get a Jacobi decomposition of the matrix A (if not already available).
-            if(!jacobiDecomposition) {
+            if (!jacobiDecomposition) {
                 std::pair<storm::storage::SparseMatrix<ValueType>, std::vector<ValueType>> nativeJacobiDecomposition = A->getJacobiDecomposition();
                 // Convert the LU matrix to gmm++'s format.
-                jacobiDecomposition = std::make_unique<std::pair<gmm::csr_matrix<ValueType>, std::vector<ValueType>>>(*storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix<ValueType>(std::move(nativeJacobiDecomposition.first)),
-                                                                                                                      std::move(nativeJacobiDecomposition.second));
+                jacobiDecomposition = std::make_unique<std::pair<gmm::csr_matrix<ValueType>, std::vector<ValueType>>>(*storm::adapters::GmmxxAdapter::toGmmxxSparseMatrix<ValueType>(std::move(nativeJacobiDecomposition.first)), std::move(nativeJacobiDecomposition.second));
             }
             gmm::csr_matrix<ValueType> const& jacobiLU = jacobiDecomposition->first;
             std::vector<ValueType> const& jacobiD = jacobiDecomposition->second;
         
             std::vector<ValueType>* currentX = &x;
             
-            if(!this->cachedRowVector) {
+            if (!this->cachedRowVector) {
                 this->cachedRowVector = std::make_unique<std::vector<ValueType>>(getMatrixRowCount());
             }
             std::vector<ValueType>* nextX = this->cachedRowVector.get();
diff --git a/src/storm/solver/IterativeMinMaxLinearEquationSolver.h b/src/storm/solver/IterativeMinMaxLinearEquationSolver.h
index 301db3049..e197f88b0 100644
--- a/src/storm/solver/IterativeMinMaxLinearEquationSolver.h
+++ b/src/storm/solver/IterativeMinMaxLinearEquationSolver.h
@@ -73,7 +73,7 @@ namespace storm {
             IterativeMinMaxLinearEquationSolverSettings<ValueType> settings;
         };
         
-                template<typename ValueType>
+        template<typename ValueType>
         class IterativeMinMaxLinearEquationSolverFactory : public StandardMinMaxLinearEquationSolverFactory<ValueType> {
         public:
             IterativeMinMaxLinearEquationSolverFactory(MinMaxMethodSelection const& method = MinMaxMethodSelection::FROMSETTINGS, bool trackScheduler = false);
diff --git a/src/storm/solver/LinearEquationSolver.h b/src/storm/solver/LinearEquationSolver.h
index b94626cd2..170d625f7 100644
--- a/src/storm/solver/LinearEquationSolver.h
+++ b/src/storm/solver/LinearEquationSolver.h
@@ -85,17 +85,17 @@ namespace storm {
             virtual void clearCache() const;
             
             /*!
-             * Sets a lower bound for the solution that can potentially used by the solver.
+             * Sets a lower bound for the solution that can potentially be used by the solver.
              */
             void setLowerBound(ValueType const& value);
 
             /*!
-             * Sets an upper bound for the solution that can potentially used by the solver.
+             * Sets an upper bound for the solution that can potentially be used by the solver.
              */
             void setUpperBound(ValueType const& value);
 
             /*!
-             * Sets bounds for the solution that can potentially used by the solver.
+             * Sets bounds for the solution that can potentially be used by the solver.
              */
             void setBounds(ValueType const& lower, ValueType const& upper);
 
diff --git a/src/storm/solver/MinMaxLinearEquationSolver.cpp b/src/storm/solver/MinMaxLinearEquationSolver.cpp
index 08c70bd58..0ce28bd2a 100644
--- a/src/storm/solver/MinMaxLinearEquationSolver.cpp
+++ b/src/storm/solver/MinMaxLinearEquationSolver.cpp
@@ -154,7 +154,19 @@ namespace storm {
         template<typename ValueType>
         void MinMaxLinearEquationSolverFactory<ValueType>::setMinMaxMethod(MinMaxMethodSelection const& newMethod) {
             if (newMethod == MinMaxMethodSelection::FROMSETTINGS) {
-                setMinMaxMethod(storm::settings::getModule<storm::settings::modules::MinMaxEquationSolverSettings>().getMinMaxEquationSolvingMethod());
+                bool wasSet = false;
+                auto const& minMaxSettings = storm::settings::getModule<storm::settings::modules::MinMaxEquationSolverSettings>();
+                if (std::is_same<ValueType, storm::RationalNumber>::value) {
+                    if (minMaxSettings.isMinMaxEquationSolvingMethodSetFromDefaultValue() && minMaxSettings.getMinMaxEquationSolvingMethod() != MinMaxMethod::PolicyIteration) {
+                        STORM_LOG_WARN("Selecting policy iteration as the solution method to guarantee exact results. If you want to override this, please explicitly specify a different method.");
+                        this->setMinMaxMethod(MinMaxMethod::PolicyIteration);
+                        wasSet = true;
+                    }
+                }
+                
+                if (!wasSet) {
+                    setMinMaxMethod(minMaxSettings.getMinMaxEquationSolvingMethod());
+                }
             } else {
                 setMinMaxMethod(convert(newMethod));
             }
@@ -162,6 +174,7 @@ namespace storm {
         
         template<typename ValueType>
         void MinMaxLinearEquationSolverFactory<ValueType>::setMinMaxMethod(MinMaxMethod const& newMethod) {
+            STORM_LOG_WARN_COND(!(std::is_same<ValueType, storm::RationalNumber>::value) || newMethod == MinMaxMethod::PolicyIteration, "The selected solution method does not guarantee exact results. Consider using policy iteration.");
             method = newMethod;
         }
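A sketch of the effect from a caller's perspective, assuming the IterativeMinMaxLinearEquationSolverFactory declared earlier in this patch forwards its method selection to setMinMaxMethod; the comments describe the intended behaviour, not a tested run.

    // With rational arithmetic and no method chosen explicitly, FROMSETTINGS now
    // resolves to policy iteration (and logs the warning above).
    storm::solver::IterativeMinMaxLinearEquationSolverFactory<storm::RationalNumber> factory(
        storm::solver::MinMaxMethodSelection::FROMSETTINGS);

    // Explicitly requesting value iteration still works, but now triggers the
    // "does not guarantee exact results" warning added above.
    factory.setMinMaxMethod(storm::solver::MinMaxMethod::ValueIteration);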
         
diff --git a/src/storm/solver/SymbolicEliminationLinearEquationSolver.cpp b/src/storm/solver/SymbolicEliminationLinearEquationSolver.cpp
index 43fb7f0a2..bcbc41085 100644
--- a/src/storm/solver/SymbolicEliminationLinearEquationSolver.cpp
+++ b/src/storm/solver/SymbolicEliminationLinearEquationSolver.cpp
@@ -29,11 +29,7 @@ namespace storm {
                     ++counter;
                 }
                 
-                if (metaVariable.getType() == storm::dd::MetaVariableType::Bool) {
-                    newMetaVariables = ddManager.addMetaVariable(newMetaVariableName + std::to_string(counter), 3);
-                } else {
-                    newMetaVariables = ddManager.addMetaVariable(newMetaVariableName + std::to_string(counter), metaVariable.getLow(), metaVariable.getHigh(), 3);
-                }
+                newMetaVariables = ddManager.cloneVariable(metaVariablePair.first, newMetaVariableName + std::to_string(counter), 3);
                 
                 newRowVariables.insert(newMetaVariables[0]);
                 newColumnVariables.insert(newMetaVariables[1]);
diff --git a/src/storm/solver/SymbolicLinearEquationSolver.cpp b/src/storm/solver/SymbolicLinearEquationSolver.cpp
index 0a80a3afc..75fe87254 100644
--- a/src/storm/solver/SymbolicLinearEquationSolver.cpp
+++ b/src/storm/solver/SymbolicLinearEquationSolver.cpp
@@ -59,6 +59,31 @@ namespace storm {
             }
         }
         
+        template<storm::dd::DdType DdType>
+        std::unique_ptr<storm::solver::SymbolicLinearEquationSolver<DdType, storm::RationalNumber>> GeneralSymbolicLinearEquationSolverFactory<DdType, storm::RationalNumber>::create(storm::dd::Add<DdType, storm::RationalNumber> const& A, storm::dd::Bdd<DdType> const& allRows, std::set<storm::expressions::Variable> const& rowMetaVariables, std::set<storm::expressions::Variable> const& columnMetaVariables, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs) const {
+
+            auto const& coreSettings = storm::settings::getModule<storm::settings::modules::CoreSettings>();
+            storm::solver::EquationSolverType equationSolver = coreSettings.getEquationSolver();
+            if (coreSettings.isEquationSolverSetFromDefaultValue() && equationSolver != storm::solver::EquationSolverType::Elimination) {
+                STORM_LOG_WARN("Selecting the elimination solver to guarantee exact results. If you want to override this, please explicitly specify a different equation solver.");
+                equationSolver = storm::solver::EquationSolverType::Elimination;
+            }
+            
+            if (equationSolver != storm::solver::EquationSolverType::Elimination) {
+                STORM_LOG_WARN("The chosen equation solver does not guarantee precise results despite using exact arithmetic. Consider using the elimination solver instead.");
+            }
+
+            switch (equationSolver) {
+                case storm::solver::EquationSolverType::Elimination: return std::make_unique<storm::solver::SymbolicEliminationLinearEquationSolver<DdType, storm::RationalNumber>>(A, allRows, rowMetaVariables, columnMetaVariables, rowColumnMetaVariablePairs);
+                    break;
+                case storm::solver::EquationSolverType::Native:
+                    return std::make_unique<storm::solver::SymbolicNativeLinearEquationSolver<DdType, storm::RationalNumber>>(A, allRows, rowMetaVariables, columnMetaVariables, rowColumnMetaVariablePairs);
+                    break;
+                default:
+                    STORM_LOG_WARN("The selected equation solver is not available in the dd engine. Falling back to the elimination solver.");
+                    return std::make_unique<storm::solver::SymbolicEliminationLinearEquationSolver<DdType, storm::RationalNumber>>(A, allRows, rowMetaVariables, columnMetaVariables, rowColumnMetaVariablePairs);
+            }
+        }
         
         template<storm::dd::DdType DdType>
         std::unique_ptr<storm::solver::SymbolicLinearEquationSolver<DdType, storm::RationalFunction>> GeneralSymbolicLinearEquationSolverFactory<DdType, storm::RationalFunction>::create(storm::dd::Add<DdType, storm::RationalFunction> const& A, storm::dd::Bdd<DdType> const& allRows, std::set<storm::expressions::Variable> const& rowMetaVariables, std::set<storm::expressions::Variable> const& columnMetaVariables, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs) const {
diff --git a/src/storm/solver/SymbolicLinearEquationSolver.h b/src/storm/solver/SymbolicLinearEquationSolver.h
index e3f68ee19..15b503175 100644
--- a/src/storm/solver/SymbolicLinearEquationSolver.h
+++ b/src/storm/solver/SymbolicLinearEquationSolver.h
@@ -101,6 +101,12 @@ namespace storm {
         public:
             virtual std::unique_ptr<storm::solver::SymbolicLinearEquationSolver<DdType, ValueType>> create(storm::dd::Add<DdType, ValueType> const& A, storm::dd::Bdd<DdType> const& allRows, std::set<storm::expressions::Variable> const& rowMetaVariables, std::set<storm::expressions::Variable> const& columnMetaVariables, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs) const;
         };
+
+        template<storm::dd::DdType DdType>
+        class GeneralSymbolicLinearEquationSolverFactory<DdType, storm::RationalNumber> : public SymbolicLinearEquationSolverFactory<DdType, storm::RationalNumber> {
+        public:
+            virtual std::unique_ptr<storm::solver::SymbolicLinearEquationSolver<DdType, storm::RationalNumber>> create(storm::dd::Add<DdType, storm::RationalNumber> const& A, storm::dd::Bdd<DdType> const& allRows, std::set<storm::expressions::Variable> const& rowMetaVariables, std::set<storm::expressions::Variable> const& columnMetaVariables, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs) const;
+        };
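A minimal usage sketch of the new specialization, assuming a Sylvan-based symbolic model provides the matrix A, the row BDD allRows and the meta-variable sets; these names are placeholders, not part of this patch.

    storm::solver::GeneralSymbolicLinearEquationSolverFactory<storm::dd::DdType::Sylvan, storm::RationalNumber> factory;
    // With default settings this yields the elimination solver, the only backend
    // guaranteeing exact results (see the warning logic in the .cpp above).
    auto solver = factory.create(A, allRows, rowMetaVariables, columnMetaVariables, rowColumnMetaVariablePairs);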
         
         template<storm::dd::DdType DdType>
         class GeneralSymbolicLinearEquationSolverFactory<DdType, storm::RationalFunction> : public SymbolicLinearEquationSolverFactory<DdType, storm::RationalFunction> {
diff --git a/src/storm/solver/SymbolicNativeLinearEquationSolver.cpp b/src/storm/solver/SymbolicNativeLinearEquationSolver.cpp
index 000e25926..2d0e82a1d 100644
--- a/src/storm/solver/SymbolicNativeLinearEquationSolver.cpp
+++ b/src/storm/solver/SymbolicNativeLinearEquationSolver.cpp
@@ -69,7 +69,7 @@ namespace storm {
             storm::dd::Add<DdType, ValueType> lu = diagonal.ite(manager.template getAddZero<ValueType>(), this->A);
             storm::dd::Add<DdType, ValueType> diagonalAdd = diagonal.template toAdd<ValueType>();
             storm::dd::Add<DdType, ValueType> diag = diagonalAdd.multiplyMatrix(this->A, this->columnMetaVariables);
-            
+
             storm::dd::Add<DdType, ValueType> scaledLu = lu / diag;
             storm::dd::Add<DdType, ValueType> scaledB = b / diag;
             
diff --git a/src/storm/solver/TopologicalMinMaxLinearEquationSolver.cpp b/src/storm/solver/TopologicalMinMaxLinearEquationSolver.cpp
index e320f3497..f659620e9 100644
--- a/src/storm/solver/TopologicalMinMaxLinearEquationSolver.cpp
+++ b/src/storm/solver/TopologicalMinMaxLinearEquationSolver.cpp
@@ -453,7 +453,6 @@ namespace storm {
                 // Reduce the vector x' by applying min/max for all non-deterministic choices as given by the topmost
                 // element of the min/max operator stack.
                 storm::utility::vector::reduceVectorMinOrMax(dir, *multiplyResult, x, this->A.getRowGroupIndices());
-                
             }
         }
 
diff --git a/src/storm/storage/SparseMatrix.cpp b/src/storm/storage/SparseMatrix.cpp
index 0394ce6ca..c0ef84d19 100644
--- a/src/storm/storage/SparseMatrix.cpp
+++ b/src/storm/storage/SparseMatrix.cpp
@@ -134,41 +134,46 @@ namespace storm {
             // the insertion.
             bool fixCurrentRow = row == lastRow && column < lastColumn;
             
-            // If we switched to another row, we have to adjust the missing entries in the row indices vector.
-            if (row != lastRow) {
-                // Otherwise, we need to push the correct values to the vectors, which might trigger reallocations.
-                for (index_type i = lastRow + 1; i <= row; ++i) {
-                    rowIndications.push_back(currentEntryCount);
+            // If the element is in the same row and column as the previous entry, we add them up.
+            if (row == lastRow && column == lastColumn && !columnsAndValues.empty()) {
+                columnsAndValues.back().setValue(columnsAndValues.back().getValue() + value);
+            } else {
+                // If we switched to another row, we have to adjust the missing entries in the row indices vector.
+                if (row != lastRow) {
+                    // Otherwise, we need to push the correct values to the vectors, which might trigger reallocations.
+                    for (index_type i = lastRow + 1; i <= row; ++i) {
+                        rowIndications.push_back(currentEntryCount);
+                    }
+                    
+                    lastRow = row;
                 }
                 
-                lastRow = row;
-            }
-            
-            lastColumn = column;
-            
-            // Finally, set the element and increase the current size.
-            columnsAndValues.emplace_back(column, value);
-            highestColumn = std::max(highestColumn, column);
-            ++currentEntryCount;
-            
-            // If we need to fix the row, do so now.
-            if (fixCurrentRow) {
-                // First, we sort according to columns.
-                std::sort(columnsAndValues.begin() + rowIndications.back(), columnsAndValues.end(), [] (storm::storage::MatrixEntry<index_type, ValueType> const& a, storm::storage::MatrixEntry<index_type, ValueType> const& b) {
-                    return a.getColumn() < b.getColumn();
-                });
+                lastColumn = column;
                 
-                // Then, we eliminate possible duplicate entries.
-                auto it = std::unique(columnsAndValues.begin() + rowIndications.back(), columnsAndValues.end(), [] (storm::storage::MatrixEntry<index_type, ValueType> const& a, storm::storage::MatrixEntry<index_type, ValueType> const& b) {
-                    return a.getColumn() == b.getColumn();
-                });
+                // Finally, set the element and increase the current size.
+                columnsAndValues.emplace_back(column, value);
+                highestColumn = std::max(highestColumn, column);
+                ++currentEntryCount;
                 
-                // Finally, remove the superfluous elements.
-                std::size_t elementsToRemove = std::distance(it, columnsAndValues.end());
-                if (elementsToRemove > 0) {
-                    STORM_LOG_WARN("Unordered insertion into matrix builder caused duplicate entries.");
-                    currentEntryCount -= elementsToRemove;
-                    columnsAndValues.resize(columnsAndValues.size() - elementsToRemove);
+                // If we need to fix the row, do so now.
+                if (fixCurrentRow) {
+                    // First, we sort according to columns.
+                    std::sort(columnsAndValues.begin() + rowIndications.back(), columnsAndValues.end(), [] (storm::storage::MatrixEntry<index_type, ValueType> const& a, storm::storage::MatrixEntry<index_type, ValueType> const& b) {
+                        return a.getColumn() < b.getColumn();
+                    });
+                    
+                    // Then, we eliminate possible duplicate entries.
+                    auto it = std::unique(columnsAndValues.begin() + rowIndications.back(), columnsAndValues.end(), [] (storm::storage::MatrixEntry<index_type, ValueType> const& a, storm::storage::MatrixEntry<index_type, ValueType> const& b) {
+                        return a.getColumn() == b.getColumn();
+                    });
+                    
+                    // Finally, remove the superfluous elements.
+                    std::size_t elementsToRemove = std::distance(it, columnsAndValues.end());
+                    if (elementsToRemove > 0) {
+                        STORM_LOG_WARN("Unordered insertion into matrix builder caused duplicate entries.");
+                        currentEntryCount -= elementsToRemove;
+                        columnsAndValues.resize(columnsAndValues.size() - elementsToRemove);
+                    }
                 }
             }
             
@@ -188,13 +193,15 @@ namespace storm {
             ++currentRowGroup;
             
             // Close all rows from the most recent one to the starting row.
-            for (index_type i = lastRow + 1; i <= startingRow; ++i) {
+            for (index_type i = lastRow + 1; i < startingRow; ++i) {
                 rowIndications.push_back(currentEntryCount);
             }
             
-            // Reset the most recently seen row/column to allow for proper insertion of the following elements.
-            lastRow = startingRow;
-            lastColumn = 0;
+            if (lastRow + 1 < startingRow) {
+                // Reset the most recently seen row/column to allow for proper insertion of the following elements.
+                lastRow = startingRow - 1;
+                lastColumn = 0;
+            }
         }
         
         template<typename ValueType>
@@ -203,10 +210,19 @@ namespace storm {
             bool hasEntries = currentEntryCount != 0;
             
             uint_fast64_t rowCount = hasEntries ? lastRow + 1 : 0;
+
+            // If the last row group was empty, we need to add one more to the row count, because otherwise this empty row is not counted.
+            if (hasCustomRowGrouping) {
+                if (lastRow < rowGroupIndices->back()) {
+                    ++rowCount;
+                }
+            }
+            
             if (initialRowCountSet && forceInitialDimensions) {
                 STORM_LOG_THROW(rowCount <= initialRowCount, storm::exceptions::InvalidStateException, "Expected not more than " << initialRowCount << " rows, but got " << rowCount << ".");
                 rowCount = std::max(rowCount, initialRowCount);
             }
+            
             rowCount = std::max(rowCount, overriddenRowCount);
             
             // If the current row count was overridden, we may need to add empty rows.
@@ -223,7 +239,7 @@ namespace storm {
             // as now the indices of row i are always between rowIndications[i] and rowIndications[i + 1], also for
             // the first and last row.
             rowIndications.push_back(currentEntryCount);
-            assert(rowCount == rowIndications.size() - 1);
+            STORM_LOG_ASSERT(rowCount == rowIndications.size() - 1, "Wrong sizes of vectors.");
             uint_fast64_t columnCount = hasEntries ? highestColumn + 1 : 0;
             if (initialColumnCountSet && forceInitialDimensions) {
                 STORM_LOG_THROW(columnCount <= initialColumnCount, storm::exceptions::InvalidStateException, "Expected not more than " << initialColumnCount << " columns, but got " << columnCount << ".");
@@ -1615,7 +1631,8 @@ namespace storm {
         bool SparseMatrix<ValueType>::isProbabilistic() const {
             storm::utility::ConstantsComparator<ValueType> comparator;
             for (index_type row = 0; row < this->rowCount; ++row) {
-                if(!comparator.isOne(getRowSum(row))) {
+                auto rowSum = getRowSum(row);
+                if (!comparator.isOne(rowSum)) {
                     return false;
                 }
             }
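For illustration, a small sketch of the new builder behaviour: adding two entries for the same cell now sums their values instead of producing a duplicate entry. The setup is minimal and only uses the public SparseMatrixBuilder interface.

    storm::storage::SparseMatrixBuilder<double> builder;
    builder.addNextValue(0, 0, 0.25);
    builder.addNextValue(0, 0, 0.25);   // same row/column: values are summed
    builder.addNextValue(0, 1, 0.5);
    storm::storage::SparseMatrix<double> matrix = builder.build();
    // matrix has two entries in row 0: (0, 0.5) and (1, 0.5), so isProbabilistic() holds.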
diff --git a/src/storm/storage/bisimulation/BisimulationDecomposition.h b/src/storm/storage/bisimulation/BisimulationDecomposition.h
index 49089063a..6aa56345c 100644
--- a/src/storm/storage/bisimulation/BisimulationDecomposition.h
+++ b/src/storm/storage/bisimulation/BisimulationDecomposition.h
@@ -7,6 +7,7 @@
 #include "storm/storage/Decomposition.h"
 #include "storm/storage/StateBlock.h"
 #include "storm/storage/bisimulation/Partition.h"
+#include "storm/storage/bisimulation/BisimulationType.h"
 #include "storm/solver/OptimizationDirection.h"
 
 #include "storm/logic/Formulas.h"
@@ -18,15 +19,12 @@ namespace storm {
     namespace utility {
         template <typename ValueType> class ConstantsComparator;
     }
-
+    
     namespace logic {
         class Formula;
     }
     
     namespace storage {
-
-        enum class BisimulationType { Strong, Weak };
-        enum class BisimulationTypeChoice { Strong, Weak, FromSettings };
         
         inline BisimulationType resolveBisimulationTypeChoice(BisimulationTypeChoice c) {
             switch(c) {
@@ -40,8 +38,8 @@ namespace storm {
                     } else {
                         return BisimulationType::Strong;
                     }
-                    
             }
+            return BisimulationType::Strong;
         }
         
         /*!
@@ -89,7 +87,7 @@ namespace storm {
                 
                 /**
                  * Sets the bisimulation type. If the bisimulation type is set to weak,
-                 * we also change the bounded flag (as bounded properties are not preserved under 
+                 * we also change the bounded flag (as bounded properties are not preserved under
                  * weak bisimulation).
                  */
                 void setType(BisimulationType t) {
@@ -136,7 +134,7 @@ namespace storm {
                 
             private:
                 boost::optional<OptimizationDirection> optimalityType;
-
+                
                 /// A flag that indicates whether or not the state-rewards of the model are to be respected (and should
                 /// be kept in the quotient model, if one is built).
                 bool keepRewards;
@@ -155,7 +153,7 @@ namespace storm {
                  * @param formula The only formula to check.
                  */
                 void preserveSingleFormula(ModelType const& model, storm::logic::Formula const& formula);
-
+                
                 /*!
                  * Adds the given expressions and labels to the set of respected atomic propositions.
                  *
@@ -189,7 +187,7 @@ namespace storm {
              * @return The quotient model.
              */
             std::shared_ptr<ModelType> getQuotient() const;
-
+            
             /*!
              * Computes the decomposition of the model into bisimulation equivalence classes. If requested, a quotient
              * model is built.
@@ -263,7 +261,7 @@ namespace storm {
             
             // The model to decompose.
             ModelType const& model;
-
+            
             // The backward transitions of the model.
             storm::storage::SparseMatrix<ValueType> backwardTransitions;
             
diff --git a/src/storm/storage/bisimulation/BisimulationType.h b/src/storm/storage/bisimulation/BisimulationType.h
new file mode 100644
index 000000000..fc9dba9b2
--- /dev/null
+++ b/src/storm/storage/bisimulation/BisimulationType.h
@@ -0,0 +1,10 @@
+#pragma once
+
+namespace storm {
+    namespace storage {
+        
+        enum class BisimulationType { Strong, Weak };
+        enum class BisimulationTypeChoice { Strong, Weak, FromSettings };
+
+    }
+}
diff --git a/src/storm/storage/dd/Add.cpp b/src/storm/storage/dd/Add.cpp
index ea8a4407d..0a1d25553 100644
--- a/src/storm/storage/dd/Add.cpp
+++ b/src/storm/storage/dd/Add.cpp
@@ -154,31 +154,31 @@ namespace storm {
         template<DdType LibraryType, typename ValueType>
         Add<LibraryType, ValueType> Add<LibraryType, ValueType>::sumAbstract(std::set<storm::expressions::Variable> const& metaVariables) const {
             Bdd<LibraryType> cube = Bdd<LibraryType>::getCube(this->getDdManager(), metaVariables);
-            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.sumAbstract(cube), Dd<LibraryType>::subtractMetaVariables(*this, cube));
+            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.sumAbstract(cube.getInternalBdd()), Dd<LibraryType>::subtractMetaVariables(*this, cube));
         }
 
         template<DdType LibraryType, typename ValueType>
         Add<LibraryType, ValueType> Add<LibraryType, ValueType>::minAbstract(std::set<storm::expressions::Variable> const& metaVariables) const {
             Bdd<LibraryType> cube = Bdd<LibraryType>::getCube(this->getDdManager(), metaVariables);
-            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.minAbstract(cube), Dd<LibraryType>::subtractMetaVariables(*this, cube));
+            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.minAbstract(cube.getInternalBdd()), Dd<LibraryType>::subtractMetaVariables(*this, cube));
         }
 		
 		template<DdType LibraryType, typename ValueType>
         Bdd<LibraryType> Add<LibraryType, ValueType>::minAbstractRepresentative(std::set<storm::expressions::Variable> const& metaVariables) const {
             Bdd<LibraryType> cube = Bdd<LibraryType>::getCube(this->getDdManager(), metaVariables);
-            return Bdd<LibraryType>(this->getDdManager(), internalAdd.minAbstractRepresentative(cube), this->getContainedMetaVariables());
+            return Bdd<LibraryType>(this->getDdManager(), internalAdd.minAbstractRepresentative(cube.getInternalBdd()), this->getContainedMetaVariables());
         }
         
         template<DdType LibraryType, typename ValueType>
         Add<LibraryType, ValueType> Add<LibraryType, ValueType>::maxAbstract(std::set<storm::expressions::Variable> const& metaVariables) const {
             Bdd<LibraryType> cube = Bdd<LibraryType>::getCube(this->getDdManager(), metaVariables);
-            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.maxAbstract(cube), Dd<LibraryType>::subtractMetaVariables(*this, cube));
+            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.maxAbstract(cube.getInternalBdd()), Dd<LibraryType>::subtractMetaVariables(*this, cube));
         }
 		
 		template<DdType LibraryType, typename ValueType>
         Bdd<LibraryType> Add<LibraryType, ValueType>::maxAbstractRepresentative(std::set<storm::expressions::Variable> const& metaVariables) const {
             Bdd<LibraryType> cube = Bdd<LibraryType>::getCube(this->getDdManager(), metaVariables);
-            return Bdd<LibraryType>(this->getDdManager(), internalAdd.maxAbstractRepresentative(cube), this->getContainedMetaVariables());
+            return Bdd<LibraryType>(this->getDdManager(), internalAdd.maxAbstractRepresentative(cube.getInternalBdd()), this->getContainedMetaVariables());
         }
 
         template<DdType LibraryType, typename ValueType>
@@ -186,9 +186,37 @@ namespace storm {
             return internalAdd.equalModuloPrecision(other, precision, relative);
         }
         
+        template<DdType LibraryType, typename ValueType>
+        Add<LibraryType, ValueType> Add<LibraryType, ValueType>::renameVariables(std::set<storm::expressions::Variable> const& from, std::set<storm::expressions::Variable> const& to) const {
+            std::vector<InternalBdd<LibraryType>> fromBdds;
+            std::vector<InternalBdd<LibraryType>> toBdds;
+            
+            for (auto const& metaVariable : from) {
+                STORM_LOG_THROW(this->containsMetaVariable(metaVariable), storm::exceptions::InvalidOperationException, "Cannot rename variable '" << metaVariable.getName() << "' that is not present.");
+                DdMetaVariable<LibraryType> const& ddMetaVariable = this->getDdManager().getMetaVariable(metaVariable);
+                for (auto const& ddVariable : ddMetaVariable.getDdVariables()) {
+                    fromBdds.push_back(ddVariable.getInternalBdd());
+                }
+            }
+            for (auto const& metaVariable : to) {
+                STORM_LOG_THROW(!this->containsMetaVariable(metaVariable), storm::exceptions::InvalidOperationException, "Cannot rename to variable '" << metaVariable.getName() << "' that is already present.");
+                DdMetaVariable<LibraryType> const& ddMetaVariable = this->getDdManager().getMetaVariable(metaVariable);
+                for (auto const& ddVariable : ddMetaVariable.getDdVariables()) {
+                    toBdds.push_back(ddVariable.getInternalBdd());
+                }
+            }
+            
+            std::set<storm::expressions::Variable> newContainedMetaVariables = to;
+            std::set_difference(this->getContainedMetaVariables().begin(), this->getContainedMetaVariables().end(), from.begin(), from.end(), std::inserter(newContainedMetaVariables, newContainedMetaVariables.begin()));
+            
+            STORM_LOG_THROW(fromBdds.size() == toBdds.size(), storm::exceptions::InvalidArgumentException, "Unable to rename mismatching meta variables.");
+            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.swapVariables(fromBdds, toBdds), newContainedMetaVariables);
+        }
+        
         template<DdType LibraryType, typename ValueType>
         Add<LibraryType, ValueType> Add<LibraryType, ValueType>::swapVariables(std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& metaVariablePairs) const {
             std::set<storm::expressions::Variable> newContainedMetaVariables;
+            std::set<storm::expressions::Variable> deletedMetaVariables;
             std::vector<InternalBdd<LibraryType>> from;
             std::vector<InternalBdd<LibraryType>> to;
             for (auto const& metaVariablePair : metaVariablePairs) {
@@ -197,26 +225,40 @@ namespace storm {
                 
                 // Keep track of the contained meta variables in the DD.
                 if (this->containsMetaVariable(metaVariablePair.first)) {
-                    newContainedMetaVariables.insert(metaVariablePair.second);
-                }
-                if (this->containsMetaVariable(metaVariablePair.second)) {
-                    newContainedMetaVariables.insert(metaVariablePair.first);
+                    if (this->containsMetaVariable(metaVariablePair.second)) {
+                        // Nothing to do here.
+                    } else {
+                        newContainedMetaVariables.insert(metaVariablePair.second);
+                        deletedMetaVariables.insert(metaVariablePair.first);
+                    }
+                } else {
+                    if (!this->containsMetaVariable(metaVariablePair.second)) {
+                        // Nothing to do here.
+                    } else {
+                        newContainedMetaVariables.insert(metaVariablePair.first);
+                        deletedMetaVariables.insert(metaVariablePair.second);
+                    }
                 }
-                
                 for (auto const& ddVariable : variable1.getDdVariables()) {
-                    from.push_back(ddVariable);
+                    from.push_back(ddVariable.getInternalBdd());
                 }
                 for (auto const& ddVariable : variable2.getDdVariables()) {
-                    to.push_back(ddVariable);
+                    to.push_back(ddVariable.getInternalBdd());
                 }
             }
+            
+            std::set<storm::expressions::Variable> tmp;
+            std::set_difference(this->getContainedMetaVariables().begin(), this->getContainedMetaVariables().end(), deletedMetaVariables.begin(), deletedMetaVariables.end(), std::inserter(tmp, tmp.begin()));
+            std::set<storm::expressions::Variable> containedMetaVariables;
+            std::set_union(tmp.begin(), tmp.end(), newContainedMetaVariables.begin(), newContainedMetaVariables.end(), std::inserter(containedMetaVariables, containedMetaVariables.begin()));
             STORM_LOG_THROW(from.size() == to.size(), storm::exceptions::InvalidArgumentException, "Unable to swap mismatching meta variables.");
-            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.swapVariables(from, to), newContainedMetaVariables);
+            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.swapVariables(from, to), containedMetaVariables);
         }
         
         template<DdType LibraryType, typename ValueType>
         Add<LibraryType, ValueType> Add<LibraryType, ValueType>::permuteVariables(std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& metaVariablePairs) const {
             std::set<storm::expressions::Variable> newContainedMetaVariables;
+            std::set<storm::expressions::Variable> deletedMetaVariables;
             std::vector<InternalBdd<LibraryType>> from;
             std::vector<InternalBdd<LibraryType>> to;
             for (auto const& metaVariablePair : metaVariablePairs) {
@@ -225,27 +267,33 @@ namespace storm {
                 
                 // Keep track of the contained meta variables in the DD.
                 if (this->containsMetaVariable(metaVariablePair.first)) {
+                    deletedMetaVariables.insert(metaVariablePair.first);
                     newContainedMetaVariables.insert(metaVariablePair.second);
                 }
                 
                 for (auto const& ddVariable : variable1.getDdVariables()) {
-                    from.push_back(ddVariable);
+                    from.push_back(ddVariable.getInternalBdd());
                 }
                 for (auto const& ddVariable : variable2.getDdVariables()) {
-                    to.push_back(ddVariable);
+                    to.push_back(ddVariable.getInternalBdd());
                 }
             }
+            
+            std::set<storm::expressions::Variable> tmp;
+            std::set_difference(this->getContainedMetaVariables().begin(), this->getContainedMetaVariables().end(), deletedMetaVariables.begin(), deletedMetaVariables.end(), std::inserter(tmp, tmp.begin()));
+            std::set<storm::expressions::Variable> containedMetaVariables;
+            std::set_union(tmp.begin(), tmp.end(), newContainedMetaVariables.begin(), newContainedMetaVariables.end(), std::inserter(containedMetaVariables, containedMetaVariables.begin()));
             STORM_LOG_THROW(from.size() == to.size(), storm::exceptions::InvalidArgumentException, "Unable to swap mismatching meta variables.");
-            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.permuteVariables(from, to), newContainedMetaVariables);
+            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.permuteVariables(from, to), containedMetaVariables);
         }
         
         template<DdType LibraryType, typename ValueType>
         Add<LibraryType, ValueType> Add<LibraryType, ValueType>::multiplyMatrix(Add<LibraryType, ValueType> const& otherMatrix, std::set<storm::expressions::Variable> const& summationMetaVariables) const {
-            // Create the CUDD summation variables.
+            // Create the summation variables.
             std::vector<InternalBdd<LibraryType>> summationDdVariables;
             for (auto const& metaVariable : summationMetaVariables) {
                 for (auto const& ddVariable : this->getDdManager().getMetaVariable(metaVariable).getDdVariables()) {
-                    summationDdVariables.push_back(ddVariable);
+                    summationDdVariables.push_back(ddVariable.getInternalBdd());
                 }
             }
             
@@ -255,6 +303,23 @@ namespace storm {
             
             return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.multiplyMatrix(otherMatrix, summationDdVariables), containedMetaVariables);
         }
+        
+        template<DdType LibraryType, typename ValueType>
+        Add<LibraryType, ValueType> Add<LibraryType, ValueType>::multiplyMatrix(Bdd<LibraryType> const& otherMatrix, std::set<storm::expressions::Variable> const& summationMetaVariables) const {
+            // Create the summation variables.
+            std::vector<InternalBdd<LibraryType>> summationDdVariables;
+            for (auto const& metaVariable : summationMetaVariables) {
+                for (auto const& ddVariable : this->getDdManager().getMetaVariable(metaVariable).getDdVariables()) {
+                    summationDdVariables.push_back(ddVariable.getInternalBdd());
+                }
+            }
+            
+            std::set<storm::expressions::Variable> unionOfMetaVariables = Dd<LibraryType>::joinMetaVariables(*this, otherMatrix);
+            std::set<storm::expressions::Variable> containedMetaVariables;
+            std::set_difference(unionOfMetaVariables.begin(), unionOfMetaVariables.end(), summationMetaVariables.begin(), summationMetaVariables.end(), std::inserter(containedMetaVariables, containedMetaVariables.begin()));
+            
+            return Add<LibraryType, ValueType>(this->getDdManager(), internalAdd.multiplyMatrix(otherMatrix.getInternalBdd(), summationDdVariables), containedMetaVariables);
+        }
 
         template<DdType LibraryType, typename ValueType>
         Bdd<LibraryType> Add<LibraryType, ValueType>::greater(ValueType const& value) const {
@@ -766,8 +831,8 @@ namespace storm {
         }
 
         template<DdType LibraryType, typename ValueType>
-        void Add<LibraryType, ValueType>::exportToDot(std::string const& filename) const {
-            internalAdd.exportToDot(filename, this->getDdManager().getDdVariableNames());
+        void Add<LibraryType, ValueType>::exportToDot(std::string const& filename, bool showVariablesIfPossible) const {
+            internalAdd.exportToDot(filename, this->getDdManager().getDdVariableNames(), showVariablesIfPossible);
         }
         
         template<DdType LibraryType, typename ValueType>
@@ -778,7 +843,7 @@ namespace storm {
                 numberOfDdVariables += ddMetaVariable.getNumberOfDdVariables();
             }
             
-            return internalAdd.begin(this->getDdManager(), Bdd<LibraryType>::getCube(this->getDdManager(), this->getContainedMetaVariables()), numberOfDdVariables, this->getContainedMetaVariables(), enumerateDontCareMetaVariables);
+            return internalAdd.begin(this->getDdManager(), Bdd<LibraryType>::getCube(this->getDdManager(), this->getContainedMetaVariables()).getInternalBdd(), numberOfDdVariables, this->getContainedMetaVariables(), enumerateDontCareMetaVariables);
         }
         
         template<DdType LibraryType, typename ValueType>
@@ -811,6 +876,16 @@ namespace storm {
         Odd Add<LibraryType, ValueType>::createOdd() const {
             return internalAdd.createOdd(this->getSortedVariableIndices());
         }
+        
+        template<DdType LibraryType, typename ValueType>
+        InternalAdd<LibraryType, ValueType> const& Add<LibraryType, ValueType>::getInternalAdd() const {
+            return internalAdd;
+        }
+        
+        template<DdType LibraryType, typename ValueType>
+        InternalDdManager<LibraryType> const& Add<LibraryType, ValueType>::getInternalDdManager() const {
+            return internalAdd.getInternalDdManager();
+        }
 
         template<DdType LibraryType, typename ValueType>
         Add<LibraryType, ValueType>::operator InternalAdd<LibraryType, ValueType>() const {
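
A hedged usage sketch for the new Add::renameVariables (illustration only, not part of the patch; the manager setup and meta variable names are placeholders). Renaming requires that the ADD contains every 'from' meta variable, contains none of the 'to' meta variables, and that both sets comprise the same total number of DD variables:

    auto manager = std::make_shared<storm::dd::DdManager<storm::dd::DdType::Sylvan>>();
    // addMetaVariable(name, low, high) returns the (unprimed, primed) variable pair.
    auto x = manager->addMetaVariable("x", 0, 7);

    // Identity ADD over the unprimed copy: maps each value v of x to v.
    storm::dd::Add<storm::dd::DdType::Sylvan, double> id = manager->getIdentity<double>(x.first);

    // Move the function from the unprimed to the primed copy. Violating the
    // containment requirements above throws an InvalidOperationException.
    auto idOverPrimed = id.renameVariables({x.first}, {x.second});
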
diff --git a/src/storm/storage/dd/Add.h b/src/storm/storage/dd/Add.h
index 092c7e7ba..ebd0cb9b3 100644
--- a/src/storm/storage/dd/Add.h
+++ b/src/storm/storage/dd/Add.h
@@ -25,6 +25,11 @@ namespace storm {
         template<DdType LibraryType, typename ValueType>
         class AddIterator;
         
+        namespace bisimulation {
+            template<DdType LibraryType, typename ValueType>
+            class InternalSignatureRefiner;
+        }
+        
         template<DdType LibraryType, typename ValueType = double>
         class Add : public Dd<LibraryType> {
         public:
@@ -33,7 +38,9 @@ namespace storm {
             
             template<DdType LibraryTypePrime, typename ValueTypePrime>
             friend class Add;
-            
+
+            friend class bisimulation::InternalSignatureRefiner<LibraryType, ValueType>;
+
             // Instantiate all copy/move constructors/assignments with the default implementation.
             Add() = default;
             Add(Add<LibraryType, ValueType> const& other) = default;
@@ -303,6 +310,17 @@ namespace storm {
              * values.
              */
             bool equalModuloPrecision(Add<LibraryType, ValueType> const& other, ValueType const& precision, bool relative = true) const;
+
+            /*!
+             * Renames the given meta variables in the ADD. The number of underlying DD variables of both meta variable
+             * sets must agree.
+             *
+             * @param from The meta variables to be renamed. The current ADD needs to contain all these meta variables.
+             * @param to The meta variables that are the target of the renaming process. The current ADD must not contain
+             * any of these meta variables.
+             * @return The resulting ADD.
+             */
+            Add<LibraryType, ValueType> renameVariables(std::set<storm::expressions::Variable> const& from, std::set<storm::expressions::Variable> const& to) const;
             
             /*!
              * Swaps the given pairs of meta variables in the ADD. The pairs of meta variables must be guaranteed to have
@@ -312,7 +330,7 @@ namespace storm {
              * @return The resulting ADD.
              */
             Add<LibraryType, ValueType> swapVariables(std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& metaVariablePairs) const;
-            
+
             /*!
              * Permutes the given pairs of meta variables in the ADD. The pairs of meta variables must be guaranteed to have
              * the same number of underlying ADD variables. The first component of the i-th entry is substituted by the second component.
@@ -332,7 +350,18 @@ namespace storm {
              * @return An ADD representing the result of the matrix-matrix multiplication.
              */
             Add<LibraryType, ValueType> multiplyMatrix(Add<LibraryType, ValueType> const& otherMatrix, std::set<storm::expressions::Variable> const& summationMetaVariables) const;
-            
+
+            /*!
+             * Multiplies the current ADD (representing a matrix) with the given 0/1 matrix (represented by a BDD) by
+             * summing over the given meta variables.
+             *
+             * @param otherMatrix The matrix with which to multiply.
+             * @param summationMetaVariables The names of the meta variables over which to sum during the matrix-
+             * matrix multiplication.
+             * @return An ADD representing the result of the matrix-matrix multiplication.
+             */
+            Add<LibraryType, ValueType> multiplyMatrix(Bdd<LibraryType> const& otherMatrix, std::set<storm::expressions::Variable> const& summationMetaVariables) const;
+
             /*!
              * Computes a BDD that represents the function in which all assignments with a function value strictly
              * larger than the given value are mapped to one and all others to zero.
@@ -589,7 +618,7 @@ namespace storm {
              *
              * @param filename The name of the file to which the DD is to be exported.
              */
-            void exportToDot(std::string const& filename) const override;
+            void exportToDot(std::string const& filename, bool showVariablesIfPossible = true) const override;
             
             /*!
              * Retrieves an iterator that points to the first meta variable assignment with a non-zero function value.
@@ -624,7 +653,17 @@ namespace storm {
              * @return The corresponding ODD.
              */
             Odd createOdd() const;
-            
+
+            /*!
+             * Retrieves the internal ADD.
+             */
+            InternalAdd<LibraryType, ValueType> const& getInternalAdd() const;
+
+            /*!
+             * Retrieves the internal DD manager.
+             */
+            InternalDdManager<LibraryType> const& getInternalDdManager() const;
+
         private:
             /*!
              * Creates an ADD from the given internal ADD.
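
A short sketch of the two API additions declared above (placeholder names 'values', 'transitions01' and 'columnVariables'; 'values' is assumed to be an ADD over the column variables and 'transitions01' a BDD over row and column variables). The new multiplyMatrix overload accepts the 0/1 matrix directly as a BDD, avoiding an explicit conversion to an ADD:

    // For every row (state), sum the entries of 'values' over all columns
    // (successors) that are set in the 0/1 transition relation.
    storm::dd::Add<storm::dd::DdType::Sylvan, double> successorSum =
        values.multiplyMatrix(transitions01, columnVariables);

    // The extended DOT export can now suppress variable names if desired.
    successorSum.exportToDot("successorSum.dot", /* showVariablesIfPossible */ false);
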
diff --git a/src/storm/storage/dd/Bdd.cpp b/src/storm/storage/dd/Bdd.cpp
index 1b5f40172..3303cc983 100644
--- a/src/storm/storage/dd/Bdd.cpp
+++ b/src/storm/storage/dd/Bdd.cpp
@@ -141,19 +141,19 @@ namespace storm {
         template<DdType LibraryType>
         Bdd<LibraryType> Bdd<LibraryType>::existsAbstract(std::set<storm::expressions::Variable> const& metaVariables) const {
             Bdd<LibraryType> cube = getCube(this->getDdManager(), metaVariables);
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.existsAbstract(cube), Dd<LibraryType>::subtractMetaVariables(*this, cube));
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.existsAbstract(cube.getInternalBdd()), Dd<LibraryType>::subtractMetaVariables(*this, cube));
         }
         
         template<DdType LibraryType>
         Bdd<LibraryType> Bdd<LibraryType>::existsAbstractRepresentative(std::set<storm::expressions::Variable> const& metaVariables) const {
             Bdd<LibraryType> cube = getCube(this->getDdManager(), metaVariables);
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.existsAbstractRepresentative(cube), this->getContainedMetaVariables());
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.existsAbstractRepresentative(cube.getInternalBdd()), this->getContainedMetaVariables());
         }
 
         template<DdType LibraryType>
         Bdd<LibraryType> Bdd<LibraryType>::universalAbstract(std::set<storm::expressions::Variable> const& metaVariables) const {
             Bdd<LibraryType> cube = getCube(this->getDdManager(), metaVariables);
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.universalAbstract(cube), Dd<LibraryType>::subtractMetaVariables(*this, cube));
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.universalAbstract(cube.getInternalBdd()), Dd<LibraryType>::subtractMetaVariables(*this, cube));
         }
         
         template<DdType LibraryType>
@@ -165,17 +165,17 @@ namespace storm {
             std::set<storm::expressions::Variable> containedMetaVariables;
             std::set_difference(unionOfMetaVariables.begin(), unionOfMetaVariables.end(), existentialVariables.begin(), existentialVariables.end(), std::inserter(containedMetaVariables, containedMetaVariables.begin()));
             
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.andExists(other, cube), containedMetaVariables);
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.andExists(other.getInternalBdd(), cube.getInternalBdd()), containedMetaVariables);
         }
         
         template<DdType LibraryType>
         Bdd<LibraryType> Bdd<LibraryType>::constrain(Bdd<LibraryType> const& constraint) const {
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.constrain(constraint), Dd<LibraryType>::joinMetaVariables(*this, constraint));
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.constrain(constraint.getInternalBdd()), Dd<LibraryType>::joinMetaVariables(*this, constraint));
         }
         
         template<DdType LibraryType>
         Bdd<LibraryType> Bdd<LibraryType>::restrict(Bdd<LibraryType> const& constraint) const {
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.restrict(constraint), Dd<LibraryType>::joinMetaVariables(*this, constraint));
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.restrict(constraint.getInternalBdd()), Dd<LibraryType>::joinMetaVariables(*this, constraint));
         }
         
         template<DdType LibraryType>
@@ -187,7 +187,7 @@ namespace storm {
             for (auto const& metaVariable : rowMetaVariables) {
                 DdMetaVariable<LibraryType> const& variable = this->getDdManager().getMetaVariable(metaVariable);
                 for (auto const& ddVariable : variable.getDdVariables()) {
-                    rowVariables.push_back(ddVariable);
+                    rowVariables.push_back(ddVariable.getInternalBdd());
                 }
             }
             
@@ -195,11 +195,11 @@ namespace storm {
             for (auto const& metaVariable : columnMetaVariables) {
                 DdMetaVariable<LibraryType> const& variable = this->getDdManager().getMetaVariable(metaVariable);
                 for (auto const& ddVariable : variable.getDdVariables()) {
-                    columnVariables.push_back(ddVariable);
+                    columnVariables.push_back(ddVariable.getInternalBdd());
                 }
             }
             
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.relationalProduct(relation, rowVariables, columnVariables), newMetaVariables);
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.relationalProduct(relation.getInternalBdd(), rowVariables, columnVariables), newMetaVariables);
         }
         
         template<DdType LibraryType>
@@ -211,7 +211,7 @@ namespace storm {
             for (auto const& metaVariable : rowMetaVariables) {
                 DdMetaVariable<LibraryType> const& variable = this->getDdManager().getMetaVariable(metaVariable);
                 for (auto const& ddVariable : variable.getDdVariables()) {
-                    rowVariables.push_back(ddVariable);
+                    rowVariables.push_back(ddVariable.getInternalBdd());
                 }
             }
             
@@ -219,11 +219,11 @@ namespace storm {
             for (auto const& metaVariable : columnMetaVariables) {
                 DdMetaVariable<LibraryType> const& variable = this->getDdManager().getMetaVariable(metaVariable);
                 for (auto const& ddVariable : variable.getDdVariables()) {
-                    columnVariables.push_back(ddVariable);
+                    columnVariables.push_back(ddVariable.getInternalBdd());
                 }
             }
             
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.inverseRelationalProduct(relation, rowVariables, columnVariables), newMetaVariables);
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.inverseRelationalProduct(relation.getInternalBdd(), rowVariables, columnVariables), newMetaVariables);
         }
         
         template<DdType LibraryType>
@@ -235,7 +235,7 @@ namespace storm {
             for (auto const& metaVariable : rowMetaVariables) {
                 DdMetaVariable<LibraryType> const& variable = this->getDdManager().getMetaVariable(metaVariable);
                 for (auto const& ddVariable : variable.getDdVariables()) {
-                    rowVariables.push_back(ddVariable);
+                    rowVariables.push_back(ddVariable.getInternalBdd());
                 }
             }
             
@@ -243,16 +243,17 @@ namespace storm {
             for (auto const& metaVariable : columnMetaVariables) {
                 DdMetaVariable<LibraryType> const& variable = this->getDdManager().getMetaVariable(metaVariable);
                 for (auto const& ddVariable : variable.getDdVariables()) {
-                    columnVariables.push_back(ddVariable);
+                    columnVariables.push_back(ddVariable.getInternalBdd());
                 }
             }
             
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.inverseRelationalProductWithExtendedRelation(relation, rowVariables, columnVariables), newMetaVariables);
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.inverseRelationalProductWithExtendedRelation(relation.getInternalBdd(), rowVariables, columnVariables), newMetaVariables);
         }
         
         template<DdType LibraryType>
         Bdd<LibraryType> Bdd<LibraryType>::swapVariables(std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& metaVariablePairs) const {
             std::set<storm::expressions::Variable> newContainedMetaVariables;
+            std::set<storm::expressions::Variable> deletedMetaVariables;
             std::vector<InternalBdd<LibraryType>> from;
             std::vector<InternalBdd<LibraryType>> to;
             for (auto const& metaVariablePair : metaVariablePairs) {
@@ -261,20 +262,61 @@ namespace storm {
                 
                 // Keep track of the contained meta variables in the DD.
                 if (this->containsMetaVariable(metaVariablePair.first)) {
-                    newContainedMetaVariables.insert(metaVariablePair.second);
-                }
-                if (this->containsMetaVariable(metaVariablePair.second)) {
-                    newContainedMetaVariables.insert(metaVariablePair.first);
+                    if (this->containsMetaVariable(metaVariablePair.second)) {
+                        // Nothing to do here.
+                    } else {
+                        newContainedMetaVariables.insert(metaVariablePair.second);
+                        deletedMetaVariables.insert(metaVariablePair.first);
+                    }
+                } else {
+                    if (!this->containsMetaVariable(metaVariablePair.second)) {
+                        // Nothing to do here.
+                    } else {
+                        newContainedMetaVariables.insert(metaVariablePair.first);
+                        deletedMetaVariables.insert(metaVariablePair.second);
+                    }
                 }
                 
                 for (auto const& ddVariable : variable1.getDdVariables()) {
-                    from.push_back(ddVariable);
+                    from.emplace_back(ddVariable.getInternalBdd());
                 }
                 for (auto const& ddVariable : variable2.getDdVariables()) {
-                    to.push_back(ddVariable);
+                    to.emplace_back(ddVariable.getInternalBdd());
                 }
             }
-            return Bdd<LibraryType>(this->getDdManager(), internalBdd.swapVariables(from, to), newContainedMetaVariables);
+            
+            std::set<storm::expressions::Variable> tmp;
+            std::set_difference(this->getContainedMetaVariables().begin(), this->getContainedMetaVariables().end(), deletedMetaVariables.begin(), deletedMetaVariables.end(), std::inserter(tmp, tmp.begin()));
+            std::set<storm::expressions::Variable> containedMetaVariables;
+            std::set_union(tmp.begin(), tmp.end(), newContainedMetaVariables.begin(), newContainedMetaVariables.end(), std::inserter(containedMetaVariables, containedMetaVariables.begin()));
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.swapVariables(from, to), containedMetaVariables);
+        }
+        
+        template<DdType LibraryType>
+        Bdd<LibraryType> Bdd<LibraryType>::renameVariables(std::set<storm::expressions::Variable> const& from, std::set<storm::expressions::Variable> const& to) const {
+            std::vector<InternalBdd<LibraryType>> fromBdds;
+            std::vector<InternalBdd<LibraryType>> toBdds;
+            
+            for (auto const& metaVariable : from) {
+                STORM_LOG_THROW(this->containsMetaVariable(metaVariable), storm::exceptions::InvalidOperationException, "Cannot rename variable '" << metaVariable.getName() << "' that is not present.");
+                DdMetaVariable<LibraryType> const& ddMetaVariable = this->getDdManager().getMetaVariable(metaVariable);
+                for (auto const& ddVariable : ddMetaVariable.getDdVariables()) {
+                    fromBdds.push_back(ddVariable.getInternalBdd());
+                }
+            }
+            for (auto const& metaVariable : to) {
+                STORM_LOG_THROW(!this->containsMetaVariable(metaVariable), storm::exceptions::InvalidOperationException, "Cannot rename to variable '" << metaVariable.getName() << "' that is already present.");
+                DdMetaVariable<LibraryType> const& ddMetaVariable = this->getDdManager().getMetaVariable(metaVariable);
+                for (auto const& ddVariable : ddMetaVariable.getDdVariables()) {
+                    toBdds.push_back(ddVariable.getInternalBdd());
+                }
+            }
+            
+            std::set<storm::expressions::Variable> newContainedMetaVariables = to;
+            std::set_difference(this->getContainedMetaVariables().begin(), this->getContainedMetaVariables().end(), from.begin(), from.end(), std::inserter(newContainedMetaVariables, newContainedMetaVariables.begin()));
+            
+            STORM_LOG_THROW(fromBdds.size() == toBdds.size(), storm::exceptions::InvalidArgumentException, "Unable to rename mismatching meta variables.");
+            return Bdd<LibraryType>(this->getDdManager(), internalBdd.swapVariables(fromBdds, toBdds), newContainedMetaVariables);
         }
         
         template<DdType LibraryType>
@@ -338,8 +380,8 @@ namespace storm {
         }
         
         template<DdType LibraryType>
-        void Bdd<LibraryType>::exportToDot(std::string const& filename) const {
-            internalBdd.exportToDot(filename, this->getDdManager().getDdVariableNames());
+        void Bdd<LibraryType>::exportToDot(std::string const& filename, bool showVariablesIfPossible) const {
+            internalBdd.exportToDot(filename, this->getDdManager().getDdVariableNames(), showVariablesIfPossible);
         }
         
         template<DdType LibraryType>
@@ -356,6 +398,11 @@ namespace storm {
             return internalBdd.createOdd(this->getSortedVariableIndices());
         }
         
+        template<DdType LibraryType>
+        InternalBdd<LibraryType> const& Bdd<LibraryType>::getInternalBdd() const {
+            return internalBdd;
+        }
+        
         template<DdType LibraryType>
         template<typename ValueType>
         std::vector<ValueType> Bdd<LibraryType>::filterExplicitVector(Odd const& odd, std::vector<ValueType> const& values) const {
@@ -371,11 +418,6 @@ namespace storm {
             return result;
         }
         
-        template<DdType LibraryType>
-        Bdd<LibraryType>::operator InternalBdd<LibraryType>() const {
-            return internalBdd;
-        }
-        
         template class Bdd<DdType::CUDD>;
         
         template Bdd<DdType::CUDD> Bdd<DdType::CUDD>::fromVector(DdManager<DdType::CUDD> const& ddManager, std::vector<double> const& explicitValues, storm::dd::Odd const& odd, std::set<storm::expressions::Variable> const& metaVariables, storm::logic::ComparisonType comparisonType, double value);
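
A sketch of one forward reachability step written with the new Bdd::renameVariables (placeholder names; 'reachable' is a BDD over the row variable x, 'transitions' a BDD over x and its primed copy xPrime). This is equivalent in spirit to relationalProduct, but makes the renaming step explicit:

    // 1. Conjoin with the transition relation and abstract the current-state variable.
    // 2. Rename the successor copy back to the current-state copy.
    storm::dd::Bdd<storm::dd::DdType::Sylvan> successors =
        reachable.andExists(transitions, {x}).renameVariables({xPrime}, {x});
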
diff --git a/src/storm/storage/dd/Bdd.h b/src/storm/storage/dd/Bdd.h
index b1a6840fe..41d6b2b86 100644
--- a/src/storm/storage/dd/Bdd.h
+++ b/src/storm/storage/dd/Bdd.h
@@ -29,6 +29,15 @@ namespace storm {
             template<DdType LibraryTypePrime, typename ValueTypePrime>
             friend class Add;
             
+            /*!
+             * Creates a DD that encapsulates the given internal BDD.
+             *
+             * @param ddManager The manager responsible for this DD.
+             * @param internalBdd The internal BDD to store.
+             * @param containedMetaVariables The meta variables that appear in the DD.
+             */
+            Bdd(DdManager<LibraryType> const& ddManager, InternalBdd<LibraryType> const& internalBdd, std::set<storm::expressions::Variable> const& containedMetaVariables = std::set<storm::expressions::Variable>());
+            
             // Instantiate all copy/move constructors/assignments with the default implementation.
             Bdd() = default;
             Bdd(Bdd<LibraryType> const& other) = default;
@@ -265,6 +274,17 @@ namespace storm {
              */
             Bdd<LibraryType> swapVariables(std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& metaVariablePairs) const;
             
+            /*!
+             * Renames the given meta variables in the BDD. The number of underlying DD variables of both meta variable
+             * sets must agree.
+             *
+             * @param from The meta variables to be renamed. The current BDD needs to contain all these meta variables.
+             * @param to The meta variables that are the target of the renaming process. The current BDD must not contain
+             * any of these meta variables.
+             * @return The resulting BDD.
+             */
+            Bdd<LibraryType> renameVariables(std::set<storm::expressions::Variable> const& from, std::set<storm::expressions::Variable> const& to) const;
+            
             /*!
              * Retrieves whether this DD represents the constant one function.
              *
@@ -317,7 +337,7 @@ namespace storm {
             
             virtual uint_fast64_t getLevel() const override;
             
-            virtual void exportToDot(std::string const& filename) const override;
+            virtual void exportToDot(std::string const& filename, bool showVariablesIfPossible = true) const override;
             
             /*!
              * Retrieves the cube of all given meta variables.
@@ -353,26 +373,17 @@ namespace storm {
              */
             storm::storage::BitVector filterExplicitVector(Odd const& odd, storm::storage::BitVector const& values) const;
             
+            /*!
+             * Retrieves the internal BDD.
+             */
+            InternalBdd<LibraryType> const& getInternalBdd() const;
+            
             friend struct std::hash<storm::dd::Bdd<LibraryType>>;
             
             template<DdType LibraryTypePrime, typename ValueType>
             friend struct FromVectorHelper;
             
         private:
-            /*!
-             * We provide a conversion operator from the BDD to its internal type to ease calling the internal functions.
-             */
-            operator InternalBdd<LibraryType>() const;
-            
-            /*!
-             * Creates a DD that encapsulates the given internal BDD.
-             *
-             * @param ddManager The manager responsible for this DD.
-             * @param internalBdd The internal BDD to store.
-             * @param containedMetaVariables The meta variables that appear in the DD.
-             */
-            Bdd(DdManager<LibraryType> const& ddManager, InternalBdd<LibraryType> const& internalBdd, std::set<storm::expressions::Variable> const& containedMetaVariables = std::set<storm::expressions::Variable>());
-            
             // The internal BDD that depends on the chosen library.
             InternalBdd<LibraryType> internalBdd;
         };
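
Since the implicit conversion operator to InternalBdd has been removed, code at the DD layer now unwraps explicitly (sketch; 'someBdd' is a placeholder). The wrapping constructor shown above moves to the public interface for the same reason:

    // Explicit unwrap replaces the old implicit conversion.
    storm::dd::InternalBdd<storm::dd::DdType::Sylvan> const& internal = someBdd.getInternalBdd();
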
diff --git a/src/storm/storage/dd/BisimulationDecomposition.cpp b/src/storm/storage/dd/BisimulationDecomposition.cpp
new file mode 100644
index 000000000..57190ff23
--- /dev/null
+++ b/src/storm/storage/dd/BisimulationDecomposition.cpp
@@ -0,0 +1,97 @@
+#include "storm/storage/dd/BisimulationDecomposition.h"
+
+#include "storm/storage/dd/bisimulation/Partition.h"
+#include "storm/storage/dd/bisimulation/PartitionRefiner.h"
+#include "storm/storage/dd/bisimulation/MdpPartitionRefiner.h"
+#include "storm/storage/dd/bisimulation/QuotientExtractor.h"
+
+#include "storm/models/symbolic/Model.h"
+#include "storm/models/symbolic/Mdp.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/utility/macros.h"
+#include "storm/exceptions/InvalidOperationException.h"
+#include "storm/exceptions/NotSupportedException.h"
+
+namespace storm {
+    namespace dd {
+        
+        using namespace bisimulation;
+
+        template <storm::dd::DdType DdType, typename ValueType>
+        std::unique_ptr<PartitionRefiner<DdType, ValueType>> createRefiner(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& initialPartition) {
+            if (model.isOfType(storm::models::ModelType::Mdp)) {
+                return std::make_unique<MdpPartitionRefiner<DdType, ValueType>>(*model.template as<storm::models::symbolic::Mdp<DdType, ValueType>>(), initialPartition);
+            } else {
+                return std::make_unique<PartitionRefiner<DdType, ValueType>>(model, initialPartition);
+            }
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        BisimulationDecomposition<DdType, ValueType>::BisimulationDecomposition(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::storage::BisimulationType const& bisimulationType) : model(model), preservationInformation(model, bisimulationType), refiner(createRefiner(model, Partition<DdType, ValueType>::create(model, bisimulationType, preservationInformation))) {
+            this->refineWrtRewardModels();
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        BisimulationDecomposition<DdType, ValueType>::BisimulationDecomposition(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, storm::storage::BisimulationType const& bisimulationType) : model(model), preservationInformation(model, formulas, bisimulationType), refiner(createRefiner(model, Partition<DdType, ValueType>::create(model, bisimulationType, preservationInformation))) {
+            this->refineWrtRewardModels();
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        BisimulationDecomposition<DdType, ValueType>::BisimulationDecomposition(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& initialPartition, bisimulation::PreservationInformation<DdType, ValueType> const& preservationInformation) : model(model), preservationInformation(preservationInformation), refiner(createRefiner(model, initialPartition)) {
+            this->refineWrtRewardModels();
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        BisimulationDecomposition<DdType, ValueType>::~BisimulationDecomposition() = default;
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        void BisimulationDecomposition<DdType, ValueType>::compute(bisimulation::SignatureMode const& mode) {
+            STORM_LOG_ASSERT(refiner, "No suitable refiner.");
+            
+            STORM_LOG_TRACE("Initial partition has " << refiner->getStatePartition().getNumberOfBlocks() << " blocks.");
+#ifndef NDEBUG
+            STORM_LOG_TRACE("Initial partition has " << refiner->getStatePartition().getNodeCount() << " nodes.");
+#endif
+
+            auto start = std::chrono::high_resolution_clock::now();
+            uint64_t iterations = 0;
+            bool refined = true;
+            while (refined) {
+                refined = refiner->refine(mode);
+                ++iterations;
+                STORM_LOG_TRACE("After iteration " << iterations << " partition has " << refiner->getStatePartition().getNumberOfBlocks() << " blocks.");
+            }
+            auto end = std::chrono::high_resolution_clock::now();
+            
+            STORM_LOG_DEBUG("Partition refinement completed in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms (" << iterations << " iterations).");
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        std::shared_ptr<storm::models::Model<ValueType>> BisimulationDecomposition<DdType, ValueType>::getQuotient() const {
+            STORM_LOG_THROW(this->refiner->getStatus() == Status::FixedPoint, storm::exceptions::InvalidOperationException, "Cannot extract the quotient because the bisimulation decomposition has not been completed.");
+
+            STORM_LOG_TRACE("Starting quotient extraction.");
+            QuotientExtractor<DdType, ValueType> extractor;
+            std::shared_ptr<storm::models::Model<ValueType>> quotient = extractor.extract(model, refiner->getStatePartition(), preservationInformation);
+            STORM_LOG_TRACE("Quotient extraction done.");
+            
+            return quotient;
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        void BisimulationDecomposition<DdType, ValueType>::refineWrtRewardModels() {
+            for (auto const& rewardModelName : this->preservationInformation.getRewardModelNames()) {
+                auto const& rewardModel = this->model.getRewardModel(rewardModelName);
+                refiner->refineWrtRewardModel(rewardModel);
+            }
+        }
+        
+        template class BisimulationDecomposition<storm::dd::DdType::CUDD, double>;
+
+        template class BisimulationDecomposition<storm::dd::DdType::Sylvan, double>;
+        template class BisimulationDecomposition<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+        template class BisimulationDecomposition<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+        
+    }
+}
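
A hedged end-to-end sketch of the new symbolic bisimulation API (placeholder 'model'; a symbolic model built with the Sylvan library is assumed, since the rational number/function value types are only instantiated for Sylvan):

    #include "storm/storage/dd/BisimulationDecomposition.h"

    // 'model' is assumed to be a storm::models::symbolic::Model<DdType::Sylvan, double>.
    storm::dd::BisimulationDecomposition<storm::dd::DdType::Sylvan, double> decomposition(
        model, storm::storage::BisimulationType::Strong);

    // Refine the signature-based partition until a fixed point is reached
    // (SignatureMode::Eager is the default).
    decomposition.compute();

    // Extract the quotient; throws InvalidOperationException if compute() has not
    // reached the fixed point yet.
    std::shared_ptr<storm::models::Model<double>> quotient = decomposition.getQuotient();
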
diff --git a/src/storm/storage/dd/BisimulationDecomposition.h b/src/storm/storage/dd/BisimulationDecomposition.h
new file mode 100644
index 000000000..66e562bea
--- /dev/null
+++ b/src/storm/storage/dd/BisimulationDecomposition.h
@@ -0,0 +1,66 @@
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "storm/storage/dd/DdType.h"
+#include "storm/storage/bisimulation/BisimulationType.h"
+#include "storm/storage/dd/bisimulation/SignatureMode.h"
+#include "storm/storage/dd/bisimulation/PreservationInformation.h"
+
+#include "storm/logic/Formula.h"
+
+namespace storm {
+    namespace models {
+        template <typename ValueType>
+        class Model;
+        
+        namespace symbolic {
+            template <storm::dd::DdType DdType, typename ValueType>
+            class Model;
+        }
+    }
+    
+    namespace dd {
+        namespace bisimulation {
+            template <storm::dd::DdType DdType, typename ValueType>
+            class Partition;
+
+            template <storm::dd::DdType DdType, typename ValueType>
+            class PartitionRefiner;
+        }
+        
+        template <storm::dd::DdType DdType, typename ValueType>
+        class BisimulationDecomposition {
+        public:
+            BisimulationDecomposition(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::storage::BisimulationType const& bisimulationType);
+            BisimulationDecomposition(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, storm::storage::BisimulationType const& bisimulationType);
+            BisimulationDecomposition(storm::models::symbolic::Model<DdType, ValueType> const& model, bisimulation::Partition<DdType, ValueType> const& initialPartition, bisimulation::PreservationInformation<DdType, ValueType> const& preservationInformation);
+            
+            ~BisimulationDecomposition();
+            
+            /*!
+             * Computes the bisimulation decomposition by refining the partition until a fixed point is reached.
+             */
+            void compute(bisimulation::SignatureMode const& mode = bisimulation::SignatureMode::Eager);
+            
+            /*!
+             * Retrieves the quotient model after the bisimulation decomposition has been computed.
+             */
+            std::shared_ptr<storm::models::Model<ValueType>> getQuotient() const;
+            
+        private:
+            void refineWrtRewardModels();
+            
+            // The model for which to compute the bisimulation decomposition.
+            storm::models::symbolic::Model<DdType, ValueType> const& model;
+            
+            // The object capturing what is preserved.
+            bisimulation::PreservationInformation<DdType, ValueType> preservationInformation;
+            
+            // The refiner to use.
+            std::unique_ptr<bisimulation::PartitionRefiner<DdType, ValueType>> refiner;
+        };
+        
+    }
+}
diff --git a/src/storm/storage/dd/Dd.h b/src/storm/storage/dd/Dd.h
index 765416bfe..e3f7890fb 100644
--- a/src/storm/storage/dd/Dd.h
+++ b/src/storm/storage/dd/Dd.h
@@ -102,7 +102,7 @@ namespace storm {
              *
              * @param filename The name of the file to which the DD is to be exported.
              */
-            virtual void exportToDot(std::string const& filename) const = 0;
+            virtual void exportToDot(std::string const& filename, bool showVariablesIfPossible = true) const = 0;
             
             /*!
              * Retrieves the manager that is responsible for this DD.
diff --git a/src/storm/storage/dd/DdManager.cpp b/src/storm/storage/dd/DdManager.cpp
index ff4d83785..6a00b4cbf 100644
--- a/src/storm/storage/dd/DdManager.cpp
+++ b/src/storm/storage/dd/DdManager.cpp
@@ -7,6 +7,7 @@
 #include "storm/exceptions/InvalidArgumentException.h"
 
 #include "storm/exceptions/NotSupportedException.h"
+#include "storm/exceptions/InvalidOperationException.h"
 
 #include "storm-config.h"
 #include "storm/adapters/RationalFunctionAdapter.h"
@@ -21,6 +22,16 @@ namespace storm {
             // Intentionally left empty.
         }
         
+        template<DdType LibraryType>
+        std::shared_ptr<DdManager<LibraryType>> DdManager<LibraryType>::asSharedPointer() {
+            return this->shared_from_this();
+        }
+        
+        template<DdType LibraryType>
+        std::shared_ptr<DdManager<LibraryType> const> DdManager<LibraryType>::asSharedPointer() const {
+            return this->shared_from_this();
+        }
+        
         template<DdType LibraryType>
         Bdd<LibraryType> DdManager<LibraryType>::getBddOne() const {
             return Bdd<LibraryType>(*this, internalDdManager.getBddOne());
@@ -42,6 +53,12 @@ namespace storm {
         Add<LibraryType, ValueType> DdManager<LibraryType>::getAddZero() const {
             return Add<LibraryType, ValueType>(*this, internalDdManager.template getAddZero<ValueType>());
         }
+
+        template<DdType LibraryType>
+        template<typename ValueType>
+        Add<LibraryType, ValueType> DdManager<LibraryType>::getAddUndefined() const {
+            return Add<LibraryType, ValueType>(*this, internalDdManager.template getAddUndefined<ValueType>());
+        }
         
         template<DdType LibraryType>
         template<typename ValueType>
@@ -56,10 +73,10 @@ namespace storm {
         }
         
         template<DdType LibraryType>
-        Bdd<LibraryType> DdManager<LibraryType>::getEncoding(storm::expressions::Variable const& variable, int_fast64_t value) const {
+        Bdd<LibraryType> DdManager<LibraryType>::getEncoding(storm::expressions::Variable const& variable, int_fast64_t value, bool mostSignificantBitAtTop) const {
             DdMetaVariable<LibraryType> const& metaVariable = this->getMetaVariable(variable);
             
-            STORM_LOG_THROW(value >= metaVariable.getLow() && value <= metaVariable.getHigh(), storm::exceptions::InvalidArgumentException, "Illegal value " << value << " for meta variable '" << variable.getName() << "'.");
+            STORM_LOG_THROW(metaVariable.canRepresent(value), storm::exceptions::InvalidArgumentException, "Illegal value " << value << " for meta variable '" << variable.getName() << "'.");
             
             // Now compute the encoding relative to the low value of the meta variable.
             value -= metaVariable.getLow();
@@ -67,17 +84,35 @@ namespace storm {
             std::vector<Bdd<LibraryType>> const& ddVariables = metaVariable.getDdVariables();
             
             Bdd<LibraryType> result;
-            if (value & (1ull << (ddVariables.size() - 1))) {
-                result = ddVariables[0];
+            if (mostSignificantBitAtTop) {
+                if (value & (1ull << (ddVariables.size() - 1))) {
+                    result = ddVariables[0];
+                } else {
+                    result = !ddVariables[0];
+                }
+                
+                for (std::size_t i = 1; i < ddVariables.size(); ++i) {
+                    if (value & (1ull << (ddVariables.size() - i - 1))) {
+                        result &= ddVariables[i];
+                    } else {
+                        result &= !ddVariables[i];
+                    }
+                }
             } else {
-                result = !ddVariables[0];
-            }
-            
-            for (std::size_t i = 1; i < ddVariables.size(); ++i) {
-                if (value & (1ull << (ddVariables.size() - i - 1))) {
-                    result &= ddVariables[i];
+                if (value & 1ull) {
+                    result = ddVariables[0];
                 } else {
-                    result &= !ddVariables[i];
+                    result = !ddVariables[0];
+                }
+                value >>= 1;
+                
+                for (std::size_t i = 1; i < ddVariables.size(); ++i) {
+                    if (value & 1ull) {
+                        result &= ddVariables[i];
+                    } else {
+                        result &= !ddVariables[i];
+                    }
+                    value >>= 1;
                 }
             }
             
@@ -87,20 +122,27 @@ namespace storm {
         template<DdType LibraryType>
         Bdd<LibraryType> DdManager<LibraryType>::getRange(storm::expressions::Variable const& variable) const {
             storm::dd::DdMetaVariable<LibraryType> const& metaVariable = this->getMetaVariable(variable);
-            
-            Bdd<LibraryType> result = this->getBddZero();
-            
-            for (int_fast64_t value = metaVariable.getLow(); value <= metaVariable.getHigh(); ++value) {
-                result |= this->getEncoding(variable, value);
+
+            if (metaVariable.hasHigh()) {
+                return Bdd<LibraryType>(*this, internalDdManager.getBddEncodingLessOrEqualThan(static_cast<uint64_t>(metaVariable.getHigh() - metaVariable.getLow()), metaVariable.getCube().getInternalBdd(), metaVariable.getNumberOfDdVariables()), {variable});
+//                Bdd<LibraryType> result = this->getBddZero();
+//                for (int_fast64_t value = metaVariable.getLow(); value <= metaVariable.getHigh(); ++value) {
+//                    result |= this->getEncoding(variable, value);
+//                }
+//                return result;
+            } else {
+                // If there is no upper bound on this variable, the whole range is valid.
+                Bdd<LibraryType> result = this->getBddOne();
+                result.addMetaVariable(variable);
+                return result;
             }
-            
-            return result;
         }
 
         template<DdType LibraryType>
         template<typename ValueType>
         Add<LibraryType, ValueType> DdManager<LibraryType>::getIdentity(storm::expressions::Variable const& variable) const {
             storm::dd::DdMetaVariable<LibraryType> const& metaVariable = this->getMetaVariable(variable);
+            STORM_LOG_THROW(metaVariable.hasHigh(), storm::exceptions::InvalidOperationException, "Cannot create an identity for the unbounded meta variable '" << variable.getName() << "'.");
             
             Add<LibraryType, ValueType> result = this->getAddZero<ValueType>();
             for (int_fast64_t value = metaVariable.getLow(); value <= metaVariable.getHigh(); ++value) {
@@ -109,6 +151,33 @@ namespace storm {
             return result;
         }
 		
+        template<DdType LibraryType>
+        Bdd<LibraryType> DdManager<LibraryType>::getIdentity(std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& variablePairs, bool restrictToFirstRange) const {
+            auto result = this->getBddOne();
+            for (auto const& pair : variablePairs) {
+                result &= this->getIdentity(pair.first, pair.second, restrictToFirstRange);
+            }
+            return result;
+        }
+        
+        template<DdType LibraryType>
+        Bdd<LibraryType> DdManager<LibraryType>::getIdentity(storm::expressions::Variable const& first, storm::expressions::Variable const& second, bool restrictToFirstRange) const {
+            auto const& firstMetaVariable = this->getMetaVariable(first);
+            auto const& secondMetaVariable = this->getMetaVariable(second);
+            
+            STORM_LOG_THROW(firstMetaVariable.getNumberOfDdVariables() == secondMetaVariable.getNumberOfDdVariables(), storm::exceptions::InvalidOperationException, "Mismatching sizes of meta variables.");
+            
+            auto const& firstDdVariables = firstMetaVariable.getDdVariables();
+            auto const& secondDdVariables = secondMetaVariable.getDdVariables();
+
+            auto result = restrictToFirstRange ? this->getRange(first) : this->getBddOne();
+            for (auto it1 = firstDdVariables.begin(), it2 = secondDdVariables.begin(), ite1 = firstDdVariables.end(); it1 != ite1; ++it1, ++it2) {
+                result &= it1->iff(*it2);
+            }
+            
+            return result;
+        }
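+        // Illustrative usage sketch (not part of this change); x and xPrime are placeholders for existing
+        // row/column meta variables of equal width in a manager named 'manager':
+        //
+        //     storm::dd::Bdd<storm::dd::DdType::CUDD> id = manager.getIdentity({{x, xPrime}});
+        //
+        // The resulting BDD is true exactly for encodings in which x and xPrime agree bit-wise; with the
+        // default restrictToFirstRange = true it is additionally conjoined with getRange(x).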
+        
         template<DdType LibraryType>
         Bdd<LibraryType> DdManager<LibraryType>::getCube(storm::expressions::Variable const& variable) const {
             return getCube({variable});
@@ -124,6 +193,20 @@ namespace storm {
             return result;
         }
         
+        template<DdType LibraryType>
+        std::vector<storm::expressions::Variable> DdManager<LibraryType>::cloneVariable(storm::expressions::Variable const& variable, std::string const& newMetaVariableName, boost::optional<uint64_t> const& numberOfLayers) {
+            std::vector<storm::expressions::Variable> newMetaVariables;
+            auto const& ddMetaVariable = this->getMetaVariable(variable);
+            // Use the requested number of layers if given; otherwise fall back to three layers.
+            uint64_t layers = numberOfLayers ? numberOfLayers.get() : 3;
+            if (ddMetaVariable.getType() == storm::dd::MetaVariableType::Bool) {
+                newMetaVariables = this->addMetaVariable(newMetaVariableName, layers);
+            } else if (ddMetaVariable.getType() == storm::dd::MetaVariableType::Int) {
+                newMetaVariables = this->addMetaVariable(newMetaVariableName, ddMetaVariable.getLow(), ddMetaVariable.getHigh(), layers);
+            } else if (ddMetaVariable.getType() == storm::dd::MetaVariableType::BitVector) {
+                newMetaVariables = this->addBitVectorMetaVariable(newMetaVariableName, ddMetaVariable.getNumberOfDdVariables(), layers);
+            }
+            return newMetaVariables;
+        }
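+        // Illustrative usage sketch (not part of this change); 'x' is a placeholder for an existing meta variable:
+        //
+        //     std::vector<storm::expressions::Variable> copies = manager.cloneVariable(x, "x_copy");
+        //
+        // The clone has the same type and bit width (and, for integer variables, the same range) as the original.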
+        
         template<DdType LibraryType>
         std::pair<storm::expressions::Variable, storm::expressions::Variable> DdManager<LibraryType>::addMetaVariable(std::string const& name, int_fast64_t low, int_fast64_t high, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position) {
             std::vector<storm::expressions::Variable> result = addMetaVariable(name, low, high, 2, position);
@@ -132,20 +215,39 @@ namespace storm {
         
         template<DdType LibraryType>
         std::vector<storm::expressions::Variable> DdManager<LibraryType>::addMetaVariable(std::string const& name, int_fast64_t low, int_fast64_t high, uint64_t numberOfLayers, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position) {
+            // Check that the range is legal.
+            STORM_LOG_THROW(high >= low, storm::exceptions::InvalidArgumentException, "Illegal empty range for meta variable.");
+            
+            return this->addMetaVariableHelper(MetaVariableType::Int, name, std::max(static_cast<uint64_t>(std::ceil(std::log2(high - low + 1))), static_cast<uint64_t>(1)), numberOfLayers, position, std::make_pair(low, high));
+        }
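+        // Note (not part of this change): an integer range [low, high] is encoded with
+        // max(ceil(log2(high - low + 1)), 1) DD variables per layer. For example, low = 0 and high = 5
+        // (6 values) yield 3 bits, while a singleton range (low == high) still gets 1 bit.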
+        
+        template<DdType LibraryType>
+        std::vector<storm::expressions::Variable> DdManager<LibraryType>::addBitVectorMetaVariable(std::string const& variableName, uint64_t bits, uint64_t numberOfLayers, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position) {
+            return this->addMetaVariableHelper(MetaVariableType::BitVector, variableName, bits, numberOfLayers, position);
+        }
+        
+        template<DdType LibraryType>
+        std::pair<storm::expressions::Variable, storm::expressions::Variable> DdManager<LibraryType>::addMetaVariable(std::string const& name, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position) {
+            std::vector<storm::expressions::Variable> result = this->addMetaVariableHelper(MetaVariableType::Bool, name, 1, 2, position);
+            return std::make_pair(result[0], result[1]);
+        }
+        
+        template<DdType LibraryType>
+        std::vector<storm::expressions::Variable> DdManager<LibraryType>::addMetaVariable(std::string const& name, uint64_t numberOfLayers, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position) {
+            return this->addMetaVariableHelper(MetaVariableType::Bool, name, 1, numberOfLayers, position);
+        }
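+        // Illustrative usage sketch (not part of this change): the public overloads above all delegate
+        // to addMetaVariableHelper and only differ in the meta variable type and bit width:
+        //
+        //     auto flag   = manager.addMetaVariable("flag");                 // Bool, 1 bit, 2 layers
+        //     auto value  = manager.addMetaVariable("value", 0, 15);         // Int, 4 bits, 2 layers
+        //     auto vector = manager.addBitVectorMetaVariable("vec", 10, 2);  // BitVector, 10 bits, 2 layers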
+        
+        template<DdType LibraryType>
+        std::vector<storm::expressions::Variable> DdManager<LibraryType>::addMetaVariableHelper(MetaVariableType const& type, std::string const& name, uint64_t numberOfDdVariables, uint64_t numberOfLayers, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position, boost::optional<std::pair<int_fast64_t, int_fast64_t>> const& bounds) {
             // Check whether number of layers is legal.
             STORM_LOG_THROW(numberOfLayers >= 1, storm::exceptions::InvalidArgumentException, "Layers must be at least 1.");
             
+            // Check that the number of DD variables is legal.
+            STORM_LOG_THROW(numberOfDdVariables >= 1, storm::exceptions::InvalidArgumentException, "Illegal number of DD variables.");
+            
             // Check whether the variable name is legal.
             STORM_LOG_THROW(name != "" && name.back() != '\'', storm::exceptions::InvalidArgumentException, "Illegal name of meta variable: '" << name << "'.");
             
             // Check whether a meta variable already exists.
             STORM_LOG_THROW(!this->hasMetaVariable(name), storm::exceptions::InvalidArgumentException, "A meta variable '" << name << "' already exists.");
             
-            // Check that the range is legal.
-            STORM_LOG_THROW(high >= low, storm::exceptions::InvalidArgumentException, "Illegal empty range for meta variable.");
-            
-            std::size_t numberOfBits = static_cast<std::size_t>(std::ceil(std::log2(high - low + 1)));
-            
             // If a specific position was requested, we compute it now.
             boost::optional<uint_fast64_t> level;
             if (position) {
@@ -159,21 +261,23 @@ namespace storm {
                 }
             }
             
-            // For the case where low and high coincide, we need to have a single bit.
-            if (numberOfBits == 0) {
-                ++numberOfBits;
-            }
+            STORM_LOG_TRACE("Creating meta variable with " << numberOfDdVariables << " bit(s) and " << numberOfLayers << " layer(s).");
             
             std::stringstream tmp1;
             std::vector<storm::expressions::Variable> result;
             for (uint64 layer = 0; layer < numberOfLayers; ++layer) {
-                result.emplace_back(manager->declareBitVectorVariable(name + tmp1.str(), numberOfBits));
+                if (type == MetaVariableType::Int) {
+                    result.emplace_back(manager->declareIntegerVariable(name + tmp1.str()));
+                } else if (type == MetaVariableType::Bool) {
+                    result.emplace_back(manager->declareBooleanVariable(name + tmp1.str()));
+                } else if (type == MetaVariableType::BitVector) {
+                    result.emplace_back(manager->declareBitVectorVariable(name + tmp1.str(), numberOfDdVariables));
+                }
                 tmp1 << "'";
             }
             
             std::vector<std::vector<Bdd<LibraryType>>> variables(numberOfLayers);
-            
-            for (std::size_t i = 0; i < numberOfBits; ++i) {
+            for (std::size_t i = 0; i < numberOfDdVariables; ++i) {
                 std::vector<InternalBdd<LibraryType>> ddVariables = internalDdManager.createDdVariables(numberOfLayers, level);
                 for (uint64 layer = 0; layer < numberOfLayers; ++layer) {
                     variables[layer].emplace_back(Bdd<LibraryType>(*this, ddVariables[layer], {result[layer]}));
@@ -188,65 +292,14 @@ namespace storm {
             
             std::stringstream tmp2;
             for (uint64_t layer = 0; layer < numberOfLayers; ++layer) {
-                metaVariableMap.emplace(result[layer], DdMetaVariable<LibraryType>(name + tmp2.str(), low, high, variables[layer]));
-                tmp2 << "'";
-            }
-            
-            return result;
-        }
-        
-        template<DdType LibraryType>
-        std::pair<storm::expressions::Variable, storm::expressions::Variable> DdManager<LibraryType>::addMetaVariable(std::string const& name, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position) {
-            std::vector<storm::expressions::Variable> result = addMetaVariable(name, 2, position);
-            return std::make_pair(result[0], result[1]);
-        }
-        
-        template<DdType LibraryType>
-        std::vector<storm::expressions::Variable> DdManager<LibraryType>::addMetaVariable(std::string const& name, uint64_t numberOfLayers, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position) {
-            // Check whether number of layers is legal.
-            STORM_LOG_THROW(numberOfLayers >= 1, storm::exceptions::InvalidArgumentException, "Layers must be at least 1.");
-
-            // Check whether the variable name is legal.
-            STORM_LOG_THROW(name != "" && name.back() != '\'', storm::exceptions::InvalidArgumentException, "Illegal name of meta variable: '" << name << "'.");
-            
-            // Check whether a meta variable already exists.
-            STORM_LOG_THROW(!this->hasMetaVariable(name), storm::exceptions::InvalidArgumentException, "A meta variable '" << name << "' already exists.");
-            
-            // If a specific position was requested, we compute it now.
-            boost::optional<uint_fast64_t> level;
-            if (position) {
-                STORM_LOG_THROW(this->supportsOrderedInsertion(), storm::exceptions::NotSupportedException, "Cannot add meta variable at position, because the manager does not support ordered insertion.");
-                storm::dd::DdMetaVariable<LibraryType> beforeVariable = this->getMetaVariable(position.get().second);
-                level = position.get().first == MetaVariablePosition::Above ? std::numeric_limits<uint_fast64_t>::max() : std::numeric_limits<uint_fast64_t>::min();
-                for (auto const& ddVariable : beforeVariable.getDdVariables()) {
-                    level = position.get().first == MetaVariablePosition::Above ? std::min(level.get(), ddVariable.getLevel()) : std::max(level.get(), ddVariable.getLevel());
-                }
-                if (position.get().first == MetaVariablePosition::Below) {
-                    ++level.get();
+                if (bounds) {
+                    metaVariableMap.emplace(result[layer], DdMetaVariable<LibraryType>(name + tmp2.str(), bounds.get().first, bounds.get().second, variables[layer]));
+                } else {
+                    metaVariableMap.emplace(result[layer], DdMetaVariable<LibraryType>(type, name + tmp2.str(), variables[layer]));
                 }
-            }
-            
-            std::stringstream tmp1;
-            std::vector<storm::expressions::Variable> result;
-            for (uint64 layer = 0; layer < numberOfLayers; ++layer) {
-                result.emplace_back(manager->declareBooleanVariable(name + tmp1.str()));
-                tmp1 << "'";
-            }
-            
-            std::vector<std::vector<Bdd<LibraryType>>> variables(numberOfLayers);
-            
-            std::vector<InternalBdd<LibraryType>> ddVariables = internalDdManager.createDdVariables(numberOfLayers, level);
-            
-            for (uint64_t layer = 0; layer < numberOfLayers; ++layer) {
-                variables[layer].emplace_back(Bdd<LibraryType>(*this, ddVariables[layer], {result[layer]}));
-            }
-            
-            std::stringstream tmp2;
-            for (uint64_t layer = 0; layer < numberOfLayers; ++layer) {
-                metaVariableMap.emplace(result[layer], DdMetaVariable<LibraryType>(name + tmp2.str(), variables[layer]));
                 tmp2 << "'";
             }
-
+            
             return result;
         }
         
@@ -423,6 +476,11 @@ namespace storm {
             return &internalDdManager;
         }
         
+        template<DdType LibraryType>
+        void DdManager<LibraryType>::debugCheck() const {
+            internalDdManager.debugCheck();
+        }
+        
         template class DdManager<DdType::CUDD>;
         
         template Add<DdType::CUDD, double> DdManager<DdType::CUDD>::getAddZero() const;
@@ -440,7 +498,6 @@ namespace storm {
         template Add<DdType::CUDD, double> DdManager<DdType::CUDD>::getIdentity(storm::expressions::Variable const& variable) const;
         template Add<DdType::CUDD, uint_fast64_t> DdManager<DdType::CUDD>::getIdentity(storm::expressions::Variable const& variable) const;
         
-        
         template class DdManager<DdType::Sylvan>;
         
         template Add<DdType::Sylvan, double> DdManager<DdType::Sylvan>::getAddZero() const;
@@ -450,6 +507,13 @@ namespace storm {
 		template Add<DdType::Sylvan, storm::RationalFunction> DdManager<DdType::Sylvan>::getAddZero() const;
 #endif
         
+        template Add<DdType::Sylvan, double> DdManager<DdType::Sylvan>::getAddUndefined() const;
+        template Add<DdType::Sylvan, uint_fast64_t> DdManager<DdType::Sylvan>::getAddUndefined() const;
+#ifdef STORM_HAVE_CARL
+        template Add<DdType::Sylvan, storm::RationalNumber> DdManager<DdType::Sylvan>::getAddUndefined() const;
+        template Add<DdType::Sylvan, storm::RationalFunction> DdManager<DdType::Sylvan>::getAddUndefined() const;
+#endif
+
         template Add<DdType::Sylvan, double> DdManager<DdType::Sylvan>::getAddOne() const;
         template Add<DdType::Sylvan, uint_fast64_t> DdManager<DdType::Sylvan>::getAddOne() const;
 #ifdef STORM_HAVE_CARL
diff --git a/src/storm/storage/dd/DdManager.h b/src/storm/storage/dd/DdManager.h
index 24e737ee0..055bbfb1a 100644
--- a/src/storm/storage/dd/DdManager.h
+++ b/src/storm/storage/dd/DdManager.h
@@ -21,7 +21,7 @@ namespace storm {
     namespace dd {
         // Declare DdManager class so we can then specialize it for the different DD types.
         template<DdType LibraryType>
-        class DdManager {
+        class DdManager : public std::enable_shared_from_this<DdManager<LibraryType>> {
         public:
             friend class Bdd<LibraryType>;
             
@@ -41,6 +41,9 @@ namespace storm {
             DdManager<LibraryType>& operator=(DdManager<LibraryType> const& other) = delete;
             DdManager(DdManager<LibraryType>&& other) = default;
             DdManager<LibraryType>& operator=(DdManager<LibraryType>&& other) = default;
+
+            std::shared_ptr<DdManager<LibraryType>> asSharedPointer();
+            std::shared_ptr<DdManager<LibraryType> const> asSharedPointer() const;
             
             /*!
              * Retrieves a BDD representing the constant one function.
@@ -72,6 +75,14 @@ namespace storm {
             template<typename ValueType>
             Add<LibraryType, ValueType> getAddZero() const;
 
+            /*!
+             * Retrieves an ADD representing an undefined value.
+             *
+             * @return An ADD representing an undefined value.
+             */
+            template<typename ValueType>
+            Add<LibraryType, ValueType> getAddUndefined() const;
+
             /*!
              * Retrieves an ADD representing the constant infinity function.
              *
@@ -94,10 +105,12 @@ namespace storm {
              *
              * @param variable The expression variable associated with the meta variable.
              * @param value The value the meta variable is supposed to have.
+             * @param mostSignificantBitAtTop A flag indicating whether the most significant bit of the value is to be
+             * encoded with the topmost variable or the bottommost.
              * @return The DD representing the function that maps all inputs which have the given meta variable equal
             * to the given value to one.
              */
-            Bdd<LibraryType> getEncoding(storm::expressions::Variable const& variable, int_fast64_t value) const;
+            Bdd<LibraryType> getEncoding(storm::expressions::Variable const& variable, int_fast64_t value, bool mostSignificantBitAtTop = true) const;
             
             /*!
              * Retrieves the BDD representing the range of the meta variable, i.e., a function that maps all legal values
@@ -118,6 +131,23 @@ namespace storm {
             template<typename ValueType>
             Add<LibraryType, ValueType> getIdentity(storm::expressions::Variable const& variable) const;
             
+            /*!
+             * Retrieves a BDD in which an encoding is mapped to true iff the meta variables of each pair encode the same value.
+             *
+             * @param variablePairs The variable pairs for which to compute the identity.
+             * @param restrictToFirstRange If set, the identity will be restricted to the legal range of the first variable.
+             */
+            Bdd<LibraryType> getIdentity(std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& variablePairs, bool restrictToFirstRange = true) const;
+
+            /*!
+             * Retrieves a BDD in which an encoding is mapped to true iff the two meta variables encode the same value.
+             * 
+             * @param first The first meta variable in the identity.
+             * @param second The second meta variable in the identity.
+             * @param restrictToFirstRange If set, the identity will be restricted to the legal range of the first variable.
+             */
+            Bdd<LibraryType> getIdentity(storm::expressions::Variable const& first, storm::expressions::Variable const& second, bool restrictToFirstRange = true) const;
+
             /*!
              * Retrieves a BDD that is the cube of the variables representing the given meta variable.
              *
@@ -134,6 +164,15 @@ namespace storm {
              */
             Bdd<LibraryType> getCube(std::set<storm::expressions::Variable> const& variables) const;
 
+            /*!
+             * Clones the given meta variable and optionally changes the number of layers of the variable.
+             *
+             * @param variable The variable to clone.
+             * @param newVariableName The name of the variable to create.
+             * @param numberOfLayers The number of layers of the variable to create.
+             */
+            std::vector<storm::expressions::Variable> cloneVariable(storm::expressions::Variable const& variable, std::string const& newVariableName, boost::optional<uint64_t> const& numberOfLayers = boost::none);
+            
             /*!
              * Adds an integer meta variable with the given range with two layers (a 'normal' and a 'primed' one).
              *
@@ -150,7 +189,15 @@ namespace storm {
              * @param numberOfLayers The number of layers of this variable (must be greater or equal 1).
              */
             std::vector<storm::expressions::Variable> addMetaVariable(std::string const& variableName, int_fast64_t low, int_fast64_t high, uint64_t numberOfLayers, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position = boost::none);
-            
+
+            /*!
+             * Creates a bit-vector meta variable with the given number of bits and layers.
+             *
+             * @param variableName The name of the variable.
+             * @param bits The number of DD variables (per layer) used to encode this variable.
+             * @param numberOfLayers The number of layers of this variable (must be greater or equal 1).
+             */
+            std::vector<storm::expressions::Variable> addBitVectorMetaVariable(std::string const& variableName, uint64_t bits, uint64_t numberOfLayers, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position = boost::none);
+
             /*!
              * Adds a boolean meta variable with two layers (a 'normal' and a 'primed' one).
              *
@@ -269,8 +316,22 @@ namespace storm {
              * @return The internal DD manager.
              */
             InternalDdManager<LibraryType> const& getInternalDdManager() const;
+            
+            /*!
+             * Performs a debug check if available.
+             */
+            void debugCheck() const;
 
         private:
+            /*!
+             * Creates a meta variable with the given number of DD variables and layers.
+             *
+             * @param type The type of the meta variable to create.
+             * @param name The name of the variable.
+             * @param numberOfDdVariables The number of DD variables (per layer) used to encode the variable.
+             * @param numberOfLayers The number of layers of this variable (must be greater or equal 1).
+             */
+            std::vector<storm::expressions::Variable> addMetaVariableHelper(MetaVariableType const& type, std::string const& name, uint64_t numberOfDdVariables, uint64_t numberOfLayers, boost::optional<std::pair<MetaVariablePosition, storm::expressions::Variable>> const& position = boost::none, boost::optional<std::pair<int_fast64_t, int_fast64_t>> const& bounds = boost::none);
+            
             /*!
              * Retrieves a list of names of the DD variables in the order of their index.
              *
diff --git a/src/storm/storage/dd/DdMetaVariable.cpp b/src/storm/storage/dd/DdMetaVariable.cpp
index 620e7311e..393eea4af 100644
--- a/src/storm/storage/dd/DdMetaVariable.cpp
+++ b/src/storm/storage/dd/DdMetaVariable.cpp
@@ -5,13 +5,24 @@
 namespace storm {
     namespace dd {
         template<DdType LibraryType>
-        DdMetaVariable<LibraryType>::DdMetaVariable(std::string const& name, int_fast64_t low, int_fast64_t high, std::vector<Bdd<LibraryType>> const& ddVariables) : name(name), type(MetaVariableType::Int), low(low), high(high), ddVariables(ddVariables) {
+        DdMetaVariable<LibraryType>::DdMetaVariable(std::string const& name, int_fast64_t low, int_fast64_t high, std::vector<Bdd<LibraryType>> const& ddVariables) : name(name), type(MetaVariableType::Int), low(low), high(high), ddVariables(ddVariables), lowestIndex(0) {
             this->createCube();
+            this->precomputeLowestIndex();
         }
         
         template<DdType LibraryType>
-        DdMetaVariable<LibraryType>::DdMetaVariable(std::string const& name, std::vector<Bdd<LibraryType>> const& ddVariables) : name(name), type(MetaVariableType::Bool), low(0), high(1), ddVariables(ddVariables) {
+        DdMetaVariable<LibraryType>::DdMetaVariable(MetaVariableType const& type, std::string const& name, std::vector<Bdd<LibraryType>> const& ddVariables) : name(name), type(type), low(0), ddVariables(ddVariables), lowestIndex(0) {
+            STORM_LOG_ASSERT(type == MetaVariableType::Bool || type == MetaVariableType::BitVector, "Cannot create this type of meta variable in this constructor.");
+            if (ddVariables.size() < 63) {
+                this->high = (1ull << ddVariables.size()) - 1;
+            }
+            
+            // Correct type in the case of boolean variables.
+            if (ddVariables.size() == 1) {
+                this->type = MetaVariableType::Bool;
+            }
             this->createCube();
+            this->precomputeLowestIndex();
         }
         
         template<DdType LibraryType>
@@ -31,7 +42,21 @@ namespace storm {
         
         template<DdType LibraryType>
         int_fast64_t DdMetaVariable<LibraryType>::getHigh() const {
-            return this->high;
+            return this->high.get();
+        }
+        
+        template<DdType LibraryType>
+        bool DdMetaVariable<LibraryType>::hasHigh() const {
+            return static_cast<bool>(this->high);
+        }
+        
+        template<DdType LibraryType>
+        bool DdMetaVariable<LibraryType>::canRepresent(int_fast64_t value) const {
+            bool result = value >= this->low;
+            if (result && high) {
+                return value <= this->high;
+            }
+            return result;
         }
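+        // Note (not part of this change): for bit-vector meta variables with 63 or more DD variables no
+        // upper bound is stored, so hasHigh() returns false and canRepresent(value) only checks the lower bound.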
         
         template<DdType LibraryType>
@@ -49,6 +74,51 @@ namespace storm {
             return this->cube;
         }
         
+        template<DdType LibraryType>
+        std::vector<uint64_t> DdMetaVariable<LibraryType>::getIndices(bool sortedByLevel) const {
+            std::vector<std::pair<uint64_t, uint64_t>> indicesAndLevels = this->getIndicesAndLevels();
+            if (sortedByLevel) {
+                std::sort(indicesAndLevels.begin(), indicesAndLevels.end(), [] (std::pair<uint64_t, uint64_t> const& a, std::pair<uint64_t, uint64_t> const& b) { return a.second < b.second; });
+            }
+            
+            std::vector<uint64_t> indices;
+            for (auto const& e : indicesAndLevels) {
+                indices.emplace_back(e.first);
+            }
+            
+            return indices;
+        }
+        
+        template<DdType LibraryType>
+        std::vector<std::pair<uint64_t, uint64_t>> DdMetaVariable<LibraryType>::getIndicesAndLevels() const {
+            std::vector<std::pair<uint64_t, uint64_t>> indicesAndLevels;
+            for (auto const& v : ddVariables) {
+                indicesAndLevels.emplace_back(v.getIndex(), v.getLevel());
+            }
+            
+            return indicesAndLevels;
+        }
+        
+        template<DdType LibraryType>
+        uint64_t DdMetaVariable<LibraryType>::getHighestLevel() const {
+            uint64_t result = 0;
+            bool first = true;
+            for (auto const& v : ddVariables) {
+                if (first) {
+                    result = v.getLevel();
+                    first = false;
+                } else {
+                    result = std::max(result, v.getLevel());
+                }
+            }
+            
+            return result;
+        }
+        
+        template<DdType LibraryType>
+        uint64_t DdMetaVariable<LibraryType>::getLowestIndex() const {
+            return lowestIndex;
+        }
+        
         template<DdType LibraryType>
         void DdMetaVariable<LibraryType>::createCube() {
             STORM_LOG_ASSERT(!this->ddVariables.empty(), "The DD variables must not be empty.");
@@ -60,6 +130,19 @@ namespace storm {
             }
         }
         
+        template<DdType LibraryType>
+        void DdMetaVariable<LibraryType>::precomputeLowestIndex() {
+            bool first = true;
+            for (auto const& var : this->ddVariables) {
+                if (first) {
+                    first = false;
+                    this->lowestIndex = var.getIndex();
+                } else {
+                    this->lowestIndex = std::min(lowestIndex, var.getIndex());
+                }
+            }
+        }
+        
         template class DdMetaVariable<DdType::CUDD>;
         template class DdMetaVariable<DdType::Sylvan>;
     }
diff --git a/src/storm/storage/dd/DdMetaVariable.h b/src/storm/storage/dd/DdMetaVariable.h
index c596b612a..6b1703796 100644
--- a/src/storm/storage/dd/DdMetaVariable.h
+++ b/src/storm/storage/dd/DdMetaVariable.h
@@ -16,7 +16,7 @@ namespace storm {
         class Add;
 
         // An enumeration for all legal types of meta variables.
-        enum class MetaVariableType { Bool, Int };
+        enum class MetaVariableType { Bool, Int, BitVector };
         
         // Declare DdMetaVariable class so we can then specialize it for the different DD types.
         template<DdType LibraryType>
@@ -52,6 +52,13 @@ namespace storm {
              */
             int_fast64_t getLow() const;
             
+            /*!
+             * Retrieves whether the variable has an upper bound.
+             *
+             * @return True iff the variable has an upper bound.
+             */
+            bool hasHigh() const;
+            
             /*!
              * Retrieves the highest value of the range of the variable.
              *
@@ -59,6 +66,14 @@ namespace storm {
              */
             int_fast64_t getHigh() const;
 
+            /*!
+             * Retrieves whether the meta variable can represent the given value.
+             *
+             * @param value The value to check for legality.
+             * @return True iff the value is legal.
+             */
+            bool canRepresent(int_fast64_t value) const;
+            
             /*!
              * Retrieves the number of DD variables for this meta variable.
              *
@@ -73,6 +88,28 @@ namespace storm {
              */
             Bdd<LibraryType> const& getCube() const;
             
+            /*!
+             * Retrieves the lowest index of all DD variables belonging to this meta variable.
+             */
+            uint64_t getLowestIndex() const;
+            
+            /*!
+             * Retrieves the highest level of all DD variables belonging to this meta variable.
+             */
+            uint64_t getHighestLevel() const;
+            
+            /*!
+             * Retrieves the indices of the DD variables associated with this meta variable.
+             *
+             * @param sortedByLevel If true, the indices are sorted by their level.
+             */
+            std::vector<uint64_t> getIndices(bool sortedByLevel = true) const;
+            
+            /*!
+             * Retrieves the indices and levels of the DD variables associated with this meta variable.
+             */
+            std::vector<std::pair<uint64_t, uint64_t>> getIndicesAndLevels() const;
+
         private:
             /*!
              * Creates an integer meta variable with the given name and range bounds.
@@ -84,14 +121,20 @@ namespace storm {
              * @param manager A pointer to the manager that is responsible for this meta variable.
              */
             DdMetaVariable(std::string const& name, int_fast64_t low, int_fast64_t high, std::vector<Bdd<LibraryType>> const& ddVariables);
-            
+
             /*!
             * Creates a boolean or bit-vector meta variable with the given name.
+             * @param type The type of the meta variable.
              * @param name The name of the meta variable.
              * @param ddVariables The vector of variables used to encode this variable.
              * @param manager A pointer to the manager that is responsible for this meta variable.
              */
-            DdMetaVariable(std::string const& name, std::vector<Bdd<LibraryType>> const& ddVariables);
+            DdMetaVariable(MetaVariableType const& type, std::string const& name, std::vector<Bdd<LibraryType>> const& ddVariables);
+            
+            /*!
+             * Precomputes the lowest index of any DD variable associated with this meta variable.
+             */
+            void precomputeLowestIndex();
             
             /*!
              * Retrieves the variables used to encode the meta variable.
@@ -115,13 +158,16 @@ namespace storm {
             int_fast64_t low;
             
             // The highest value of the range of the variable.
-            int_fast64_t high;
+            boost::optional<int_fast64_t> high;
             
             // The vector of variables that are used to encode the meta variable.
             std::vector<Bdd<LibraryType>> ddVariables;
             
             // The cube consisting of all variables that encode the meta variable.
             Bdd<LibraryType> cube;
+            
+            // The lowest index of any DD variable of this meta variable.
+            uint64_t lowestIndex;
         };
     }
 }
diff --git a/src/storm/storage/dd/InternalBdd.h b/src/storm/storage/dd/InternalBdd.h
index 9dfe22ca3..d59b03ffb 100644
--- a/src/storm/storage/dd/InternalBdd.h
+++ b/src/storm/storage/dd/InternalBdd.h
@@ -1,5 +1,4 @@
-#ifndef STORM_STORAGE_DD_INTERNALBDD_H_
-#define STORM_STORAGE_DD_INTERNALBDD_H_
+#pragma once
 
 #include "storm/storage/dd/DdType.h"
 
@@ -9,6 +8,3 @@ namespace storm {
         class InternalBdd;
     }
 }
-
-
-#endif /* STORM_STORAGE_DD_CUDD_INTERNALBDD_H_ */
diff --git a/src/storm/storage/dd/Odd.cpp b/src/storm/storage/dd/Odd.cpp
index af86aba1d..b94295f0e 100644
--- a/src/storm/storage/dd/Odd.cpp
+++ b/src/storm/storage/dd/Odd.cpp
@@ -13,7 +13,7 @@
 namespace storm {
     namespace dd {
         Odd::Odd(std::shared_ptr<Odd> elseNode, uint_fast64_t elseOffset, std::shared_ptr<Odd> thenNode, uint_fast64_t thenOffset) : elseNode(elseNode), thenNode(thenNode), elseOffset(elseOffset), thenOffset(thenOffset) {
-            // Intentionally left empty.
+            STORM_LOG_ASSERT(this != elseNode.get() && this != thenNode.get(), "Cyclic ODD.");
         }
         
         Odd const& Odd::getThenSuccessor() const {
@@ -105,30 +105,23 @@ namespace storm {
             dotFile << boost::join(levelNames, " -> ") << ";";
             dotFile << "}" << std::endl;
             
-            std::map<uint_fast64_t, std::vector<std::reference_wrapper<storm::dd::Odd const>>> levelToOddNodesMap;
+            std::map<uint_fast64_t, std::unordered_set<storm::dd::Odd const*>> levelToOddNodesMap;
             this->addToLevelToOddNodesMap(levelToOddNodesMap);
             
             for (auto const& levelNodes : levelToOddNodesMap) {
                 dotFile << "{ rank = same; \"" << levelNodes.first << "\"" << std::endl;;
                 for (auto const& node : levelNodes.second) {
-                    dotFile << "\"" << &node.get() << "\";" << std::endl;
+                    dotFile << "\"" << node << "\";" << std::endl;
                 }
                 dotFile << "}" << std::endl;
             }
             
-            std::set<storm::dd::Odd const*> printedNodes;
             for (auto const& levelNodes : levelToOddNodesMap) {
                 for (auto const& node : levelNodes.second) {
-                    if (printedNodes.find(&node.get()) != printedNodes.end()) {
-                        continue;
-                    } else {
-                        printedNodes.insert(&node.get());
-                    }
-                    
-                    dotFile << "\"" << &node.get() << "\" [label=\"" << levelNodes.first << "\"];" << std::endl;
-                    if (!node.get().isTerminalNode()) {
-                        dotFile << "\"" << &node.get() << "\" -> \"" << &node.get().getElseSuccessor() << "\" [style=dashed, label=\"0\"];" << std::endl;
-                        dotFile << "\"" << &node.get() << "\" -> \"" << &node.get().getThenSuccessor() << "\" [style=solid, label=\"" << node.get().getElseOffset() << "\"];" << std::endl;
+                    dotFile << "\"" << node << "\" [label=\"" << node->getTotalOffset() << "\"];" << std::endl;
+                    if (!node->isTerminalNode()) {
+                        dotFile << "\"" << node << "\" -> \"" << &node->getElseSuccessor() << "\" [style=dashed, label=\"0\"];" << std::endl;
+                        dotFile << "\"" << node << "\" -> \"" << &node->getThenSuccessor() << "\" [style=solid, label=\"" << node->getElseOffset() << "\"];" << std::endl;
                     }
                 }
             }
@@ -137,11 +130,13 @@ namespace storm {
             storm::utility::closeFile(dotFile);
         }
         
-        void Odd::addToLevelToOddNodesMap(std::map<uint_fast64_t, std::vector<std::reference_wrapper<storm::dd::Odd const>>>& levelToOddNodesMap, uint_fast64_t level) const {
-            levelToOddNodesMap[level].push_back(*this);
+        void Odd::addToLevelToOddNodesMap(std::map<uint_fast64_t, std::unordered_set<storm::dd::Odd const*>>& levelToOddNodesMap, uint_fast64_t level) const {
+            levelToOddNodesMap[level].emplace(this);
             if (!this->isTerminalNode()) {
                 this->getElseSuccessor().addToLevelToOddNodesMap(levelToOddNodesMap, level + 1);
-                this->getThenSuccessor().addToLevelToOddNodesMap(levelToOddNodesMap, level + 1);
+                if (this->thenNode != this->elseNode) {
+                    this->getThenSuccessor().addToLevelToOddNodesMap(levelToOddNodesMap, level + 1);
+                }
             }
         }
         
diff --git a/src/storm/storage/dd/Odd.h b/src/storm/storage/dd/Odd.h
index 637e9ca6c..0cb4a307c 100644
--- a/src/storm/storage/dd/Odd.h
+++ b/src/storm/storage/dd/Odd.h
@@ -4,6 +4,7 @@
 #include <vector>
 #include <map>
 #include <memory>
+#include <unordered_set>
 
 namespace storm {
     namespace dd {
@@ -125,7 +126,7 @@ namespace storm {
              * @param levelToOddNodesMap A mapping of the level to the ODD node.
             * @param level The level of the current node.
              */
-            void addToLevelToOddNodesMap(std::map<uint_fast64_t, std::vector<std::reference_wrapper<storm::dd::Odd const>>>& levelToOddNodesMap, uint_fast64_t level = 0) const;
+            void addToLevelToOddNodesMap(std::map<uint_fast64_t, std::unordered_set<storm::dd::Odd const*>>& levelToOddNodesMap, uint_fast64_t level = 0) const;
             
             /*!
              * Adds the values of the old explicit values to the new explicit values where the positions in the old vector
diff --git a/src/storm/storage/dd/bisimulation/MdpPartitionRefiner.cpp b/src/storm/storage/dd/bisimulation/MdpPartitionRefiner.cpp
new file mode 100644
index 000000000..a79c67720
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/MdpPartitionRefiner.cpp
@@ -0,0 +1,71 @@
+#include "storm/storage/dd/bisimulation/MdpPartitionRefiner.h"
+
+#include "storm/models/symbolic/Mdp.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            MdpPartitionRefiner<DdType, ValueType>::MdpPartitionRefiner(storm::models::symbolic::Mdp<DdType, ValueType> const& mdp, Partition<DdType, ValueType> const& initialStatePartition) : PartitionRefiner<DdType, ValueType>(mdp, initialStatePartition), choicePartition(Partition<DdType, ValueType>::createTrivialChoicePartition(mdp, initialStatePartition.getBlockVariables())), stateSignatureComputer(mdp.getQualitativeTransitionMatrix(), mdp.getColumnAndNondeterminismVariables(), SignatureMode::Qualitative, true), stateSignatureRefiner(mdp.getManager(), this->statePartition.getBlockVariable(), mdp.getRowVariables()) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            bool MdpPartitionRefiner<DdType, ValueType>::refine(bisimulation::SignatureMode const& mode) {
+                // In this procedure, we will
+                // (1) refine the partition of nondeterministic choices based on the state partition. For this, we use
+                // the signature computer/refiner of the superclass. These objects use the full transition matrix.
+                // (2) if the choice partition was in fact split, refine the state partition as well. For this,
+                // we use the signature computer/refiner of this class.
+                
+                STORM_LOG_TRACE("Refining choice partition.");
+                Partition<DdType, ValueType> newChoicePartition = this->internalRefine(this->signatureComputer, this->signatureRefiner, this->choicePartition, this->statePartition, mode);
+                
+                if (newChoicePartition == choicePartition) {
+                    this->status = Status::FixedPoint;
+                    return false;
+                } else {
+                    this->choicePartition = newChoicePartition;
+                    
+                    // If the choice partition changed, refine the state partition. Use qualitative mode, since we must properly abstract from the choice counts.
+                    STORM_LOG_TRACE("Refining state partition.");
+                    Partition<DdType, ValueType> newStatePartition = this->internalRefine(this->stateSignatureComputer, this->stateSignatureRefiner, this->statePartition, this->choicePartition, SignatureMode::Qualitative);
+
+                    if (newStatePartition == this->statePartition) {
+                        this->status = Status::FixedPoint;
+                        return false;
+                    } else {
+                        this->statePartition = newStatePartition;
+                        return true;
+                    }
+                }
+            }
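+            // Illustrative driver sketch (not part of this change): a caller typically alternates refinement
+            // steps until a fixed point is reached, e.g.
+            //
+            //     MdpPartitionRefiner<DdType, ValueType> refiner(mdp, initialPartition);
+            //     while (refiner.refine(SignatureMode::Eager)) {
+            //         // keep refining; refine() returns false once both partitions are stable
+            //     }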
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> const& MdpPartitionRefiner<DdType, ValueType>::getChoicePartition() const {
+                return choicePartition;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            bool MdpPartitionRefiner<DdType, ValueType>::refineWrtStateActionRewards(storm::dd::Add<DdType, ValueType> const& stateActionRewards) {
+                STORM_LOG_TRACE("Refining with respect to state-action rewards.");
+                Partition<DdType, ValueType> newChoicePartition = this->signatureRefiner.refine(this->choicePartition, Signature<DdType, ValueType>(stateActionRewards));
+                if (newChoicePartition == this->choicePartition) {
+                    return false;
+                } else {
+                    this->choicePartition = newChoicePartition;
+                    return true;
+                }
+            }
+            
+            template class MdpPartitionRefiner<storm::dd::DdType::CUDD, double>;
+            
+            template class MdpPartitionRefiner<storm::dd::DdType::Sylvan, double>;
+            template class MdpPartitionRefiner<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class MdpPartitionRefiner<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/MdpPartitionRefiner.h b/src/storm/storage/dd/bisimulation/MdpPartitionRefiner.h
new file mode 100644
index 000000000..bafd9d97f
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/MdpPartitionRefiner.h
@@ -0,0 +1,49 @@
+#pragma once
+
+#include "storm/storage/dd/bisimulation/PartitionRefiner.h"
+
+namespace storm {
+    namespace models {
+        namespace symbolic {
+            template <storm::dd::DdType DdType, typename ValueType>
+            class Mdp;
+        }
+    }
+    
+    namespace dd {
+        namespace bisimulation {
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            class MdpPartitionRefiner : public PartitionRefiner<DdType, ValueType> {
+            public:
+                MdpPartitionRefiner(storm::models::symbolic::Mdp<DdType, ValueType> const& mdp, Partition<DdType, ValueType> const& initialStatePartition);
+                
+                /*!
+                 * Refines the partition.
+                 *
+                 * @param mode The signature mode to use.
+                 * @return False iff the partition is stable and no refinement was actually performed.
+                 */
+                virtual bool refine(bisimulation::SignatureMode const& mode = bisimulation::SignatureMode::Eager) override;
+                
+                /*!
+                 * Retrieves the current choice partition in the refinement process.
+                 */
+                Partition<DdType, ValueType> const& getChoicePartition() const;
+                
+            private:
+                virtual bool refineWrtStateActionRewards(storm::dd::Add<DdType, ValueType> const& stateActionRewards) override;
+                
+                // The choice partition in the refinement process.
+                Partition<DdType, ValueType> choicePartition;
+
+                // The object used to compute the state signatures.
+                SignatureComputer<DdType, ValueType> stateSignatureComputer;
+                
+                // The object used to refine the state partition based on the signatures.
+                SignatureRefiner<DdType, ValueType> stateSignatureRefiner;
+            };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/Partition.cpp b/src/storm/storage/dd/bisimulation/Partition.cpp
new file mode 100644
index 000000000..0fa4dffd4
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/Partition.cpp
@@ -0,0 +1,234 @@
+#include "storm/storage/dd/bisimulation/Partition.h"
+
+#include "storm/storage/dd/DdManager.h"
+
+#include "storm/storage/dd/bisimulation/PreservationInformation.h"
+
+#include "storm/logic/Formula.h"
+#include "storm/logic/AtomicExpressionFormula.h"
+#include "storm/logic/AtomicLabelFormula.h"
+
+#include "storm/models/symbolic/Mdp.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/settings/SettingsManager.h"
+#include "storm/settings/modules/BisimulationSettings.h"
+
+#include "storm/utility/macros.h"
+#include "storm/exceptions/NotSupportedException.h"
+#include "storm/exceptions/InvalidPropertyException.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType>::Partition() : nextFreeBlockIndex(0) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType>::Partition(storm::dd::Add<DdType, ValueType> const& partitionAdd, std::pair<storm::expressions::Variable, storm::expressions::Variable> const& blockVariables, uint64_t nextFreeBlockIndex) : partition(partitionAdd), blockVariables(blockVariables), nextFreeBlockIndex(nextFreeBlockIndex) {
+                // Intentionally left empty.
+            }
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType>::Partition(storm::dd::Bdd<DdType> const& partitionBdd, std::pair<storm::expressions::Variable, storm::expressions::Variable> const& blockVariables, uint64_t nextFreeBlockIndex) : partition(partitionBdd), blockVariables(blockVariables), nextFreeBlockIndex(nextFreeBlockIndex) {
+                // Intentionally left empty.
+            }
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            bool Partition<DdType, ValueType>::operator==(Partition<DdType, ValueType> const& other) {
+                return this->partition == other.partition && this->blockVariables == other.blockVariables && this->nextFreeBlockIndex == other.nextFreeBlockIndex;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> Partition<DdType, ValueType>::replacePartition(storm::dd::Add<DdType, ValueType> const& newPartitionAdd, uint64_t nextFreeBlockIndex) const {
+                return Partition<DdType, ValueType>(newPartitionAdd, blockVariables, nextFreeBlockIndex);
+            }
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> Partition<DdType, ValueType>::replacePartition(storm::dd::Bdd<DdType> const& newPartitionBdd, uint64_t nextFreeBlockIndex) const {
+                return Partition<DdType, ValueType>(newPartitionBdd, blockVariables, nextFreeBlockIndex);
+            }
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> Partition<DdType, ValueType>::create(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::storage::BisimulationType const& bisimulationType, PreservationInformation<DdType, ValueType> const& preservationInformation) {
+                
+                std::vector<storm::expressions::Expression> expressionVector;
+                for (auto const& expression : preservationInformation.getExpressions()) {
+                    expressionVector.emplace_back(expression);
+                }
+                
+                return create(model, expressionVector, bisimulationType);
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> Partition<DdType, ValueType>::create(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<storm::expressions::Expression> const& expressions, storm::storage::BisimulationType const& bisimulationType) {
+                STORM_LOG_THROW(bisimulationType == storm::storage::BisimulationType::Strong, storm::exceptions::NotSupportedException, "Currently only strong bisimulation is supported.");
+                
+                storm::dd::DdManager<DdType>& manager = model.getManager();
+                
+                std::vector<storm::dd::Bdd<DdType>> stateSets;
+                for (auto const& expression : expressions) {
+                    stateSets.emplace_back(model.getStates(expression));
+                }
+                
+                uint64_t numberOfDdVariables = 0;
+                for (auto const& metaVariable : model.getRowVariables()) {
+                    auto const& ddMetaVariable = manager.getMetaVariable(metaVariable);
+                    numberOfDdVariables += ddMetaVariable.getNumberOfDdVariables();
+                }
+                if (model.getType() == storm::models::ModelType::Mdp) {
+                    auto mdp = model.template as<storm::models::symbolic::Mdp<DdType, ValueType>>();
+                    for (auto const& metaVariable : mdp->getNondeterminismVariables()) {
+                        auto const& ddMetaVariable = manager.getMetaVariable(metaVariable);
+                        numberOfDdVariables += ddMetaVariable.getNumberOfDdVariables();
+                    }
+                }
+                
+                std::pair<storm::expressions::Variable, storm::expressions::Variable> blockVariables = createBlockVariables(manager, numberOfDdVariables);
+                std::pair<storm::dd::Bdd<DdType>, uint64_t> partitionBddAndBlockCount = createPartitionBdd(manager, model, stateSets, blockVariables.first);
+                
+                // Store the partition as an ADD only in the case of CUDD.
+                if (DdType == storm::dd::DdType::CUDD) {
+                    return Partition<DdType, ValueType>(partitionBddAndBlockCount.first.template toAdd<ValueType>(), blockVariables, partitionBddAndBlockCount.second);
+                } else {
+                    return Partition<DdType, ValueType>(partitionBddAndBlockCount.first, blockVariables, partitionBddAndBlockCount.second);
+                }
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> Partition<DdType, ValueType>::createTrivialChoicePartition(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, std::pair<storm::expressions::Variable, storm::expressions::Variable> const& blockVariables) {
+                storm::dd::Bdd<DdType> choicePartitionBdd = !model.getIllegalMask().renameVariables(model.getRowVariables(), model.getColumnVariables()) && model.getManager().getEncoding(blockVariables.first, 0, false);
+                
+                // Store the partition as an ADD only in the case of CUDD.
+                if (DdType == storm::dd::DdType::CUDD) {
+                    return Partition<DdType, ValueType>(choicePartitionBdd.template toAdd<ValueType>(), blockVariables, 1);
+                } else {
+                    return Partition<DdType, ValueType>(choicePartitionBdd, blockVariables, 1);
+                }
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            uint64_t Partition<DdType, ValueType>::getNumberOfStates() const {
+                return this->getStates().getNonZeroCount();
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            storm::dd::Bdd<DdType> Partition<DdType, ValueType>::getStates() const {
+                if (this->storedAsAdd()) {
+                    return this->asAdd().notZero().existsAbstract({this->getBlockVariable()});
+                } else {
+                    return this->asBdd().existsAbstract({this->getBlockVariable()});
+                }
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            uint64_t Partition<DdType, ValueType>::getNumberOfBlocks() const {
+                return nextFreeBlockIndex;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            bool Partition<DdType, ValueType>::storedAsAdd() const {
+                return partition.which() == 1;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            bool Partition<DdType, ValueType>::storedAsBdd() const {
+                return partition.which() == 0;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            storm::dd::Add<DdType, ValueType> const& Partition<DdType, ValueType>::asAdd() const {
+                return boost::get<storm::dd::Add<DdType, ValueType>>(partition);
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            storm::dd::Bdd<DdType> const& Partition<DdType, ValueType>::asBdd() const {
+                return boost::get<storm::dd::Bdd<DdType>>(partition);
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            std::pair<storm::expressions::Variable, storm::expressions::Variable> const& Partition<DdType, ValueType>::getBlockVariables() const {
+                return blockVariables;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            storm::expressions::Variable const& Partition<DdType, ValueType>::getBlockVariable() const {
+                return blockVariables.first;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            storm::expressions::Variable const& Partition<DdType, ValueType>::getPrimedBlockVariable() const {
+                return blockVariables.second;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            uint64_t Partition<DdType, ValueType>::getNextFreeBlockIndex() const {
+                return nextFreeBlockIndex;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            uint64_t Partition<DdType, ValueType>::getNodeCount() const {
+                if (this->storedAsBdd()) {
+                    return asBdd().getNodeCount();
+                } else {
+                    return asAdd().getNodeCount();
+                }
+            }
+            
+            template<storm::dd::DdType DdType>
+            void enumerateBlocksRec(std::vector<storm::dd::Bdd<DdType>> const& stateSets, storm::dd::Bdd<DdType> const& currentStateSet, uint64_t offset, storm::expressions::Variable const& blockVariable, std::function<void (storm::dd::Bdd<DdType> const&)> const& callback) {
+                if (currentStateSet.isZero()) {
+                    return;
+                }
+                if (offset == stateSets.size()) {
+                    callback(currentStateSet);
+                } else {
+                    enumerateBlocksRec(stateSets, currentStateSet && stateSets[offset], offset + 1, blockVariable, callback);
+                    enumerateBlocksRec(stateSets, currentStateSet && !stateSets[offset], offset + 1, blockVariable, callback);
+                }
+            }
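+            // Note (not part of this change): for k state sets this recursion visits at most 2^k
+            // intersections of the sets and their complements; the callback is invoked once per
+            // non-empty intersection, i.e. once per realizable block of the initial partition.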
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            std::pair<storm::dd::Bdd<DdType>, uint64_t> Partition<DdType, ValueType>::createPartitionBdd(storm::dd::DdManager<DdType> const& manager, storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<storm::dd::Bdd<DdType>> const& stateSets, storm::expressions::Variable const& blockVariable) {
+                uint64_t blockCount = 0;
+                storm::dd::Bdd<DdType> partitionBdd = manager.getBddZero();
+                
+                // Enumerate all realizable blocks.
+                enumerateBlocksRec<DdType>(stateSets, model.getReachableStates(), 0, blockVariable, [&manager, &partitionBdd, &blockVariable, &blockCount](storm::dd::Bdd<DdType> const& stateSet) {
+                    partitionBdd |= (stateSet && manager.getEncoding(blockVariable, blockCount, false));
+                    blockCount++;
+                } );
+
+                // Move the partition over to the primed variables.
+                partitionBdd = partitionBdd.swapVariables(model.getRowColumnMetaVariablePairs());
+
+                return std::make_pair(partitionBdd, blockCount);
+            }
+            
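+            // Creates the pair of block meta variables (unprimed and primed) used to encode block indices. If a
+            // meta variable named "blocks" already exists in the manager, a fresh name of the form "blocks<i>" is
+            // chosen instead.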
+            template<storm::dd::DdType DdType, typename ValueType>
+            std::pair<storm::expressions::Variable, storm::expressions::Variable> Partition<DdType, ValueType>::createBlockVariables(storm::dd::DdManager<DdType>& manager, uint64_t numberOfDdVariables) {
+                std::vector<storm::expressions::Variable> blockVariables;
+                if (manager.hasMetaVariable("blocks")) {
+                    int64_t counter = 0;
+                    while (manager.hasMetaVariable("blocks" + std::to_string(counter))) {
+                        ++counter;
+                    }
+                    blockVariables = manager.addBitVectorMetaVariable("blocks" + std::to_string(counter), numberOfDdVariables, 2);
+                } else {
+                    blockVariables = manager.addBitVectorMetaVariable("blocks", numberOfDdVariables, 2);
+                }
+                return std::make_pair(blockVariables[0], blockVariables[1]);
+            }
+            
+            template class Partition<storm::dd::DdType::CUDD, double>;
+
+            template class Partition<storm::dd::DdType::Sylvan, double>;
+            template class Partition<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class Partition<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/Partition.h b/src/storm/storage/dd/bisimulation/Partition.h
new file mode 100644
index 000000000..3a620e681
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/Partition.h
@@ -0,0 +1,104 @@
+#pragma once
+
+#include <vector>
+#include <memory>
+
+#include <boost/variant.hpp>
+
+#include "storm/storage/dd/DdType.h"
+#include "storm/storage/dd/Add.h"
+#include "storm/storage/dd/Bdd.h"
+#include "storm/storage/bisimulation/BisimulationType.h"
+
+#include "storm/models/symbolic/Model.h"
+#include "storm/models/symbolic/NondeterministicModel.h"
+
+namespace storm {
+    namespace logic {
+        class Formula;
+    }
+    
+    namespace dd {
+        namespace bisimulation {
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            class PreservationInformation;
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            class Partition {
+            public:
+                Partition();
+                
+                bool operator==(Partition<DdType, ValueType> const& other);
+                
+                Partition<DdType, ValueType> replacePartition(storm::dd::Add<DdType, ValueType> const& newPartitionAdd, uint64_t nextFreeBlockIndex) const;
+                Partition<DdType, ValueType> replacePartition(storm::dd::Bdd<DdType> const& newPartitionBdd, uint64_t nextFreeBlockIndex) const;
+
+                static Partition create(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::storage::BisimulationType const& bisimulationType, PreservationInformation<DdType, ValueType> const& preservationInformation);
+                static Partition createTrivialChoicePartition(storm::models::symbolic::NondeterministicModel<DdType, ValueType> const& model, std::pair<storm::expressions::Variable, storm::expressions::Variable> const& blockVariables);
+                
+                uint64_t getNumberOfStates() const;
+                uint64_t getNumberOfBlocks() const;
+                
+                bool storedAsAdd() const;
+                bool storedAsBdd() const;
+                
+                storm::dd::Add<DdType, ValueType> const& asAdd() const;
+                storm::dd::Bdd<DdType> const& asBdd() const;
+
+                std::pair<storm::expressions::Variable, storm::expressions::Variable> const& getBlockVariables() const;
+                storm::expressions::Variable const& getBlockVariable() const;
+                storm::expressions::Variable const& getPrimedBlockVariable() const;
+                
+                uint64_t getNextFreeBlockIndex() const;
+                uint64_t getNodeCount() const;
+
+                storm::dd::Bdd<DdType> getStates() const;
+                                
+            private:
+                /*!
+                 * Creates a new partition from the given data.
+                 *
+                 * @param partitionAdd An ADD that maps encodings over the state/row variables and the block variable to
+                 * one iff the state belongs to the encoded block.
+                 * @param blockVariables The variables to use for the block encoding. Their range must be [0, x] where x is
+                 * greater than or equal to the number of states in the partition.
+                 * @param nextFreeBlockIndex The next free block index. The existing blocks must be encoded with indices
+                 * between 0 and this number.
+                 */
+                Partition(storm::dd::Add<DdType, ValueType> const& partitionAdd, std::pair<storm::expressions::Variable, storm::expressions::Variable> const& blockVariables, uint64_t nextFreeBlockIndex);
+                
+                /*!
+                 * Creates a new partition from the given data.
+                 *
+                 * @param partitionBdd A BDD that maps encodings over the state/row variables and the block variable to
+                 * true iff the state belongs to the encoded block.
+                 * @param blockVariables The variables to use for the block encoding. Their range must be [0, x] where x is
+                 * greater than or equal to the number of states in the partition.
+                 * @param nextFreeBlockIndex The next free block index. The existing blocks must be encoded with indices
+                 * between 0 and this number.
+                 */
+                Partition(storm::dd::Bdd<DdType> const& partitionBdd, std::pair<storm::expressions::Variable, storm::expressions::Variable> const& blockVariables, uint64_t nextFreeBlockIndex);
+                
+                /*!
+                 * Creates a partition from the given model that respects the given expressions.
+                 */
+                static Partition create(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<storm::expressions::Expression> const& expressions, storm::storage::BisimulationType const& bisimulationType);
+                
+                static std::pair<storm::dd::Bdd<DdType>, uint64_t> createPartitionBdd(storm::dd::DdManager<DdType> const& manager, storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<storm::dd::Bdd<DdType>> const& stateSets, storm::expressions::Variable const& blockVariable);
+                
+                static std::pair<storm::expressions::Variable, storm::expressions::Variable> createBlockVariables(storm::dd::DdManager<DdType>& manager, uint64_t numberOfDdVariables);
+                
+                /// The DD representing the partition. The DD is over the row variables of the model and the block variable.
+                boost::variant<storm::dd::Bdd<DdType>, storm::dd::Add<DdType, ValueType>> partition;
+                
+                /// The meta variables used to encode the block of each state in this partition.
+                std::pair<storm::expressions::Variable, storm::expressions::Variable> blockVariables;
+                
+                /// The next free block index.
+                uint64_t nextFreeBlockIndex;
+            };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/PartitionRefiner.cpp b/src/storm/storage/dd/bisimulation/PartitionRefiner.cpp
new file mode 100644
index 000000000..e06d9db41
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/PartitionRefiner.cpp
@@ -0,0 +1,126 @@
+#include "storm/storage/dd/bisimulation/PartitionRefiner.h"
+
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/utility/macros.h"
+#include "storm/exceptions/NotSupportedException.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            PartitionRefiner<DdType, ValueType>::PartitionRefiner(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& initialStatePartition) : status(Status::Initialized), refinements(0), statePartition(initialStatePartition), signatureComputer(model), signatureRefiner(model.getManager(), statePartition.getBlockVariable(), model.getRowAndNondeterminismVariables(), model.getNondeterminismVariables()) {
+                // Intentionally left empty.
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            bool PartitionRefiner<DdType, ValueType>::refine(SignatureMode const& mode) {
+                Partition<DdType, ValueType> newStatePartition = this->internalRefine(signatureComputer, signatureRefiner, statePartition, statePartition, mode);
+                if (statePartition == newStatePartition) {
+                    this->status = Status::FixedPoint;
+                    return false;
+                } else {
+                    this->statePartition = newStatePartition;
+                    return true;
+                }
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> PartitionRefiner<DdType, ValueType>::internalRefine(SignatureComputer<DdType, ValueType>& signatureComputer, SignatureRefiner<DdType, ValueType>& signatureRefiner, Partition<DdType, ValueType> const& oldPartition, Partition<DdType, ValueType> const& targetPartition, SignatureMode const& mode) {
+                auto start = std::chrono::high_resolution_clock::now();
+                
+                if (this->status != Status::FixedPoint) {
+                    this->status = Status::InComputation;
+                    
+                    signatureComputer.setSignatureMode(mode);
+                    
+                    std::chrono::milliseconds::rep signatureTime = 0;
+                    std::chrono::milliseconds::rep refinementTime = 0;
+                    
+                    bool refined = false;
+                    uint64_t index = 0;
+                    Partition<DdType, ValueType> newPartition;
+                    auto signatureIterator = signatureComputer.compute(targetPartition);
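+                    // The signature computer may produce several (partial) signatures for the target partition. We
+                    // refine the old partition with each of them and stop as soon as the number of blocks strictly
+                    // increases, i.e. the partition has actually been refined.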
+                    while (signatureIterator.hasNext() && !refined) {
+                        auto signatureStart = std::chrono::high_resolution_clock::now();
+                        auto signature = signatureIterator.next();
+                        auto signatureEnd = std::chrono::high_resolution_clock::now();
+                        totalSignatureTime += (signatureEnd - signatureStart);
+                        STORM_LOG_DEBUG("Signature " << refinements << "[" << index << "] DD has " << signature.getSignatureAdd().getNodeCount() << " nodes.");
+                        
+                        auto refinementStart = std::chrono::high_resolution_clock::now();
+                        newPartition = signatureRefiner.refine(oldPartition, signature);
+                        auto refinementEnd = std::chrono::high_resolution_clock::now();
+                        totalRefinementTime += (refinementEnd - refinementStart);
+                        
+                        signatureTime += std::chrono::duration_cast<std::chrono::milliseconds>(signatureEnd - signatureStart).count();
+                        refinementTime += std::chrono::duration_cast<std::chrono::milliseconds>(refinementEnd - refinementStart).count();
+                        
+                        // Potentially exit early in case we have refined the partition already.
+                        if (newPartition.getNumberOfBlocks() > oldPartition.getNumberOfBlocks()) {
+                            refined = true;
+                        }
+                        ++index;
+                    }
+                    
+                    auto totalTimeInRefinement = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now() - start).count();
+                    ++refinements;
+                    STORM_LOG_DEBUG("Refinement " << refinements << " produced " << newPartition.getNumberOfBlocks() << " blocks and was completed in " << totalTimeInRefinement << "ms (signature: " << signatureTime << "ms, refinement: " << refinementTime << "ms).");
+                    return newPartition;
+                } else {
+                    return oldPartition;
+                }
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            bool PartitionRefiner<DdType, ValueType>::refineWrtRewardModel(storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel) {
+                STORM_LOG_THROW(!rewardModel.hasTransitionRewards(), storm::exceptions::NotSupportedException, "Symbolic bisimulation currently does not support transition rewards.");
+                STORM_LOG_TRACE("Refining with respect to reward model.");
+                bool result = false;
+                if (rewardModel.hasStateRewards()) {
+                    result |= refineWrtStateRewards(rewardModel.getStateRewardVector());
+                }
+                if (rewardModel.hasStateActionRewards()) {
+                    result |= refineWrtStateActionRewards(rewardModel.getStateActionRewardVector());
+                }
+                return result;
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            bool PartitionRefiner<DdType, ValueType>::refineWrtStateRewards(storm::dd::Add<DdType, ValueType> const& stateRewards) {
+                STORM_LOG_TRACE("Refining with respect to state rewards.");
+                Partition<DdType, ValueType> newPartition = signatureRefiner.refine(statePartition, Signature<DdType, ValueType>(stateRewards));
+                if (newPartition == statePartition) {
+                    return false;
+                } else {
+                    this->statePartition = newPartition;
+                    return true;
+                }
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            bool PartitionRefiner<DdType, ValueType>::refineWrtStateActionRewards(storm::dd::Add<DdType, ValueType> const& stateActionRewards) {
+                STORM_LOG_TRACE("Refining with respect to state-action rewards.");
+                // By default, we treat state-action rewards just like state rewards, which works for DTMCs and CTMCs.
+                return refineWrtStateRewards(stateActionRewards);
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> const& PartitionRefiner<DdType, ValueType>::getStatePartition() const {
+                return statePartition;
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            Status PartitionRefiner<DdType, ValueType>::getStatus() const {
+                return status;
+            }
+            
+            template class PartitionRefiner<storm::dd::DdType::CUDD, double>;
+            
+            template class PartitionRefiner<storm::dd::DdType::Sylvan, double>;
+            template class PartitionRefiner<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class PartitionRefiner<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/PartitionRefiner.h b/src/storm/storage/dd/bisimulation/PartitionRefiner.h
new file mode 100644
index 000000000..5ae860201
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/PartitionRefiner.h
@@ -0,0 +1,79 @@
+#pragma once
+
+#include <chrono>
+
+#include "storm/storage/dd/bisimulation/Status.h"
+#include "storm/storage/dd/bisimulation/Partition.h"
+
+#include "storm/storage/dd/bisimulation/SignatureComputer.h"
+#include "storm/storage/dd/bisimulation/SignatureRefiner.h"
+
+namespace storm {
+    namespace models {
+        namespace symbolic {
+            template <storm::dd::DdType DdType, typename ValueType>
+            class Model;
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            class StandardRewardModel;
+        }
+    }
+    
+    namespace dd {
+        namespace bisimulation {
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            class PartitionRefiner {
+            public:
+                PartitionRefiner(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& initialStatePartition);
+                
+                virtual ~PartitionRefiner() = default;
+                
+                /*!
+                 * Refines the partition. 
+                 *
+                 * @param mode The signature mode to use.
+                 * @return False iff the partition is stable and no refinement was actually performed.
+                 */
+                virtual bool refine(SignatureMode const& mode = SignatureMode::Eager);
+                
+                /*!
+                 * Refines the partition wrt. the given reward model.
+                 *
+                 * @return True iff a refinement was performed, i.e. the partition was not yet stable wrt. the reward model.
+                 */
+                bool refineWrtRewardModel(storm::models::symbolic::StandardRewardModel<DdType, ValueType> const& rewardModel);
+                
+                /*!
+                 * Retrieves the current state partition in the refinement process.
+                 */
+                Partition<DdType, ValueType> const& getStatePartition() const;
+                
+                /*!
+                 * Retrieves the status of the refinement process.
+                 */
+                Status getStatus() const;
+                
+            protected:
+                Partition<DdType, ValueType> internalRefine(SignatureComputer<DdType, ValueType>& stateSignatureComputer, SignatureRefiner<DdType, ValueType>& signatureRefiner, Partition<DdType, ValueType> const& oldPartition, Partition<DdType, ValueType> const& targetPartition, SignatureMode const& mode = SignatureMode::Eager);
+                
+                virtual bool refineWrtStateRewards(storm::dd::Add<DdType, ValueType> const& stateRewards);
+                virtual bool refineWrtStateActionRewards(storm::dd::Add<DdType, ValueType> const& stateActionRewards);
+                
+                // The current status.
+                Status status;
+                
+                // The number of refinements that were made.
+                uint64_t refinements;
+                
+                // The state partition in the refinement process. Initially set to the initial partition.
+                Partition<DdType, ValueType> statePartition;
+                
+                // The object used to compute the signatures.
+                SignatureComputer<DdType, ValueType> signatureComputer;
+                
+                // The object used to refine the state partition based on the signatures.
+                SignatureRefiner<DdType, ValueType> signatureRefiner;
+                
+                // Time measurements.
+                std::chrono::high_resolution_clock::duration totalSignatureTime = std::chrono::high_resolution_clock::duration::zero();
+                std::chrono::high_resolution_clock::duration totalRefinementTime = std::chrono::high_resolution_clock::duration::zero();
+            };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/PreservationInformation.cpp b/src/storm/storage/dd/bisimulation/PreservationInformation.cpp
new file mode 100644
index 000000000..0569df104
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/PreservationInformation.cpp
@@ -0,0 +1,109 @@
+#include "storm/storage/dd/bisimulation/PreservationInformation.h"
+
+#include "storm/logic/Formulas.h"
+
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/utility/macros.h"
+#include "storm/exceptions/InvalidPropertyException.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            PreservationInformation<DdType, ValueType>::PreservationInformation(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::storage::BisimulationType const& bisimulationType) : PreservationInformation(model, model.getLabels(), bisimulationType) {
+                // Intentionally left empty.
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            PreservationInformation<DdType, ValueType>::PreservationInformation(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<std::string> const& labels, storm::storage::BisimulationType const&) {
+                for (auto const& label : labels) {
+                    this->addLabel(label);
+                    this->addExpression(model.getExpression(label));
+                }
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            PreservationInformation<DdType, ValueType>::PreservationInformation(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<storm::expressions::Expression> const& expressions, storm::storage::BisimulationType const&) {
+                for (auto const& e : expressions) {
+                    this->addExpression(e);
+                }
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            PreservationInformation<DdType, ValueType>::PreservationInformation(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, storm::storage::BisimulationType const&) {
+                if (formulas.empty()) {
+                    // If no formulas are given, default to preserving all labels and all reward models.
+                    for (auto const& label : model.getLabels()) {
+                        this->addLabel(label);
+                        this->addExpression(model.getExpression(label));
+                    }
+                    for (auto const& rewardModel : model.getRewardModels()) {
+                        this->addRewardModel(rewardModel.first);
+                    }
+                } else {
+                    for (auto const& formula : formulas) {
+                        for (auto const& expressionFormula : formula->getAtomicExpressionFormulas()) {
+                            this->addExpression(expressionFormula->getExpression());
+                        }
+                        for (auto const& labelFormula : formula->getAtomicLabelFormulas()) {
+                            std::string const& label = labelFormula->getLabel();
+                            STORM_LOG_THROW(model.hasLabel(label), storm::exceptions::InvalidPropertyException, "Property refers to illegal label '" << label << "'.");
+                            this->addLabel(label);
+                            this->addExpression(model.getExpression(label));
+                        }
+                        for (auto const& rewardModel : formula->getReferencedRewardModels()) {
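+                            // An empty name refers to the default reward model: it is either an unnamed reward model
+                            // of the model or, as a fallback, its unique reward model.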
+                            if (rewardModel == "") {
+                                if (model.hasRewardModel("")) {
+                                    this->addRewardModel(rewardModel);
+                                } else {
+                                    STORM_LOG_THROW(model.hasUniqueRewardModel(), storm::exceptions::InvalidPropertyException, "Property refers to the default reward model, but it does not exist or is not unique.");
+                                    this->addRewardModel(model.getUniqueRewardModelName());
+                                }
+                            } else {
+                                this->addRewardModel(rewardModel);
+                            }
+                        }
+                    }
+                }
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            void PreservationInformation<DdType, ValueType>::addLabel(std::string const& label) {
+                labels.insert(label);
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            void PreservationInformation<DdType, ValueType>::addExpression(storm::expressions::Expression const& expression) {
+                expressions.insert(expression);
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            void PreservationInformation<DdType, ValueType>::addRewardModel(std::string const& name) {
+                rewardModelNames.insert(name);
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            std::set<std::string> const& PreservationInformation<DdType, ValueType>::getLabels() const {
+                return labels;
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            std::set<storm::expressions::Expression> const& PreservationInformation<DdType, ValueType>::getExpressions() const {
+                return expressions;
+            }
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            std::set<std::string> const& PreservationInformation<DdType, ValueType>::getRewardModelNames() const {
+                return rewardModelNames;
+            }
+            
+            template class PreservationInformation<storm::dd::DdType::CUDD, double>;
+            
+            template class PreservationInformation<storm::dd::DdType::Sylvan, double>;
+            template class PreservationInformation<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class PreservationInformation<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/PreservationInformation.h b/src/storm/storage/dd/bisimulation/PreservationInformation.h
new file mode 100644
index 000000000..2286e9bc6
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/PreservationInformation.h
@@ -0,0 +1,45 @@
+#pragma once
+
+#include <set>
+#include <string>
+#include <vector>
+#include <memory>
+
+#include "storm/models/symbolic/Model.h"
+#include "storm/storage/bisimulation/BisimulationType.h"
+
+#include "storm/logic/Formula.h"
+
+#include "storm/storage/expressions/Expression.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
+            template <storm::dd::DdType DdType, typename ValueType>
+            class PreservationInformation {
+            public:
+                PreservationInformation() = default;
+            
+                PreservationInformation(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::storage::BisimulationType const& bisimulationType);
+                PreservationInformation(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<std::string> const& labels, storm::storage::BisimulationType const& bisimulationType);
+                PreservationInformation(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<storm::expressions::Expression> const& expressions, storm::storage::BisimulationType const& bisimulationType);
+                PreservationInformation(storm::models::symbolic::Model<DdType, ValueType> const& model, std::vector<std::shared_ptr<storm::logic::Formula const>> const& formulas, storm::storage::BisimulationType const& bisimulationType);
+                
+                void addLabel(std::string const& label);
+                void addExpression(storm::expressions::Expression const& expression);
+                void addRewardModel(std::string const& name);
+                
+                std::set<std::string> const& getLabels() const;
+                std::set<storm::expressions::Expression> const& getExpressions() const;
+                std::set<std::string> const& getRewardModelNames() const;
+                
+            private:
+                std::set<std::string> labels;
+                std::set<storm::expressions::Expression> expressions;
+                std::set<std::string> rewardModelNames;
+            };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/QuotientExtractor.cpp b/src/storm/storage/dd/bisimulation/QuotientExtractor.cpp
new file mode 100644
index 000000000..6385c1d93
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/QuotientExtractor.cpp
@@ -0,0 +1,1037 @@
+#include "storm/storage/dd/bisimulation/QuotientExtractor.h"
+
+#include <numeric>
+
+#include "storm/storage/dd/DdManager.h"
+
+#include "storm/models/symbolic/Dtmc.h"
+#include "storm/models/symbolic/Ctmc.h"
+#include "storm/models/symbolic/Mdp.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/models/sparse/Dtmc.h"
+#include "storm/models/sparse/Ctmc.h"
+#include "storm/models/sparse/Mdp.h"
+#include "storm/models/sparse/StandardRewardModel.h"
+
+#include "storm/storage/dd/bisimulation/PreservationInformation.h"
+
+#include "storm/storage/dd/cudd/utility.h"
+#include "storm/storage/dd/sylvan/utility.h"
+
+#include "storm/settings/SettingsManager.h"
+
+#include "storm/utility/macros.h"
+#include "storm/exceptions/NotSupportedException.h"
+
+#include "storm/storage/SparseMatrix.h"
+#include "storm/storage/BitVector.h"
+
+#include <sparsepp/spp.h>
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+
+            template<storm::dd::DdType DdType>
+            class InternalRepresentativeComputer;
+
+            template<storm::dd::DdType DdType>
+            class InternalRepresentativeComputerBase {
+            public:
+                InternalRepresentativeComputerBase(storm::dd::Bdd<DdType> const& partitionBdd, std::set<storm::expressions::Variable> const& rowVariables) : rowVariables(rowVariables), partitionBdd(partitionBdd) {
+                    ddManager = &partitionBdd.getDdManager();
+                    internalDdManager = &ddManager->getInternalDdManager();
+                    
+                    // Create state variables cube.
+                    this->rowVariablesCube = ddManager->getBddOne();
+                    for (auto const& var : rowVariables) {
+                        auto const& metaVariable = ddManager->getMetaVariable(var);
+                        this->rowVariablesCube &= metaVariable.getCube();
+                    }
+                }
+                
+            protected:
+                storm::dd::DdManager<DdType> const* ddManager;
+                storm::dd::InternalDdManager<DdType> const* internalDdManager;
+
+                std::set<storm::expressions::Variable> const& rowVariables;
+                storm::dd::Bdd<DdType> rowVariablesCube;
+                
+                storm::dd::Bdd<DdType> partitionBdd;
+            };
+
+            template <>
+            class InternalRepresentativeComputer<storm::dd::DdType::CUDD> : public InternalRepresentativeComputerBase<storm::dd::DdType::CUDD> {
+            public:
+                InternalRepresentativeComputer(storm::dd::Bdd<storm::dd::DdType::CUDD> const& partitionBdd, std::set<storm::expressions::Variable> const& rowVariables) : InternalRepresentativeComputerBase<storm::dd::DdType::CUDD>(partitionBdd, rowVariables) {
+                    this->ddman = this->internalDdManager->getCuddManager().getManager();
+                }
+                
+                storm::dd::Bdd<storm::dd::DdType::CUDD> getRepresentatives() {
+                    return storm::dd::Bdd<storm::dd::DdType::CUDD>(*this->ddManager, storm::dd::InternalBdd<storm::dd::DdType::CUDD>(this->internalDdManager, cudd::BDD(this->internalDdManager->getCuddManager(), this->getRepresentativesRec(this->partitionBdd.getInternalBdd().getCuddDdNode(), this->rowVariablesCube.getInternalBdd().getCuddDdNode()))), this->rowVariables);
+                }
+                
+            private:
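+                // Performs a DFS over the partition BDD restricted to the state (row) variables. Partition nodes
+                // that have been visited before are pruned, because all blocks below them already have a
+                // representative, so the resulting BDD contains exactly one state encoding per block.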
+                DdNodePtr getRepresentativesRec(DdNodePtr partitionNode, DdNodePtr stateVariablesCube) {
+                    if (partitionNode == Cudd_ReadLogicZero(ddman)) {
+                        return partitionNode;
+                    }
+                    
+                    // If we visited the node before, there is no block that we still need to cover.
+                    if (visitedNodes.find(partitionNode) != visitedNodes.end()) {
+                        return Cudd_ReadLogicZero(ddman);
+                    }
+                    
+                    // If we hit a block variable and have not yet terminated the DFS earlier, it means we have a new representative.
+                    if (Cudd_IsConstant(stateVariablesCube)) {
+                        visitedNodes.emplace(partitionNode, true);
+                        return Cudd_ReadOne(ddman);
+                    } else {
+                        bool skipped = false;
+                        DdNodePtr elsePartitionNode;
+                        DdNodePtr thenPartitionNode;
+                        if (Cudd_NodeReadIndex(partitionNode) == Cudd_NodeReadIndex(stateVariablesCube)) {
+                            elsePartitionNode = Cudd_E(partitionNode);
+                            thenPartitionNode = Cudd_T(partitionNode);
+                            
+                            if (Cudd_IsComplement(partitionNode)) {
+                                elsePartitionNode = Cudd_Not(elsePartitionNode);
+                                thenPartitionNode = Cudd_Not(thenPartitionNode);
+                            }
+                        } else {
+                            elsePartitionNode = thenPartitionNode = partitionNode;
+                            skipped = true;
+                        }
+                        
+                        if (!skipped) {
+                            visitedNodes.emplace(partitionNode, true);
+                        }
+                        
+                        // Otherwise, recursively proceed with DFS.
+                        DdNodePtr elseResult = getRepresentativesRec(elsePartitionNode, Cudd_T(stateVariablesCube));
+                        Cudd_Ref(elseResult);
+
+                        DdNodePtr thenResult = nullptr;
+                        if (!skipped) {
+                            thenResult = getRepresentativesRec(thenPartitionNode, Cudd_T(stateVariablesCube));
+                            Cudd_Ref(thenResult);
+                            
+                            if (thenResult == elseResult) {
+                                Cudd_Deref(elseResult);
+                                Cudd_Deref(thenResult);
+                                return elseResult;
+                            } else {
+                                bool complement = Cudd_IsComplement(thenResult);
+                                auto result = cuddUniqueInter(ddman, Cudd_NodeReadIndex(stateVariablesCube), Cudd_Regular(thenResult), complement ? Cudd_Not(elseResult) : elseResult);
+                                Cudd_Deref(elseResult);
+                                Cudd_Deref(thenResult);
+                                return complement ? Cudd_Not(result) : result;
+                            }
+                        } else {
+                            DdNodePtr result;
+                            if (elseResult == Cudd_ReadLogicZero(ddman)) {
+                                result = elseResult;
+                            } else {
+                                result = Cudd_Not(cuddUniqueInter(ddman, Cudd_NodeReadIndex(stateVariablesCube), Cudd_ReadOne(ddman), Cudd_Not(elseResult)));
+                            }
+                            Cudd_Deref(elseResult);
+                            return result;
+                        }
+                    }
+                }
+                
+                ::DdManager* ddman;
+                spp::sparse_hash_map<DdNode const*, bool> visitedNodes;
+            };
+
+            template<>
+            class InternalRepresentativeComputer<storm::dd::DdType::Sylvan> : public InternalRepresentativeComputerBase<storm::dd::DdType::Sylvan> {
+            public:
+                InternalRepresentativeComputer(storm::dd::Bdd<storm::dd::DdType::Sylvan> const& partitionBdd, std::set<storm::expressions::Variable> const& rowVariables) : InternalRepresentativeComputerBase<storm::dd::DdType::Sylvan>(partitionBdd, rowVariables) {
+                    // Intentionally left empty.
+                }
+                
+                storm::dd::Bdd<storm::dd::DdType::Sylvan> getRepresentatives() {
+                    return storm::dd::Bdd<storm::dd::DdType::Sylvan>(*this->ddManager, storm::dd::InternalBdd<storm::dd::DdType::Sylvan>(this->internalDdManager, sylvan::Bdd(this->getRepresentativesRec(this->partitionBdd.getInternalBdd().getSylvanBdd().GetBDD(), this->rowVariablesCube.getInternalBdd().getSylvanBdd().GetBDD()))), this->rowVariables);
+                }
+
+            private:
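+                // Same DFS as in the CUDD specialization above: partition nodes that were already visited are
+                // pruned so that every block contributes exactly one representative state.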
+                BDD getRepresentativesRec(BDD partitionNode, BDD stateVariablesCube) {
+                    if (partitionNode == sylvan_false) {
+                        return sylvan_false;
+                    }
+                    
+                    // If we visited the node before, there is no block that we still need to cover.
+                    if (visitedNodes.find(partitionNode) != visitedNodes.end()) {
+                        return sylvan_false;
+                    }
+                    
+                    // If we hit a block variable and have not yet terminated the DFS earlier, it means we have a new representative.
+                    if (sylvan_isconst(stateVariablesCube)) {
+                        visitedNodes.emplace(partitionNode, true);
+                        return sylvan_true;
+                    } else {
+                        bool skipped = false;
+                        BDD elsePartitionNode;
+                        BDD thenPartitionNode;
+                        if (storm::dd::InternalBdd<storm::dd::DdType::Sylvan>::matchesVariableIndex(partitionNode, sylvan_var(stateVariablesCube))) {
+                            elsePartitionNode = sylvan_low(partitionNode);
+                            thenPartitionNode = sylvan_high(partitionNode);
+                        } else {
+                            elsePartitionNode = thenPartitionNode = partitionNode;
+                            skipped = true;
+                        }
+                        
+                        if (!skipped) {
+                            visitedNodes.emplace(partitionNode, true);
+                        }
+                        
+                        // Otherwise, recursively proceed with DFS.
+                        BDD elseResult = getRepresentativesRec(elsePartitionNode, sylvan_high(stateVariablesCube));
+                        mtbdd_refs_push(elseResult);
+                        
+                        BDD thenResult;
+                        if (!skipped) {
+                            thenResult = getRepresentativesRec(thenPartitionNode, sylvan_high(stateVariablesCube));
+                            mtbdd_refs_push(thenResult);
+                            
+                            if (thenResult == elseResult) {
+                                mtbdd_refs_pop(2);
+                                return elseResult;
+                            } else {
+                                auto result = sylvan_makenode(sylvan_var(stateVariablesCube), elseResult, thenResult);
+                                mtbdd_refs_pop(2);
+                                return result;
+                            }
+                        } else {
+                            BDD result;
+                            if (elseResult == sylvan_false) {
+                                result = elseResult;
+                            } else {
+                                result = sylvan_makenode(sylvan_var(stateVariablesCube), elseResult, sylvan_false);
+                            }
+                            mtbdd_refs_pop(1);
+                            return result;
+                        }
+                    }
+                }
+                
+                spp::sparse_hash_map<BDD, bool> visitedNodes;
+            };
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            class InternalSparseQuotientExtractor;
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            class InternalSparseQuotientExtractorBase {
+            public:
+                InternalSparseQuotientExtractorBase(storm::models::symbolic::Model<DdType, ValueType> const& model, storm::dd::Bdd<DdType> const& partitionBdd, storm::expressions::Variable const& blockVariable, uint64_t numberOfBlocks, storm::dd::Bdd<DdType> const& representatives) : model(model), manager(model.getManager()), isNondeterministic(false), partitionBdd(partitionBdd), numberOfBlocks(numberOfBlocks), blockVariable(blockVariable), representatives(representatives), matrixEntriesCreated(false) {
+                    // Create cubes.
+                    rowVariablesCube = manager.getBddOne();
+                    for (auto const& variable : model.getRowVariables()) {
+                        auto const& ddMetaVariable = manager.getMetaVariable(variable);
+                        rowVariablesCube &= ddMetaVariable.getCube();
+                    }
+                    columnVariablesCube = manager.getBddOne();
+                    for (auto const& variable : model.getColumnVariables()) {
+                        auto const& ddMetaVariable = manager.getMetaVariable(variable);
+                        columnVariablesCube &= ddMetaVariable.getCube();
+                    }
+                    nondeterminismVariablesCube = manager.getBddOne();
+                    for (auto const& variable : model.getNondeterminismVariables()) {
+                        auto const& ddMetaVariable = manager.getMetaVariable(variable);
+                        nondeterminismVariablesCube &= ddMetaVariable.getCube();
+                    }
+                    allSourceVariablesCube = rowVariablesCube && nondeterminismVariablesCube;
+                    isNondeterministic = !nondeterminismVariablesCube.isOne();
+                    
+                    // Create ODDs.
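+                    // The ODD over the representatives assigns every block a consecutive offset (its state index in
+                    // the quotient). For nondeterministic models, a second ODD over the representatives and their
+                    // enabled choices is used to index the rows of the quotient matrix.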
+                    this->odd = representatives.createOdd();
+                    if (this->isNondeterministic) {
+                        this->nondeterminismOdd = (model.getQualitativeTransitionMatrix().existsAbstract(model.getColumnVariables()) && this->representatives).createOdd();
+                    }
+
+                    STORM_LOG_TRACE("Partition has " << partitionBdd.existsAbstract(model.getRowVariables()).getNonZeroCount() << " states in " << this->numberOfBlocks << " blocks.");
+                }
+
+                storm::storage::SparseMatrix<ValueType> extractTransitionMatrix(storm::dd::Add<DdType, ValueType> const& transitionMatrix) {
+                    return extractMatrixInternal(transitionMatrix);
+                }
+                
+                std::vector<ValueType> extractStateVector(storm::dd::Add<DdType, ValueType> const& vector) {
+                    return extractVectorInternal(vector, this->rowVariablesCube, this->odd);
+                }
+
+                std::vector<ValueType> extractStateActionVector(storm::dd::Add<DdType, ValueType> const& vector) {
+                    if (!this->isNondeterministic) {
+                        return extractStateVector(vector);
+                    } else {
+                        STORM_LOG_ASSERT(!this->rowPermutation.empty(), "Expected proper row permutation.");
+                        std::vector<ValueType> valueVector = extractVectorInternal(vector, this->allSourceVariablesCube, this->nondeterminismOdd);
+                        
+                        // Reorder the values according to the known row permutation.
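+                        // The permutation is applied in place by following its cycles; as a side effect,
+                        // rowPermutation is sorted back to the identity.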
+                        for (uint64_t position = 0; position < valueVector.size(); ) {
+                            if (rowPermutation[position] != position) {
+                                std::swap(valueVector[position], valueVector[rowPermutation[position]]);
+                                std::swap(rowPermutation[position], rowPermutation[rowPermutation[position]]);
+                            } else {
+                                ++position;
+                            }
+                        }
+
+                        return valueVector;
+                    }
+                }
+                
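+                // Translates a symbolic state set into a bit vector over the quotient states. extractSetAll marks a
+                // block iff its representative is in the set (which, assuming the partition respects the set,
+                // coincides with all states of the block being contained), whereas extractSetExists marks a block
+                // iff at least one of its states is in the set.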
+                storm::storage::BitVector extractSetAll(storm::dd::Bdd<DdType> const& set) {
+                    return (set && representatives).toVector(this->odd);
+                }
+
+                storm::storage::BitVector extractSetExists(storm::dd::Bdd<DdType> const& set) {
+                    return ((set && partitionBdd).existsAbstract(model.getRowVariables()) && partitionBdd && representatives).existsAbstract({this->blockVariable}).toVector(this->odd);
+                }
+
+            protected:
+                virtual storm::storage::SparseMatrix<ValueType> extractMatrixInternal(storm::dd::Add<DdType, ValueType> const& matrix) = 0;
+                
+                virtual std::vector<ValueType> extractVectorInternal(storm::dd::Add<DdType, ValueType> const& vector, storm::dd::Bdd<DdType> const& variablesCube, storm::dd::Odd const& odd) = 0;
+                
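+                // Assembles the quotient matrix from the previously collected entries: every row is sorted by
+                // column and, for nondeterministic models, the rows are permuted so that all rows (choices)
+                // belonging to the same quotient state form a consecutive row group.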
+                storm::storage::SparseMatrix<ValueType> createMatrixFromEntries() {
+                    for (auto& row : matrixEntries) {
+                        std::sort(row.begin(), row.end(),
+                                  [] (storm::storage::MatrixEntry<uint_fast64_t, ValueType> const& a, storm::storage::MatrixEntry<uint_fast64_t, ValueType> const& b) {
+                                      return a.getColumn() < b.getColumn();
+                                  });
+                    }
+                    
+                    rowPermutation = std::vector<uint64_t>(matrixEntries.size());
+                    std::iota(rowPermutation.begin(), rowPermutation.end(), 0ull);
+                    if (this->isNondeterministic) {
+                        std::sort(rowPermutation.begin(), rowPermutation.end(), [this] (uint64_t first, uint64_t second) { return this->rowToState[first] < this->rowToState[second]; } );
+                    }
+                    
+                    uint64_t rowCounter = 0;
+                    uint64_t lastState = this->isNondeterministic ? rowToState[rowPermutation.front()] : 0;
+                    storm::storage::SparseMatrixBuilder<ValueType> builder(matrixEntries.size(), this->numberOfBlocks, 0, true, this->isNondeterministic);
+                    if (this->isNondeterministic) {
+                        builder.newRowGroup(0);
+                    }
+                    for (auto& rowIdx : rowPermutation) {
+                        // For nondeterministic models, open a new row group.
+                        if (this->isNondeterministic && rowToState[rowIdx] != lastState) {
+                            builder.newRowGroup(rowCounter);
+                            lastState = rowToState[rowIdx];
+                        }
+                        
+                        auto& row = matrixEntries[rowIdx];
+                        for (auto const& entry : row) {
+                            builder.addNextValue(rowCounter, entry.getColumn(), entry.getValue());
+                        }
+                        
+                        // Free storage for row.
+                        row.clear();
+                        row.shrink_to_fit();
+                        
+                        ++rowCounter;
+                    }
+                    
+                    rowToState.clear();
+                    rowToState.shrink_to_fit();
+                    matrixEntries.clear();
+                    matrixEntries.shrink_to_fit();
+                    
+                    return builder.build();
+                }
+
+                void addMatrixEntry(uint64_t row, uint64_t column, ValueType const& value) {
+                    this->matrixEntries[row].emplace_back(column, value);
+                }
+                
+                void createMatrixEntryStorage() {
+                    if (matrixEntriesCreated) {
+                        matrixEntries.clear();
+                        if (isNondeterministic) {
+                            rowToState.clear();
+                        }
+                    }
+                    matrixEntries.resize(this->isNondeterministic ? nondeterminismOdd.getTotalOffset() : odd.getTotalOffset());
+                    if (isNondeterministic) {
+                        rowToState.resize(matrixEntries.size());
+                    }
+                }
+                
+                void assignRowToState(uint64_t row, uint64_t state) {
+                    rowToState[row] = state;
+                }
+
+                storm::models::symbolic::Model<DdType, ValueType> const& model;
+                
+                // The manager responsible for the DDs.
+                storm::dd::DdManager<DdType> const& manager;
+                
+                // A flag that stores whether we need to take care of nondeterminism.
+                bool isNondeterministic;
+                
+                // Useful cubes needed in the translation.
+                storm::dd::Bdd<DdType> rowVariablesCube;
+                storm::dd::Bdd<DdType> columnVariablesCube;
+                storm::dd::Bdd<DdType> allSourceVariablesCube;
+                storm::dd::Bdd<DdType> nondeterminismVariablesCube;
+                
+                // Information about the state partition.
+                storm::dd::Bdd<DdType> partitionBdd;
+                uint64_t numberOfBlocks;
+                storm::expressions::Variable blockVariable;
+                storm::dd::Bdd<DdType> representatives;
+                storm::dd::Odd odd;
+                storm::dd::Odd nondeterminismOdd;
+                
+                // A flag that stores whether the underlying storage for matrix entries has been created.
+                bool matrixEntriesCreated;
+                
+                // The entries of the quotient matrix that is built.
+                std::vector<std::vector<storm::storage::MatrixEntry<uint_fast64_t, ValueType>>> matrixEntries;
+                
+                // A vector storing for each row which state it belongs to.
+                std::vector<uint64_t> rowToState;
+                
+                // A vector storing the row permutation for nondeterministic models.
+                std::vector<uint64_t> rowPermutation;
+            };
+            
+            template<typename ValueType>
+            class InternalSparseQuotientExtractor<storm::dd::DdType::CUDD, ValueType> : public InternalSparseQuotientExtractorBase<storm::dd::DdType::CUDD, ValueType> {
+            public:
+                InternalSparseQuotientExtractor(storm::models::symbolic::Model<storm::dd::DdType::CUDD, ValueType> const& model, storm::dd::Bdd<storm::dd::DdType::CUDD> const& partitionBdd, storm::expressions::Variable const& blockVariable, uint64_t numberOfBlocks, storm::dd::Bdd<storm::dd::DdType::CUDD> const& representatives) : InternalSparseQuotientExtractorBase<storm::dd::DdType::CUDD, ValueType>(model, partitionBdd, blockVariable, numberOfBlocks, representatives), ddman(this->manager.getInternalDdManager().getCuddManager().getManager()) {
+                    this->createBlockToOffsetMapping();
+                }
+            
+            private:
+                virtual storm::storage::SparseMatrix<ValueType> extractMatrixInternal(storm::dd::Add<storm::dd::DdType::CUDD, ValueType> const& matrix) override {
+                    this->createMatrixEntryStorage();
+                    extractTransitionMatrixRec(matrix.getInternalAdd().getCuddDdNode(), this->isNondeterministic ? this->nondeterminismOdd : this->odd, 0, this->partitionBdd.getInternalBdd().getCuddDdNode(), this->representatives.getInternalBdd().getCuddDdNode(), this->allSourceVariablesCube.getInternalBdd().getCuddDdNode(), this->nondeterminismVariablesCube.getInternalBdd().getCuddDdNode(), this->isNondeterministic ? &this->odd : nullptr, 0);
+                    return this->createMatrixFromEntries();
+                }
+                
+                virtual std::vector<ValueType> extractVectorInternal(storm::dd::Add<storm::dd::DdType::CUDD, ValueType> const& vector, storm::dd::Bdd<storm::dd::DdType::CUDD> const& variablesCube, storm::dd::Odd const& odd) override {
+                    std::vector<ValueType> result(odd.getTotalOffset());
+                    extractVectorRec(vector.getInternalAdd().getCuddDdNode(), this->representatives.getInternalBdd().getCuddDdNode(), variablesCube.getInternalBdd().getCuddDdNode(), odd, 0, result);
+                    return result;
+                }
+                
+                void createBlockToOffsetMapping() {
+                    this->createBlockToOffsetMappingRec(this->partitionBdd.getInternalBdd().getCuddDdNode(), this->representatives.getInternalBdd().getCuddDdNode(), this->rowVariablesCube.getInternalBdd().getCuddDdNode(), this->odd, 0);
+                    STORM_LOG_ASSERT(blockToOffset.size() == this->numberOfBlocks, "Mismatching block-to-offset mapping: " << blockToOffset.size() << " vs. " << this->numberOfBlocks << ".");
+                }
+                
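+                // Descends the partition BDD and the representatives BDD over the row variables in lock-step with
+                // the ODD. Once the row variables are exhausted, the remaining partition sub-BDD (over the block
+                // variables) uniquely identifies a block and is mapped to the quotient offset of its representative.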
+                void createBlockToOffsetMappingRec(DdNodePtr partitionNode, DdNodePtr representativesNode, DdNodePtr variables, storm::dd::Odd const& odd, uint64_t offset) {
+                    STORM_LOG_ASSERT(partitionNode != Cudd_ReadLogicZero(ddman) || representativesNode == Cudd_ReadLogicZero(ddman), "Expected representative to be zero if the partition is zero.");
+                    if (representativesNode == Cudd_ReadLogicZero(ddman)) {
+                        return;
+                    }
+                    
+                    if (Cudd_IsConstant(variables)) {
+                        STORM_LOG_ASSERT(odd.isTerminalNode(), "Expected terminal node.");
+                        STORM_LOG_ASSERT(blockToOffset.find(partitionNode) == blockToOffset.end(), "Duplicate entry.");
+                        blockToOffset[partitionNode] = offset;
+                    } else {
+                        STORM_LOG_ASSERT(!odd.isTerminalNode(), "Expected non-terminal node.");
+                        DdNodePtr partitionT;
+                        DdNodePtr partitionE;
+                        if (Cudd_NodeReadIndex(partitionNode) == Cudd_NodeReadIndex(variables)) {
+                            partitionT = Cudd_T(partitionNode);
+                            partitionE = Cudd_E(partitionNode);
+
+                            if (Cudd_IsComplement(partitionNode)) {
+                                partitionE = Cudd_Not(partitionE);
+                                partitionT = Cudd_Not(partitionT);
+                            }
+                        } else {
+                            partitionT = partitionE = partitionNode;
+                        }
+                        
+                        DdNodePtr representativesT;
+                        DdNodePtr representativesE;
+                        if (Cudd_NodeReadIndex(representativesNode) == Cudd_NodeReadIndex(variables)) {
+                            representativesT = Cudd_T(representativesNode);
+                            representativesE = Cudd_E(representativesNode);
+                            
+                            if (Cudd_IsComplement(representativesNode)) {
+                                representativesE = Cudd_Not(representativesE);
+                                representativesT = Cudd_Not(representativesT);
+                            }
+                        } else {
+                            representativesT = representativesE = representativesNode;
+                        }
+                        
+                        createBlockToOffsetMappingRec(partitionE, representativesE, Cudd_T(variables), odd.getElseSuccessor(), offset);
+                        createBlockToOffsetMappingRec(partitionT, representativesT, Cudd_T(variables), odd.getThenSuccessor(), offset + odd.getElseOffset());
+                    }
+                }
+                
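+                // Flattens a symbolic vector (an ADD over the row variables) into an explicit std::vector: the
+                // recursion follows the representatives BDD, so only representative states contribute, and each
+                // value is written at the offset determined by the given ODD.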
+                void extractVectorRec(DdNodePtr vector, DdNodePtr representativesNode, DdNodePtr variables, storm::dd::Odd const& odd, uint64_t offset, std::vector<ValueType>& result) {
+                    if (representativesNode == Cudd_ReadLogicZero(ddman)) {
+                        return;
+                    }
+                    
+                    if (Cudd_IsConstant(variables)) {
+                        result[offset] = Cudd_V(vector);
+                    } else {
+                        DdNodePtr vectorT;
+                        DdNodePtr vectorE;
+                        if (Cudd_NodeReadIndex(vector) == Cudd_NodeReadIndex(variables)) {
+                            vectorT = Cudd_T(vector);
+                            vectorE = Cudd_E(vector);
+                        } else {
+                            vectorT = vectorE = vector;
+                        }
+                        
+                        DdNodePtr representativesT;
+                        DdNodePtr representativesE;
+                        if (Cudd_NodeReadIndex(representativesNode) == Cudd_NodeReadIndex(variables)) {
+                            representativesT = Cudd_T(representativesNode);
+                            representativesE = Cudd_E(representativesNode);
+                            
+                            if (Cudd_IsComplement(representativesNode)) {
+                                representativesT = Cudd_Not(representativesT);
+                                representativesE = Cudd_Not(representativesE);
+                            }
+                        } else {
+                            representativesT = representativesE = representativesNode;
+                        }
+                        
+                        extractVectorRec(vectorE, representativesE, Cudd_T(variables), odd.getElseSuccessor(), offset, result);
+                        extractVectorRec(vectorT, representativesT, Cudd_T(variables), odd.getThenSuccessor(), offset + odd.getElseOffset(), result);
+                    }
+                }
+                
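+                // Extracts the explicit transition matrix of the quotient. The source (row) bits determine the
+                // row offset via the source ODD, and the representatives BDD restricts the rows to one
+                // representative per block. The target (column) bits are used to descend the partition (which is
+                // encoded over row variables); its final node identifies the target block and is translated into
+                // a column index via blockToOffset. Nondeterminism variables, if present, only advance the source
+                // ODD, while the separate state ODD tracks the state offset.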
+                void extractTransitionMatrixRec(DdNodePtr transitionMatrixNode, storm::dd::Odd const& sourceOdd, uint64_t sourceOffset, DdNodePtr targetPartitionNode, DdNodePtr representativesNode, DdNodePtr variables, DdNodePtr nondeterminismVariables, storm::dd::Odd const* stateOdd, uint64_t stateOffset) {
+                    // For the empty DD, we do not need to add any entries. Note that the partition nodes cannot be zero,
+                    // because every state of the model is contained in some block.
+                    if (transitionMatrixNode == Cudd_ReadZero(ddman) || representativesNode == Cudd_ReadLogicZero(ddman)) {
+                        return;
+                    }
+
+                    // If we have moved through all source variables, we must have arrived at a target block encoding.
+                    if (Cudd_IsConstant(variables)) {
+                        STORM_LOG_ASSERT(Cudd_IsConstant(transitionMatrixNode), "Expected constant node.");
+                        this->addMatrixEntry(sourceOffset, blockToOffset.at(targetPartitionNode), Cudd_V(transitionMatrixNode));
+                        if (stateOdd) {
+                            this->assignRowToState(sourceOffset, stateOffset);
+                        }
+                    } else {
+                        // Determine whether the next variable is a nondeterminism variable.
+                        bool nextVariableIsNondeterminismVariable = !Cudd_IsConstant(nondeterminismVariables) && Cudd_NodeReadIndex(nondeterminismVariables) == Cudd_NodeReadIndex(variables);
+                        
+                        if (nextVariableIsNondeterminismVariable) {
+                            DdNodePtr t;
+                            DdNodePtr e;
+                            
+                            // Determine whether the variable was skipped in the matrix.
+                            if (Cudd_NodeReadIndex(transitionMatrixNode) == Cudd_NodeReadIndex(variables)) {
+                                t = Cudd_T(transitionMatrixNode);
+                                e = Cudd_E(transitionMatrixNode);
+                            } else {
+                                t = e = transitionMatrixNode;
+                            }
+                            
+                            STORM_LOG_ASSERT(stateOdd, "Expected separate state ODD.");
+                            extractTransitionMatrixRec(e, sourceOdd.getElseSuccessor(), sourceOffset, targetPartitionNode, representativesNode, Cudd_T(variables), Cudd_T(nondeterminismVariables), stateOdd, stateOffset);
+                            extractTransitionMatrixRec(t, sourceOdd.getThenSuccessor(), sourceOffset + sourceOdd.getElseOffset(), targetPartitionNode, representativesNode, Cudd_T(variables), Cudd_T(nondeterminismVariables), stateOdd, stateOffset);
+                        } else {
+                            DdNodePtr t;
+                            DdNodePtr tt;
+                            DdNodePtr te;
+                            DdNodePtr e;
+                            DdNodePtr et;
+                            DdNodePtr ee;
+                            if (Cudd_NodeReadIndex(transitionMatrixNode) == Cudd_NodeReadIndex(variables)) {
+                                // Source node was not skipped in transition matrix.
+                                t = Cudd_T(transitionMatrixNode);
+                                e = Cudd_E(transitionMatrixNode);
+                            } else {
+                                t = e = transitionMatrixNode;
+                            }
+                            
+                            if (Cudd_NodeReadIndex(t) == Cudd_NodeReadIndex(variables) + 1) {
+                                // Target node was not skipped in transition matrix.
+                                tt = Cudd_T(t);
+                                te = Cudd_E(t);
+                            } else {
+                                // Target node was skipped in transition matrix.
+                                tt = te = t;
+                            }
+                            if (t != e) {
+                                if (Cudd_NodeReadIndex(e) == Cudd_NodeReadIndex(variables) + 1) {
+                                    // Target node was not skipped in transition matrix.
+                                    et = Cudd_T(e);
+                                    ee = Cudd_E(e);
+                                } else {
+                                    // Target node was skipped in transition matrix.
+                                    et = ee = e;
+                                }
+                            } else {
+                                et = tt;
+                                ee = te;
+                            }
+                            
+                            DdNodePtr targetT;
+                            DdNodePtr targetE;
+                            if (Cudd_NodeReadIndex(targetPartitionNode) == Cudd_NodeReadIndex(variables)) {
+                                // Node was not skipped in target partition.
+                                targetT = Cudd_T(targetPartitionNode);
+                                targetE = Cudd_E(targetPartitionNode);
+                                
+                                if (Cudd_IsComplement(targetPartitionNode)) {
+                                    targetT = Cudd_Not(targetT);
+                                    targetE = Cudd_Not(targetE);
+                                }
+                            } else {
+                                // Node was skipped in target partition.
+                                targetT = targetE = targetPartitionNode;
+                            }
+                            
+                            DdNodePtr representativesT;
+                            DdNodePtr representativesE;
+                            if (Cudd_NodeReadIndex(representativesNode) == Cudd_NodeReadIndex(variables)) {
+                                // Node was not skipped in representatives.
+                                representativesT = Cudd_T(representativesNode);
+                                representativesE = Cudd_E(representativesNode);
+                            } else {
+                                // Node was skipped in representatives.
+                                representativesT = representativesE = representativesNode;
+                            }
+                            
+                            if (representativesT != representativesE && Cudd_IsComplement(representativesNode)) {
+                                representativesT = Cudd_Not(representativesT);
+                                representativesE = Cudd_Not(representativesE);
+                            }
+                            
+                            extractTransitionMatrixRec(ee, sourceOdd.getElseSuccessor(), sourceOffset, targetE, representativesE, Cudd_T(variables), nondeterminismVariables, stateOdd ? &stateOdd->getElseSuccessor() : stateOdd, stateOffset);
+                            extractTransitionMatrixRec(et, sourceOdd.getElseSuccessor(), sourceOffset, targetT, representativesE, Cudd_T(variables), nondeterminismVariables, stateOdd ? &stateOdd->getElseSuccessor() : stateOdd, stateOffset);
+                            extractTransitionMatrixRec(te, sourceOdd.getThenSuccessor(), sourceOffset + sourceOdd.getElseOffset(), targetE, representativesT, Cudd_T(variables), nondeterminismVariables, stateOdd ? &stateOdd->getThenSuccessor() : stateOdd, stateOffset + (stateOdd ? stateOdd->getElseOffset() : 0));
+                            extractTransitionMatrixRec(tt, sourceOdd.getThenSuccessor(), sourceOffset + sourceOdd.getElseOffset(), targetT, representativesT, Cudd_T(variables), nondeterminismVariables, stateOdd ? &stateOdd->getThenSuccessor() : stateOdd, stateOffset + (stateOdd ? stateOdd->getElseOffset() : 0));
+                        }
+                    }
+                }
+
+                ::DdManager* ddman;
+                
+                // A mapping from blocks (stored in terms of a DD node) to the offset of the corresponding block.
+                spp::sparse_hash_map<DdNode const*, uint64_t> blockToOffset;
+            };
+
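+            // Specialization for Sylvan. It mirrors the CUDD specialization above, but uses Sylvan's node
+            // interface (sylvan_high/sylvan_low and MTBDD leaves) instead of CUDD's; explicit complement-edge
+            // handling is not needed here.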
+            template<typename ValueType>
+            class InternalSparseQuotientExtractor<storm::dd::DdType::Sylvan, ValueType> : public InternalSparseQuotientExtractorBase<storm::dd::DdType::Sylvan, ValueType> {
+            public:
+                InternalSparseQuotientExtractor(storm::models::symbolic::Model<storm::dd::DdType::Sylvan, ValueType> const& model, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& partitionBdd, storm::expressions::Variable const& blockVariable, uint64_t numberOfBlocks, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& representatives) : InternalSparseQuotientExtractorBase<storm::dd::DdType::Sylvan, ValueType>(model, partitionBdd, blockVariable, numberOfBlocks, representatives) {
+                    this->createBlockToOffsetMapping();
+                }
+                
+            private:
+                virtual storm::storage::SparseMatrix<ValueType> extractMatrixInternal(storm::dd::Add<storm::dd::DdType::Sylvan, ValueType> const& matrix) override {
+                    this->createMatrixEntryStorage();
+                    extractTransitionMatrixRec(matrix.getInternalAdd().getSylvanMtbdd().GetMTBDD(), this->isNondeterministic ? this->nondeterminismOdd : this->odd, 0, this->partitionBdd.getInternalBdd().getSylvanBdd().GetBDD(), this->representatives.getInternalBdd().getSylvanBdd().GetBDD(), this->allSourceVariablesCube.getInternalBdd().getSylvanBdd().GetBDD(), this->nondeterminismVariablesCube.getInternalBdd().getSylvanBdd().GetBDD(), this->isNondeterministic ? &this->odd : nullptr, 0);
+                    return this->createMatrixFromEntries();
+                }
+                
+                virtual std::vector<ValueType> extractVectorInternal(storm::dd::Add<storm::dd::DdType::Sylvan, ValueType> const& vector, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& variablesCube, storm::dd::Odd const& odd) override {
+                    std::vector<ValueType> result(odd.getTotalOffset());
+                    extractVectorRec(vector.getInternalAdd().getSylvanMtbdd().GetMTBDD(), this->representatives.getInternalBdd().getSylvanBdd().GetBDD(), variablesCube.getInternalBdd().getSylvanBdd().GetBDD(), odd, 0, result);
+                    return result;
+                }
+                
+                void extractVectorRec(MTBDD vector, BDD representativesNode, BDD variables, storm::dd::Odd const& odd, uint64_t offset, std::vector<ValueType>& result) {
+                    if (representativesNode == sylvan_false) {
+                        return;
+                    }
+                    
+                    if (sylvan_isconst(variables)) {
+                        result[offset] = storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::getValue(vector);
+                    } else {
+                        MTBDD vectorT;
+                        MTBDD vectorE;
+                        if (storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::matchesVariableIndex(vector, sylvan_var(variables))) {
+                            vectorT = sylvan_high(vector);
+                            vectorE = sylvan_low(vector);
+                        } else {
+                            vectorT = vectorE = vector;
+                        }
+                        
+                        BDD representativesT;
+                        BDD representativesE;
+                        if (storm::dd::InternalBdd<storm::dd::DdType::Sylvan>::matchesVariableIndex(representativesNode, sylvan_var(variables))) {
+                            representativesT = sylvan_high(representativesNode);
+                            representativesE = sylvan_low(representativesNode);
+                        } else {
+                            representativesT = representativesE = representativesNode;
+                        }
+                        
+                        extractVectorRec(vectorE, representativesE, sylvan_high(variables), odd.getElseSuccessor(), offset, result);
+                        extractVectorRec(vectorT, representativesT, sylvan_high(variables), odd.getThenSuccessor(), offset + odd.getElseOffset(), result);
+                    }
+                }
+                
+                void createBlockToOffsetMapping() {
+                    this->createBlockToOffsetMappingRec(this->partitionBdd.getInternalBdd().getSylvanBdd().GetBDD(), this->representatives.getInternalBdd().getSylvanBdd().GetBDD(), this->rowVariablesCube.getInternalBdd().getSylvanBdd().GetBDD(), this->odd, 0);
+                    STORM_LOG_ASSERT(blockToOffset.size() == this->numberOfBlocks, "Mismatching block-to-offset mapping: " << blockToOffset.size() << " vs. " << this->numberOfBlocks << ".");
+                }
+                
+                void createBlockToOffsetMappingRec(BDD partitionNode, BDD representativesNode, BDD variables, storm::dd::Odd const& odd, uint64_t offset) {
+                    STORM_LOG_ASSERT(partitionNode != sylvan_false || representativesNode == sylvan_false, "Expected representative to be zero if the partition is zero.");
+                    if (representativesNode == sylvan_false) {
+                        return;
+                    }
+                    
+                    if (sylvan_isconst(variables)) {
+                        STORM_LOG_ASSERT(odd.isTerminalNode(), "Expected terminal node.");
+                        STORM_LOG_ASSERT(blockToOffset.find(partitionNode) == blockToOffset.end(), "Duplicate entry.");
+                        blockToOffset[partitionNode] = offset;
+                    } else {
+                        STORM_LOG_ASSERT(!odd.isTerminalNode(), "Expected non-terminal node.");
+                        BDD partitionT;
+                        BDD partitionE;
+                        if (storm::dd::InternalBdd<storm::dd::DdType::Sylvan>::matchesVariableIndex(partitionNode, sylvan_var(variables))) {
+                            partitionT = sylvan_high(partitionNode);
+                            partitionE = sylvan_low(partitionNode);
+                        } else {
+                            partitionT = partitionE = partitionNode;
+                        }
+                        
+                        BDD representativesT;
+                        BDD representativesE;
+                        if (storm::dd::InternalBdd<storm::dd::DdType::Sylvan>::matchesVariableIndex(representativesNode, sylvan_var(variables))) {
+                            representativesT = sylvan_high(representativesNode);
+                            representativesE = sylvan_low(representativesNode);
+                        } else {
+                            representativesT = representativesE = representativesNode;
+                        }
+                        
+                        createBlockToOffsetMappingRec(partitionE, representativesE, sylvan_high(variables), odd.getElseSuccessor(), offset);
+                        createBlockToOffsetMappingRec(partitionT, representativesT, sylvan_high(variables), odd.getThenSuccessor(), offset + odd.getElseOffset());
+                    }
+                }
+                
+                void extractTransitionMatrixRec(MTBDD transitionMatrixNode, storm::dd::Odd const& sourceOdd, uint64_t sourceOffset, BDD targetPartitionNode, BDD representativesNode, BDD variables, BDD nondeterminismVariables, storm::dd::Odd const* stateOdd, uint64_t stateOffset) {
+                    // For the empty DD, we do not need to add any entries. Note that the partition nodes cannot be zero,
+                    // because every state of the model is contained in some block.
+                    if (mtbdd_iszero(transitionMatrixNode) || representativesNode == sylvan_false) {
+                        return;
+                    }
+                    
+                    // If we have moved through all source variables, we must have arrived at a target block encoding.
+                    if (sylvan_isconst(variables)) {
+                        STORM_LOG_ASSERT(mtbdd_isleaf(transitionMatrixNode), "Expected constant node.");
+                        this->addMatrixEntry(sourceOffset, blockToOffset.at(targetPartitionNode), storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::getValue(transitionMatrixNode));
+                        if (stateOdd) {
+                            this->assignRowToState(sourceOffset, stateOffset);
+                        }
+                    } else {
+                        // Determine whether the next variable is a nondeterminism variable.
+                        bool nextVariableIsNondeterminismVariable = !sylvan_isconst(nondeterminismVariables) && sylvan_var(nondeterminismVariables) == sylvan_var(variables);
+                        
+                        if (nextVariableIsNondeterminismVariable) {
+                            MTBDD t;
+                            MTBDD e;
+                            
+                            // Determine whether the variable was skipped in the matrix.
+                            if (storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::matchesVariableIndex(transitionMatrixNode, sylvan_var(variables))) {
+                                t = sylvan_high(transitionMatrixNode);
+                                e = sylvan_low(transitionMatrixNode);
+                            } else {
+                                t = e = transitionMatrixNode;
+                            }
+                            
+                            STORM_LOG_ASSERT(stateOdd, "Expected separate state ODD.");
+                            extractTransitionMatrixRec(e, sourceOdd.getElseSuccessor(), sourceOffset, targetPartitionNode, representativesNode, sylvan_high(variables), sylvan_high(nondeterminismVariables), stateOdd, stateOffset);
+                            extractTransitionMatrixRec(t, sourceOdd.getThenSuccessor(), sourceOffset + sourceOdd.getElseOffset(), targetPartitionNode, representativesNode, sylvan_high(variables), sylvan_high(nondeterminismVariables), stateOdd, stateOffset);
+                        } else {
+                            MTBDD t;
+                            MTBDD tt;
+                            MTBDD te;
+                            MTBDD e;
+                            MTBDD et;
+                            MTBDD ee;
+                            if (storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::matchesVariableIndex(transitionMatrixNode, sylvan_var(variables))) {
+                                // Source node was not skipped in transition matrix.
+                                t = sylvan_high(transitionMatrixNode);
+                                e = sylvan_low(transitionMatrixNode);
+                            } else {
+                                t = e = transitionMatrixNode;
+                            }
+                            
+                            if (storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::matchesVariableIndex(t, sylvan_var(variables) + 1)) {
+                                // Target node was not skipped in transition matrix.
+                                tt = sylvan_high(t);
+                                te = sylvan_low(t);
+                            } else {
+                                // Target node was skipped in transition matrix.
+                                tt = te = t;
+                            }
+                            if (t != e) {
+                                if (storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::matchesVariableIndex(e, sylvan_var(variables) + 1)) {
+                                    // Target node was not skipped in transition matrix.
+                                    et = sylvan_high(e);
+                                    ee = sylvan_low(e);
+                                } else {
+                                    // Target node was skipped in transition matrix.
+                                    et = ee = e;
+                                }
+                            } else {
+                                et = tt;
+                                ee = te;
+                            }
+                            
+                            BDD targetT;
+                            BDD targetE;
+                            if (storm::dd::InternalBdd<storm::dd::DdType::Sylvan>::matchesVariableIndex(targetPartitionNode, sylvan_var(variables))) {
+                                // Node was not skipped in target partition.
+                                targetT = sylvan_high(targetPartitionNode);
+                                targetE = sylvan_low(targetPartitionNode);
+                            } else {
+                                // Node was skipped in target partition.
+                                targetT = targetE = targetPartitionNode;
+                            }
+                            
+                            BDD representativesT;
+                            BDD representativesE;
+                            if (storm::dd::InternalBdd<storm::dd::DdType::Sylvan>::matchesVariableIndex(representativesNode, sylvan_var(variables))) {
+                                // Node was not skipped in representatives.
+                                representativesT = sylvan_high(representativesNode);
+                                representativesE = sylvan_low(representativesNode);
+                            } else {
+                                // Node was skipped in representatives.
+                                representativesT = representativesE = representativesNode;
+                            }
+                            
+                            extractTransitionMatrixRec(ee, sourceOdd.getElseSuccessor(), sourceOffset, targetE, representativesE, sylvan_high(variables), nondeterminismVariables, stateOdd ? &stateOdd->getElseSuccessor() : stateOdd, stateOffset);
+                            extractTransitionMatrixRec(et, sourceOdd.getElseSuccessor(), sourceOffset, targetT, representativesE, sylvan_high(variables), nondeterminismVariables, stateOdd ? &stateOdd->getElseSuccessor() : stateOdd, stateOffset);
+                            extractTransitionMatrixRec(te, sourceOdd.getThenSuccessor(), sourceOffset + sourceOdd.getElseOffset(), targetE, representativesT, sylvan_high(variables), nondeterminismVariables, stateOdd ? &stateOdd->getThenSuccessor() : stateOdd, stateOffset + (stateOdd ? stateOdd->getElseOffset() : 0));
+                            extractTransitionMatrixRec(tt, sourceOdd.getThenSuccessor(), sourceOffset + sourceOdd.getElseOffset(), targetT, representativesT, sylvan_high(variables), nondeterminismVariables, stateOdd ? &stateOdd->getThenSuccessor() : stateOdd, stateOffset + (stateOdd ? stateOdd->getElseOffset() : 0));
+                        }
+                    }
+                }
+                
+                // A mapping from blocks (stored in terms of a DD node) to the offset of the corresponding block.
+                spp::sparse_hash_map<BDD, uint64_t> blockToOffset;
+            };
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            QuotientExtractor<DdType, ValueType>::QuotientExtractor() : useRepresentatives(false) {
+                auto const& settings = storm::settings::getModule<storm::settings::modules::BisimulationSettings>();
+                this->useRepresentatives = settings.isUseRepresentativesSet();
+                this->quotientFormat = settings.getQuotientFormat();
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            std::shared_ptr<storm::models::Model<ValueType>> QuotientExtractor<DdType, ValueType>::extract(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& partition, PreservationInformation<DdType, ValueType> const& preservationInformation) {
+                auto start = std::chrono::high_resolution_clock::now();
+                std::shared_ptr<storm::models::Model<ValueType>> result;
+                if (quotientFormat == storm::settings::modules::BisimulationSettings::QuotientFormat::Sparse) {
+                    result = extractSparseQuotient(model, partition, preservationInformation);
+                } else {
+                    result = extractDdQuotient(model, partition, preservationInformation);
+                }
+                auto end = std::chrono::high_resolution_clock::now();
+                STORM_LOG_TRACE("Quotient extraction completed in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms.");
+                
+                STORM_LOG_THROW(result, storm::exceptions::NotSupportedException, "Quotient could not be extracted.");
+                
+                return result;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            std::shared_ptr<storm::models::sparse::Model<ValueType>> QuotientExtractor<DdType, ValueType>::extractSparseQuotient(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& partition, PreservationInformation<DdType, ValueType> const& preservationInformation) {
+                auto states = partition.getStates().swapVariables(model.getRowColumnMetaVariablePairs());
+                
+                storm::dd::Bdd<DdType> partitionAsBdd = partition.storedAsAdd() ? partition.asAdd().toBdd() : partition.asBdd();
+                partitionAsBdd = partitionAsBdd.renameVariables(model.getColumnVariables(), model.getRowVariables());
+
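+                // Pick one concrete representative state per block; the rows of the sparse quotient are extracted
+                // from these representative states only.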
+                auto start = std::chrono::high_resolution_clock::now();
+                auto representatives = InternalRepresentativeComputer<DdType>(partitionAsBdd, model.getRowVariables()).getRepresentatives();
+                STORM_LOG_ASSERT(representatives.getNonZeroCount() == partition.getNumberOfBlocks(), "Number of representatives does not match the number of partition blocks: " << representatives.getNonZeroCount() << " vs. " << partition.getNumberOfBlocks() << ".");
+                STORM_LOG_ASSERT((representatives && partitionAsBdd).existsAbstract(model.getRowVariables()) == partitionAsBdd.existsAbstract(model.getRowVariables()), "Representatives do not cover all blocks.");
+                InternalSparseQuotientExtractor<DdType, ValueType> sparseExtractor(model, partitionAsBdd, partition.getBlockVariable(), partition.getNumberOfBlocks(), representatives);
+                storm::storage::SparseMatrix<ValueType> quotientTransitionMatrix = sparseExtractor.extractTransitionMatrix(model.getTransitionMatrix());
+                auto end = std::chrono::high_resolution_clock::now();
+                STORM_LOG_TRACE("Quotient transition matrix extracted in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms.");
+                
+                start = std::chrono::high_resolution_clock::now();
+                storm::models::sparse::StateLabeling quotientStateLabeling(partition.getNumberOfBlocks());
+                quotientStateLabeling.addLabel("init", sparseExtractor.extractSetExists(model.getInitialStates()));
+                quotientStateLabeling.addLabel("deadlock", sparseExtractor.extractSetExists(model.getDeadlockStates()));
+                
+                for (auto const& label : preservationInformation.getLabels()) {
+                    quotientStateLabeling.addLabel(label, sparseExtractor.extractSetAll(model.getStates(label)));
+                }
+                for (auto const& expression : preservationInformation.getExpressions()) {
+                    std::stringstream stream;
+                    stream << expression;
+                    std::string expressionAsString = stream.str();
+                    
+                    if (quotientStateLabeling.containsLabel(expressionAsString)) {
+                        STORM_LOG_WARN("Duplicate label '" << expressionAsString << "', dropping second label definition.");
+                    } else {
+                        quotientStateLabeling.addLabel(expressionAsString, sparseExtractor.extractSetAll(model.getStates(expression)));
+                    }
+                }
+                end = std::chrono::high_resolution_clock::now();
+                STORM_LOG_TRACE("Quotient labels extracted in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms.");
+
+                start = std::chrono::high_resolution_clock::now();
+                std::unordered_map<std::string, storm::models::sparse::StandardRewardModel<ValueType>> quotientRewardModels;
+                for (auto const& rewardModelName : preservationInformation.getRewardModelNames()) {
+                    auto const& rewardModel = model.getRewardModel(rewardModelName);
+                    
+                    boost::optional<std::vector<ValueType>> quotientStateRewards;
+                    if (rewardModel.hasStateRewards()) {
+                        quotientStateRewards = sparseExtractor.extractStateVector(rewardModel.getStateRewardVector());
+                    }
+                    
+                    boost::optional<std::vector<ValueType>> quotientStateActionRewards;
+                    if (rewardModel.hasStateActionRewards()) {
+                        quotientStateActionRewards = sparseExtractor.extractStateActionVector(rewardModel.getStateActionRewardVector());
+                    }
+                    
+                    quotientRewardModels.emplace(rewardModelName, storm::models::sparse::StandardRewardModel<ValueType>(std::move(quotientStateRewards), std::move(quotientStateActionRewards), boost::none));
+                }
+                end = std::chrono::high_resolution_clock::now();
+                STORM_LOG_TRACE("Reward models extracted in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms.");
+
+                std::shared_ptr<storm::models::sparse::Model<ValueType>> result;
+                if (model.getType() == storm::models::ModelType::Dtmc) {
+                    result = std::make_shared<storm::models::sparse::Dtmc<ValueType>>(std::move(quotientTransitionMatrix), std::move(quotientStateLabeling), std::move(quotientRewardModels));
+                } else if (model.getType() == storm::models::ModelType::Ctmc) {
+                    result = std::make_shared<storm::models::sparse::Ctmc<ValueType>>(std::move(quotientTransitionMatrix), std::move(quotientStateLabeling), std::move(quotientRewardModels));
+                } else if (model.getType() == storm::models::ModelType::Mdp) {
+                    result = std::make_shared<storm::models::sparse::Mdp<ValueType>>(std::move(quotientTransitionMatrix), std::move(quotientStateLabeling), std::move(quotientRewardModels));
+                }
+                
+                return result;
+            }
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> QuotientExtractor<DdType, ValueType>::extractDdQuotient(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& partition, PreservationInformation<DdType, ValueType> const& preservationInformation) {
+                return extractQuotientUsingBlockVariables(model, partition, preservationInformation);
+            }
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> QuotientExtractor<DdType, ValueType>::extractQuotientUsingBlockVariables(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& partition, PreservationInformation<DdType, ValueType> const& preservationInformation) {
+                auto modelType = model.getType();
+                
+                bool useRepresentativesForThisExtraction = this->useRepresentatives;
+                if (modelType == storm::models::ModelType::Dtmc || modelType == storm::models::ModelType::Ctmc || modelType == storm::models::ModelType::Mdp) {
+                    if (modelType == storm::models::ModelType::Mdp) {
+                        STORM_LOG_WARN_COND(!useRepresentativesForThisExtraction, "Using representatives is unsupported for MDPs, falling back to regular extraction.");
+                        useRepresentativesForThisExtraction = false;
+                    }
+                    
+                    // Sanity checks.
+                    STORM_LOG_ASSERT(partition.getNumberOfStates() == model.getNumberOfStates(), "Mismatching partition size.");
+                    STORM_LOG_ASSERT(partition.getStates().renameVariables(model.getColumnVariables(), model.getRowVariables()) == model.getReachableStates(), "Mismatching partition.");
+                    
+                    std::set<storm::expressions::Variable> blockVariableSet = {partition.getBlockVariable()};
+                    std::set<storm::expressions::Variable> blockPrimeVariableSet = {partition.getPrimedBlockVariable()};
+                    std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> blockMetaVariablePairs = {std::make_pair(partition.getBlockVariable(), partition.getPrimedBlockVariable())};
+                    
+                    storm::dd::Bdd<DdType> partitionAsBdd = partition.storedAsBdd() ? partition.asBdd() : partition.asAdd().notZero();
+                    if (useRepresentativesForThisExtraction) {
+                        storm::dd::Bdd<DdType> partitionAsBddOverPrimedBlockVariable = partitionAsBdd.renameVariables(blockVariableSet, blockPrimeVariableSet);
+                        storm::dd::Bdd<DdType> representativePartition = partitionAsBddOverPrimedBlockVariable.existsAbstractRepresentative(model.getColumnVariables()).renameVariables(model.getColumnVariables(), blockVariableSet);
+                        partitionAsBdd = (representativePartition && partitionAsBddOverPrimedBlockVariable).existsAbstract(blockPrimeVariableSet);
+                    }
+                    
+                    auto start = std::chrono::high_resolution_clock::now();
+                    partitionAsBdd = partitionAsBdd.renameVariables(model.getColumnVariables(), model.getRowVariables());
+                    storm::dd::Bdd<DdType> reachableStates = partitionAsBdd.existsAbstract(model.getRowVariables());
+                    storm::dd::Bdd<DdType> initialStates = (model.getInitialStates() && partitionAsBdd).existsAbstract(model.getRowVariables());
+                    
+                    std::map<std::string, storm::dd::Bdd<DdType>> preservedLabelBdds;
+                    for (auto const& label : preservationInformation.getLabels()) {
+                        preservedLabelBdds.emplace(label, (model.getStates(label) && partitionAsBdd).existsAbstract(model.getRowVariables()));
+                    }
+                    for (auto const& expression : preservationInformation.getExpressions()) {
+                        std::stringstream stream;
+                        stream << expression;
+                        std::string expressionAsString = stream.str();
+                        
+                        auto it = preservedLabelBdds.find(expressionAsString);
+                        if (it != preservedLabelBdds.end()) {
+                            STORM_LOG_WARN("Duplicate label '" << expressionAsString << "', dropping second label definition.");
+                        } else {
+                            preservedLabelBdds.emplace(expressionAsString, (model.getStates(expression) && partitionAsBdd).existsAbstract(model.getRowVariables()));
+                        }
+                    }
+                    auto end = std::chrono::high_resolution_clock::now();
+                    STORM_LOG_TRACE("Quotient labels extracted in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms.");
+
+                    start = std::chrono::high_resolution_clock::now();
+                    std::set<storm::expressions::Variable> blockAndRowVariables;
+                    std::set_union(blockVariableSet.begin(), blockVariableSet.end(), model.getRowVariables().begin(), model.getRowVariables().end(), std::inserter(blockAndRowVariables, blockAndRowVariables.end()));
+                    std::set<storm::expressions::Variable> blockPrimeAndColumnVariables;
+                    std::set_union(blockPrimeVariableSet.begin(), blockPrimeVariableSet.end(), model.getColumnVariables().begin(), model.getColumnVariables().end(), std::inserter(blockPrimeAndColumnVariables, blockPrimeAndColumnVariables.end()));
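+                    // First step of the symbolic quotient matrix: multiplying with the partition over the primed
+                    // block/column variables and summing out the column variables aggregates, for every source
+                    // state, the probability (or rate) of moving into each target block.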
+                    storm::dd::Add<DdType, ValueType> partitionAsAdd = partitionAsBdd.template toAdd<ValueType>();
+                    storm::dd::Add<DdType, ValueType> quotientTransitionMatrix = model.getTransitionMatrix().multiplyMatrix(partitionAsAdd.renameVariables(blockAndRowVariables, blockPrimeAndColumnVariables), model.getColumnVariables());
+                    
+                    // Pick a representative from each block.
+                    auto representatives = InternalRepresentativeComputer<DdType>(partitionAsBdd, model.getRowVariables()).getRepresentatives();
+                    partitionAsBdd &= representatives;
+                    partitionAsAdd *= partitionAsBdd.template toAdd<ValueType>();
+
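+                    // Second step: multiplying with the representative-restricted partition over the row variables
+                    // sums out the source states, effectively renaming each representative's row to its (unprimed)
+                    // block encoding.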
+                    quotientTransitionMatrix = quotientTransitionMatrix.multiplyMatrix(partitionAsAdd, model.getRowVariables());
+                    end = std::chrono::high_resolution_clock::now();
+                    
+                    // Check quotient matrix for sanity.
+                    STORM_LOG_ASSERT(quotientTransitionMatrix.greater(storm::utility::one<ValueType>()).isZero(), "Illegal entries in quotient matrix.");
+                    STORM_LOG_ASSERT(quotientTransitionMatrix.sumAbstract(blockPrimeVariableSet).equalModuloPrecision(quotientTransitionMatrix.notZero().existsAbstract(blockPrimeVariableSet).template toAdd<ValueType>(), ValueType(1e-6)), "Illegal non-probabilistic matrix.");
+                    
+                    STORM_LOG_TRACE("Quotient transition matrix extracted in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms.");
+
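+                    // Quotient states without any outgoing transition are flagged as deadlock states.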
+                    storm::dd::Bdd<DdType> quotientTransitionMatrixBdd = quotientTransitionMatrix.notZero();
+                    storm::dd::Bdd<DdType> deadlockStates = !quotientTransitionMatrixBdd.existsAbstract(blockPrimeVariableSet) && reachableStates;
+                    
+                    start = std::chrono::high_resolution_clock::now();
+                    std::unordered_map<std::string, storm::models::symbolic::StandardRewardModel<DdType, ValueType>> quotientRewardModels;
+                    for (auto const& rewardModelName : preservationInformation.getRewardModelNames()) {
+                        auto const& rewardModel = model.getRewardModel(rewardModelName);
+                        
+                        boost::optional<storm::dd::Add<DdType, ValueType>> quotientStateRewards;
+                        if (rewardModel.hasStateRewards()) {
+                            quotientStateRewards = rewardModel.getStateRewardVector().multiplyMatrix(partitionAsAdd, model.getRowVariables());
+                        }
+                        
+                        boost::optional<storm::dd::Add<DdType, ValueType>> quotientStateActionRewards;
+                        if (rewardModel.hasStateActionRewards()) {
+                            quotientStateActionRewards = rewardModel.getStateActionRewardVector().multiplyMatrix(partitionAsAdd, model.getRowVariables());
+                        }
+                        
+                        quotientRewardModels.emplace(rewardModelName, storm::models::symbolic::StandardRewardModel<DdType, ValueType>(quotientStateRewards, quotientStateActionRewards, boost::none));
+                    }
+                    end = std::chrono::high_resolution_clock::now();
+                    STORM_LOG_TRACE("Reward models extracted in " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms.");
+
+                    if (modelType == storm::models::ModelType::Dtmc) {
+                        return std::shared_ptr<storm::models::symbolic::Dtmc<DdType, ValueType>>(new storm::models::symbolic::Dtmc<DdType, ValueType>(model.getManager().asSharedPointer(), reachableStates, initialStates, deadlockStates, quotientTransitionMatrix, blockVariableSet, blockPrimeVariableSet, blockMetaVariablePairs, preservedLabelBdds, quotientRewardModels));
+                    } else if (modelType == storm::models::ModelType::Ctmc) {
+                        return std::shared_ptr<storm::models::symbolic::Ctmc<DdType, ValueType>>(new storm::models::symbolic::Ctmc<DdType, ValueType>(model.getManager().asSharedPointer(), reachableStates, initialStates, deadlockStates, quotientTransitionMatrix, blockVariableSet, blockPrimeVariableSet, blockMetaVariablePairs, preservedLabelBdds, quotientRewardModels));
+                    } else if (modelType == storm::models::ModelType::Mdp) {
+                        return std::shared_ptr<storm::models::symbolic::Mdp<DdType, ValueType>>(new storm::models::symbolic::Mdp<DdType, ValueType>(model.getManager().asSharedPointer(), reachableStates, initialStates, deadlockStates, quotientTransitionMatrix, blockVariableSet, blockPrimeVariableSet, blockMetaVariablePairs, model.getNondeterminismVariables(), preservedLabelBdds, quotientRewardModels));
+                    } else {
+                        STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Unsupported quotient type.");
+                    }
+                } else {
+                    STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Cannot extract quotient for this model type.");
+                }
+            }
+                        
+            template class QuotientExtractor<storm::dd::DdType::CUDD, double>;
+            
+            template class QuotientExtractor<storm::dd::DdType::Sylvan, double>;
+            template class QuotientExtractor<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class QuotientExtractor<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/QuotientExtractor.h b/src/storm/storage/dd/bisimulation/QuotientExtractor.h
new file mode 100644
index 000000000..dcce31659
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/QuotientExtractor.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#include <memory>
+
+#include "storm/storage/dd/DdType.h"
+
+#include "storm/models/symbolic/Model.h"
+#include "storm/models/sparse/Model.h"
+
+#include "storm/storage/dd/bisimulation/Partition.h"
+#include "storm/storage/dd/bisimulation/PreservationInformation.h"
+
+#include "storm/settings/modules/BisimulationSettings.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
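+            /*!
+             * Extracts the quotient of a symbolic model with respect to a bisimulation partition, either as a
+             * sparse or as a symbolic (DD-based) model, depending on the quotient format configured in the
+             * bisimulation settings.
+             *
+             * Rough usage sketch (the surrounding objects are placeholders):
+             *
+             *     QuotientExtractor<DdType, ValueType> extractor;
+             *     auto quotient = extractor.extract(model, partition, preservationInformation);
+             */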
+            template<storm::dd::DdType DdType, typename ValueType>
+            class QuotientExtractor {
+            public:
+                QuotientExtractor();
+                
+                std::shared_ptr<storm::models::Model<ValueType>> extract(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& partition, PreservationInformation<DdType, ValueType> const& preservationInformation);
+                
+            private:
+                std::shared_ptr<storm::models::sparse::Model<ValueType>> extractSparseQuotient(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& partition, PreservationInformation<DdType, ValueType> const& preservationInformation);
+                
+                std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> extractDdQuotient(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& partition, PreservationInformation<DdType, ValueType> const& preservationInformation);
+                std::shared_ptr<storm::models::symbolic::Model<DdType, ValueType>> extractQuotientUsingBlockVariables(storm::models::symbolic::Model<DdType, ValueType> const& model, Partition<DdType, ValueType> const& partition, PreservationInformation<DdType, ValueType> const& preservationInformation);
+                
+                bool useRepresentatives;
+                storm::settings::modules::BisimulationSettings::QuotientFormat quotientFormat;
+            };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/Signature.cpp b/src/storm/storage/dd/bisimulation/Signature.cpp
new file mode 100644
index 000000000..e4a06feab
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/Signature.cpp
@@ -0,0 +1,25 @@
+#include "storm/storage/dd/bisimulation/Signature.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            Signature<DdType, ValueType>::Signature(storm::dd::Add<DdType, ValueType> const& signatureAdd) : signatureAdd(signatureAdd) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            storm::dd::Add<DdType, ValueType> const& Signature<DdType, ValueType>::getSignatureAdd() const {
+                return signatureAdd;
+            }
+            
+            template class Signature<storm::dd::DdType::CUDD, double>;
+            
+            template class Signature<storm::dd::DdType::Sylvan, double>;
+            template class Signature<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class Signature<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/Signature.h b/src/storm/storage/dd/bisimulation/Signature.h
new file mode 100644
index 000000000..b355b3021
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/Signature.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include "storm/storage/dd/DdType.h"
+
+#include "storm/storage/dd/bisimulation/Partition.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
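+            /*!
+             * Thin wrapper around the ADD that represents the signatures of all states with respect to a
+             * partition (see SignatureComputer).
+             */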
+            template<storm::dd::DdType DdType, typename ValueType>
+            class Signature {
+            public:
+                Signature() = default;
+                Signature(storm::dd::Add<DdType, ValueType> const& signatureAdd);
+                
+                storm::dd::Add<DdType, ValueType> const& getSignatureAdd() const;
+                
+            private:
+                storm::dd::Add<DdType, ValueType> signatureAdd;
+            };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/SignatureComputer.cpp b/src/storm/storage/dd/bisimulation/SignatureComputer.cpp
new file mode 100644
index 000000000..b4336adc8
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/SignatureComputer.cpp
@@ -0,0 +1,159 @@
+#include "storm/storage/dd/bisimulation/SignatureComputer.h"
+
+#include "storm/storage/dd/DdManager.h"
+
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+#include "storm/utility/macros.h"
+#include "storm/exceptions/OutOfRangeException.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            SignatureIterator<DdType, ValueType>::SignatureIterator(SignatureComputer<DdType, ValueType> const& signatureComputer, Partition<DdType, ValueType> const& partition) : signatureComputer(signatureComputer), partition(partition), position(0) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            bool SignatureIterator<DdType, ValueType>::hasNext() const {
+                switch (signatureComputer.getSignatureMode()) {
+                    case SignatureMode::Qualitative:
+                    case SignatureMode::Eager: return position < 1;
+                    case SignatureMode::Lazy: return position < 2;
+                }
+            }
+            
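+            // Returns the next signature to refine against: in Lazy mode the (cheaper) qualitative signature is
+            // produced first and the full signature second, while Eager and Qualitative mode yield exactly one
+            // signature.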
+            template<storm::dd::DdType DdType, typename ValueType>
+            Signature<DdType, ValueType> SignatureIterator<DdType, ValueType>::next() {
+                auto mode = signatureComputer.getSignatureMode();
+                STORM_LOG_THROW((mode == SignatureMode::Eager && position < 1) || (mode == SignatureMode::Lazy && position < 2) || (mode == SignatureMode::Qualitative && position < 1), storm::exceptions::OutOfRangeException, "Iterator is out of range.");
+                Signature<DdType, ValueType> result;
+                
+                if (mode == SignatureMode::Eager) {
+                    if (position == 0) {
+                        result = signatureComputer.getFullSignature(partition);
+                    }
+                } else if (mode == SignatureMode::Lazy) {
+                    if (position == 0) {
+                        result = signatureComputer.getQualitativeSignature(partition);
+                    } else {
+                        result = signatureComputer.getFullSignature(partition);
+                    }
+                } else if (mode == SignatureMode::Qualitative) {
+                    if (position == 0) {
+                        result = signatureComputer.getQualitativeSignature(partition);
+                    }
+                } else {
+                    STORM_LOG_ASSERT(false, "Unknown signature mode.");
+                }
+                
+                ++position;
+                return result;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            SignatureComputer<DdType, ValueType>::SignatureComputer(storm::models::symbolic::Model<DdType, ValueType> const& model, SignatureMode const& mode, bool ensureQualitative) : SignatureComputer(model.getTransitionMatrix(), boost::none, model.getColumnVariables(), mode, ensureQualitative) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            SignatureComputer<DdType, ValueType>::SignatureComputer(storm::dd::Add<DdType, ValueType> const& transitionMatrix, std::set<storm::expressions::Variable> const& columnVariables, SignatureMode const& mode, bool ensureQualitative) : SignatureComputer(transitionMatrix, boost::none, columnVariables, mode, ensureQualitative) {
+                // Intentionally left empty.
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            SignatureComputer<DdType, ValueType>::SignatureComputer(storm::dd::Bdd<DdType> const& qualitativeTransitionMatrix, std::set<storm::expressions::Variable> const& columnVariables, SignatureMode const& mode, bool ensureQualitative) : SignatureComputer(qualitativeTransitionMatrix.template toAdd<ValueType>(), boost::none, columnVariables, mode, ensureQualitative) {
+                // Intentionally left empty.
+            }
+            
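+            // Main constructor: for Sylvan, zero entries of the transition matrix are replaced by the manager's
+            // "undefined" leaf, and an optionally supplied qualitative (0/1) transition matrix is cached either
+            // as a BDD (for Sylvan, or when a qualitative signature is enforced) or as a 0/1 ADD.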
+            template<storm::dd::DdType DdType, typename ValueType>
+            SignatureComputer<DdType, ValueType>::SignatureComputer(storm::dd::Add<DdType, ValueType> const& transitionMatrix, boost::optional<storm::dd::Bdd<DdType>> const& qualitativeTransitionMatrix, std::set<storm::expressions::Variable> const& columnVariables, SignatureMode const& mode, bool ensureQualitative) : transitionMatrix(transitionMatrix), columnVariables(columnVariables), mode(mode), ensureQualitative(ensureQualitative) {
+                if (DdType == storm::dd::DdType::Sylvan) {
+                    this->transitionMatrix = this->transitionMatrix.notZero().ite(this->transitionMatrix, this->transitionMatrix.getDdManager().template getAddUndefined<ValueType>());
+                }
+                
+                if (qualitativeTransitionMatrix) {
+                    if (DdType == storm::dd::DdType::Sylvan || ensureQualitative) {
+                        this->transitionMatrix01 = qualitativeTransitionMatrix.get();
+                    } else {
+                        this->transitionMatrix01 = qualitativeTransitionMatrix.get().template toAdd<ValueType>();
+                    }
+                }
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            SignatureIterator<DdType, ValueType> SignatureComputer<DdType, ValueType>::compute(Partition<DdType, ValueType> const& partition) {
+                return SignatureIterator<DdType, ValueType>(*this, partition);
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            void SignatureComputer<DdType, ValueType>::setSignatureMode(SignatureMode const& newMode) {
+                this->mode = newMode;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            SignatureMode const& SignatureComputer<DdType, ValueType>::getSignatureMode() const {
+                return mode;
+            }
+            
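+            // Computes the full signature of the partition: for every state s and block B, the signature assigns
+            // the value sum_{s' in B} P(s, s'), obtained symbolically by multiplying the transition matrix with
+            // the partition and summing out the column variables.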
+            template<storm::dd::DdType DdType, typename ValueType>
+            Signature<DdType, ValueType> SignatureComputer<DdType, ValueType>::getFullSignature(Partition<DdType, ValueType> const& partition) const {
+                if (partition.storedAsBdd()) {
+                    return Signature<DdType, ValueType>(this->transitionMatrix.multiplyMatrix(partition.asBdd(), columnVariables));
+                } else {
+                    return Signature<DdType, ValueType>(this->transitionMatrix.multiplyMatrix(partition.asAdd(), columnVariables));
+                }
+            }
+            
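+            // Computes the qualitative signature, which only records whether a block can be reached at all
+            // (a 0/1 value per block). The required 0/1 transition matrix is computed lazily and cached on
+            // first use.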
+            template<storm::dd::DdType DdType, typename ValueType>
+            Signature<DdType, ValueType> SignatureComputer<DdType, ValueType>::getQualitativeSignature(Partition<DdType, ValueType> const& partition) const {
+                if (!transitionMatrix01) {
+                    if (DdType == storm::dd::DdType::Sylvan || this->ensureQualitative) {
+                        this->transitionMatrix01 = this->transitionMatrix.notZero();
+                    } else {
+                        this->transitionMatrix01 = this->transitionMatrix.notZero().template toAdd<ValueType>();
+                    }
+                }
+
+                if (partition.storedAsBdd()) {
+                    return Signature<DdType, ValueType>(this->getQualitativeTransitionMatrixAsBdd().andExists(partition.asBdd(), columnVariables).template toAdd<ValueType>());
+                } else {
+                    if (this->qualitativeTransitionMatrixIsBdd()) {
+                        return Signature<DdType, ValueType>(this->getQualitativeTransitionMatrixAsBdd().andExists(partition.asAdd().toBdd(), columnVariables).template toAdd<ValueType>());
+                    } else {
+                        return Signature<DdType, ValueType>(this->getQualitativeTransitionMatrixAsAdd().multiplyMatrix(partition.asAdd(), columnVariables));
+                    }
+                }
+            }
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            bool SignatureComputer<DdType, ValueType>::qualitativeTransitionMatrixIsBdd() const {
+                return transitionMatrix01.get().which() == 0;
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            storm::dd::Bdd<DdType> const& SignatureComputer<DdType, ValueType>::getQualitativeTransitionMatrixAsBdd() const {
+                STORM_LOG_ASSERT(this->transitionMatrix01, "Missing qualitative transition matrix.");
+                return boost::get<storm::dd::Bdd<DdType>>(this->transitionMatrix01.get());
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            storm::dd::Add<DdType, ValueType> const& SignatureComputer<DdType, ValueType>::getQualitativeTransitionMatrixAsAdd() const {
+                STORM_LOG_ASSERT(this->transitionMatrix01, "Missing qualitative transition matrix.");
+                return boost::get<storm::dd::Add<DdType, ValueType>>(this->transitionMatrix01.get());
+            }
+            
+            template class SignatureIterator<storm::dd::DdType::CUDD, double>;
+            template class SignatureIterator<storm::dd::DdType::Sylvan, double>;
+            template class SignatureIterator<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class SignatureIterator<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+            
+            template class SignatureComputer<storm::dd::DdType::CUDD, double>;
+            template class SignatureComputer<storm::dd::DdType::Sylvan, double>;
+            template class SignatureComputer<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class SignatureComputer<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+        }
+    }
+}
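For orientation, the following is a minimal sketch (not part of the patch) of how the pieces above could be driven together: the signatures induced by the current partition are enumerated via `SignatureComputer::compute`, each one is handed to a `SignatureRefiner`, and the process repeats until a pass no longer splits any block. The helper name `computeBisimulationFixedPoint`, the parameter layout, and the stopping criterion are illustrative assumptions; the actual driver logic lives elsewhere in Storm.

```cpp
#include <cstdint>
#include <set>

#include "storm/storage/dd/DdManager.h"
#include "storm/storage/dd/bisimulation/Partition.h"
#include "storm/storage/dd/bisimulation/SignatureComputer.h"
#include "storm/storage/dd/bisimulation/SignatureRefiner.h"

// Illustrative only: refine an initial partition until no block is split anymore.
// All parameters are assumed to be prepared by the caller (e.g. an initial
// partition respecting the labelling and a fresh block meta variable).
template<storm::dd::DdType DdType, typename ValueType>
storm::dd::bisimulation::Partition<DdType, ValueType> computeBisimulationFixedPoint(
        storm::models::symbolic::Model<DdType, ValueType> const& model,
        storm::dd::DdManager<DdType> const& manager,
        storm::dd::bisimulation::Partition<DdType, ValueType> partition,
        storm::expressions::Variable const& blockVariable,
        std::set<storm::expressions::Variable> const& stateVariables,
        std::set<storm::expressions::Variable> const& nondeterminismVariables) {

    storm::dd::bisimulation::SignatureComputer<DdType, ValueType> signatureComputer(model);
    storm::dd::bisimulation::SignatureRefiner<DdType, ValueType> refiner(manager, blockVariable, stateVariables, nondeterminismVariables);

    bool refined = true;
    while (refined) {
        uint64_t oldNumberOfBlocks = partition.getNumberOfBlocks();

        // Compute the signature(s) induced by the current partition and split
        // blocks whose states have different signatures.
        storm::dd::bisimulation::Partition<DdType, ValueType> newPartition = partition;
        auto signatureIterator = signatureComputer.compute(partition);
        while (signatureIterator.hasNext()) {
            newPartition = refiner.refine(newPartition, signatureIterator.next());
        }
        partition = newPartition;

        refined = partition.getNumberOfBlocks() != oldNumberOfBlocks;
    }
    return partition;
}
```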
diff --git a/src/storm/storage/dd/bisimulation/SignatureComputer.h b/src/storm/storage/dd/bisimulation/SignatureComputer.h
new file mode 100644
index 000000000..421acc90a
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/SignatureComputer.h
@@ -0,0 +1,83 @@
+#pragma once
+
+#include <boost/optional.hpp>
+
+#include "storm/storage/dd/DdType.h"
+
+#include "storm/storage/dd/bisimulation/Signature.h"
+#include "storm/storage/dd/bisimulation/Partition.h"
+#include "storm/storage/dd/bisimulation/SignatureMode.h"
+
+#include "storm/models/symbolic/Model.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            class SignatureComputer;
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            class SignatureIterator {
+            public:
+                SignatureIterator(SignatureComputer<DdType, ValueType> const& signatureComputer, Partition<DdType, ValueType> const& partition);
+
+                bool hasNext() const;
+                
+                Signature<DdType, ValueType> next();
+                
+            private:
+                // The signature computer to use.
+                SignatureComputer<DdType, ValueType> const& signatureComputer;
+                
+                // The current partition.
+                Partition<DdType, ValueType> const& partition;
+                
+                // The position in the enumeration.
+                uint64_t position;
+            };
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            class SignatureComputer {
+            public:
+                friend class SignatureIterator<DdType, ValueType>;
+                
+                SignatureComputer(storm::models::symbolic::Model<DdType, ValueType> const& model, SignatureMode const& mode = SignatureMode::Eager, bool ensureQualitative = false);
+                SignatureComputer(storm::dd::Add<DdType, ValueType> const& transitionMatrix, std::set<storm::expressions::Variable> const& columnVariables, SignatureMode const& mode = SignatureMode::Eager, bool ensureQualitative = false);
+                SignatureComputer(storm::dd::Bdd<DdType> const& qualitativeTransitionMatrix, std::set<storm::expressions::Variable> const& columnVariables, SignatureMode const& mode = SignatureMode::Eager, bool ensureQualitative = false);
+                SignatureComputer(storm::dd::Add<DdType, ValueType> const& transitionMatrix, boost::optional<storm::dd::Bdd<DdType>> const& qualitativeTransitionMatrix, std::set<storm::expressions::Variable> const& columnVariables, SignatureMode const& mode = SignatureMode::Eager, bool ensureQualitative = false);
+
+                void setSignatureMode(SignatureMode const& newMode);
+
+                SignatureIterator<DdType, ValueType> compute(Partition<DdType, ValueType> const& partition);
+                
+            private:
+                /// Methods to compute the signatures.
+                Signature<DdType, ValueType> getFullSignature(Partition<DdType, ValueType> const& partition) const;
+                Signature<DdType, ValueType> getQualitativeSignature(Partition<DdType, ValueType> const& partition) const;
+                
+                bool qualitativeTransitionMatrixIsBdd() const;
+                storm::dd::Bdd<DdType> const& getQualitativeTransitionMatrixAsBdd() const;
+                storm::dd::Add<DdType, ValueType> const& getQualitativeTransitionMatrixAsAdd() const;
+                
+                SignatureMode const& getSignatureMode() const;
+                                
+                /// The transition matrix to use for the signature computation.
+                storm::dd::Add<DdType, ValueType> transitionMatrix;
+                
+                /// The set of variables from which to abstract when performing matrix-vector multiplication.
+                std::set<storm::expressions::Variable> columnVariables;
+
+                /// The mode to use for signature computation.
+                SignatureMode mode;
+                
+                /// A flag indicating whether the computation of the qualitative signature needs to ensure that the result is in fact qualitative.
+                bool ensureQualitative;
+                
+                /// Only used when lazy signature computation is enabled.
+                mutable boost::optional<boost::variant<storm::dd::Bdd<DdType>, storm::dd::Add<DdType, ValueType>>> transitionMatrix01;
+            };
+            
+        }
+    }
+}
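The lazily cached 0/1 transition relation above is held in a `boost::optional<boost::variant<Bdd, Add>>`, and `qualitativeTransitionMatrixIsBdd()` dispatches on `which() == 0`. The following self-contained snippet, with `FakeBdd`/`FakeAdd` as stand-ins for the Storm DD types, illustrates that discriminator pattern under those assumptions:

```cpp
#include <iostream>
#include <boost/optional.hpp>
#include <boost/variant.hpp>

// Stand-ins for storm::dd::Bdd / storm::dd::Add, purely to illustrate the pattern.
struct FakeBdd { };
struct FakeAdd { };

using Matrix01 = boost::variant<FakeBdd, FakeAdd>;

bool isBdd(boost::optional<Matrix01> const& matrix) {
    // which() returns the zero-based index of the active alternative, so 0 means
    // the first template argument (the BDD) is currently stored.
    return matrix.get().which() == 0;
}

int main() {
    boost::optional<Matrix01> matrix;          // boost::none: nothing cached yet (lazy mode).
    matrix = Matrix01(FakeBdd{});              // cache as BDD, e.g. when ensureQualitative is set.
    std::cout << std::boolalpha << isBdd(matrix) << '\n';   // true
    matrix = Matrix01(FakeAdd{});              // cache as a 0/1-valued ADD instead.
    std::cout << std::boolalpha << isBdd(matrix) << '\n';   // false
    // Retrieving the concrete alternative mirrors getQualitativeTransitionMatrixAsAdd().
    FakeAdd const& add = boost::get<FakeAdd>(matrix.get());
    (void) add;
}
```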
diff --git a/src/storm/storage/dd/bisimulation/SignatureMode.h b/src/storm/storage/dd/bisimulation/SignatureMode.h
new file mode 100644
index 000000000..41457ba76
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/SignatureMode.h
@@ -0,0 +1,11 @@
+#pragma once
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+        
+            enum class SignatureMode { Eager, Lazy, Qualitative };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/SignatureRefiner.cpp b/src/storm/storage/dd/bisimulation/SignatureRefiner.cpp
new file mode 100644
index 000000000..3dcb5a5b9
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/SignatureRefiner.cpp
@@ -0,0 +1,496 @@
+#include "storm/storage/dd/bisimulation/SignatureRefiner.h"
+
+#include <cstdio>
+
+#include <unordered_map>
+#include <boost/container/flat_map.hpp>
+
+#include "storm/storage/dd/DdManager.h"
+
+#include "storm/storage/dd/cudd/InternalCuddDdManager.h"
+
+#include "storm/storage/dd/cudd/utility.h"
+#include "storm/storage/dd/sylvan/utility.h"
+
+#include "storm/utility/macros.h"
+#include "storm/exceptions/InvalidSettingsException.h"
+#include "storm/exceptions/NotImplementedException.h"
+#include "storm/exceptions/InvalidArgumentException.h"
+
+#include "storm/settings/SettingsManager.h"
+#include "storm/settings/modules/BisimulationSettings.h"
+
+#include <sparsepp/spp.h>
+
+#include "sylvan_cache.h"
+#include "sylvan_table.h"
+#include "sylvan_int.h"
+
+// FIXME: remove
+#include "storm/storage/dd/DdManager.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            class InternalSignatureRefiner;
+            
+            class ReuseWrapper {
+            public:
+                ReuseWrapper() : ReuseWrapper(false) {
+                    // Intentionally left empty.
+                }
+                
+                ReuseWrapper(bool value) : value(value) {
+                    // Intentionally left empty.
+                }
+                
+                bool isReused() const {
+                    return value;
+                }
+                
+                void setReused() {
+                    value = true;
+                }
+                
+            private:
+                bool value;
+            };
+            
+            template<typename ValueType>
+            class InternalSignatureRefiner<storm::dd::DdType::CUDD, ValueType> {
+            public:
+                InternalSignatureRefiner(storm::dd::DdManager<storm::dd::DdType::CUDD> const& manager, storm::expressions::Variable const& blockVariable, storm::dd::Bdd<storm::dd::DdType::CUDD> const& nondeterminismVariables, storm::dd::Bdd<storm::dd::DdType::CUDD> const& nonBlockVariables) : manager(manager), internalDdManager(manager.getInternalDdManager()), ddman(internalDdManager.getCuddManager().getManager()), blockVariable(blockVariable), nondeterminismVariables(nondeterminismVariables), nonBlockVariables(nonBlockVariables), nextFreeBlockIndex(0), numberOfRefinements(0), lastNumberOfVisitedNodes(10000), signatureCache(lastNumberOfVisitedNodes), reuseBlocksCache(lastNumberOfVisitedNodes) {
+
+                    // Initialize precomputed data.
+                    auto const& ddMetaVariable = manager.getMetaVariable(blockVariable);
+                    blockDdVariableIndices = ddMetaVariable.getIndices();
+                    
+                    // Create initialized block encoding where all variables are "don't care".
+                    blockEncoding = std::vector<int>(static_cast<uint64_t>(internalDdManager.getCuddManager().ReadSize()), static_cast<int>(2));
+                }
+                
+                Partition<storm::dd::DdType::CUDD, ValueType> refine(Partition<storm::dd::DdType::CUDD, ValueType> const& oldPartition, Signature<storm::dd::DdType::CUDD, ValueType> const& signature) {
+                    storm::dd::Add<storm::dd::DdType::CUDD, ValueType> newPartitionAdd = refine(oldPartition, signature.getSignatureAdd());
+                    ++numberOfRefinements;
+                    return oldPartition.replacePartition(newPartitionAdd, nextFreeBlockIndex);
+                }
+                
+            private:
+                storm::dd::Add<storm::dd::DdType::CUDD, ValueType> refine(Partition<storm::dd::DdType::CUDD, ValueType> const& oldPartition, storm::dd::Add<storm::dd::DdType::CUDD, ValueType> const& signatureAdd) {
+                    STORM_LOG_ASSERT(oldPartition.storedAsAdd(), "Expecting partition to be stored as ADD for CUDD.");
+                    
+                    // Clear the caches.
+                    signatureCache.clear();
+                    reuseBlocksCache.clear();
+                    nextFreeBlockIndex = oldPartition.getNextFreeBlockIndex();
+
+                    // Perform the actual recursive refinement step.
+                    DdNodePtr result = refine(oldPartition.asAdd().getInternalAdd().getCuddDdNode(), signatureAdd.getInternalAdd().getCuddDdNode(), nondeterminismVariables.getInternalBdd().getCuddDdNode(), nonBlockVariables.getInternalBdd().getCuddDdNode());
+
+                    // Construct resulting ADD from the obtained node and the meta information.
+                    storm::dd::InternalAdd<storm::dd::DdType::CUDD, ValueType> internalNewPartitionAdd(&internalDdManager, cudd::ADD(internalDdManager.getCuddManager(), result));
+                    storm::dd::Add<storm::dd::DdType::CUDD, ValueType> newPartitionAdd(oldPartition.asAdd().getDdManager(), internalNewPartitionAdd, oldPartition.asAdd().getContainedMetaVariables());
+                    
+                    return newPartitionAdd;
+                }
+                
+                DdNodePtr encodeBlock(uint64_t blockIndex) {
+                    for (auto const& blockDdVariableIndex : blockDdVariableIndices) {
+                        blockEncoding[blockDdVariableIndex] = blockIndex & 1 ? 1 : 0;
+                        blockIndex >>= 1;
+                    }
+                    DdNodePtr bddEncoding = Cudd_CubeArrayToBdd(ddman, blockEncoding.data());
+                    Cudd_Ref(bddEncoding);
+                    DdNodePtr result = Cudd_BddToAdd(ddman, bddEncoding);
+                    Cudd_Ref(result);
+                    Cudd_RecursiveDeref(ddman, bddEncoding);
+                    Cudd_Deref(result);
+                    return result;
+                }
+                
+                DdNodePtr refine(DdNode* partitionNode, DdNode* signatureNode, DdNode* nondeterminismVariablesNode, DdNode* nonBlockVariablesNode) {
+                    // If we arrived at the constant zero node, then this was an illegal state encoding (we require
+                    // all states to be non-deadlock).
+                    if (partitionNode == Cudd_ReadZero(ddman)) {
+                        return partitionNode;
+                    }
+                    
+                    // Check the cache whether we have seen the same node before.
+                    auto nodePair = std::make_pair(signatureNode, partitionNode);
+                    auto it = signatureCache.find(nodePair);
+                    if (it != signatureCache.end()) {
+                        // If so, we return the corresponding result.
+                        return it->second;
+                    }
+                    
+                    // If there are no more non-block variables, we hit the signature.
+                    if (Cudd_IsConstant(nonBlockVariablesNode)) {
+                        // If this is the first time (in this traversal) that we encounter this signature, we check
+                        // whether we can assign the old block number to it.
+                        auto& reuseEntry = reuseBlocksCache[partitionNode];
+                        if (!reuseEntry.isReused()) {
+                            reuseEntry.setReused();
+                            signatureCache[nodePair] = partitionNode;
+                            return partitionNode;
+                        } else {
+                            DdNode* result = encodeBlock(nextFreeBlockIndex++);
+                            signatureCache[nodePair] = result;
+                            return result;
+                        }
+                    } else {
+                        // If there are more variables that belong to the non-block part of the encoding, we need to recursively descend.
+                        
+                        bool skippedBoth = true;
+                        DdNode* partitionThen;
+                        DdNode* partitionElse;
+                        DdNode* signatureThen;
+                        DdNode* signatureElse;
+                        short offset = 1;
+                        while (skippedBoth && !Cudd_IsConstant(nonBlockVariablesNode)) {
+                            // Remember an offset that indicates whether the top variable is a nondeterminism variable or not.
+                            offset = 1;
+                            if (!Cudd_IsConstant(nondeterminismVariablesNode) && Cudd_NodeReadIndex(nondeterminismVariablesNode) == Cudd_NodeReadIndex(nonBlockVariablesNode)) {
+                                offset = 0;
+                            }
+                            
+                            if (Cudd_NodeReadIndex(partitionNode) - offset == Cudd_NodeReadIndex(nonBlockVariablesNode)) {
+                                partitionThen = Cudd_T(partitionNode);
+                                partitionElse = Cudd_E(partitionNode);
+                                skippedBoth = false;
+                            } else {
+                                partitionThen = partitionElse = partitionNode;
+                            }
+                            
+                            if (Cudd_NodeReadIndex(signatureNode) == Cudd_NodeReadIndex(nonBlockVariablesNode)) {
+                                signatureThen = Cudd_T(signatureNode);
+                                signatureElse = Cudd_E(signatureNode);
+                                skippedBoth = false;
+                            } else {
+                                signatureThen = signatureElse = signatureNode;
+                            }
+                            
+                            // If both (signature and partition) skipped the next variable, we fast-forward.
+                            if (skippedBoth) {
+                                // If the current variable is a nondeterminism variable, we need to advance both variable sets; otherwise only the non-block variables.
+                                nonBlockVariablesNode = Cudd_T(nonBlockVariablesNode);
+                                if (offset == 0) {
+                                    nondeterminismVariablesNode = Cudd_T(nondeterminismVariablesNode);
+                                }
+                            }
+                        }
+                        
+                        // If there are no more non-block variables remaining, make a recursive call to enter the base case.
+                        if (Cudd_IsConstant(nonBlockVariablesNode)) {
+                            return refine(partitionNode, signatureNode, nondeterminismVariablesNode, nonBlockVariablesNode);
+                        }
+                        
+                        DdNode* thenResult = refine(partitionThen, signatureThen, offset == 0 ? Cudd_T(nondeterminismVariablesNode) : nondeterminismVariablesNode, Cudd_T(nonBlockVariablesNode));
+                        Cudd_Ref(thenResult);
+                        DdNode* elseResult = refine(partitionElse, signatureElse, offset == 0 ? Cudd_T(nondeterminismVariablesNode) : nondeterminismVariablesNode, Cudd_T(nonBlockVariablesNode));
+                        Cudd_Ref(elseResult);
+
+                        DdNode* result;
+                        if (thenResult == elseResult) {
+                            Cudd_Deref(thenResult);
+                            Cudd_Deref(elseResult);
+                            result = thenResult;
+                        } else {
+                            // Get the node to connect the subresults.
+                            bool complemented = Cudd_IsComplement(thenResult);
+                            result = cuddUniqueInter(ddman, Cudd_NodeReadIndex(nonBlockVariablesNode) + offset, Cudd_Regular(thenResult), complemented ? Cudd_Not(elseResult) : elseResult);
+                            if (complemented) {
+                                result = Cudd_Not(result);
+                            }
+                            Cudd_Deref(thenResult);
+                            Cudd_Deref(elseResult);
+                        }
+                        
+                        // Store the result in the cache.
+                        signatureCache[nodePair] = result;
+                        
+                        return result;
+                    }
+                }
+                
+                storm::dd::DdManager<storm::dd::DdType::CUDD> const& manager;
+                storm::dd::InternalDdManager<storm::dd::DdType::CUDD> const& internalDdManager;
+                ::DdManager* ddman;
+                storm::expressions::Variable const& blockVariable;
+                
+                // The cubes representing all nondeterminism and all non-block variables, respectively.
+                storm::dd::Bdd<storm::dd::DdType::CUDD> nondeterminismVariables;
+                storm::dd::Bdd<storm::dd::DdType::CUDD> nonBlockVariables;
+                
+                // The indices of the DD variables associated with the block variable.
+                std::vector<uint64_t> blockDdVariableIndices;
+                
+                // A vector used for encoding block indices.
+                std::vector<int> blockEncoding;
+                
+                // The current number of blocks of the new partition.
+                uint64_t nextFreeBlockIndex;
+                
+                // The number of completed refinements.
+                uint64_t numberOfRefinements;
+                
+                // The number of nodes visited in the last refinement operation.
+                uint64_t lastNumberOfVisitedNodes;
+                
+                // The cache used to identify states with identical signature.
+                spp::sparse_hash_map<std::pair<DdNode const*, DdNode const*>, DdNode*, CuddPointerPairHash> signatureCache;
+                
+                // The cache used to identify which old block numbers have already been reused.
+                spp::sparse_hash_map<DdNode const*, ReuseWrapper> reuseBlocksCache;
+            };
+            
+            template<typename ValueType>
+            class InternalSignatureRefiner<storm::dd::DdType::Sylvan, ValueType> {
+            public:
+                InternalSignatureRefiner(storm::dd::DdManager<storm::dd::DdType::Sylvan> const& manager, storm::expressions::Variable const& blockVariable, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nondeterminismVariables, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& nonBlockVariables) : manager(manager), internalDdManager(manager.getInternalDdManager()), blockVariable(blockVariable), nondeterminismVariables(nondeterminismVariables), nonBlockVariables(nonBlockVariables), numberOfBlockVariables(manager.getMetaVariable(blockVariable).getNumberOfDdVariables()), blockCube(manager.getMetaVariable(blockVariable).getCube()), nextFreeBlockIndex(0), numberOfRefinements(0), signatureCache() {
+                    // Perform garbage collection to clean up stuff not needed anymore.
+                    LACE_ME;
+                    sylvan_gc();
+                }
+                
+                Partition<storm::dd::DdType::Sylvan, ValueType> refine(Partition<storm::dd::DdType::Sylvan, ValueType> const& oldPartition, Signature<storm::dd::DdType::Sylvan, ValueType> const& signature) {
+                    storm::dd::Bdd<storm::dd::DdType::Sylvan> newPartitionBdd = refine(oldPartition, signature.getSignatureAdd());
+                    return oldPartition.replacePartition(newPartitionBdd, nextFreeBlockIndex);
+                }
+                
+            private:
+                storm::dd::Bdd<storm::dd::DdType::Sylvan> refine(Partition<storm::dd::DdType::Sylvan, ValueType> const& oldPartition, storm::dd::Add<storm::dd::DdType::Sylvan, ValueType> const& signatureAdd) {
+                    STORM_LOG_ASSERT(oldPartition.storedAsBdd(), "Expecting partition to be stored as BDD for Sylvan.");
+
+                    LACE_ME;
+                    
+                    // Set up next refinement.
+                    ++numberOfRefinements;
+                    
+                    // Clear the caches.
+                    std::size_t oldSize = signatureCache.size();
+                    signatureCache.clear();
+                    signatureCache.reserve(3 * oldSize);
+                    reuseBlocksCache.clear();
+                    reuseBlocksCache.reserve(3 * oldPartition.getNumberOfBlocks());
+                    nextFreeBlockIndex = oldPartition.getNextFreeBlockIndex();
+
+                    // Clear performance counters.
+//                    signatureCacheLookups = 0;
+//                    signatureCacheHits = 0;
+//                    numberOfVisitedNodes = 0;
+//                    totalSignatureCacheLookupTime = std::chrono::high_resolution_clock::duration(0);
+//                    totalSignatureCacheStoreTime = std::chrono::high_resolution_clock::duration(0);
+//                    totalReuseBlocksLookupTime = std::chrono::high_resolution_clock::duration(0);
+//                    totalLevelLookupTime = std::chrono::high_resolution_clock::duration(0);
+//                    totalBlockEncodingTime = std::chrono::high_resolution_clock::duration(0);
+//                    totalMakeNodeTime = std::chrono::high_resolution_clock::duration(0);
+                    
+                    // Perform the actual recursive refinement step.
+                    BDD result = refine(oldPartition.asBdd().getInternalBdd().getSylvanBdd().GetBDD(), signatureAdd.getInternalAdd().getSylvanMtbdd().GetMTBDD(), nondeterminismVariables.getInternalBdd().getSylvanBdd().GetBDD(), nonBlockVariables.getInternalBdd().getSylvanBdd().GetBDD());
+
+                    // Construct resulting BDD from the obtained node and the meta information.
+                    storm::dd::InternalBdd<storm::dd::DdType::Sylvan> internalNewPartitionBdd(&internalDdManager, sylvan::Bdd(result));
+                    storm::dd::Bdd<storm::dd::DdType::Sylvan> newPartitionBdd(oldPartition.asBdd().getDdManager(), internalNewPartitionBdd, oldPartition.asBdd().getContainedMetaVariables());
+                    
+//                    // Display some statistics.
+//                    STORM_LOG_TRACE("Refinement visited " << numberOfVisitedNodes << " nodes.");
+//                    STORM_LOG_TRACE("Current #nodes in table: " << llmsset_count_marked(nodes) << " of " << llmsset_get_size(nodes) << ", cache: " << cache_getused() << " of " << cache_getsize() << ".");
+//                    STORM_LOG_TRACE("Signature cache hits: " << signatureCacheHits << ", misses: " << (signatureCacheLookups - signatureCacheHits) << ".");
+//                    STORM_LOG_TRACE("Signature cache lookup time: " << std::chrono::duration_cast<std::chrono::milliseconds>(totalSignatureCacheLookupTime).count() << "ms");
+//                    STORM_LOG_TRACE("Signature cache store time: " << std::chrono::duration_cast<std::chrono::milliseconds>(totalSignatureCacheStoreTime).count() << "ms");
+//                    STORM_LOG_TRACE("Signature cache total time: " << std::chrono::duration_cast<std::chrono::milliseconds>(totalSignatureCacheStoreTime + totalSignatureCacheLookupTime).count() << "ms");
+//                    STORM_LOG_TRACE("Reuse blocks lookup time: " << std::chrono::duration_cast<std::chrono::milliseconds>(totalReuseBlocksLookupTime).count() << "ms");
+//                    STORM_LOG_TRACE("Level lookup time: " << std::chrono::duration_cast<std::chrono::milliseconds>(totalLevelLookupTime).count() << "ms");
+//                    STORM_LOG_TRACE("Block encoding time: " << std::chrono::duration_cast<std::chrono::milliseconds>(totalBlockEncodingTime).count() << "ms");
+//                    STORM_LOG_TRACE("Make node time: " << std::chrono::duration_cast<std::chrono::milliseconds>(totalMakeNodeTime).count() << "ms");
+                    
+                    return newPartitionBdd;
+                }
+                
+                BDD encodeBlock(uint64_t blockIndex) {
+                    std::vector<uint8_t> e(numberOfBlockVariables);
+                    for (uint64_t i = 0; i < numberOfBlockVariables; ++i) {
+                        e[i] = blockIndex & 1 ? 1 : 0;
+                        blockIndex >>= 1;
+                    }
+                    return sylvan_cube(blockCube.getInternalBdd().getSylvanBdd().GetBDD(), e.data());
+                }
+                
+                BDD refine(BDD partitionNode, MTBDD signatureNode, BDD nondeterminismVariablesNode, BDD nonBlockVariablesNode) {
+                    LACE_ME;
+                    
+                    // If we arrived at the constant zero node, then this was an illegal state encoding (we require
+                    // all states to be non-deadlock).
+                    if (partitionNode == sylvan_false) {
+                        return partitionNode;
+                    }
+                    
+                    STORM_LOG_ASSERT(partitionNode != mtbdd_false, "Expected non-false node.");
+
+                    // Check the cache whether we have seen the same node before.
+                    auto nodePair = std::make_pair(signatureNode, partitionNode);
+                    auto it = signatureCache.find(nodePair);
+                    if (it != signatureCache.end()) {
+                        // If so, we return the corresponding result.
+                        return it->second;
+                    }
+                    
+                    sylvan_gc_test();
+                    
+                    // If there are no more non-block variables, we hit the signature.
+                    if (sylvan_isconst(nonBlockVariablesNode)) {
+                        // If this is the first time (in this traversal) that we encounter this signature, we check
+                        // whether we can assign the old block number to it.
+
+                        auto& reuseBlockEntry = reuseBlocksCache[partitionNode];
+                        if (!reuseBlockEntry.isReused()) {
+                            reuseBlockEntry.setReused();
+                            signatureCache[nodePair] = partitionNode;
+                            return partitionNode;
+                        } else {
+                            BDD result = encodeBlock(nextFreeBlockIndex++);
+                            signatureCache[nodePair] = result;
+                            return result;
+                        }
+                    } else {
+                        // If there are more variables that belong to the non-block part of the encoding, we need to recursively descend.
+                        
+                        bool skippedBoth = true;
+                        BDD partitionThen;
+                        BDD partitionElse;
+                        MTBDD signatureThen;
+                        MTBDD signatureElse;
+                        short offset = 1;
+                        while (skippedBoth && !sylvan_isconst(nonBlockVariablesNode)) {
+                            // Remember an offset that indicates whether the top variable is a nondeterminism variable or not.
+                            offset = 1;
+                            if (!sylvan_isconst(nondeterminismVariablesNode) && sylvan_var(nondeterminismVariablesNode) == sylvan_var(nonBlockVariablesNode)) {
+                                offset = 0;
+                            }
+
+                            if (storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::matchesVariableIndex(partitionNode, sylvan_var(nonBlockVariablesNode), -offset)) {
+                                partitionThen = sylvan_high(partitionNode);
+                                partitionElse = sylvan_low(partitionNode);
+                                skippedBoth = false;
+                            } else {
+                                partitionThen = partitionElse = partitionNode;
+                            }
+                            
+                            if (storm::dd::InternalAdd<storm::dd::DdType::Sylvan, ValueType>::matchesVariableIndex(signatureNode, sylvan_var(nonBlockVariablesNode))) {
+                                signatureThen = sylvan_high(signatureNode);
+                                signatureElse = sylvan_low(signatureNode);
+                                skippedBoth = false;
+                            } else {
+                                signatureThen = signatureElse = signatureNode;
+                            }
+                            
+                            // If both (signature and partition) skipped the next variable, we fast-forward.
+                            if (skippedBoth) {
+                                // If the current variable is a nondeterminism variable, we need to advance both variable sets; otherwise only the non-block variables.
+                                nonBlockVariablesNode = sylvan_high(nonBlockVariablesNode);
+                                if (offset == 0) {
+                                    nondeterminismVariablesNode = sylvan_high(nondeterminismVariablesNode);
+                                }
+                            }
+                        }
+                        
+                        // If there are no more non-block variables remaining, make a recursive call to enter the base case.
+                        if (sylvan_isconst(nonBlockVariablesNode)) {
+                            return refine(partitionNode, signatureNode, nondeterminismVariablesNode, nonBlockVariablesNode);
+                        }
+                        
+                        BDD thenResult = refine(partitionThen, signatureThen, offset == 0 ? sylvan_high(nondeterminismVariablesNode) : nondeterminismVariablesNode, sylvan_high(nonBlockVariablesNode));
+                        bdd_refs_push(thenResult);
+                        BDD elseResult = refine(partitionElse, signatureElse, offset == 0 ? sylvan_high(nondeterminismVariablesNode) : nondeterminismVariablesNode, sylvan_high(nonBlockVariablesNode));
+                        bdd_refs_push(elseResult);
+                        
+                        BDD result;
+                        if (thenResult == elseResult) {
+                            result = thenResult;
+                        } else {
+                            // Get the node to connect the subresults.
+                            result = sylvan_makenode(sylvan_var(nonBlockVariablesNode) + offset, elseResult, thenResult);
+                        }
+                        
+                        // Dispose of the intermediate results.
+                        bdd_refs_pop(2);
+                        
+                        // Store the result in the cache.
+                        signatureCache[nodePair] = result;
+                        
+                        return result;
+                    }
+                }
+                
+                storm::dd::DdManager<storm::dd::DdType::Sylvan> const& manager;
+                storm::dd::InternalDdManager<storm::dd::DdType::Sylvan> const& internalDdManager;
+                storm::expressions::Variable const& blockVariable;
+
+                storm::dd::Bdd<storm::dd::DdType::Sylvan> nondeterminismVariables;
+                storm::dd::Bdd<storm::dd::DdType::Sylvan> nonBlockVariables;
+                
+                uint64_t numberOfBlockVariables;
+                
+                storm::dd::Bdd<storm::dd::DdType::Sylvan> blockCube;
+                
+                // The current number of blocks of the new partition.
+                uint64_t nextFreeBlockIndex;
+                
+                // The number of completed refinements.
+                uint64_t numberOfRefinements;
+                
+                // The cache used to identify states with identical signature.
+                spp::sparse_hash_map<std::pair<MTBDD, MTBDD>, MTBDD, SylvanMTBDDPairHash> signatureCache;
+                
+                // The cache used to identify which old block numbers have already been reused.
+                spp::sparse_hash_map<MTBDD, ReuseWrapper> reuseBlocksCache;
+                
+                // Performance counters.
+//                uint64_t signatureCacheLookups;
+//                uint64_t signatureCacheHits;
+//                uint64_t numberOfVisitedNodes;
+//                std::chrono::high_resolution_clock::duration totalSignatureCacheLookupTime;
+//                std::chrono::high_resolution_clock::duration totalSignatureCacheStoreTime;
+//                std::chrono::high_resolution_clock::duration totalReuseBlocksLookupTime;
+//                std::chrono::high_resolution_clock::duration totalLevelLookupTime;
+//                std::chrono::high_resolution_clock::duration totalBlockEncodingTime;
+//                std::chrono::high_resolution_clock::duration totalMakeNodeTime;
+                
+            };
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            SignatureRefiner<DdType, ValueType>::SignatureRefiner(storm::dd::DdManager<DdType> const& manager, storm::expressions::Variable const& blockVariable, std::set<storm::expressions::Variable> const& stateVariables, std::set<storm::expressions::Variable> const& nondeterminismVariables) : manager(&manager), stateVariables(stateVariables) {
+                
+                storm::dd::Bdd<DdType> nonBlockVariablesCube = manager.getBddOne();
+                storm::dd::Bdd<DdType> nondeterminismVariablesCube = manager.getBddOne();
+                for (auto const& var : nondeterminismVariables) {
+                    auto cube = manager.getMetaVariable(var).getCube();
+                    nonBlockVariablesCube &= cube;
+                    nondeterminismVariablesCube &= cube;
+                }
+                for (auto const& var : stateVariables) {
+                    auto cube = manager.getMetaVariable(var).getCube();
+                    nonBlockVariablesCube &= cube;
+                }
+                
+                internalRefiner = std::make_unique<InternalSignatureRefiner<DdType, ValueType>>(manager, blockVariable, nondeterminismVariablesCube, nonBlockVariablesCube);
+            }
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            Partition<DdType, ValueType> SignatureRefiner<DdType, ValueType>::refine(Partition<DdType, ValueType> const& oldPartition, Signature<DdType, ValueType> const& signature) {
+                Partition<DdType, ValueType> result = internalRefiner->refine(oldPartition, signature);
+                return result;
+            }
+            
+            template class SignatureRefiner<storm::dd::DdType::CUDD, double>;
+            
+            template class SignatureRefiner<storm::dd::DdType::Sylvan, double>;
+            template class SignatureRefiner<storm::dd::DdType::Sylvan, storm::RationalNumber>;
+            template class SignatureRefiner<storm::dd::DdType::Sylvan, storm::RationalFunction>;
+            
+        }
+    }
+}
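Both `encodeBlock` implementations above translate a fresh block index into an assignment of the block DD variables by consuming the index least-significant bit first, one bit per block variable (the CUDD variant additionally marks every other manager variable as "don't care" with the value 2 before building the cube). A stand-alone sketch of that bit order; the helper name and the fixed width are illustrative only:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors the bit extraction used by encodeBlock: the block index is consumed
// least-significant bit first, yielding one 0/1 value per block DD variable.
std::vector<int> encodeBlockIndex(uint64_t blockIndex, uint64_t numberOfBlockVariables) {
    std::vector<int> encoding(numberOfBlockVariables);
    for (uint64_t i = 0; i < numberOfBlockVariables; ++i) {
        encoding[i] = blockIndex & 1 ? 1 : 0;
        blockIndex >>= 1;
    }
    return encoding;
}

int main() {
    // Block index 6 with 4 block variables yields 0 1 1 0 (LSB first).
    for (int bit : encodeBlockIndex(6, 4)) {
        std::cout << bit << ' ';
    }
    std::cout << '\n';
}
```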
diff --git a/src/storm/storage/dd/bisimulation/SignatureRefiner.h b/src/storm/storage/dd/bisimulation/SignatureRefiner.h
new file mode 100644
index 000000000..72498152c
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/SignatureRefiner.h
@@ -0,0 +1,37 @@
+#pragma once
+
+#include <memory>
+
+#include "storm/storage/dd/DdType.h"
+
+#include "storm/storage/dd/bisimulation/Partition.h"
+#include "storm/storage/dd/bisimulation/Signature.h"
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+
+            template<storm::dd::DdType DdType, typename ValueType>
+            class InternalSignatureRefiner;
+            
+            template<storm::dd::DdType DdType, typename ValueType>
+            class SignatureRefiner {
+            public:
+                SignatureRefiner(storm::dd::DdManager<DdType> const& manager, storm::expressions::Variable const& blockVariable, std::set<storm::expressions::Variable> const& stateVariables, std::set<storm::expressions::Variable> const& nondeterminismVariables = std::set<storm::expressions::Variable>());
+                
+                Partition<DdType, ValueType> refine(Partition<DdType, ValueType> const& oldPartition, Signature<DdType, ValueType> const& signature);
+
+            private:
+                // The manager responsible for the DDs.
+                storm::dd::DdManager<DdType> const* manager;
+                
+                // The variables encoding the states.
+                std::set<storm::expressions::Variable> stateVariables;
+                
+                // The internal refiner.
+                std::shared_ptr<InternalSignatureRefiner<DdType, ValueType>> internalRefiner;
+            };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/bisimulation/Status.h b/src/storm/storage/dd/bisimulation/Status.h
new file mode 100644
index 000000000..532f61d92
--- /dev/null
+++ b/src/storm/storage/dd/bisimulation/Status.h
@@ -0,0 +1,13 @@
+#pragma once
+
+namespace storm {
+    namespace dd {
+        namespace bisimulation {
+ 
+            enum class Status {
+                Initialized, InComputation, FixedPoint
+            };
+            
+        }
+    }
+}
diff --git a/src/storm/storage/dd/cudd/InternalCuddAdd.cpp b/src/storm/storage/dd/cudd/InternalCuddAdd.cpp
index d02222df0..187968f23 100644
--- a/src/storm/storage/dd/cudd/InternalCuddAdd.cpp
+++ b/src/storm/storage/dd/cudd/InternalCuddAdd.cpp
@@ -209,9 +209,16 @@ namespace storm {
                 summationAdds.push_back(ddVariable.toAdd<ValueType>().getCuddAdd());
             }
             
+//            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().TimesPlus(otherMatrix.getCuddAdd(), summationAdds));
+//            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().Triangle(otherMatrix.getCuddAdd(), summationAdds));
             return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().MatrixMultiply(otherMatrix.getCuddAdd(), summationAdds));
         }
         
+        template<typename ValueType>
+        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::multiplyMatrix(InternalBdd<DdType::CUDD> const& otherMatrix, std::vector<InternalBdd<DdType::CUDD>> const& summationDdVariables) const {
+            return this->multiplyMatrix(otherMatrix.template toAdd<ValueType>(), summationDdVariables);
+        }
+        
         template<typename ValueType>
         InternalBdd<DdType::CUDD> InternalAdd<DdType::CUDD, ValueType>::greater(ValueType const& value) const {
             return InternalBdd<DdType::CUDD>(ddManager, this->getCuddAdd().BddStrictThreshold(value));
@@ -315,7 +322,7 @@ namespace storm {
         }
         
         template<typename ValueType>
-        void InternalAdd<DdType::CUDD, ValueType>::exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings) const {
+        void InternalAdd<DdType::CUDD, ValueType>::exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings, bool showVariablesIfPossible) const {
             // Build the name input of the DD.
             std::vector<char*> ddNames;
             std::string ddName("f");
@@ -332,7 +339,11 @@ namespace storm {
             // Open the file, dump the DD and close it again.
             FILE* filePointer = fopen(filename.c_str() , "w");
             std::vector<cudd::ADD> cuddAddVector = { this->getCuddAdd() };
-            ddManager->getCuddManager().DumpDot(cuddAddVector, &ddVariableNames[0], &ddNames[0], filePointer);
+            if (showVariablesIfPossible) {
+                ddManager->getCuddManager().DumpDot(cuddAddVector, ddVariableNames.data(), &ddNames[0], filePointer);
+            } else {
+                ddManager->getCuddManager().DumpDot(cuddAddVector, nullptr, &ddNames[0], filePointer);
+            }
             fclose(filePointer);
             
             // Finally, delete the names.
@@ -425,6 +436,11 @@ namespace storm {
             }
         }
 
+        template<typename ValueType>
+        InternalDdManager<DdType::CUDD> const& InternalAdd<DdType::CUDD, ValueType>::getInternalDdManager() const {
+            return *ddManager;
+        }
+        
         template<typename ValueType>
         void InternalAdd<DdType::CUDD, ValueType>::composeWithExplicitVector(storm::dd::Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType>& targetVector, std::function<ValueType (ValueType const&, ValueType const&)> const& function) const {
             composeWithExplicitVectorRec(this->getCuddDdNode(), nullptr, 0, ddVariableIndices.size(), 0, odd, ddVariableIndices, targetVector, function);
diff --git a/src/storm/storage/dd/cudd/InternalCuddAdd.h b/src/storm/storage/dd/cudd/InternalCuddAdd.h
index 360a7b0f9..cc619cd11 100644
--- a/src/storm/storage/dd/cudd/InternalCuddAdd.h
+++ b/src/storm/storage/dd/cudd/InternalCuddAdd.h
@@ -38,12 +38,19 @@ namespace storm {
         
         template<DdType LibraryType, typename ValueType>
         class AddIterator;
+
+        namespace bisimulation {
+            template<DdType LibraryType, typename ValueType>
+            class InternalSignatureRefiner;
+        }
         
         template<typename ValueType>
         class InternalAdd<DdType::CUDD, ValueType> {
         public:
             friend class InternalBdd<DdType::CUDD>;
             
+            friend class bisimulation::InternalSignatureRefiner<DdType::CUDD, ValueType>;
+            
             /*!
              * Creates an ADD that encapsulates the given CUDD ADD.
              *
@@ -321,7 +328,17 @@ namespace storm {
              * @return An ADD representing the result of the matrix-matrix multiplication.
              */
             InternalAdd<DdType::CUDD, ValueType> multiplyMatrix(InternalAdd<DdType::CUDD, ValueType> const& otherMatrix, std::vector<InternalBdd<DdType::CUDD>> const& summationDdVariables) const;
-            
+
+            /*!
+             * Multiplies the current ADD (representing a matrix) with the given matrix by summing over the given meta
+             * variables.
+             *
+             * @param otherMatrix The matrix (given as a BDD) with which to multiply.
+             * @param summationDdVariables The DD variables (represented as ADDs) over which to sum.
+             * @return An ADD representing the result of the matrix-matrix multiplication.
+             */
+            InternalAdd<DdType::CUDD, ValueType> multiplyMatrix(InternalBdd<DdType::CUDD> const& otherMatrix, std::vector<InternalBdd<DdType::CUDD>> const& summationDdVariables) const;
+
             /*!
              * Computes a BDD that represents the function in which all assignments with a function value strictly
              * larger than the given value are mapped to one and all others to zero.
@@ -477,7 +494,7 @@ namespace storm {
              * @param filename The name of the file to which the DD is to be exported.
              * @param ddVariableNamesAsString The names of the DD variables to display in the dot file.
              */
-            void exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings) const;
+            void exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings, bool showVariablesIfPossible = true) const;
             
             /*!
              * Retrieves an iterator that points to the first meta variable assignment with a non-zero function value.
@@ -576,7 +593,8 @@ namespace storm {
              */
             Odd createOdd(std::vector<uint_fast64_t> const& ddVariableIndices) const;
             
-        private:
+            InternalDdManager<DdType::CUDD> const& getInternalDdManager() const;
+            
             /*!
              * Retrieves the CUDD ADD object associated with this ADD.
              *
@@ -591,6 +609,7 @@ namespace storm {
              */
             DdNode* getCuddDdNode() const;
             
+        private:
             /*!
              * Performs a recursive step to perform the given function between the given DD-based vector and the given
              * explicit vector.
diff --git a/src/storm/storage/dd/cudd/InternalCuddBdd.cpp b/src/storm/storage/dd/cudd/InternalCuddBdd.cpp
index acdd67e63..87051bfc6 100644
--- a/src/storm/storage/dd/cudd/InternalCuddBdd.cpp
+++ b/src/storm/storage/dd/cudd/InternalCuddBdd.cpp
@@ -179,7 +179,7 @@ namespace storm {
             return static_cast<uint_fast64_t>(ddManager->getCuddManager().ReadPerm(this->getIndex()));
         }
         
-        void InternalBdd<DdType::CUDD>::exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings) const {
+        void InternalBdd<DdType::CUDD>::exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings, bool showVariablesIfPossible) const {
             // Build the name input of the DD.
             std::vector<char*> ddNames;
             std::string ddName("f");
@@ -196,7 +196,11 @@ namespace storm {
             // Open the file, dump the DD and close it again.
             std::vector<cudd::BDD> cuddBddVector = { this->getCuddBdd() };
             FILE* filePointer = fopen(filename.c_str() , "w");
-            ddManager->getCuddManager().DumpDot(cuddBddVector, &ddVariableNames[0], &ddNames[0], filePointer);
+            if (showVariablesIfPossible) {
+                ddManager->getCuddManager().DumpDot(cuddBddVector, ddVariableNames.data(), &ddNames[0], filePointer);
+            } else {
+                ddManager->getCuddManager().DumpDot(cuddBddVector, nullptr, &ddNames[0], filePointer);
+            }
             fclose(filePointer);
             
             // Finally, delete the names.
@@ -313,10 +317,10 @@ namespace storm {
         
         Odd InternalBdd<DdType::CUDD>::createOdd(std::vector<uint_fast64_t> const& ddVariableIndices) const {
             // Prepare a unique table for each level that keeps the constructed ODD nodes unique.
-            std::vector<std::unordered_map<std::pair<DdNode const*, bool>, std::shared_ptr<Odd>, HashFunctor>> uniqueTableForLevels(ddVariableIndices.size() + 1);
+            std::vector<std::unordered_map<DdNode const*, std::shared_ptr<Odd>>> uniqueTableForLevels(ddVariableIndices.size() + 1);
             
             // Now construct the ODD structure from the BDD.
-            std::shared_ptr<Odd> rootOdd = createOddRec(Cudd_Regular(this->getCuddDdNode()), ddManager->getCuddManager(), 0, Cudd_IsComplement(this->getCuddDdNode()), ddVariableIndices.size(), ddVariableIndices, uniqueTableForLevels);
+            std::shared_ptr<Odd> rootOdd = createOddRec(this->getCuddDdNode(), ddManager->getCuddManager(), 0, ddVariableIndices.size(), ddVariableIndices, uniqueTableForLevels);
             
             // Return a copy of the root node to remove the shared_ptr encapsulation.
             return Odd(*rootOdd);
@@ -329,57 +333,44 @@ namespace storm {
             return result;
         }
         
-        std::shared_ptr<Odd> InternalBdd<DdType::CUDD>::createOddRec(DdNode const* dd, cudd::Cudd const& manager, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<std::unordered_map<std::pair<DdNode const*, bool>, std::shared_ptr<Odd>, HashFunctor>>& uniqueTableForLevels) {
+        std::shared_ptr<Odd> InternalBdd<DdType::CUDD>::createOddRec(DdNode const* dd, cudd::Cudd const& manager, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<std::unordered_map<DdNode const*, std::shared_ptr<Odd>>>& uniqueTableForLevels) {
             // Check whether the ODD for this node has already been computed (for this level) and if so, return this instead.
-            auto const& iterator = uniqueTableForLevels[currentLevel].find(std::make_pair(dd, complement));
-            if (iterator != uniqueTableForLevels[currentLevel].end()) {
-                return iterator->second;
+            auto it = uniqueTableForLevels[currentLevel].find(dd);
+            if (it != uniqueTableForLevels[currentLevel].end()) {
+                return it->second;
             } else {
                 // Otherwise, we need to recursively compute the ODD.
                 
                 // If we are already at the maximal level that is to be considered, we can simply create an Odd without
                 // successors
                 if (currentLevel == maxLevel) {
-                    uint_fast64_t elseOffset = 0;
-                    uint_fast64_t thenOffset = 0;
-                    
-                    // If the DD is not the zero leaf, then the then-offset is 1.
-                    if (dd != Cudd_ReadZero(manager.getManager())) {
-                        thenOffset = 1;
-                    }
-                    
-                    // If we need to complement the 'terminal' node, we need to negate its offset.
-                    if (complement) {
-                        thenOffset = 1 - thenOffset;
-                    }
-                    
-                    auto oddNode = std::make_shared<Odd>(nullptr, elseOffset, nullptr, thenOffset);
-                    uniqueTableForLevels[currentLevel].emplace(std::make_pair(dd, complement), oddNode);
+                    auto oddNode = std::make_shared<Odd>(nullptr, 0, nullptr, dd != Cudd_ReadLogicZero(manager.getManager()) ? 1 : 0);
+                    uniqueTableForLevels[currentLevel].emplace(dd, oddNode);
                     return oddNode;
                 } else if (ddVariableIndices[currentLevel] < Cudd_NodeReadIndex(dd)) {
                     // If we skipped the level in the DD, we compute the ODD just for the else-successor and use the same
                     // node for the then-successor as well.
-                    std::shared_ptr<Odd> elseNode = createOddRec(dd, manager, currentLevel + 1, complement, maxLevel, ddVariableIndices, uniqueTableForLevels);
+                    std::shared_ptr<Odd> elseNode = createOddRec(dd, manager, currentLevel + 1, maxLevel, ddVariableIndices, uniqueTableForLevels);
                     std::shared_ptr<Odd> thenNode = elseNode;
-                    uint_fast64_t totalOffset = elseNode->getElseOffset() + elseNode->getThenOffset();
                     
-                    auto oddNode = std::make_shared<Odd>(elseNode, totalOffset, thenNode, totalOffset);
-                    uniqueTableForLevels[currentLevel].emplace(std::make_pair(dd, complement), oddNode);
+                    auto oddNode = std::make_shared<Odd>(elseNode, elseNode->getTotalOffset(), thenNode, elseNode->getTotalOffset());
+                    uniqueTableForLevels[currentLevel].emplace(dd, oddNode);
                     return oddNode;
                 } else {
                     // Otherwise, we compute the ODDs for both the then- and else successors.
                     DdNode const* thenDdNode = Cudd_T_const(dd);
                     DdNode const* elseDdNode = Cudd_E_const(dd);
                     
-                    // Determine whether we have to evaluate the successors as if they were complemented.
-                    bool elseComplemented = Cudd_IsComplement(elseDdNode) ^ complement;
-                    bool thenComplemented = Cudd_IsComplement(thenDdNode) ^ complement;
+                    if (Cudd_IsComplement(dd)) {
+                        thenDdNode = Cudd_Not(thenDdNode);
+                        elseDdNode = Cudd_Not(elseDdNode);
+                    }
                     
-                    std::shared_ptr<Odd> elseNode = createOddRec(Cudd_Regular(elseDdNode), manager, currentLevel + 1, elseComplemented, maxLevel, ddVariableIndices, uniqueTableForLevels);
-                    std::shared_ptr<Odd> thenNode = createOddRec(Cudd_Regular(thenDdNode), manager, currentLevel + 1, thenComplemented, maxLevel, ddVariableIndices, uniqueTableForLevels);
+                    std::shared_ptr<Odd> elseNode = createOddRec(elseDdNode, manager, currentLevel + 1, maxLevel, ddVariableIndices, uniqueTableForLevels);
+                    std::shared_ptr<Odd> thenNode = createOddRec(thenDdNode, manager, currentLevel + 1, maxLevel, ddVariableIndices, uniqueTableForLevels);
                     
-                    auto oddNode = std::make_shared<Odd>(elseNode, elseNode->getElseOffset() + elseNode->getThenOffset(), thenNode, thenNode->getElseOffset() + thenNode->getThenOffset());
-                    uniqueTableForLevels[currentLevel].emplace(std::make_pair(dd, complement), oddNode);
+                    auto oddNode = std::make_shared<Odd>(elseNode, elseNode->getTotalOffset(), thenNode, thenNode->getTotalOffset());
+                    uniqueTableForLevels[currentLevel].emplace(dd, oddNode);
                     return oddNode;
                 }
             }
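
The explicit offset sums of the removed lines are folded into getTotalOffset() above, which only works if that accessor returns exactly the sum of the else- and then-offsets. A minimal sketch of the assumed invariant (hypothetical struct for illustration; storm's actual Odd class is defined elsewhere):

    #include <cstdint>
    #include <memory>

    // Hypothetical stand-in for storm's Odd node, showing only the offset invariant
    // relied upon by createOddRec: total offset = else offset + then offset.
    struct OddSketch {
        std::shared_ptr<OddSketch> elseSuccessor;
        std::shared_ptr<OddSketch> thenSuccessor;
        uint64_t elseOffset = 0;
        uint64_t thenOffset = 0;

        uint64_t getTotalOffset() const {
            return elseOffset + thenOffset;
        }
    };
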
diff --git a/src/storm/storage/dd/cudd/InternalCuddBdd.h b/src/storm/storage/dd/cudd/InternalCuddBdd.h
index 3ca4f47d7..93c54c0c8 100644
--- a/src/storm/storage/dd/cudd/InternalCuddBdd.h
+++ b/src/storm/storage/dd/cudd/InternalCuddBdd.h
@@ -327,7 +327,7 @@ namespace storm {
              * @param filename The name of the file to which the BDD is to be exported.
              * @param ddVariableNamesAsStrings The names of the variables to display in the dot file.
              */
-            void exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings) const;
+            void exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings, bool showVariablesIfPossible = true) const;
                         
             /*!
              * Converts a BDD to an equivalent ADD.
@@ -387,7 +387,6 @@ namespace storm {
             
             friend struct std::hash<storm::dd::InternalBdd<storm::dd::DdType::CUDD>>;
             
-        private:
             /*!
              * Retrieves the CUDD BDD object associated with this DD.
              *
@@ -402,6 +401,7 @@ namespace storm {
              */
             DdNode* getCuddDdNode() const;
             
+        private:
             /*!
              * Builds a BDD representing the values that make the given filter function evaluate to true.
              *
@@ -443,14 +443,13 @@ namespace storm {
              * @param dd The DD for which to build the ODD.
              * @param manager The manager responsible for the DD.
              * @param currentLevel The currently considered level in the DD.
-             * @param complement A flag indicating whether or not the given node is to be considered as complemented.
              * @param maxLevel The number of levels that need to be considered.
              * @param ddVariableIndices The (sorted) indices of all DD variables that need to be considered.
              * @param uniqueTableForLevels A vector of unique tables, one for each level to be considered, that keeps
              * ODD nodes for the same DD and level unique.
              * @return A pointer to the constructed ODD for the given arguments.
              */
-            static std::shared_ptr<Odd> createOddRec(DdNode const* dd, cudd::Cudd const& manager, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<std::unordered_map<std::pair<DdNode const*, bool>, std::shared_ptr<Odd>, HashFunctor>>& uniqueTableForLevels);
+            static std::shared_ptr<Odd> createOddRec(DdNode const* dd, cudd::Cudd const& manager, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<std::unordered_map<DdNode const*, std::shared_ptr<Odd>>>& uniqueTableForLevels);
             
             /*!
              * Adds the selected values the target vector.
diff --git a/src/storm/storage/dd/cudd/InternalCuddDdManager.cpp b/src/storm/storage/dd/cudd/InternalCuddDdManager.cpp
index 3f4986393..7dd5b75f6 100644
--- a/src/storm/storage/dd/cudd/InternalCuddDdManager.cpp
+++ b/src/storm/storage/dd/cudd/InternalCuddDdManager.cpp
@@ -3,15 +3,19 @@
 #include "storm/settings/SettingsManager.h"
 #include "storm/settings/modules/CuddSettings.h"
 
+#include "storm/exceptions/NotSupportedException.h"
+
 namespace storm {
     namespace dd {
         
         InternalDdManager<DdType::CUDD>::InternalDdManager() : cuddManager(), reorderingTechnique(CUDD_REORDER_NONE), numberOfDdVariables(0) {
             this->cuddManager.SetMaxMemory(static_cast<unsigned long>(storm::settings::getModule<storm::settings::modules::CuddSettings>().getMaximalMemory() * 1024ul * 1024ul));
-            this->cuddManager.SetEpsilon(storm::settings::getModule<storm::settings::modules::CuddSettings>().getConstantPrecision());
+            
+            auto const& settings = storm::settings::getModule<storm::settings::modules::CuddSettings>();
+            this->cuddManager.SetEpsilon(settings.getConstantPrecision());
             
             // Now set the selected reordering technique.
-            storm::settings::modules::CuddSettings::ReorderingTechnique reorderingTechniqueAsSetting = storm::settings::getModule<storm::settings::modules::CuddSettings>().getReorderingTechnique();
+            storm::settings::modules::CuddSettings::ReorderingTechnique reorderingTechniqueAsSetting = settings.getReorderingTechnique();
             switch (reorderingTechniqueAsSetting) {
                 case storm::settings::modules::CuddSettings::ReorderingTechnique::None: this->reorderingTechnique = CUDD_REORDER_NONE; break;
                 case storm::settings::modules::CuddSettings::ReorderingTechnique::Random: this->reorderingTechnique = CUDD_REORDER_RANDOM; break;
@@ -32,6 +36,8 @@ namespace storm {
                 case storm::settings::modules::CuddSettings::ReorderingTechnique::Genetic: this->reorderingTechnique = CUDD_REORDER_GENETIC; break;
                 case storm::settings::modules::CuddSettings::ReorderingTechnique::Exact: this->reorderingTechnique = CUDD_REORDER_EXACT; break;
             }
+            
+            this->allowDynamicReordering(settings.isReorderingEnabled());
         }
         
         InternalDdManager<DdType::CUDD>::~InternalDdManager() {
@@ -51,11 +57,46 @@ namespace storm {
             return InternalBdd<DdType::CUDD>(this, cuddManager.bddZero());
         }
         
+        InternalBdd<DdType::CUDD> InternalDdManager<DdType::CUDD>::getBddEncodingLessOrEqualThan(uint64_t bound, InternalBdd<DdType::CUDD> const& cube, uint64_t numberOfDdVariables) const {
+            return InternalBdd<DdType::CUDD>(this, cudd::BDD(cuddManager, this->getBddEncodingLessOrEqualThanRec(0, (1ull << numberOfDdVariables) - 1, bound, cube.getCuddDdNode(), numberOfDdVariables)));
+        }
+        
+        DdNodePtr InternalDdManager<DdType::CUDD>::getBddEncodingLessOrEqualThanRec(uint64_t minimalValue, uint64_t maximalValue, uint64_t bound, DdNodePtr cube, uint64_t remainingDdVariables) const {
+            if (maximalValue <= bound) {
+                return Cudd_ReadOne(cuddManager.getManager());
+            } else if (minimalValue > bound) {
+                return Cudd_ReadLogicZero(cuddManager.getManager());
+            }
+            
+            STORM_LOG_ASSERT(remainingDdVariables > 0, "Expected more remaining DD variables.");
+            STORM_LOG_ASSERT(!Cudd_IsConstant(cube), "Expected non-constant cube.");
+            uint64_t newRemainingDdVariables = remainingDdVariables - 1;
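+            // The cube is a conjunction of variables, so the remaining cube is its then-successor for both branches.
+            // The intermediate results are referenced so that they survive until the parent node has been created.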
+            DdNodePtr elseResult = getBddEncodingLessOrEqualThanRec(minimalValue, maximalValue & ~(1ull << newRemainingDdVariables), bound, Cudd_T(cube), newRemainingDdVariables);
+            Cudd_Ref(elseResult);
+            DdNodePtr thenResult = getBddEncodingLessOrEqualThanRec(minimalValue | (1ull << newRemainingDdVariables), maximalValue, bound, Cudd_T(cube), newRemainingDdVariables);
+            Cudd_Ref(thenResult);
+            STORM_LOG_ASSERT(thenResult != elseResult, "Expected different results.");
+            
+            bool complemented = Cudd_IsComplement(thenResult);
+            DdNodePtr result = cuddUniqueInter(cuddManager.getManager(), Cudd_NodeReadIndex(cube), Cudd_Regular(thenResult), complemented ? Cudd_Not(elseResult) : elseResult);
+            if (complemented) {
+                result = Cudd_Not(result);
+            }
+            Cudd_Deref(thenResult);
+            Cudd_Deref(elseResult);
+            return result;
+        }
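
The recursion above is easiest to follow on plain integers: at each level the encodings still reachable form the interval [minimalValue, maximalValue], and fixing the next-most-significant DD variable to 0 or 1 splits that interval in half. The following self-contained sketch replays the same splitting logic without any CUDD calls (countLessOrEqual is a hypothetical name, used for illustration only):

    #include <cstdint>
    #include <iostream>

    // Counts how many encodings over 'bits' bits are <= bound, using the same
    // interval splitting as getBddEncodingLessOrEqualThanRec.
    uint64_t countLessOrEqual(uint64_t minimalValue, uint64_t maximalValue, uint64_t bound, uint64_t bits) {
        if (maximalValue <= bound) {
            return maximalValue - minimalValue + 1;  // whole interval satisfies the bound ('true' leaf)
        } else if (minimalValue > bound) {
            return 0;                                // no encoding satisfies the bound ('false' leaf)
        }
        uint64_t next = bits - 1;
        // Bit 'next' cleared: the maximal value loses that bit; bit 'next' set: the minimal value gains it.
        return countLessOrEqual(minimalValue, maximalValue & ~(1ull << next), bound, next)
             + countLessOrEqual(minimalValue | (1ull << next), maximalValue, bound, next);
    }

    int main() {
        // Four DD variables encode 0..15; exactly 6 encodings (0..5) are <= 5.
        std::cout << countLessOrEqual(0, (1ull << 4) - 1, 5, 4) << std::endl;
        return 0;
    }
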
+        
         template<typename ValueType>
         InternalAdd<DdType::CUDD, ValueType> InternalDdManager<DdType::CUDD>::getAddZero() const {
             return InternalAdd<DdType::CUDD, ValueType>(this, cuddManager.addZero());
         }
-        
+
+        template<typename ValueType>
+        InternalAdd<DdType::CUDD, ValueType> InternalDdManager<DdType::CUDD>::getAddUndefined() const {
+            STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Undefined values are not supported by CUDD.");
+        }
+
         template<typename ValueType>
         InternalAdd<DdType::CUDD, ValueType> InternalDdManager<DdType::CUDD>::getConstant(ValueType const& value) const {
             return InternalAdd<DdType::CUDD, ValueType>(this, cuddManager.constant(value));
@@ -74,7 +115,9 @@ namespace storm {
                 }
             }
             
-            // Connect the two variables so they are not 'torn apart' during dynamic reordering.
+            // Connect the variables so they are not 'torn apart' by dynamic reordering.
+            // Note that MTR_FIXED preserves the order of the layers. While preserving this order is not always
+            // necessary, the hybrid engine (for example) relies on it, so we choose MTR_FIXED instead of MTR_DEFAULT.
             cuddManager.MakeTreeNode(result.front().getIndex(), numberOfLayers, MTR_FIXED);
             
             // Keep track of the number of variables.
@@ -104,6 +147,11 @@ namespace storm {
             this->getCuddManager().ReduceHeap(this->reorderingTechnique, 0);
         }
         
+        void InternalDdManager<DdType::CUDD>::debugCheck() const {
+            this->getCuddManager().CheckKeys();
+            this->getCuddManager().DebugCheck();
+        }
+        
         cudd::Cudd& InternalDdManager<DdType::CUDD>::getCuddManager() {
             return cuddManager;
         }
diff --git a/src/storm/storage/dd/cudd/InternalCuddDdManager.h b/src/storm/storage/dd/cudd/InternalCuddDdManager.h
index 17d715206..3b1c74e16 100644
--- a/src/storm/storage/dd/cudd/InternalCuddDdManager.h
+++ b/src/storm/storage/dd/cudd/InternalCuddDdManager.h
@@ -59,6 +59,13 @@ namespace storm {
              */
             InternalBdd<DdType::CUDD> getBddZero() const;
             
+            /*!
+             * Retrieves a BDD that evaluates to true iff the encoding is less than or equal to the given bound.
+             *
+             * @return A BDD containing exactly the encodings of values less than or equal to the bound.
+             */
+            InternalBdd<DdType::CUDD> getBddEncodingLessOrEqualThan(uint64_t bound, InternalBdd<DdType::CUDD> const& cube, uint64_t numberOfDdVariables) const;
+            
             /*!
              * Retrieves an ADD representing the constant zero function.
              *
@@ -66,7 +73,15 @@ namespace storm {
              */
             template<typename ValueType>
             InternalAdd<DdType::CUDD, ValueType> getAddZero() const;
-            
+
+            /*!
+             * Retrieves an ADD representing an undefined value.
+             *
+             * @return An ADD representing an undefined value.
+             */
+            template<typename ValueType>
+            InternalAdd<DdType::CUDD, ValueType> getAddUndefined() const;
+
             /*!
              * Retrieves an ADD representing the constant function with the given value.
              *
@@ -111,6 +126,11 @@ namespace storm {
              */
             void triggerReordering();
             
+            /*!
+             * Performs a debug check if available.
+             */
+            void debugCheck() const;
+            
             /*!
              * Retrieves the number of DD variables managed by this manager.
              *
@@ -118,20 +138,23 @@ namespace storm {
              */
             uint_fast64_t getNumberOfDdVariables() const;
 
-        private:
             /*!
              * Retrieves the underlying CUDD manager.
              *
              * @return The underlying CUDD manager.
              */
             cudd::Cudd& getCuddManager();
-            
+
             /*!
              * Retrieves the underlying CUDD manager.
              *
              * @return The underlying CUDD manager.
              */
             cudd::Cudd const& getCuddManager() const;
+
+        private:
+            // Helper function to create the BDD whose encodings are less than or equal to a given bound.
+            DdNodePtr getBddEncodingLessOrEqualThanRec(uint64_t minimalValue, uint64_t maximalValue, uint64_t bound, DdNodePtr cube, uint64_t remainingDdVariables) const;
             
             // The manager responsible for the DDs created/modified with this DdManager.
             cudd::Cudd cuddManager;
diff --git a/src/storm/storage/dd/cudd/utility.h b/src/storm/storage/dd/cudd/utility.h
new file mode 100644
index 000000000..3b7935890
--- /dev/null
+++ b/src/storm/storage/dd/cudd/utility.h
@@ -0,0 +1,21 @@
+#pragma once
+
+#include <boost/functional/hash.hpp>
+
+// Include the C++-interface of CUDD.
+#include "cuddObj.hh"
+
+namespace storm {
+    namespace dd {
+        
+        struct CuddPointerPairHash {
+            std::size_t operator()(std::pair<DdNode const*, DdNode const*> const& pair) const {
+                std::hash<DdNode const*> hasher;
+                std::size_t seed = hasher(pair.first);
+                boost::hash_combine(seed, hasher(pair.second));
+                return seed;
+            }
+        };
+        
+    }
+}
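
The new functor is intended for standard containers keyed by pairs of CUDD nodes, for instance caches in the symbolic bisimulation code. A hedged usage sketch (the cache name, value type and helper function are illustrative, not part of storm):

    #include <unordered_map>
    #include <utility>

    #include "storm/storage/dd/cudd/utility.h"  // provides CuddPointerPairHash and pulls in cuddObj.hh

    // A cache from pairs of CUDD nodes to some computed value, using the pair hash above.
    using NodePairCache = std::unordered_map<std::pair<DdNode const*, DdNode const*>, int, storm::dd::CuddPointerPairHash>;

    void cacheExample(DdNode const* first, DdNode const* second) {
        NodePairCache cache;
        cache.emplace(std::make_pair(first, second), 42);  // illustrative value
    }
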
diff --git a/src/storm/storage/dd/sylvan/InternalSylvanAdd.cpp b/src/storm/storage/dd/sylvan/InternalSylvanAdd.cpp
index dfa171eef..46167f116 100644
--- a/src/storm/storage/dd/sylvan/InternalSylvanAdd.cpp
+++ b/src/storm/storage/dd/sylvan/InternalSylvanAdd.cpp
@@ -81,6 +81,11 @@ namespace storm {
         }
 #endif
         
+        template<typename ValueType>
+        bool InternalAdd<DdType::Sylvan, ValueType>::matchesVariableIndex(MTBDD const& node, uint64_t variableIndex, int64_t offset) {
+            return !mtbdd_isleaf(node) && static_cast<uint64_t>(sylvan_var(node) + offset) == variableIndex;
+        }
+        
         template<typename ValueType>
         bool InternalAdd<DdType::Sylvan, ValueType>::operator==(InternalAdd<DdType::Sylvan, ValueType> const& other) const {
             return this->sylvanMtbdd == other.sylvanMtbdd;
@@ -581,6 +586,18 @@ namespace storm {
             
             return InternalAdd<DdType::Sylvan, ValueType>(ddManager, this->sylvanMtbdd.AndExists(otherMatrix.sylvanMtbdd, summationVariables.getSylvanBdd()));
         }
+        
+#ifdef STORM_HAVE_CARL
+        template<>
+        InternalAdd<DdType::Sylvan, storm::RationalFunction> InternalAdd<DdType::Sylvan, storm::RationalFunction>::multiplyMatrix(InternalAdd<DdType::Sylvan, storm::RationalFunction> const& otherMatrix, std::vector<InternalBdd<DdType::Sylvan>> const& summationDdVariables) const {
+            InternalBdd<DdType::Sylvan> summationVariables = ddManager->getBddOne();
+            for (auto const& ddVariable : summationDdVariables) {
+                summationVariables &= ddVariable;
+            }
+            
+            return InternalAdd<DdType::Sylvan, storm::RationalFunction>(ddManager, this->sylvanMtbdd.AndExistsRF(otherMatrix.sylvanMtbdd, summationVariables.getSylvanBdd()));
+        }
+#endif
 
         template<>
         InternalAdd<DdType::Sylvan, storm::RationalNumber> InternalAdd<DdType::Sylvan, storm::RationalNumber>::multiplyMatrix(InternalAdd<DdType::Sylvan, storm::RationalNumber> const& otherMatrix, std::vector<InternalBdd<DdType::Sylvan>> const& summationDdVariables) const {
@@ -591,19 +608,39 @@ namespace storm {
             
             return InternalAdd<DdType::Sylvan, storm::RationalNumber>(ddManager, this->sylvanMtbdd.AndExistsRN(otherMatrix.sylvanMtbdd, summationVariables.getSylvanBdd()));
         }
-        
+
+        template<typename ValueType>
+        InternalAdd<DdType::Sylvan, ValueType> InternalAdd<DdType::Sylvan, ValueType>::multiplyMatrix(InternalBdd<DdType::Sylvan> const& otherMatrix, std::vector<InternalBdd<DdType::Sylvan>> const& summationDdVariables) const {
+            InternalBdd<DdType::Sylvan> summationVariables = ddManager->getBddOne();
+            for (auto const& ddVariable : summationDdVariables) {
+                summationVariables &= ddVariable;
+            }
+            
+            return InternalAdd<DdType::Sylvan, ValueType>(ddManager, this->sylvanMtbdd.AndExists(sylvan::Bdd(otherMatrix.getSylvanBdd().GetBDD()), summationVariables.getSylvanBdd()));
+        }
+
 #ifdef STORM_HAVE_CARL
         template<>
-        InternalAdd<DdType::Sylvan, storm::RationalFunction> InternalAdd<DdType::Sylvan, storm::RationalFunction>::multiplyMatrix(InternalAdd<DdType::Sylvan, storm::RationalFunction> const& otherMatrix, std::vector<InternalBdd<DdType::Sylvan>> const& summationDdVariables) const {
+        InternalAdd<DdType::Sylvan, storm::RationalFunction> InternalAdd<DdType::Sylvan, storm::RationalFunction>::multiplyMatrix(InternalBdd<DdType::Sylvan> const& otherMatrix, std::vector<InternalBdd<DdType::Sylvan>> const& summationDdVariables) const {
             InternalBdd<DdType::Sylvan> summationVariables = ddManager->getBddOne();
             for (auto const& ddVariable : summationDdVariables) {
                 summationVariables &= ddVariable;
             }
             
-            return InternalAdd<DdType::Sylvan, storm::RationalFunction>(ddManager, this->sylvanMtbdd.AndExistsRF(otherMatrix.sylvanMtbdd, summationVariables.getSylvanBdd()));
+            return InternalAdd<DdType::Sylvan, storm::RationalFunction>(ddManager, this->sylvanMtbdd.AndExistsRF(sylvan::Bdd(otherMatrix.getSylvanBdd().GetBDD()), summationVariables.getSylvanBdd()));
         }
 #endif
-        
+
+        template<>
+        InternalAdd<DdType::Sylvan, storm::RationalNumber> InternalAdd<DdType::Sylvan, storm::RationalNumber>::multiplyMatrix(InternalBdd<DdType::Sylvan> const& otherMatrix, std::vector<InternalBdd<DdType::Sylvan>> const& summationDdVariables) const {
+            InternalBdd<DdType::Sylvan> summationVariables = ddManager->getBddOne();
+            for (auto const& ddVariable : summationDdVariables) {
+                summationVariables &= ddVariable;
+            }
+            
+            return InternalAdd<DdType::Sylvan, storm::RationalNumber>(ddManager, this->sylvanMtbdd.AndExistsRN(sylvan::Bdd(otherMatrix.getSylvanBdd().GetBDD()), summationVariables.getSylvanBdd()));
+        }
+
         template<typename ValueType>
         InternalBdd<DdType::Sylvan> InternalAdd<DdType::Sylvan, ValueType>::greater(ValueType const& value) const {
             return InternalBdd<DdType::Sylvan>(ddManager, this->sylvanMtbdd.BddStrictThreshold(value));
@@ -751,7 +788,7 @@ namespace storm {
         }
         
         template<typename ValueType>
-        void InternalAdd<DdType::Sylvan, ValueType>::exportToDot(std::string const& filename, std::vector<std::string> const&) const {
+        void InternalAdd<DdType::Sylvan, ValueType>::exportToDot(std::string const& filename, std::vector<std::string> const&, bool) const {
             // Open the file, dump the DD and close it again.
             FILE* filePointer = fopen(filename.c_str() , "w");
             this->sylvanMtbdd.PrintDot(filePointer);
@@ -828,6 +865,11 @@ namespace storm {
             }
         }
         
+        template<typename ValueType>
+        InternalDdManager<DdType::Sylvan> const& InternalAdd<DdType::Sylvan, ValueType>::getInternalDdManager() const {
+            return *ddManager;
+        }
+        
         template<typename ValueType>
         void InternalAdd<DdType::Sylvan, ValueType>::composeWithExplicitVector(storm::dd::Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType>& targetVector, std::function<ValueType (ValueType const&, ValueType const&)> const& function) const {
             composeWithExplicitVectorRec(mtbdd_regular(this->getSylvanMtbdd().GetMTBDD()), mtbdd_hascomp(this->getSylvanMtbdd().GetMTBDD()), nullptr, 0, ddVariableIndices.size(), 0, odd, ddVariableIndices, targetVector, function);
diff --git a/src/storm/storage/dd/sylvan/InternalSylvanAdd.h b/src/storm/storage/dd/sylvan/InternalSylvanAdd.h
index cb11bcb5b..201048b6f 100644
--- a/src/storm/storage/dd/sylvan/InternalSylvanAdd.h
+++ b/src/storm/storage/dd/sylvan/InternalSylvanAdd.h
@@ -331,7 +331,17 @@ namespace storm {
              * @return An ADD representing the result of the matrix-matrix multiplication.
              */
             InternalAdd<DdType::Sylvan, ValueType> multiplyMatrix(InternalAdd<DdType::Sylvan, ValueType> const& otherMatrix, std::vector<InternalBdd<DdType::Sylvan>> const& summationDdVariables) const;
-            
+
+            /*!
+             * Multiplies the current ADD (representing a matrix) with the given 0/1-matrix (given as a BDD) by
+             * summing over the given meta variables.
+             *
+             * @param otherMatrix The matrix (as a BDD) with which to multiply.
+             * @param summationDdVariables The DD variables (represented as BDDs) over which to sum.
+             * @return An ADD representing the result of the matrix-matrix multiplication.
+             */
+            InternalAdd<DdType::Sylvan, ValueType> multiplyMatrix(InternalBdd<DdType::Sylvan> const& otherMatrix, std::vector<InternalBdd<DdType::Sylvan>> const& summationDdVariables) const;
+
             /*!
              * Computes a BDD that represents the function in which all assignments with a function value strictly
              * larger than the given value are mapped to one and all others to zero.
@@ -487,7 +497,7 @@ namespace storm {
              * @param filename The name of the file to which the DD is to be exported.
              * @param ddVariableNamesAsString The names of the DD variables to display in the dot file.
              */
-            void exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings) const;
+            void exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings, bool showVariablesIfPossible = true) const;
             
             /*!
              * Retrieves an iterator that points to the first meta variable assignment with a non-zero function value.
@@ -586,6 +596,32 @@ namespace storm {
              */
             Odd createOdd(std::vector<uint_fast64_t> const& ddVariableIndices) const;
             
+            InternalDdManager<DdType::Sylvan> const& getInternalDdManager() const;
+
+            /*!
+             * Retrieves the underlying sylvan MTBDD.
+             *
+             * @return The sylvan MTBDD.
+             */
+            sylvan::Mtbdd getSylvanMtbdd() const;
+
+            /*!
+             * Retrieves the value of the given node (that must be a leaf).
+             *
+             * @return The value of the leaf.
+             */
+            static ValueType getValue(MTBDD const& node);
+
+            /*!
+             * Retrieves whether the topmost variable in the MTBDD is the one with the given index.
+             *
+             * @param node The top node of the MTBDD.
+             * @param variableIndex The variable index.
+             * @param offset An offset that is applied to the index of the top variable in the MTBDD.
+             * @return True iff the MTBDD's top variable has the given index.
+             */
+            static bool matchesVariableIndex(MTBDD const& node, uint64_t variableIndex, int64_t offset = 0);
+            
         private:
             /*!
              * Recursively builds the ODD from an ADD.
@@ -714,20 +750,6 @@ namespace storm {
 			static MTBDD getLeaf(storm::RationalFunction const& value);
 #endif
             
-            /*!
-             * Retrieves the value of the given node (that must be a leaf).
-             *
-             * @return The value of the leaf.
-             */
-            static ValueType getValue(MTBDD const& node);
-            
-            /*!
-             * Retrieves the underlying sylvan MTBDD.
-             *
-             * @return The sylvan MTBDD.
-             */
-            sylvan::Mtbdd getSylvanMtbdd() const;
-            
             // The manager responsible for this MTBDD.
             InternalDdManager<DdType::Sylvan> const* ddManager;
             
diff --git a/src/storm/storage/dd/sylvan/InternalSylvanBdd.cpp b/src/storm/storage/dd/sylvan/InternalSylvanBdd.cpp
index 83a7f26b7..00e12dfdb 100644
--- a/src/storm/storage/dd/sylvan/InternalSylvanBdd.cpp
+++ b/src/storm/storage/dd/sylvan/InternalSylvanBdd.cpp
@@ -213,7 +213,7 @@ namespace storm {
         
         uint_fast64_t InternalBdd<DdType::Sylvan>::getNodeCount() const {
-            // We have to add one to also count the false-leaf, which is the only leaf appearing in BDDs.
-            return static_cast<uint_fast64_t>(this->sylvanBdd.NodeCount()) + 1;
+            return static_cast<uint_fast64_t>(this->sylvanBdd.NodeCount());
         }
         
         bool InternalBdd<DdType::Sylvan>::isOne() const {
@@ -232,7 +232,7 @@ namespace storm {
             return this->getIndex();
         }
         
-        void InternalBdd<DdType::Sylvan>::exportToDot(std::string const& filename, std::vector<std::string> const&) const {
+        void InternalBdd<DdType::Sylvan>::exportToDot(std::string const& filename, std::vector<std::string> const&, bool) const {
             FILE* filePointer = fopen(filename.c_str() , "w");
             this->sylvanBdd.PrintDot(filePointer);
             fclose(filePointer);
@@ -283,7 +283,7 @@ namespace storm {
             // If we are at the maximal level, the value to be set is stored as a constant in the DD.
             if (currentRowLevel == maxLevel) {
                 result.set(currentRowOffset, true);
-            } else if (ddRowVariableIndices[currentRowLevel] < sylvan_var(dd)) {
+            } else if (bdd_isterminal(dd) || ddRowVariableIndices[currentRowLevel] < sylvan_var(dd)) {
                 toVectorRec(dd, result, rowOdd.getElseSuccessor(), complement, currentRowLevel + 1, maxLevel, currentRowOffset, ddRowVariableIndices);
                 toVectorRec(dd, result, rowOdd.getThenSuccessor(), complement, currentRowLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), ddRowVariableIndices);
             } else {
@@ -390,7 +390,7 @@ namespace storm {
             
             if (currentLevel == maxLevel) {
                 result[currentIndex++] = values[currentOffset];
-            } else if (ddVariableIndices[currentLevel] < sylvan_var(dd)) {
+            } else if (bdd_isterminal(dd) || ddVariableIndices[currentLevel] < sylvan_var(dd)) {
                 // If we skipped a level, we need to enumerate the explicit entries for the case in which the bit is set
                 // and for the one in which it is not set.
                 filterExplicitVectorRec(dd, currentLevel + 1, complement, maxLevel, ddVariableIndices, currentOffset, odd.getElseSuccessor(), result, currentIndex, values);
@@ -424,7 +424,7 @@ namespace storm {
             
             if (currentLevel == maxLevel) {
                 result.set(currentIndex++, values.get(currentOffset));
-            } else if (ddVariableIndices[currentLevel] < sylvan_var(dd)) {
+            } else if (bdd_isterminal(dd) || ddVariableIndices[currentLevel] < sylvan_var(dd)) {
                 // If we skipped a level, we need to enumerate the explicit entries for the case in which the bit is set
                 // and for the one in which it is not set.
                 filterExplicitVectorRec(dd, currentLevel + 1, complement, maxLevel, ddVariableIndices, currentOffset, odd.getElseSuccessor(), result, currentIndex, values);
@@ -527,6 +527,10 @@ namespace storm {
             return newNodeVariable;
         }
         
+        bool InternalBdd<DdType::Sylvan>::matchesVariableIndex(BDD const& node, uint64_t variableIndex, int64_t offset) {
+            return !sylvan_isconst(node) && static_cast<uint64_t>(sylvan_var(node) + offset) == variableIndex;
+        }
+        
         template InternalAdd<DdType::Sylvan, double> InternalBdd<DdType::Sylvan>::toAdd() const;
         template InternalAdd<DdType::Sylvan, uint_fast64_t> InternalBdd<DdType::Sylvan>::toAdd() const;
         template InternalAdd<DdType::Sylvan, storm::RationalNumber> InternalBdd<DdType::Sylvan>::toAdd() const;
diff --git a/src/storm/storage/dd/sylvan/InternalSylvanBdd.h b/src/storm/storage/dd/sylvan/InternalSylvanBdd.h
index e44cc10a7..6c07449c7 100644
--- a/src/storm/storage/dd/sylvan/InternalSylvanBdd.h
+++ b/src/storm/storage/dd/sylvan/InternalSylvanBdd.h
@@ -316,7 +316,7 @@ namespace storm {
              * @param filename The name of the file to which the BDD is to be exported.
              * @param ddVariableNamesAsStrings The names of the variables to display in the dot file.
              */
-            void exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings) const;
+            void exportToDot(std::string const& filename, std::vector<std::string> const& ddVariableNamesAsStrings, bool showVariablesIfPossible = true) const;
             
             /*!
              * Converts a BDD to an equivalent ADD.
@@ -374,8 +374,32 @@ namespace storm {
              */
             void filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, storm::storage::BitVector const& sourceValues, storm::storage::BitVector& targetValues) const;
 
+            /*!
+             * Retrieves whether the topmost variable in the BDD is the one with the given index.
+             *
+             * @param node The top node of the BDD.
+             * @param variableIndex The variable index.
+             * @param offset An offset that is applied to the index of the top variable in the BDD.
+             * @return True iff the BDD's top variable has the given index.
+             */
+            static bool matchesVariableIndex(BDD const& node, uint64_t variableIndex, int64_t offset = 0);
+            
             friend struct std::hash<storm::dd::InternalBdd<storm::dd::DdType::Sylvan>>;
             
+            /*!
+             * Retrieves the sylvan BDD.
+             *
+             * @return The sylvan BDD.
+             */
+            sylvan::Bdd& getSylvanBdd();
+            
+            /*!
+             * Retrieves the sylvan BDD.
+             *
+             * @return The sylvan BDD.
+             */
+            sylvan::Bdd const& getSylvanBdd() const;
+            
         private:
             /*!
              * Builds a BDD representing the values that make the given filter function evaluate to true.
@@ -474,19 +498,6 @@ namespace storm {
              */
             static storm::expressions::Variable toExpressionRec(BDD dd, storm::expressions::ExpressionManager& manager, std::vector<storm::expressions::Expression>& expressions, std::unordered_map<uint_fast64_t, storm::expressions::Variable>& indexToVariableMap, std::unordered_map<std::pair<uint_fast64_t, uint_fast64_t>, storm::expressions::Variable>& countIndexToVariablePair, std::unordered_map<BDD, uint_fast64_t>& nodeToCounterMap, std::vector<uint_fast64_t>& nextCounterForIndex);
             
-            /*!
-             * Retrieves the sylvan BDD.
-             *
-             * @return The sylvan BDD.
-             */
-            sylvan::Bdd& getSylvanBdd();
-
-            /*!
-             * Retrieves the sylvan BDD.
-             *
-             * @return The sylvan BDD.
-             */
-            sylvan::Bdd const& getSylvanBdd() const;
             
             // The internal manager responsible for this BDD.
             InternalDdManager<DdType::Sylvan> const* ddManager;
diff --git a/src/storm/storage/dd/sylvan/InternalSylvanDdManager.cpp b/src/storm/storage/dd/sylvan/InternalSylvanDdManager.cpp
index c026fbb1f..29f9a3208 100644
--- a/src/storm/storage/dd/sylvan/InternalSylvanDdManager.cpp
+++ b/src/storm/storage/dd/sylvan/InternalSylvanDdManager.cpp
@@ -9,6 +9,7 @@
 #include "storm/utility/constants.h"
 #include "storm/utility/macros.h"
 #include "storm/exceptions/NotSupportedException.h"
+#include "storm/exceptions/InvalidSettingsException.h"
 
 #include "storm/utility/sylvan.h"
 
@@ -16,6 +17,17 @@
 
 namespace storm {
     namespace dd {
+        
+#ifndef NDEBUG
+        VOID_TASK_0(gc_start) {
+            STORM_LOG_TRACE("Starting sylvan garbage collection...");
+        }
+        
+        VOID_TASK_0(gc_end) {
+            STORM_LOG_TRACE("Sylvan garbage collection done.");
+        }
+#endif
+        
         uint_fast64_t InternalDdManager<DdType::Sylvan>::numberOfInstances = 0;
         
         // It is important that the variable pairs start at an even offset, because sylvan assumes this to be true for
@@ -35,9 +47,9 @@ namespace storm {
             if (numberOfInstances == 0) {
                 storm::settings::modules::SylvanSettings const& settings = storm::settings::getModule<storm::settings::modules::SylvanSettings>();
                 if (settings.isNumberOfThreadsSet()) {
-                    lace_init(settings.getNumberOfThreads(), 1000000);
+                    lace_init(settings.getNumberOfThreads(), 1024*1024*16);
                 } else {
-                    lace_init(0, 1000000);
+                    lace_init(0, 1024*1024*16);
                 }
                 lace_startup(0, 0, 0);
                 
@@ -47,10 +59,29 @@ namespace storm {
                 // Compute the power of two that still fits within the total numbers to store.
                 uint_fast64_t powerOfTwo = findLargestPowerOfTwoFitting(totalNodesToStore);
                 
-                sylvan::Sylvan::initPackage(1ull << std::max(16ull, powerOfTwo > 24 ? powerOfTwo - 8 : 0ull), 1ull << (powerOfTwo - 1), 1ull << std::max(16ull, powerOfTwo > 24 ? powerOfTwo - 12 : 0ull), 1ull << (powerOfTwo - 1));
+                STORM_LOG_THROW(powerOfTwo >= 16, storm::exceptions::InvalidSettingsException, "Too little memory assigned to sylvan.");
+                
+                uint64_t maxTableSize = 1ull << powerOfTwo;
+                uint64_t maxCacheSize = 1ull << (powerOfTwo - 1);
+                if (maxTableSize + maxCacheSize > totalNodesToStore) {
+                    maxTableSize >>= 1;
+                }
+                
+                uint64_t initialTableSize = 1ull << std::max(powerOfTwo - 4, static_cast<uint_fast64_t>(16));
+                uint64_t initialCacheSize = initialTableSize;
+                
+                STORM_LOG_DEBUG("Initializing sylvan. Initial/max table size: " << initialTableSize << "/" << maxTableSize << ", initial/max cache size: " << initialCacheSize << "/" << maxCacheSize << ".");
+                sylvan::Sylvan::initPackage(initialTableSize, maxTableSize, initialCacheSize, maxCacheSize);
+
                 sylvan::Sylvan::initBdd();
                 sylvan::Sylvan::initMtbdd();
                 sylvan::Sylvan::initCustomMtbdd();
+                
+#ifndef NDEBUG
+                sylvan_gc_hook_pregc(TASK(gc_start));
+                sylvan_gc_hook_postgc(TASK(gc_end));
+#endif
+
             }
             ++numberOfInstances;
         }
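
To make the sizing logic above concrete, the following sketch replays the arithmetic outside of sylvan. The value of totalNodesToStore is hypothetical (in storm it is derived from the sylvan memory setting), and findLargestPowerOfTwoFitting is assumed to return the largest p with 2^p <= totalNodesToStore:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    int main() {
        uint64_t totalNodesToStore = 100000000ull;  // hypothetical budget of table + cache entries
        uint64_t powerOfTwo = 26;                   // largest p with 2^p <= totalNodesToStore

        uint64_t maxTableSize = 1ull << powerOfTwo;        // 67108864
        uint64_t maxCacheSize = 1ull << (powerOfTwo - 1);  // 33554432
        if (maxTableSize + maxCacheSize > totalNodesToStore) {
            maxTableSize >>= 1;                            // halved to 33554432 in this example
        }
        uint64_t initialTableSize = 1ull << std::max<uint64_t>(powerOfTwo - 4, 16);  // 4194304
        uint64_t initialCacheSize = initialTableSize;

        std::cout << "table " << initialTableSize << "/" << maxTableSize
                  << ", cache " << initialCacheSize << "/" << maxCacheSize << std::endl;
        return 0;
    }
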
@@ -98,6 +129,28 @@ namespace storm {
             return InternalBdd<DdType::Sylvan>(this, sylvan::Bdd::bddZero());
         }
         
+        InternalBdd<DdType::Sylvan> InternalDdManager<DdType::Sylvan>::getBddEncodingLessOrEqualThan(uint64_t bound, InternalBdd<DdType::Sylvan> const& cube, uint64_t numberOfDdVariables) const {
+            return InternalBdd<DdType::Sylvan>(this, sylvan::Bdd(this->getBddEncodingLessOrEqualThanRec(0, (1ull << numberOfDdVariables) - 1, bound, cube.getSylvanBdd().GetBDD(), numberOfDdVariables)));
+        }
+        
+        BDD InternalDdManager<DdType::Sylvan>::getBddEncodingLessOrEqualThanRec(uint64_t minimalValue, uint64_t maximalValue, uint64_t bound, BDD cube, uint64_t remainingDdVariables) const {
+            if (maximalValue <= bound) {
+                return sylvan_true;
+            } else if (minimalValue > bound) {
+                return sylvan_false;
+            }
+            
+            STORM_LOG_ASSERT(remainingDdVariables > 0, "Expected more remaining DD variables.");
+            uint64_t newRemainingDdVariables = remainingDdVariables - 1;
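+            // Keep the intermediate results referenced so that sylvan's garbage collection cannot reclaim them
+            // before the parent node has been created.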
+            BDD elseResult = getBddEncodingLessOrEqualThanRec(minimalValue, maximalValue & ~(1ull << newRemainingDdVariables), bound, sylvan_high(cube), newRemainingDdVariables);
+            bdd_refs_push(elseResult);
+            BDD thenResult = getBddEncodingLessOrEqualThanRec(minimalValue | (1ull << newRemainingDdVariables), maximalValue, bound, sylvan_high(cube), newRemainingDdVariables);
+            bdd_refs_push(thenResult);
+            BDD result = sylvan_makenode(sylvan_var(cube), elseResult, thenResult);
+            bdd_refs_pop(2);
+            return result;
+        }
+        
         template<>
         InternalAdd<DdType::Sylvan, double> InternalDdManager<DdType::Sylvan>::getAddZero() const {
             return InternalAdd<DdType::Sylvan, double>(this, sylvan::Mtbdd::doubleTerminal(storm::utility::zero<double>()));
@@ -119,7 +172,12 @@ namespace storm {
 			return InternalAdd<DdType::Sylvan, storm::RationalFunction>(this, sylvan::Mtbdd::stormRationalFunctionTerminal(storm::utility::zero<storm::RationalFunction>()));
 		}
 #endif
-        
+
+        template<typename ValueType>
+        InternalAdd<DdType::Sylvan, ValueType> InternalDdManager<DdType::Sylvan>::getAddUndefined() const {
+            return InternalAdd<DdType::Sylvan, ValueType>(this, sylvan::Mtbdd(sylvan::Bdd::bddZero()));
+        }
+
         template<>
         InternalAdd<DdType::Sylvan, double> InternalDdManager<DdType::Sylvan>::getConstant(double const& value) const {
             return InternalAdd<DdType::Sylvan, double>(this, sylvan::Mtbdd::doubleTerminal(value));
@@ -171,6 +229,10 @@ namespace storm {
             STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Operation is not supported by sylvan.");
         }
         
+        void InternalDdManager<DdType::Sylvan>::debugCheck() const {
+            STORM_LOG_THROW(false, storm::exceptions::NotSupportedException, "Operation is not supported by sylvan.");
+        }
+        
         uint_fast64_t InternalDdManager<DdType::Sylvan>::getNumberOfDdVariables() const {
             return nextFreeVariableIndex;
         }
@@ -192,6 +254,15 @@ namespace storm {
 #ifdef STORM_HAVE_CARL
 		template InternalAdd<DdType::Sylvan, storm::RationalFunction> InternalDdManager<DdType::Sylvan>::getAddZero() const;
 #endif
+
+        template InternalAdd<DdType::Sylvan, double> InternalDdManager<DdType::Sylvan>::getAddUndefined() const;
+        template InternalAdd<DdType::Sylvan, uint_fast64_t> InternalDdManager<DdType::Sylvan>::getAddUndefined() const;
+        
+        template InternalAdd<DdType::Sylvan, storm::RationalNumber> InternalDdManager<DdType::Sylvan>::getAddUndefined() const;
+
+#ifdef STORM_HAVE_CARL
+        template InternalAdd<DdType::Sylvan, storm::RationalFunction> InternalDdManager<DdType::Sylvan>::getAddUndefined() const;
+#endif
         
         template InternalAdd<DdType::Sylvan, double> InternalDdManager<DdType::Sylvan>::getConstant(double const& value) const;
         template InternalAdd<DdType::Sylvan, uint_fast64_t> InternalDdManager<DdType::Sylvan>::getConstant(uint_fast64_t const& value) const;
diff --git a/src/storm/storage/dd/sylvan/InternalSylvanDdManager.h b/src/storm/storage/dd/sylvan/InternalSylvanDdManager.h
index 24e3a354a..408817965 100644
--- a/src/storm/storage/dd/sylvan/InternalSylvanDdManager.h
+++ b/src/storm/storage/dd/sylvan/InternalSylvanDdManager.h
@@ -60,6 +60,13 @@ namespace storm {
              */
             InternalBdd<DdType::Sylvan> getBddZero() const;
             
+            /*!
+             * Retrieves a BDD that evaluates to true iff the encoding is less than or equal to the given bound.
+             *
+             * @return A BDD containing exactly the encodings of values less than or equal to the bound.
+             */
+            InternalBdd<DdType::Sylvan> getBddEncodingLessOrEqualThan(uint64_t bound, InternalBdd<DdType::Sylvan> const& cube, uint64_t numberOfDdVariables) const;
+
             /*!
              * Retrieves an ADD representing the constant zero function.
              *
@@ -68,6 +75,14 @@ namespace storm {
             template<typename ValueType>
             InternalAdd<DdType::Sylvan, ValueType> getAddZero() const;
             
+            /*!
+             * Retrieves an ADD representing an undefined value.
+             *
+             * @return An ADD representing an undefined value.
+             */
+            template<typename ValueType>
+            InternalAdd<DdType::Sylvan, ValueType> getAddUndefined() const;
+            
             /*!
              * Retrieves an ADD representing the constant function with the given value.
              *
@@ -112,6 +127,11 @@ namespace storm {
              */
             void triggerReordering();
             
+            /*!
+             * Performs a debug check if available.
+             */
+            void debugCheck() const;
+            
             /*!
              * Retrieves the number of DD variables managed by this manager.
              *
@@ -120,6 +140,9 @@ namespace storm {
             uint_fast64_t getNumberOfDdVariables() const;
             
         private:
+            // Helper function to create the BDD whose encodings are less than or equal to a given bound.
+            BDD getBddEncodingLessOrEqualThanRec(uint64_t minimalValue, uint64_t maximalValue, uint64_t bound, BDD cube, uint64_t remainingDdVariables) const;
+            
             // A counter for the number of instances of this class. This is used to determine when to initialize and
             // quit the sylvan. This is because Sylvan does not know the concept of managers but implicitly has a
             // 'global' manager.
diff --git a/src/storm/storage/dd/sylvan/utility.h b/src/storm/storage/dd/sylvan/utility.h
new file mode 100644
index 000000000..02a06a3bf
--- /dev/null
+++ b/src/storm/storage/dd/sylvan/utility.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "storm/utility/sylvan.h"
+
+#include <boost/functional/hash.hpp>
+
+namespace storm {
+    namespace dd {
+        
+        struct SylvanMTBDDPairHash {
+            std::size_t operator()(std::pair<MTBDD, MTBDD> const& pair) const {
+                std::hash<MTBDD> hasher;
+                std::size_t seed = hasher(pair.first);
+                boost::hash_combine(seed, hasher(pair.second));
+                return seed;
+            }
+        };
+        
+        struct SylvanMTBDDPairLess {
+            std::size_t operator()(std::pair<MTBDD, MTBDD> const& a, std::pair<MTBDD, MTBDD> const& b) const {
+                if (a.first < b.first) {
+                    return true;
+                } else if (a.first == b.first && a.second < b.second) {
+                    return true;
+                }
+                return false;
+            }
+        };
+        
+    }
+}
diff --git a/src/storm/storage/expressions/LinearityCheckVisitor.cpp b/src/storm/storage/expressions/LinearityCheckVisitor.cpp
index 18198ca0e..9eeb3ea56 100644
--- a/src/storm/storage/expressions/LinearityCheckVisitor.cpp
+++ b/src/storm/storage/expressions/LinearityCheckVisitor.cpp
@@ -125,7 +125,7 @@ namespace storm {
             STORM_LOG_THROW(false, storm::exceptions::InvalidOperationException, "Illegal unary numerical expression operator.");
         }
         
-        boost::any LinearityCheckVisitor::visit(BooleanLiteralExpression const& expression, boost::any const& data) {
+        boost::any LinearityCheckVisitor::visit(BooleanLiteralExpression const&, boost::any const& data) {
             bool booleanIsLinear = boost::any_cast<bool>(data);
             
             if (booleanIsLinear) {
diff --git a/src/storm/storage/expressions/ToRationalNumberVisitor.cpp b/src/storm/storage/expressions/ToRationalNumberVisitor.cpp
index f7aa3236d..15a8d61eb 100644
--- a/src/storm/storage/expressions/ToRationalNumberVisitor.cpp
+++ b/src/storm/storage/expressions/ToRationalNumberVisitor.cpp
@@ -119,6 +119,8 @@ namespace storm {
                     return result;
                     break;
             }
+            // Dummy return.
+            return result;
         }
         
         template<typename RationalNumberType>
diff --git a/src/storm/storage/prism/ToJaniConverter.cpp b/src/storm/storage/prism/ToJaniConverter.cpp
index 6bc39b83c..7719b08e5 100644
--- a/src/storm/storage/prism/ToJaniConverter.cpp
+++ b/src/storm/storage/prism/ToJaniConverter.cpp
@@ -113,7 +113,7 @@ namespace storm {
             // edges and transient assignments that are added to the locations.
             std::map<uint_fast64_t, std::vector<storm::jani::Assignment>> transientEdgeAssignments;
             for (auto const& rewardModel : program.getRewardModels()) {
-                auto newExpressionVariable = manager->declareRationalVariable(rewardModel.getName().empty() ? "default" : rewardModel.getName());
+                auto newExpressionVariable = manager->declareRationalVariable(rewardModel.getName().empty() ? "default_reward_model" : rewardModel.getName());
                 storm::jani::RealVariable const& newTransientVariable = janiModel.addVariable(storm::jani::RealVariable(rewardModel.getName().empty() ? "default" : rewardModel.getName(), newExpressionVariable, manager->rational(0.0), true));
                 
                 if (rewardModel.hasStateRewards()) {
diff --git a/src/storm/transformer/ChoiceSelector.cpp b/src/storm/transformer/ChoiceSelector.cpp
new file mode 100644
index 000000000..1044cb42c
--- /dev/null
+++ b/src/storm/transformer/ChoiceSelector.cpp
@@ -0,0 +1,26 @@
+#include "storm/transformer/ChoiceSelector.h"
+#include "storm/models/sparse/Mdp.h"
+
+namespace storm {
+    namespace transformer {
+        template <typename ValueType, typename RewardModelType>
+        std::shared_ptr<storm::models::sparse::NondeterministicModel<ValueType, RewardModelType>> ChoiceSelector<ValueType, RewardModelType>::transform(storm::storage::BitVector const& enabledActions) const
+        {
+            storm::storage::sparse::ModelComponents<ValueType, RewardModelType> newComponents(inputModel.getTransitionMatrix().restrictRows(enabledActions));
+            newComponents.stateLabeling = inputModel.getStateLabeling();
+            for (auto const& rewardModel : inputModel.getRewardModels()) {
+                newComponents.rewardModels.emplace(rewardModel.first, rewardModel.second.restrictActions(enabledActions));
+            }
+            if (inputModel.hasChoiceLabeling()) {
+                newComponents.choiceLabeling = inputModel.getChoiceLabeling().getSubLabeling(enabledActions);
+            }
+            newComponents.stateValuations = inputModel.getOptionalStateValuations();
+            if (inputModel.hasChoiceOrigins()) {
+                newComponents.choiceOrigins = inputModel.getChoiceOrigins()->selectChoices(enabledActions);
+            }
+            return std::make_shared<storm::models::sparse::Mdp<ValueType, RewardModelType>>(std::move(newComponents));
+        }
+
+        template class ChoiceSelector<double>;
+    }
+}
diff --git a/src/storm/transformer/ChoiceSelector.h b/src/storm/transformer/ChoiceSelector.h
new file mode 100644
index 000000000..70ed20974
--- /dev/null
+++ b/src/storm/transformer/ChoiceSelector.h
@@ -0,0 +1,30 @@
+#pragma once
+
+#include "storm/models/sparse/StandardRewardModel.h"
+#include "storm/models/sparse/NondeterministicModel.h"
+
+
+namespace storm {
+    namespace transformer {
+
+        template<typename ValueType, typename RewardModelType = storm::models::sparse::StandardRewardModel<ValueType>>
+        class ChoiceSelector {
+        public:
+            ChoiceSelector(storm::models::sparse::NondeterministicModel<ValueType, RewardModelType> const& inputModel) : inputModel(inputModel) {
+
+            }
+
+            /*!
+             * Constructs an MDP by copying the current model and restricting the choices of each state to the ones given by the bit vector.
+             *
+             * @param enabledActions A BitVector of length numberOfChoices(), where a bit is set iff the corresponding action should be kept.
+             * @return A subMDP.
+            */
+            std::shared_ptr<storm::models::sparse::NondeterministicModel<ValueType, RewardModelType>> transform(storm::storage::BitVector const& enabledActions) const;
+        private:
+
+            storm::models::sparse::NondeterministicModel<ValueType, RewardModelType> const& inputModel;
+        };
+
+    }
+}
\ No newline at end of file
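
A hedged usage sketch for the new transformer; keepSelectedChoices is a hypothetical helper, and the model and bit vector are assumed to come from elsewhere:

    #include <memory>

    #include "storm/models/sparse/Mdp.h"
    #include "storm/storage/BitVector.h"
    #include "storm/transformer/ChoiceSelector.h"

    // Returns a copy of the MDP in which only the choices marked in 'enabledActions' remain.
    std::shared_ptr<storm::models::sparse::NondeterministicModel<double>> keepSelectedChoices(
            storm::models::sparse::Mdp<double> const& mdp, storm::storage::BitVector const& enabledActions) {
        storm::transformer::ChoiceSelector<double> selector(mdp);
        return selector.transform(enabledActions);
    }
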
diff --git a/src/storm/transformer/SymbolicToSparseTransformer.cpp b/src/storm/transformer/SymbolicToSparseTransformer.cpp
index 69b02acfb..8ebac17de 100644
--- a/src/storm/transformer/SymbolicToSparseTransformer.cpp
+++ b/src/storm/transformer/SymbolicToSparseTransformer.cpp
@@ -13,6 +13,7 @@ namespace storm {
 
         template<storm::dd::DdType Type, typename ValueType>
         std::shared_ptr<storm::models::sparse::Dtmc<ValueType>> SymbolicDtmcToSparseDtmcTransformer<Type, ValueType>::translate(storm::models::symbolic::Dtmc<Type, ValueType> const& symbolicDtmc) {
+            
             this->odd = symbolicDtmc.getReachableStates().createOdd();
             storm::storage::SparseMatrix<ValueType> transitionMatrix = symbolicDtmc.getTransitionMatrix().toMatrix(this->odd, this->odd);
             std::unordered_map<std::string, storm::models::sparse::StandardRewardModel<ValueType>> rewardModels;
diff --git a/src/storm/utility/DirectEncodingExporter.cpp b/src/storm/utility/DirectEncodingExporter.cpp
index 8c93b3b7f..6bdd629a7 100644
--- a/src/storm/utility/DirectEncodingExporter.cpp
+++ b/src/storm/utility/DirectEncodingExporter.cpp
@@ -88,7 +88,19 @@ namespace storm {
                 // Iterate over all actions
                 for (typename storm::storage::SparseMatrix<ValueType>::index_type row = start; row < end; ++row) {
                     // Print the actual row.
-                    os << "\taction " << row - start;
+                    if (sparseModel->hasChoiceLabeling()) {
+                        os << "\taction ";
+                        bool lfirst = true;
+                        for (auto const& label : sparseModel->getChoiceLabeling().getLabelsOfChoice(row)) {
+                            if (!lfirst) {
+                                os << "_";
+                            }
+                            os << label;
+                            lfirst = false;
+                        }
+                    } else {
+                        os << "\taction " << row - start;
+                    }
                     bool first = true;
                     // Write transition rewards
                     for (auto const& rewardModelEntry : sparseModel->getRewardModels()) {
@@ -110,11 +122,6 @@ namespace storm {
                         os << "]";
                     }
 
-                    // Write choice labeling
-                    if(sparseModel->hasChoiceLabeling()) {
-                        // TODO export choice labeling
-                        STORM_LOG_WARN("Choice labeling was not exported.");
-                    }
                     os << std::endl;
                     
                     // Write probabilities
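
With this change, the exported action line shows the choice's labels joined by underscores instead of the local row index (which is still used when the model has no choice labeling). A small sketch of the joining logic (the label names are made up for illustration):

    #include <iostream>
    #include <set>
    #include <string>

    // Mirrors how the exporter concatenates the labels of a choice for the "action" line.
    std::string joinLabels(std::set<std::string> const& labels) {
        std::string result;
        bool first = true;
        for (auto const& label : labels) {
            if (!first) {
                result += "_";
            }
            result += label;
            first = false;
        }
        return result;
    }

    int main() {
        std::cout << "\taction " << joinLabels({"east", "scan"}) << std::endl;  // prints: 	action east_scan
        return 0;
    }
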
diff --git a/src/storm/utility/dd.cpp b/src/storm/utility/dd.cpp
index b4b74a19a..47d15347e 100644
--- a/src/storm/utility/dd.cpp
+++ b/src/storm/utility/dd.cpp
@@ -45,38 +45,17 @@ namespace storm {
                 return reachableStates;
             }
             
-            template <storm::dd::DdType Type, typename ValueType>
-            storm::dd::Add<Type, ValueType> getRowColumnDiagonal(storm::dd::DdManager<Type> const& ddManager, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs) {
-                storm::dd::Add<Type, ValueType> result = ddManager.template getAddOne<ValueType>();
-                for (auto const& pair : rowColumnMetaVariablePairs) {
-                    result *= ddManager.template getIdentity<ValueType>(pair.first).equals(ddManager.template getIdentity<ValueType>(pair.second)).template toAdd<ValueType>();
-                    result *= ddManager.getRange(pair.first).template toAdd<ValueType>() * ddManager.getRange(pair.second).template toAdd<ValueType>();
-                }
-                return result;
-            }
-            
             template <storm::dd::DdType Type>
             storm::dd::Bdd<Type> getRowColumnDiagonal(storm::dd::DdManager<Type> const& ddManager, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs) {
-                storm::dd::Bdd<Type> diagonal = ddManager.getBddOne();
-                for (auto const& pair : rowColumnMetaVariablePairs) {
-                    diagonal &= ddManager.template getIdentity<uint64_t>(pair.first).equals(ddManager.template getIdentity<uint64_t>(pair.second));
-                    diagonal &= ddManager.getRange(pair.first) && ddManager.getRange(pair.second);
-                }
-                return diagonal;
+                return ddManager.getIdentity(rowColumnMetaVariablePairs);
             }
             
             template storm::dd::Bdd<storm::dd::DdType::CUDD> computeReachableStates(storm::dd::Bdd<storm::dd::DdType::CUDD> const& initialStates, storm::dd::Bdd<storm::dd::DdType::CUDD> const& transitions, std::set<storm::expressions::Variable> const& rowMetaVariables, std::set<storm::expressions::Variable> const& columnMetaVariables);
             template storm::dd::Bdd<storm::dd::DdType::Sylvan> computeReachableStates(storm::dd::Bdd<storm::dd::DdType::Sylvan> const& initialStates, storm::dd::Bdd<storm::dd::DdType::Sylvan> const& transitions, std::set<storm::expressions::Variable> const& rowMetaVariables, std::set<storm::expressions::Variable> const& columnMetaVariables);
 
-            template storm::dd::Add<storm::dd::DdType::CUDD, double> getRowColumnDiagonal(storm::dd::DdManager<storm::dd::DdType::CUDD> const& ddManager, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs);
-            template storm::dd::Add<storm::dd::DdType::Sylvan, double> getRowColumnDiagonal(storm::dd::DdManager<storm::dd::DdType::Sylvan> const& ddManager, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs);
-
             template storm::dd::Bdd<storm::dd::DdType::CUDD> getRowColumnDiagonal(storm::dd::DdManager<storm::dd::DdType::CUDD> const& ddManager, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs);
             template storm::dd::Bdd<storm::dd::DdType::Sylvan> getRowColumnDiagonal(storm::dd::DdManager<storm::dd::DdType::Sylvan> const& ddManager, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs);
 
-            template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalNumber> getRowColumnDiagonal(storm::dd::DdManager<storm::dd::DdType::Sylvan> const& ddManager, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs);
-            template storm::dd::Add<storm::dd::DdType::Sylvan, storm::RationalFunction> getRowColumnDiagonal(storm::dd::DdManager<storm::dd::DdType::Sylvan> const& ddManager, std::vector<std::pair<storm::expressions::Variable, storm::expressions::Variable>> const& rowColumnMetaVariablePairs);
-
         }
     }
 }
diff --git a/src/storm/utility/sylvan.h b/src/storm/utility/sylvan.h
index c068b6ab9..5f351b946 100644
--- a/src/storm/utility/sylvan.h
+++ b/src/storm/utility/sylvan.h
@@ -13,6 +13,7 @@
 #pragma GCC system_header // Only way to suppress some warnings atm.
 
 #include "sylvan_obj.hpp"
+#include "sylvan_mtbdd_storm.h"
 #include "sylvan_storm_rational_number.h"
 #include "sylvan_storm_rational_function.h"
 
diff --git a/src/test/storage/SymbolicBisimulationDecompositionTest.cpp b/src/test/storage/SymbolicBisimulationDecompositionTest.cpp
new file mode 100644
index 000000000..4948f7906
--- /dev/null
+++ b/src/test/storage/SymbolicBisimulationDecompositionTest.cpp
@@ -0,0 +1,27 @@
+#include "gtest/gtest.h"
+#include "storm-config.h"
+#include "storm/parser/PrismParser.h"
+#include "storm/storage/SymbolicModelDescription.h"
+#include "storm/builder/DdPrismModelBuilder.h"
+#include "storm/models/symbolic/Dtmc.h"
+#include "storm/storage/dd/BisimulationDecomposition.h"
+
+TEST(SymbolicBisimulationDecompositionTest_Cudd, Die) {
+    storm::storage::SymbolicModelDescription modelDescription = storm::parser::PrismParser::parse(STORM_TEST_RESOURCES_DIR "/dtmc/die.pm");
+    storm::prism::Program program = modelDescription.preprocess().asPrismProgram();
+    
+    std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>> model = storm::builder::DdPrismModelBuilder<storm::dd::DdType::CUDD, double>().build(program);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition(*model, storm::dd::bisimulation::Partition<storm::dd::DdType::CUDD, double>::create(*model, {"one"}));
+    decomposition.compute();
+}
+
+TEST(SymbolicBisimulationDecompositionTest_Cudd, Crowds) {
+    storm::storage::SymbolicModelDescription modelDescription = storm::parser::PrismParser::parse(STORM_TEST_RESOURCES_DIR "/dtmc/crowds-5-5.pm");
+    storm::prism::Program program = modelDescription.preprocess().asPrismProgram();
+    
+    std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>> model = storm::builder::DdPrismModelBuilder<storm::dd::DdType::CUDD, double>().build(program);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition(*model, storm::dd::bisimulation::Partition<storm::dd::DdType::CUDD, double>::create(*model, {"observe0Greater1"}));
+    decomposition.compute();
+}
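
In contrast to the test file added further below, which drives the decomposition via a BisimulationType and optionally a set of formulas, the tests above seed it with an explicit initial partition derived from a set of labels. A minimal sketch of that entry point factored into a helper, assuming the label argument of Partition::create is a std::vector<std::string> and that getQuotient(), used in the other new test file, is also meaningful after a partition-based construction:

#include <memory>
#include <string>
#include <vector>

#include "storm/models/symbolic/Model.h"
#include "storm/storage/dd/BisimulationDecomposition.h"

// Computes the bisimulation quotient w.r.t. an initial partition induced by
// the given labels, mirroring the calls in the tests above.
std::shared_ptr<storm::models::Model<double>> labelQuotient(std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>> const& model, std::vector<std::string> const& labels) {
    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition(*model, storm::dd::bisimulation::Partition<storm::dd::DdType::CUDD, double>::create(*model, labels));
    decomposition.compute();
    return decomposition.getQuotient();
}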
diff --git a/src/test/storm/storage/SymbolicBisimulationDecompositionTest.cpp b/src/test/storm/storage/SymbolicBisimulationDecompositionTest.cpp
new file mode 100644
index 000000000..7c174b018
--- /dev/null
+++ b/src/test/storm/storage/SymbolicBisimulationDecompositionTest.cpp
@@ -0,0 +1,146 @@
+#include "gtest/gtest.h"
+#include "storm-config.h"
+
+#include "storm/parser/PrismParser.h"
+#include "storm/parser/FormulaParser.h"
+
+#include "storm/builder/DdPrismModelBuilder.h"
+
+#include "storm/storage/dd/BisimulationDecomposition.h"
+#include "storm/storage/SymbolicModelDescription.h"
+
+#include "storm/models/sparse/Mdp.h"
+#include "storm/models/sparse/StandardRewardModel.h"
+
+#include "storm/models/symbolic/Mdp.h"
+#include "storm/models/symbolic/StandardRewardModel.h"
+
+TEST(SymbolicModelBisimulationDecomposition, Die_Cudd) {
+    storm::prism::Program program = storm::parser::PrismParser::parse(STORM_TEST_RESOURCES_DIR "/dtmc/die.pm");
+    
+    std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>> model = storm::builder::DdPrismModelBuilder<storm::dd::DdType::CUDD, double>().build(program);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition(*model, storm::storage::BisimulationType::Strong);
+    decomposition.compute();
+    std::shared_ptr<storm::models::Model<double>> quotient = decomposition.getQuotient();
+    
+    EXPECT_EQ(11ul, quotient->getNumberOfStates());
+    EXPECT_EQ(17ul, quotient->getNumberOfTransitions());
+    EXPECT_EQ(storm::models::ModelType::Dtmc, quotient->getType());
+    EXPECT_TRUE(quotient->isSymbolicModel());
+    
+    storm::parser::FormulaParser formulaParser;
+    std::shared_ptr<storm::logic::Formula const> formula = formulaParser.parseSingleFormulaFromString("P=? [F \"two\"]");
+    
+    std::vector<std::shared_ptr<storm::logic::Formula const>> formulas;
+    formulas.push_back(formula);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition2(*model, formulas, storm::storage::BisimulationType::Strong);
+    decomposition2.compute();
+    quotient = decomposition2.getQuotient();
+    
+    EXPECT_EQ(5ul, quotient->getNumberOfStates());
+    EXPECT_EQ(8ul, quotient->getNumberOfTransitions());
+    EXPECT_EQ(storm::models::ModelType::Dtmc, quotient->getType());
+    EXPECT_TRUE(quotient->isSymbolicModel());
+}
+
+TEST(SymbolicModelBisimulationDecomposition, Crowds_Cudd) {
+    storm::storage::SymbolicModelDescription smd = storm::parser::PrismParser::parse(STORM_TEST_RESOURCES_DIR "/dtmc/crowds5_5.pm");
+    
+    // Preprocess the model to substitute all constants.
+    smd = smd.preprocess();
+    
+    std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>> model = storm::builder::DdPrismModelBuilder<storm::dd::DdType::CUDD, double>().build(smd.asPrismProgram());
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition(*model, storm::storage::BisimulationType::Strong);
+    decomposition.compute();
+    std::shared_ptr<storm::models::Model<double>> quotient = decomposition.getQuotient();
+    
+    EXPECT_EQ(2007ul, quotient->getNumberOfStates());
+    EXPECT_EQ(3738ul, quotient->getNumberOfTransitions());
+    EXPECT_EQ(storm::models::ModelType::Dtmc, quotient->getType());
+    EXPECT_TRUE(quotient->isSymbolicModel());
+    
+    storm::parser::FormulaParser formulaParser;
+    std::shared_ptr<storm::logic::Formula const> formula = formulaParser.parseSingleFormulaFromString("P=? [F \"observe0Greater1\"]");
+    
+    std::vector<std::shared_ptr<storm::logic::Formula const>> formulas;
+    formulas.push_back(formula);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition2(*model, formulas, storm::storage::BisimulationType::Strong);
+    decomposition2.compute();
+    quotient = decomposition2.getQuotient();
+    
+    EXPECT_EQ(65ul, quotient->getNumberOfStates());
+    EXPECT_EQ(105ul, quotient->getNumberOfTransitions());
+    EXPECT_EQ(storm::models::ModelType::Dtmc, quotient->getType());
+    EXPECT_TRUE(quotient->isSymbolicModel());
+}
+
+TEST(SymbolicModelBisimulationDecomposition, TwoDice_Cudd) {
+    storm::prism::Program program = storm::parser::PrismParser::parse(STORM_TEST_RESOURCES_DIR "/mdp/two_dice.nm");
+
+    std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>> model = storm::builder::DdPrismModelBuilder<storm::dd::DdType::CUDD, double>().build(program);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition(*model, storm::storage::BisimulationType::Strong);
+    decomposition.compute();
+    std::shared_ptr<storm::models::Model<double>> quotient = decomposition.getQuotient();
+
+    EXPECT_EQ(77ul, quotient->getNumberOfStates());
+    EXPECT_EQ(210ul, quotient->getNumberOfTransitions());
+    EXPECT_EQ(storm::models::ModelType::Mdp, quotient->getType());
+    EXPECT_TRUE(quotient->isSymbolicModel());
+    EXPECT_EQ(116ul, (quotient->as<storm::models::symbolic::Mdp<storm::dd::DdType::CUDD, double>>()->getNumberOfChoices()));
+
+    storm::parser::FormulaParser formulaParser;
+    std::shared_ptr<storm::logic::Formula const> formula = formulaParser.parseSingleFormulaFromString("Pmin=? [F \"two\"]");
+    
+    std::vector<std::shared_ptr<storm::logic::Formula const>> formulas;
+    formulas.push_back(formula);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition2(*model, formulas, storm::storage::BisimulationType::Strong);
+    decomposition2.compute();
+    quotient = decomposition2.getQuotient();
+    
+    EXPECT_EQ(19ul, quotient->getNumberOfStates());
+    EXPECT_EQ(58ul, quotient->getNumberOfTransitions());
+    EXPECT_EQ(storm::models::ModelType::Mdp, quotient->getType());
+    EXPECT_TRUE(quotient->isSymbolicModel());
+    EXPECT_EQ(34ul, (quotient->as<storm::models::symbolic::Mdp<storm::dd::DdType::CUDD, double>>()->getNumberOfChoices()));
+}
+
+TEST(SymbolicModelBisimulationDecomposition, AsynchronousLeader_Cudd) {
+    storm::storage::SymbolicModelDescription smd = storm::parser::PrismParser::parse(STORM_TEST_RESOURCES_DIR "/mdp/leader4.nm");
+    
+    // Preprocess the model to substitute all constants.
+    smd = smd.preprocess();
+
+    storm::parser::FormulaParser formulaParser;
+    std::shared_ptr<storm::logic::Formula const> formula = formulaParser.parseSingleFormulaFromString("Rmax=? [F \"elected\"]");
+    
+    std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>> model = storm::builder::DdPrismModelBuilder<storm::dd::DdType::CUDD, double>().build(smd.asPrismProgram(), *formula);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition(*model, storm::storage::BisimulationType::Strong);
+    decomposition.compute();
+    std::shared_ptr<storm::models::Model<double>> quotient = decomposition.getQuotient();
+    
+    EXPECT_EQ(252ul, quotient->getNumberOfStates());
+    EXPECT_EQ(624ul, quotient->getNumberOfTransitions());
+    EXPECT_EQ(storm::models::ModelType::Mdp, quotient->getType());
+    EXPECT_TRUE(quotient->isSymbolicModel());
+    EXPECT_EQ(500ul, (quotient->as<storm::models::symbolic::Mdp<storm::dd::DdType::CUDD, double>>()->getNumberOfChoices()));
+    
+    std::vector<std::shared_ptr<storm::logic::Formula const>> formulas;
+    formulas.push_back(formula);
+    
+    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition2(*model, formulas, storm::storage::BisimulationType::Strong);
+    decomposition2.compute();
+    quotient = decomposition2.getQuotient();
+    
+    EXPECT_EQ(252ul, quotient->getNumberOfStates());
+    EXPECT_EQ(624ul, quotient->getNumberOfTransitions());
+    EXPECT_EQ(storm::models::ModelType::Mdp, quotient->getType());
+    EXPECT_TRUE(quotient->isSymbolicModel());
+    EXPECT_EQ(500ul, (quotient->as<storm::models::symbolic::Mdp<storm::dd::DdType::CUDD, double>>()->getNumberOfChoices()));
+}
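
The four tests above all exercise the same pipeline: parse the PRISM file, preprocess it to substitute constants where needed, build a symbolic model with DdPrismModelBuilder, run the BisimulationDecomposition, and inspect the quotient. A minimal sketch of that pipeline factored into one helper, using only the calls from the new test file (the PRISM path is supplied by the caller, and the settings/test-environment initialization performed by the test driver is assumed):

#include <memory>
#include <string>

#include "storm/parser/PrismParser.h"
#include "storm/storage/SymbolicModelDescription.h"
#include "storm/builder/DdPrismModelBuilder.h"
#include "storm/storage/dd/BisimulationDecomposition.h"
#include "storm/models/symbolic/Mdp.h"
#include "storm/models/symbolic/StandardRewardModel.h"

// Parses and builds the given PRISM model symbolically, then returns its
// strong-bisimulation quotient.
std::shared_ptr<storm::models::Model<double>> strongQuotientOf(std::string const& prismFile) {
    storm::storage::SymbolicModelDescription smd = storm::parser::PrismParser::parse(prismFile);
    // Preprocess the model description to substitute all constants.
    smd = smd.preprocess();
    std::shared_ptr<storm::models::symbolic::Model<storm::dd::DdType::CUDD, double>> model = storm::builder::DdPrismModelBuilder<storm::dd::DdType::CUDD, double>().build(smd.asPrismProgram());
    storm::dd::BisimulationDecomposition<storm::dd::DdType::CUDD, double> decomposition(*model, storm::storage::BisimulationType::Strong);
    decomposition.compute();
    return decomposition.getQuotient();
}

For instance, strongQuotientOf(STORM_TEST_RESOURCES_DIR "/dtmc/die.pm") would reproduce the 11-state, 17-transition quotient checked in the Die_Cudd test above.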
diff --git a/travis/mtime_cache/globs.txt b/travis/mtime_cache/globs.txt
index cd0d4d2ff..a09cba27a 100644
--- a/travis/mtime_cache/globs.txt
+++ b/travis/mtime_cache/globs.txt
@@ -1,4 +1,5 @@
 src/**/*.{%{cpp}}
 src/**/CMakeLists.txt
+CMakeLists.txt
 resources/3rdparty/**/*.{%{cpp}}
-resources/3rdparty/eigen-3.3-beta1/StormEigen/*
+resources/3rdparty/eigen-3.3-beta1/StormEigen/**/*