
more work on sylvan ODD-related stuff

Former-commit-id: 142f57620a
Branch: tempestpy_adaptions
dehnert committed 9 years ago
commit 36a6e9e76e
14 changed files (changed-line counts in parentheses):
  1. resources/3rdparty/sylvan/src/sylvan_bdd.h (2)
  2. resources/3rdparty/sylvan/src/sylvan_bdd_storm.h (3)
  3. resources/3rdparty/sylvan/src/sylvan_mtbdd.c (2)
  4. resources/3rdparty/sylvan/src/sylvan_mtbdd.h (2)
  5. resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c (11)
  6. resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h (4)
  7. src/storage/dd/cudd/InternalCuddAdd.cpp (6)
  8. src/storage/dd/cudd/InternalCuddBdd.cpp (18)
  9. src/storage/dd/sylvan/InternalSylvanAdd.cpp (299)
  10. src/storage/dd/sylvan/InternalSylvanAdd.h (124)
  11. src/storage/dd/sylvan/InternalSylvanBdd.cpp (178)
  12. src/storage/dd/sylvan/InternalSylvanBdd.h (64)
  13. test/functional/storage/CuddDdTest.cpp (7)
  14. test/functional/storage/SylvanDdTest.cpp (133)

resources/3rdparty/sylvan/src/sylvan_bdd.h (2 changed lines)

@@ -414,6 +414,8 @@ bdd_refs_sync(BDD result)
return result;
}
#include "sylvan_bdd_storm.h"
#ifdef __cplusplus
}
#endif /* __cplusplus */

resources/3rdparty/sylvan/src/sylvan_bdd_storm.h (3 changed lines)

@@ -0,0 +1,3 @@
#define bdd_isnegated(dd) ((dd & sylvan_complement) ? 1 : 0)
#define bdd_regular(dd) (dd & ~sylvan_complement)
#define bdd_isterminal(dd) (dd == sylvan_false || dd == sylvan_true)
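
Note: these macros mirror CUDD's complement-edge helpers. An edge whose sylvan_complement bit is set denotes the negation of the function rooted at the regular (bit-cleared) node, and sylvan_true is sylvan_false with the complement bit set. A minimal traversal sketch (not part of the commit; assumes only the macros above and Sylvan's sylvan_low/sylvan_high accessors):

void visit(BDD dd, int negated) {
    negated ^= bdd_isnegated(dd); /* absorb this edge's complement bit */
    dd = bdd_regular(dd);         /* continue at the regular node */
    if (bdd_isterminal(dd)) {
        /* the regular terminal is sylvan_false, so the value here is 'negated' */
        return;
    }
    visit(sylvan_low(dd), negated);
    visit(sylvan_high(dd), negated);
}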

resources/3rdparty/sylvan/src/sylvan_mtbdd.c (2 changed lines)

@@ -2736,4 +2736,4 @@ mtbdd_map_removeall(MTBDDMAP map, MTBDD variables)
}
}
#include "sylvan_storm_custom.c"
#include "sylvan_mtbdd_storm.c"

resources/3rdparty/sylvan/src/sylvan_mtbdd.h (2 changed lines)

@@ -583,7 +583,7 @@ mtbdd_refs_sync(MTBDD result)
return result;
}
#include "sylvan_storm_custom.h"
#include "sylvan_mtbdd_storm.h"
#ifdef __cplusplus
}

resources/3rdparty/sylvan/src/sylvan_storm_custom.c → resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.c (11 changed lines)

@@ -511,4 +511,15 @@ TASK_IMPL_2(double, mtbdd_non_zero_count, MTBDD, dd, size_t, nvars)
cache_put3(CACHE_MTBDD_NONZERO_COUNT, dd, 0, nvars, hack.s);
return hack.d;
}
int mtbdd_iszero(MTBDD dd) {
if (mtbdd_gettype(dd) == 0) {
return mtbdd_getuint64(dd) == 0;
} else if (mtbdd_gettype(dd) == 1) {
return mtbdd_getdouble(dd) == 0;
} else if (mtbdd_gettype(dd) == 2) {
return mtbdd_getnumer(dd) == 0;
}
return 0;
}
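
Note: mtbdd_iszero dispatches on Sylvan's built-in leaf types (0: 64-bit integer, 1: double, 2: fraction, whose numerator mtbdd_getnumer reads). A small usage sketch (hedged; mtbdd_uint64 and mtbdd_double are the leaf constructors used elsewhere in this commit, mtbdd_fraction is standard Sylvan):

#include <assert.h>

void zero_leaf_checks(void) {
    MTBDD i = mtbdd_uint64(0);      /* type 0: unsigned integer leaf */
    MTBDD d = mtbdd_double(0.0);    /* type 1: double leaf */
    MTBDD f = mtbdd_fraction(0, 1); /* type 2: fraction leaf 0/1 */
    assert(mtbdd_iszero(i) && mtbdd_iszero(d) && mtbdd_iszero(f));
    assert(!mtbdd_iszero(mtbdd_double(0.5)));
}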

resources/3rdparty/sylvan/src/sylvan_storm_custom.h → resources/3rdparty/sylvan/src/sylvan_mtbdd_storm.h (4 changed lines)

@@ -100,3 +100,7 @@ TASK_DECL_1(MTBDD, mtbdd_bool_to_double, MTBDD)
TASK_DECL_2(double, mtbdd_non_zero_count, MTBDD, size_t);
#define mtbdd_non_zero_count(dd, nvars) CALL(mtbdd_non_zero_count, dd, nvars)
// Checks whether the given MTBDD represents a zero leaf.
int mtbdd_iszero(MTBDD);
#define mtbdd_regular(dd) (dd & ~mtbdd_complement)

src/storage/dd/cudd/InternalCuddAdd.cpp (6 changed lines)

@@ -374,13 +374,13 @@ namespace storm {
return std::make_shared<Odd>(elseNode, elseNode->getElseOffset() + elseNode->getThenOffset(), thenNode, thenNode->getElseOffset() + thenNode->getThenOffset());
} else {
// Otherwise, we compute the ODDs for both the then- and else successors.
bool elseComplemented = Cudd_IsComplement(Cudd_E(dd));
bool thenComplemented = Cudd_IsComplement(Cudd_T(dd));
std::shared_ptr<Odd> elseNode = createOddRec(Cudd_E(dd), manager, currentLevel + 1, maxLevel, ddVariableIndices, uniqueTableForLevels);
std::shared_ptr<Odd> thenNode = createOddRec(Cudd_T(dd), manager, currentLevel + 1, maxLevel, ddVariableIndices, uniqueTableForLevels);
uint_fast64_t totalElseOffset = elseNode->getElseOffset() + elseNode->getThenOffset();
uint_fast64_t totalThenOffset = thenNode->getElseOffset() + thenNode->getThenOffset();
return std::make_shared<Odd>(elseNode, elseComplemented ? (1ull << (maxLevel - currentLevel - 1)) - totalElseOffset : totalElseOffset, thenNode, thenComplemented ? (1ull << (maxLevel - currentLevel - 1)) - totalThenOffset : totalThenOffset);
return std::make_shared<Odd>(elseNode, totalElseOffset, thenNode, totalThenOffset);
}
}
}
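
Note: the removed branch implemented the usual complement-counting identity. If the subtree below the current level ranges over maxLevel - currentLevel - 1 remaining variables and the regular successor admits n encodings, the complemented edge admits

    2^(maxLevel - currentLevel - 1) - n

encodings. CUDD uses complement edges only on BDDs, never on ADD nodes, which is presumably why the adjustment could be dropped here.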

src/storage/dd/cudd/InternalCuddBdd.cpp (18 changed lines)

@@ -248,26 +248,26 @@ namespace storm {
Cudd_Ref(thenSuccessor);
// Create a node representing ITE(currentVar, thenSuccessor, elseSuccessor);
DdNode* result = Cudd_bddIthVar(manager, static_cast<int>(ddVariableIndices[currentLevel]));
DdNode* currentVar = Cudd_bddIthVar(manager, static_cast<int>(ddVariableIndices[currentLevel]));
Cudd_Ref(currentVar);
DdNode* result = Cudd_bddIte(manager, currentVar, thenSuccessor, elseSuccessor);
Cudd_Ref(result);
DdNode* newResult = Cudd_bddIte(manager, result, thenSuccessor, elseSuccessor);
Cudd_Ref(newResult);
// Dispose of the intermediate results
Cudd_RecursiveDeref(manager, result);
Cudd_RecursiveDeref(manager, currentVar);
Cudd_RecursiveDeref(manager, thenSuccessor);
Cudd_RecursiveDeref(manager, elseSuccessor);
// Before returning, we remove the protection imposed by the previous call to Cudd_Ref.
Cudd_Deref(newResult);
Cudd_Deref(result);
return newResult;
return result;
}
}
storm::storage::BitVector InternalBdd<DdType::CUDD>::toVector(storm::dd::Odd const& rowOdd, std::vector<uint_fast64_t> const& ddVariableIndices) const {
storm::storage::BitVector result(rowOdd.getTotalOffset());
this->toVectorRec(this->getCuddDdNode(), ddManager->getCuddManager(), result, rowOdd, Cudd_IsComplement(this->getCuddDdNode()), 0, ddVariableIndices.size(), 0, ddVariableIndices);
this->toVectorRec(Cudd_Regular(this->getCuddDdNode()), ddManager->getCuddManager(), result, rowOdd, Cudd_IsComplement(this->getCuddDdNode()), 0, ddVariableIndices.size(), 0, ddVariableIndices);
return result;
}
@@ -325,7 +325,7 @@ namespace storm {
} else {
// Otherwise, we need to recursively compute the ODD.
// If we are already past the maximal level that is to be considered, we can simply create an Odd without
// If we are already at the maximal level that is to be considered, we can simply create an Odd without
// successors
if (currentLevel == maxLevel) {
uint_fast64_t elseOffset = 0;
@@ -369,7 +369,7 @@ namespace storm {
template<typename ValueType>
void InternalBdd<DdType::CUDD>::filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType> const& sourceValues, std::vector<ValueType>& targetValues) const {
uint_fast64_t currentIndex = 0;
filterExplicitVectorRec(this->getCuddDdNode(), ddManager->getCuddManager(), 0, Cudd_IsComplement(this->getCuddDdNode()), ddVariableIndices.size(), ddVariableIndices, 0, odd, targetValues, currentIndex, sourceValues);
filterExplicitVectorRec(Cudd_Regular(this->getCuddDdNode()), ddManager->getCuddManager(), 0, Cudd_IsComplement(this->getCuddDdNode()), ddVariableIndices.size(), ddVariableIndices, 0, odd, targetValues, currentIndex, sourceValues);
}
template<typename ValueType>

src/storage/dd/sylvan/InternalSylvanAdd.cpp (299 changed lines)

@ -1,10 +1,11 @@
#include "src/storage/dd/sylvan/InternalSylvanAdd.h"
#include <iostream>
#include "src/storage/dd/sylvan/InternalSylvanDdManager.h"
#include "src/storage/SparseMatrix.h"
#include "src/utility/macros.h"
#include "src/utility/constants.h"
#include "src/exceptions/NotImplementedException.h"
namespace storm {
@@ -293,37 +293,317 @@ namespace storm {
template<typename ValueType>
Odd InternalAdd<DdType::Sylvan, ValueType>::createOdd(std::vector<uint_fast64_t> const& ddVariableIndices) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
// Prepare a unique table for each level that keeps the constructed ODD nodes unique.
std::vector<std::unordered_map<BDD, std::shared_ptr<Odd>>> uniqueTableForLevels(ddVariableIndices.size() + 1);
// Now construct the ODD structure from the ADD.
std::shared_ptr<Odd> rootOdd = createOddRec(mtbdd_regular(this->getSylvanMtbdd().GetMTBDD()), 0, ddVariableIndices.size(), ddVariableIndices, uniqueTableForLevels);
// Return a copy of the root node to remove the shared_ptr encapsulation.
return Odd(*rootOdd);
}
template<typename ValueType>
std::shared_ptr<Odd> InternalAdd<DdType::Sylvan, ValueType>::createOddRec(BDD dd, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<std::unordered_map<BDD, std::shared_ptr<Odd>>>& uniqueTableForLevels) {
// Check whether the ODD for this node has already been computed (for this level) and if so, return this instead.
auto const& iterator = uniqueTableForLevels[currentLevel].find(dd);
if (iterator != uniqueTableForLevels[currentLevel].end()) {
return iterator->second;
} else {
// Otherwise, we need to recursively compute the ODD.
// If we are already at the maximal level that is to be considered, we can simply create an Odd without
// successors
if (currentLevel == maxLevel) {
uint_fast64_t elseOffset = 0;
uint_fast64_t thenOffset = 0;
STORM_LOG_ASSERT(mtbdd_isleaf(dd), "Expected leaf at last level.");
// If the DD is not the zero leaf, then the then-offset is 1.
if (!mtbdd_iszero(dd)) {
thenOffset = 1;
}
return std::make_shared<Odd>(nullptr, elseOffset, nullptr, thenOffset);
} else if (mtbdd_isleaf(dd) || ddVariableIndices[currentLevel] < mtbdd_getvar(dd)) {
// If we skipped the level in the DD, we compute the ODD just for the else-successor and use the same
// node for the then-successor as well.
std::shared_ptr<Odd> elseNode = createOddRec(dd, currentLevel + 1, maxLevel, ddVariableIndices, uniqueTableForLevels);
std::shared_ptr<Odd> thenNode = elseNode;
return std::make_shared<Odd>(elseNode, elseNode->getElseOffset() + elseNode->getThenOffset(), thenNode, thenNode->getElseOffset() + thenNode->getThenOffset());
} else {
// Otherwise, we compute the ODDs for both the then- and else successors.
std::shared_ptr<Odd> elseNode = createOddRec(mtbdd_regular(mtbdd_getlow(dd)), currentLevel + 1, maxLevel, ddVariableIndices, uniqueTableForLevels);
std::shared_ptr<Odd> thenNode = createOddRec(mtbdd_regular(mtbdd_gethigh(dd)), currentLevel + 1, maxLevel, ddVariableIndices, uniqueTableForLevels);
uint_fast64_t totalElseOffset = elseNode->getElseOffset() + elseNode->getThenOffset();
uint_fast64_t totalThenOffset = thenNode->getElseOffset() + thenNode->getThenOffset();
return std::make_shared<Odd>(elseNode, totalElseOffset, thenNode, totalThenOffset);
}
}
}
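
Note: the offsets stored in each Odd node make the ODD an index translator: the dense index of an encoding is the sum of the else-offsets skipped at every level where the then-branch is taken. A hypothetical helper (not in the commit) illustrating this with the Odd accessors used above:

uint_fast64_t encodingToIndex(storm::dd::Odd const& root, std::vector<bool> const& encoding) {
    uint_fast64_t index = 0;
    storm::dd::Odd const* current = &root;
    for (bool bit : encoding) {
        if (bit) {
            // Taking the then-branch skips all encodings below the else-branch.
            index += current->getElseOffset();
            current = &current->getThenSuccessor();
        } else {
            current = &current->getElseSuccessor();
        }
    }
    return index;
}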
template<typename ValueType>
void InternalAdd<DdType::Sylvan, ValueType>::composeWithExplicitVector(storm::dd::Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType>& targetVector, std::function<ValueType (ValueType const&, ValueType const&)> const& function) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
composeWithExplicitVectorRec(this->getSylvanMtbdd().GetMTBDD(), nullptr, 0, ddVariableIndices.size(), 0, odd, ddVariableIndices, targetVector, function);
}
template<typename ValueType>
void InternalAdd<DdType::Sylvan, ValueType>::composeWithExplicitVector(storm::dd::Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<uint_fast64_t> const& offsets, std::vector<ValueType>& targetVector, std::function<ValueType (ValueType const&, ValueType const&)> const& function) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
composeWithExplicitVectorRec(mtbdd_regular(this->getSylvanMtbdd().GetMTBDD()), &offsets, 0, ddVariableIndices.size(), 0, odd, ddVariableIndices, targetVector, function);
}
template<typename ValueType>
void InternalAdd<DdType::Sylvan, ValueType>::composeWithExplicitVectorRec(MTBDD dd, std::vector<uint_fast64_t> const* offsets, uint_fast64_t currentLevel, uint_fast64_t maxLevel, uint_fast64_t currentOffset, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType>& targetVector, std::function<ValueType (ValueType const&, ValueType const&)> const& function) const {
// For the empty DD, we do not need to add any entries.
if (mtbdd_isleaf(dd) && mtbdd_iszero(dd)) {
return;
}
// If we are at the maximal level, the value to be set is stored as a constant in the DD.
if (currentLevel == maxLevel) {
ValueType& targetValue = targetVector[offsets != nullptr ? (*offsets)[currentOffset] : currentOffset];
targetValue = function(targetValue, getValue(dd));
} else if (mtbdd_isleaf(dd) || ddVariableIndices[currentLevel] < mtbdd_getvar(dd)) {
// If we skipped a level, we need to enumerate the explicit entries for the case in which the bit is set
// and for the one in which it is not set.
composeWithExplicitVectorRec(dd, offsets, currentLevel + 1, maxLevel, currentOffset, odd.getElseSuccessor(), ddVariableIndices, targetVector, function);
composeWithExplicitVectorRec(dd, offsets, currentLevel + 1, maxLevel, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), ddVariableIndices, targetVector, function);
} else {
// Otherwise, we simply recursively call the function for both (different) cases.
composeWithExplicitVectorRec(mtbdd_regular(mtbdd_getlow(dd)), offsets, currentLevel + 1, maxLevel, currentOffset, odd.getElseSuccessor(), ddVariableIndices, targetVector, function);
composeWithExplicitVectorRec(mtbdd_regular(mtbdd_gethigh(dd)), offsets, currentLevel + 1, maxLevel, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), ddVariableIndices, targetVector, function);
}
}
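
Note: a usage sketch for the composition entry point above (hedged; 'internalAdd', 'odd' and 'ddVariableIndices' are assumed to be set up consistently, and <functional> provides std::plus). It accumulates the ADD's values into an explicit vector indexed by the ODD:

std::vector<double> explicitVector(odd.getTotalOffset(), 0.0);
internalAdd.composeWithExplicitVector(odd, ddVariableIndices, explicitVector, std::plus<double>());
// explicitVector[i] now holds the value of the i-th encoding w.r.t. the ODD.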
template<typename ValueType>
std::vector<InternalAdd<DdType::Sylvan, ValueType>> InternalAdd<DdType::Sylvan, ValueType>::splitIntoGroups(std::vector<uint_fast64_t> const& ddGroupVariableIndices) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
std::vector<InternalAdd<DdType::Sylvan, ValueType>> result;
splitIntoGroupsRec(mtbdd_regular(this->getSylvanMtbdd().GetMTBDD()), mtbdd_isnegated(this->getSylvanMtbdd().GetMTBDD()), result, ddGroupVariableIndices, 0, ddGroupVariableIndices.size());
return result;
}
template<typename ValueType>
std::vector<std::pair<InternalAdd<DdType::Sylvan, ValueType>, InternalAdd<DdType::Sylvan, ValueType>>> InternalAdd<DdType::Sylvan, ValueType>::splitIntoGroups(InternalAdd<DdType::Sylvan, ValueType> vector, std::vector<uint_fast64_t> const& ddGroupVariableIndices) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
std::vector<std::pair<InternalAdd<DdType::Sylvan, ValueType>, InternalAdd<DdType::Sylvan, ValueType>>> result;
splitIntoGroupsRec(mtbdd_regular(this->getSylvanMtbdd().GetMTBDD()), mtbdd_isnegated(this->getSylvanMtbdd().GetMTBDD()), mtbdd_regular(vector.getSylvanMtbdd().GetMTBDD()), mtbdd_isnegated(vector.getSylvanMtbdd().GetMTBDD()), result, ddGroupVariableIndices, 0, ddGroupVariableIndices.size());
return result;
}
template<typename ValueType>
void InternalAdd<DdType::Sylvan, ValueType>::splitIntoGroupsRec(MTBDD dd, bool negated, std::vector<InternalAdd<DdType::Sylvan, ValueType>>& groups, std::vector<uint_fast64_t> const& ddGroupVariableIndices, uint_fast64_t currentLevel, uint_fast64_t maxLevel) const {
// For the empty DD, we do not need to create a group.
if (mtbdd_isleaf(dd) && mtbdd_iszero(dd)) {
return;
}
if (currentLevel == maxLevel) {
groups.push_back(InternalAdd<DdType::Sylvan, ValueType>(ddManager, sylvan::Mtbdd(negated ? mtbdd_negate(dd) : dd)));
} else if (mtbdd_isleaf(dd) || ddGroupVariableIndices[currentLevel] < mtbdd_getvar(dd)) {
splitIntoGroupsRec(dd, negated, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
splitIntoGroupsRec(dd, negated, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
} else {
// Otherwise, we compute the ODDs for both the then- and else successors.
MTBDD thenDdNode = mtbdd_gethigh(dd);
MTBDD elseDdNode = mtbdd_getlow(dd);
// Determine whether we have to evaluate the successors as if they were complemented.
bool elseComplemented = mtbdd_isnegated(elseDdNode) ^ negated;
bool thenComplemented = mtbdd_isnegated(thenDdNode) ^ negated;
splitIntoGroupsRec(mtbdd_regular(elseDdNode), elseComplemented, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
splitIntoGroupsRec(mtbdd_regular(thenDdNode), thenComplemented, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
}
}
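
Note: a usage sketch for the splitting above (hedged; 'transitionAdd' and 'groupVariableIndices' are assumed). It decomposes, e.g., an MDP transition ADD into one ADD per nondeterministic choice:

std::vector<InternalAdd<DdType::Sylvan, double>> groups = transitionAdd.splitIntoGroups(groupVariableIndices);
// Encodings whose sub-DD is the zero leaf contribute no group.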
template<typename ValueType>
void InternalAdd<DdType::Sylvan, ValueType>::splitIntoGroupsRec(MTBDD dd1, bool negated1, MTBDD dd2, bool negated2, std::vector<std::pair<InternalAdd<DdType::Sylvan, ValueType>, InternalAdd<DdType::Sylvan, ValueType>>>& groups, std::vector<uint_fast64_t> const& ddGroupVariableIndices, uint_fast64_t currentLevel, uint_fast64_t maxLevel) const {
// For the empty DD, we do not need to create a group.
if (mtbdd_isleaf(dd1) && mtbdd_isleaf(dd2) && mtbdd_iszero(dd1) && mtbdd_iszero(dd2)) {
return;
}
if (currentLevel == maxLevel) {
groups.push_back(std::make_pair(InternalAdd<DdType::Sylvan, ValueType>(ddManager, sylvan::Mtbdd(negated1 ? mtbdd_negate(dd1) : dd1)), InternalAdd<DdType::Sylvan, ValueType>(ddManager, sylvan::Mtbdd(negated2 ? mtbdd_negate(dd2) : dd2))));
} else if (mtbdd_isleaf(dd1) || ddGroupVariableIndices[currentLevel] < mtbdd_getvar(dd1)) {
if (mtbdd_isleaf(dd2) || ddGroupVariableIndices[currentLevel] < mtbdd_getvar(dd2)) {
splitIntoGroupsRec(dd1, negated1, dd2, negated2, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
splitIntoGroupsRec(dd1, negated1, dd2, negated2, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
} else {
MTBDD dd2ThenNode = mtbdd_gethigh(dd2);
MTBDD dd2ElseNode = mtbdd_getlow(dd2);
// Determine whether we have to evaluate the successors as if they were complemented.
bool elseComplemented = mtbdd_isnegated(dd2ElseNode) ^ negated2;
bool thenComplemented = mtbdd_isnegated(dd2ThenNode) ^ negated2;
splitIntoGroupsRec(dd1, negated1, mtbdd_regular(dd2ThenNode), thenComplemented, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
splitIntoGroupsRec(dd1, negated1, mtbdd_regular(dd2ElseNode), elseComplemented, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
}
} else if (mtbdd_isleaf(dd2) || ddGroupVariableIndices[currentLevel] < mtbdd_getvar(dd2)) {
MTBDD dd1ThenNode = mtbdd_gethigh(dd1);
MTBDD dd1ElseNode = mtbdd_getlow(dd1);
// Determine whether we have to evaluate the successors as if they were complemented.
bool elseComplemented = mtbdd_isnegated(dd1ElseNode) ^ negated1;
bool thenComplemented = mtbdd_isnegated(dd1ThenNode) ^ negated1;
splitIntoGroupsRec(mtbdd_regular(dd1ThenNode), thenComplemented, dd2, negated2, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
splitIntoGroupsRec(mtbdd_regular(dd1ElseNode), elseComplemented, dd2, negated2, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
} else {
MTBDD dd1ThenNode = mtbdd_gethigh(dd1);
MTBDD dd1ElseNode = mtbdd_getlow(dd1);
MTBDD dd2ThenNode = mtbdd_gethigh(dd2);
MTBDD dd2ElseNode = mtbdd_getlow(dd2);
// Determine whether we have to evaluate the successors as if they were complemented.
bool dd1ElseComplemented = mtbdd_isnegated(dd1ElseNode) ^ negated1;
bool dd1ThenComplemented = mtbdd_isnegated(dd1ThenNode) ^ negated1;
bool dd2ElseComplemented = mtbdd_isnegated(dd2ElseNode) ^ negated2;
bool dd2ThenComplemented = mtbdd_isnegated(dd2ThenNode) ^ negated2;
splitIntoGroupsRec(mtbdd_regular(dd1ThenNode), dd1ThenComplemented, mtbdd_regular(dd2ThenNode), dd2ThenComplemented, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
splitIntoGroupsRec(mtbdd_regular(dd1ElseNode), dd1ElseComplemented, mtbdd_regular(dd2ElseNode), dd2ElseComplemented, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel);
}
}
template<typename ValueType>
void InternalAdd<DdType::Sylvan, ValueType>::toMatrixComponents(std::vector<uint_fast64_t> const& rowGroupIndices, std::vector<uint_fast64_t>& rowIndications, std::vector<storm::storage::MatrixEntry<uint_fast64_t, ValueType>>& columnsAndValues, Odd const& rowOdd, Odd const& columnOdd, std::vector<uint_fast64_t> const& ddRowVariableIndices, std::vector<uint_fast64_t> const& ddColumnVariableIndices, bool writeValues) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
return toMatrixComponentsRec(mtbdd_regular(this->getSylvanMtbdd().GetMTBDD()), mtbdd_isnegated(this->getSylvanMtbdd().GetMTBDD()), rowGroupIndices, rowIndications, columnsAndValues, rowOdd, columnOdd, 0, 0, ddRowVariableIndices.size() + ddColumnVariableIndices.size(), 0, 0, ddRowVariableIndices, ddColumnVariableIndices, writeValues);
}
template<typename ValueType>
void InternalAdd<DdType::Sylvan, ValueType>::toMatrixComponentsRec(MTBDD dd, bool negated, std::vector<uint_fast64_t> const& rowGroupOffsets, std::vector<uint_fast64_t>& rowIndications, std::vector<storm::storage::MatrixEntry<uint_fast64_t, ValueType>>& columnsAndValues, Odd const& rowOdd, Odd const& columnOdd, uint_fast64_t currentRowLevel, uint_fast64_t currentColumnLevel, uint_fast64_t maxLevel, uint_fast64_t currentRowOffset, uint_fast64_t currentColumnOffset, std::vector<uint_fast64_t> const& ddRowVariableIndices, std::vector<uint_fast64_t> const& ddColumnVariableIndices, bool generateValues) const {
// For the empty DD, we do not need to add any entries.
if (mtbdd_isleaf(dd) && mtbdd_iszero(dd)) {
return;
}
// If we are at the maximal level, the value to be set is stored as a constant in the DD.
if (currentRowLevel + currentColumnLevel == maxLevel) {
if (generateValues) {
columnsAndValues[rowIndications[rowGroupOffsets[currentRowOffset]]] = storm::storage::MatrixEntry<uint_fast64_t, ValueType>(currentColumnOffset, negated ? -getValue(dd) : getValue(dd));
}
++rowIndications[rowGroupOffsets[currentRowOffset]];
} else {
MTBDD elseElse;
MTBDD elseThen;
MTBDD thenElse;
MTBDD thenThen;
if (mtbdd_isleaf(dd) || ddColumnVariableIndices[currentColumnLevel] < mtbdd_getvar(dd)) {
elseElse = elseThen = thenElse = thenThen = dd;
} else if (ddRowVariableIndices[currentRowLevel] < mtbdd_getvar(dd)) {
elseElse = thenElse = mtbdd_getlow(dd);
elseThen = thenThen = mtbdd_gethigh(dd);
} else {
MTBDD elseNode = mtbdd_getlow(dd);
if (mtbdd_isleaf(elseNode) || ddColumnVariableIndices[currentColumnLevel] < mtbdd_getvar(elseNode)) {
elseElse = elseThen = elseNode;
} else {
elseElse = mtbdd_getlow(elseNode);
elseThen = mtbdd_gethigh(elseNode);
}
MTBDD thenNode = mtbdd_gethigh(dd);
if (mtbdd_isleaf(thenNode) || ddColumnVariableIndices[currentColumnLevel] < mtbdd_getvar(thenNode)) {
thenElse = thenThen = thenNode;
} else {
thenElse = mtbdd_getlow(thenNode);
thenThen = mtbdd_gethigh(thenNode);
}
}
// Visit else-else.
toMatrixComponentsRec(mtbdd_regular(elseElse), mtbdd_isnegated(elseElse) ^ negated, rowGroupOffsets, rowIndications, columnsAndValues, rowOdd.getElseSuccessor(), columnOdd.getElseSuccessor(), currentRowLevel + 1, currentColumnLevel + 1, maxLevel, currentRowOffset, currentColumnOffset, ddRowVariableIndices, ddColumnVariableIndices, generateValues);
// Visit else-then.
toMatrixComponentsRec(mtbdd_regular(elseThen), mtbdd_isnegated(elseThen) ^ negated, rowGroupOffsets, rowIndications, columnsAndValues, rowOdd.getElseSuccessor(), columnOdd.getThenSuccessor(), currentRowLevel + 1, currentColumnLevel + 1, maxLevel, currentRowOffset, currentColumnOffset + columnOdd.getElseOffset(), ddRowVariableIndices, ddColumnVariableIndices, generateValues);
// Visit then-else.
toMatrixComponentsRec(mtbdd_regular(thenElse), mtbdd_isnegated(thenElse) ^ negated, rowGroupOffsets, rowIndications, columnsAndValues, rowOdd.getThenSuccessor(), columnOdd.getElseSuccessor(), currentRowLevel + 1, currentColumnLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), currentColumnOffset, ddRowVariableIndices, ddColumnVariableIndices, generateValues);
// Visit then-then.
toMatrixComponentsRec(mtbdd_regular(thenThen), mtbdd_isnegated(thenThen) ^ negated, rowGroupOffsets, rowIndications, columnsAndValues, rowOdd.getThenSuccessor(), columnOdd.getThenSuccessor(), currentRowLevel + 1, currentColumnLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), currentColumnOffset + columnOdd.getElseOffset(), ddRowVariableIndices, ddColumnVariableIndices, generateValues);
}
}
template<typename ValueType>
InternalAdd<DdType::Sylvan, ValueType> InternalAdd<DdType::Sylvan, ValueType>::fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, std::vector<ValueType> const& values, storm::dd::Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices) {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
uint_fast64_t offset = 0;
return InternalAdd<DdType::Sylvan, ValueType>(ddManager, sylvan::Mtbdd(fromVectorRec(offset, 0, ddVariableIndices.size(), values, odd, ddVariableIndices)));
}
template<typename ValueType>
MTBDD InternalAdd<DdType::Sylvan, ValueType>::fromVectorRec(uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices) {
if (currentLevel == maxLevel) {
// If we are in a terminal node of the ODD, we need to check whether the then-offset of the ODD is one
// (meaning the encoding is a valid one) or zero (meaning the encoding is not valid). Consequently, we
// need to copy the next value of the vector iff the then-offset is greater than zero.
if (odd.getThenOffset() > 0) {
return getLeaf(values[currentOffset++]);
} else {
return getLeaf(storm::utility::zero<ValueType>());
}
} else {
// If the total offset is zero, we can just return the constant zero DD.
if (odd.getThenOffset() + odd.getElseOffset() == 0) {
return getLeaf(storm::utility::zero<ValueType>());
}
// Determine the new else-successor.
MTBDD elseSuccessor;
if (odd.getElseOffset() > 0) {
elseSuccessor = fromVectorRec(currentOffset, currentLevel + 1, maxLevel, values, odd.getElseSuccessor(), ddVariableIndices);
} else {
elseSuccessor = getLeaf(storm::utility::zero<ValueType>());
}
mtbdd_refs_push(elseSuccessor);
// Determine the new then-successor.
MTBDD thenSuccessor;
if (odd.getThenOffset() > 0) {
thenSuccessor = fromVectorRec(currentOffset, currentLevel + 1, maxLevel, values, odd.getThenSuccessor(), ddVariableIndices);
} else {
thenSuccessor = getLeaf(storm::utility::zero<ValueType>());
}
mtbdd_refs_push(thenSuccessor);
// Create a node representing ITE(currentVar, thenSuccessor, elseSuccessor);
MTBDD currentVar = mtbdd_makenode(ddVariableIndices[currentLevel], mtbdd_false, mtbdd_true);
mtbdd_refs_push(currentVar);
LACE_ME;
MTBDD result = mtbdd_ite(currentVar, thenSuccessor, elseSuccessor);
// Dispose of the intermediate results
mtbdd_refs_pop(3);
return result;
}
}
template<typename ValueType>
MTBDD InternalAdd<DdType::Sylvan, ValueType>::getLeaf(double value) {
return mtbdd_double(value);
}
template<typename ValueType>
MTBDD InternalAdd<DdType::Sylvan, ValueType>::getLeaf(uint_fast64_t value) {
return mtbdd_uint64(value);
}
template<typename ValueType>
ValueType InternalAdd<DdType::Sylvan, ValueType>::getValue(MTBDD const& node) {
STORM_LOG_ASSERT(mtbdd_isleaf(node), "Expected leaf.");
if (std::is_same<ValueType, double>::value) {
STORM_LOG_ASSERT(mtbdd_gettype(node) == 1, "Expected a double value.");
return mtbdd_getdouble(node);
} else if (std::is_same<ValueType, uint_fast64_t>::value) {
STORM_LOG_ASSERT(mtbdd_gettype(node) == 0, "Expected an unsigned value.");
return mtbdd_getuint64(node);
} else {
STORM_LOG_ASSERT(false, "Illegal or unknown type in MTBDD.");
}
}
template<typename ValueType>

src/storage/dd/sylvan/InternalSylvanAdd.h (124 changed lines)

@@ -546,10 +546,134 @@ namespace storm {
Odd createOdd(std::vector<uint_fast64_t> const& ddVariableIndices) const;
private:
/*!
* Recursively builds the ODD from an ADD.
*
* @param dd The DD for which to build the ODD.
* @param currentLevel The currently considered level in the DD.
* @param maxLevel The number of levels that need to be considered.
* @param ddVariableIndices The (sorted) indices of all DD variables that need to be considered.
* @param uniqueTableForLevels A vector of unique tables, one for each level to be considered, that keeps
* ODD nodes for the same DD and level unique.
* @return A pointer to the constructed ODD for the given arguments.
*/
static std::shared_ptr<Odd> createOddRec(BDD dd, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<std::unordered_map<BDD, std::shared_ptr<Odd>>>& uniqueTableForLevels);
/*!
* Performs a recursive step to perform the given function between the given DD-based vector and the given
* explicit vector.
*
* @param dd The DD to add to the explicit vector.
* @param currentLevel The currently considered level in the DD.
* @param maxLevel The number of levels that need to be considered.
* @param currentOffset The current offset.
* @param odd The ODD used for the translation.
* @param ddVariableIndices The (sorted) indices of all DD variables that need to be considered.
* @param targetVector The vector to which the translated DD-based vector is to be added.
*/
void composeWithExplicitVectorRec(MTBDD dd, std::vector<uint_fast64_t> const* offsets, uint_fast64_t currentLevel, uint_fast64_t maxLevel, uint_fast64_t currentOffset, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType>& targetVector, std::function<ValueType (ValueType const&, ValueType const&)> const& function) const;
/*!
* Splits the given matrix DD into the groups using the given group variables.
*
* @param dd The DD to split.
* @param negated A flag indicating whether the given DD is to be interpreted as negated.
* @param groups A vector that is to be filled with the DDs for the individual groups.
* @param ddGroupVariableIndices The (sorted) indices of all DD group variables that need to be considered.
* @param currentLevel The currently considered level in the DD.
* @param maxLevel The number of levels that need to be considered.
*/
void splitIntoGroupsRec(MTBDD dd, bool negated, std::vector<InternalAdd<DdType::Sylvan, ValueType>>& groups, std::vector<uint_fast64_t> const& ddGroupVariableIndices, uint_fast64_t currentLevel, uint_fast64_t maxLevel) const;
/*!
* Splits the given DDs into the groups using the given group variables.
*
* @param dd1 The first DD to split.
* @param negated1 A flag indicating whether the first DD is to be interpreted as negated.
* @param dd2 The second DD to split.
* @param negated2 A flag indicating whether the second DD is to be interpreted as negated.
* @param groups A vector that is to be filled with the DDs for the individual groups.
* @param ddGroupVariableIndices The (sorted) indices of all DD group variables that need to be considered.
* @param currentLevel The currently considered level in the DD.
* @param maxLevel The number of levels that need to be considered.
*/
void splitIntoGroupsRec(MTBDD dd1, bool negated1, MTBDD dd2, bool negated2, std::vector<std::pair<InternalAdd<DdType::Sylvan, ValueType>, InternalAdd<DdType::Sylvan, ValueType>>>& groups, std::vector<uint_fast64_t> const& ddGroupVariableIndices, uint_fast64_t currentLevel, uint_fast64_t maxLevel) const;
/*!
* Builds an ADD representing the given vector.
*
* @param currentOffset The current offset in the vector.
* @param currentLevel The current level in the DD.
* @param maxLevel The maximal level in the DD.
* @param values The vector that is to be represented by the ADD.
* @param odd The ODD used for the translation.
* @param ddVariableIndices The (sorted) list of DD variable indices to use.
* @return The resulting (Sylvan) MTBDD node.
*/
static MTBDD fromVectorRec(uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices);
/*!
* Helper function to convert the DD into a (sparse) matrix.
*
* @param dd The DD to convert.
* @param negated A flag indicating whether the given DD is to be interpreted as negated.
* @param rowIndications A vector indicating at which position in the columnsAndValues vector the entries
* of row i start. Note: this vector is modified in the computation. More concretely, each entry i in the
* vector will be increased by the number of entries in the row. This can be used to count the number
* of entries in each row. If the values are not to be modified, a copy needs to be provided or the entries
* need to be restored afterwards.
* @param columnsAndValues The vector that will hold the columns and values of non-zero entries upon successful
* completion.
* @param rowGroupOffsets The row offsets at which a given row group starts.
* @param rowOdd The ODD used for the row translation.
* @param columnOdd The ODD used for the column translation.
* @param currentRowLevel The currently considered row level in the DD.
* @param currentColumnLevel The currently considered column level in the DD.
* @param maxLevel The number of levels that need to be considered.
* @param currentRowOffset The current row offset.
* @param currentColumnOffset The current column offset.
* @param ddRowVariableIndices The (sorted) indices of all DD row variables that need to be considered.
* @param ddColumnVariableIndices The (sorted) indices of all DD column variables that need to be considered.
* @param generateValues If set to true, the vector columnsAndValues is filled with the actual entries, which
* only works if the offsets given in rowIndications are already correct. If they need to be computed first,
* this flag needs to be false.
*/
void toMatrixComponentsRec(MTBDD dd, bool negated, std::vector<uint_fast64_t> const& rowGroupOffsets, std::vector<uint_fast64_t>& rowIndications, std::vector<storm::storage::MatrixEntry<uint_fast64_t, ValueType>>& columnsAndValues, Odd const& rowOdd, Odd const& columnOdd, uint_fast64_t currentRowLevel, uint_fast64_t currentColumnLevel, uint_fast64_t maxLevel, uint_fast64_t currentRowOffset, uint_fast64_t currentColumnOffset, std::vector<uint_fast64_t> const& ddRowVariableIndices, std::vector<uint_fast64_t> const& ddColumnVariableIndices, bool writeValues) const;
/*!
* Retrieves the sylvan representation of the given double value.
*
* @return The sylvan node for the given value.
*/
static MTBDD getLeaf(double value);
/*!
* Retrieves the sylvan representation of the given unsigned value.
*
* @return The sylvan node for the given value.
*/
static MTBDD getLeaf(uint_fast64_t value);
/*!
* Retrieves the value of the given node (that must be a leaf).
*
* @return The value of the leaf.
*/
static ValueType getValue(MTBDD const& node);
/*!
* Retrieves the underlying sylvan MTBDD.
*
* @return The sylvan MTBDD.
*/
sylvan::Mtbdd getSylvanMtbdd() const;
// The manager responsible for this MTBDD.
InternalDdManager<DdType::Sylvan> const* ddManager;
// The underlying sylvan MTBDD.
sylvan::Mtbdd sylvanMtbdd;
};
}

src/storage/dd/sylvan/InternalSylvanBdd.cpp (178 changed lines)

@@ -1,14 +1,13 @@
#include "src/storage/dd/sylvan/InternalSylvanBdd.h"
#include <boost/functional/hash.hpp>
#include "src/storage/dd/sylvan/InternalSylvanDdManager.h"
#include "src/storage/dd/sylvan/InternalSylvanAdd.h"
#include "src/storage/dd/sylvan/SylvanAddIterator.h"
#include "src/storage/BitVector.h"
#include "src/utility/macros.h"
#include "src/exceptions/NotImplementedException.h"
#include <iostream>
namespace storm {
@@ -31,17 +30,17 @@ namespace storm {
// need to copy the next value of the vector iff the then-offset is greater than zero.
if (odd.getThenOffset() > 0) {
if (filter(values[currentOffset++])) {
return mtbdd_true;
return sylvan_true;
} else {
return mtbdd_false;
return sylvan_false;
}
} else {
return mtbdd_false;
return sylvan_false;
}
} else {
// If the total offset is zero, we can just return the constant zero DD.
if (odd.getThenOffset() + odd.getElseOffset() == 0) {
return mtbdd_false;
return sylvan_false;
}
// Determine the new else-successor.
@@ -49,35 +48,30 @@ namespace storm {
if (odd.getElseOffset() > 0) {
elseSuccessor = fromVectorRec(currentOffset, currentLevel + 1, maxLevel, values, odd.getElseSuccessor(), ddVariableIndices, filter);
} else {
elseSuccessor = mtbdd_false;
elseSuccessor = sylvan_false;
}
sylvan_ref(elseSuccessor);
bdd_refs_push(elseSuccessor);
// Determine the new then-successor.
BDD thenSuccessor;
if (odd.getThenOffset() > 0) {
thenSuccessor = fromVectorRec(currentOffset, currentLevel + 1, maxLevel, values, odd.getThenSuccessor(), ddVariableIndices, filter);
} else {
thenSuccessor = mtbdd_false;
thenSuccessor = sylvan_false;
}
sylvan_ref(thenSuccessor);
bdd_refs_push(thenSuccessor);
// Create a node representing ITE(currentVar, thenSuccessor, elseSuccessor);
BDD result = sylvan_ithvar(static_cast<BDDVAR>(ddVariableIndices[currentLevel]));
sylvan_ref(result);
BDD currentVar = sylvan_ithvar(static_cast<BDDVAR>(ddVariableIndices[currentLevel]));
bdd_refs_push(currentVar);
LACE_ME;
BDD newResult = sylvan_ite(result, thenSuccessor, elseSuccessor);
sylvan_ref(newResult);
BDD result = sylvan_ite(currentVar, thenSuccessor, elseSuccessor);
// Dispose of the intermediate results
sylvan_deref(result);
sylvan_deref(thenSuccessor);
sylvan_deref(elseSuccessor);
// Dispose of the intermediate results.
bdd_refs_pop(3);
// Before returning, we remove the protection imposed by the previous call to sylvan_ref.
sylvan_deref(newResult);
return newResult;
return result;
}
}
@@ -237,16 +231,148 @@ namespace storm {
}
storm::storage::BitVector InternalBdd<DdType::Sylvan>::toVector(storm::dd::Odd const& rowOdd, std::vector<uint_fast64_t> const& ddVariableIndices) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
storm::storage::BitVector result(rowOdd.getTotalOffset());
this->toVectorRec(bdd_regular(this->getSylvanBdd().GetBDD()), result, rowOdd, bdd_isnegated(this->getSylvanBdd().GetBDD()), 0, ddVariableIndices.size(), 0, ddVariableIndices);
return result;
}
void InternalBdd<DdType::Sylvan>::toVectorRec(BDD dd, storm::storage::BitVector& result, Odd const& rowOdd, bool complement, uint_fast64_t currentRowLevel, uint_fast64_t maxLevel, uint_fast64_t currentRowOffset, std::vector<uint_fast64_t> const& ddRowVariableIndices) const {
// If there are no more values to select, we can directly return.
if (dd == sylvan_false && !complement) {
return;
} else if (dd == sylvan_true && complement) {
return;
}
// If we are at the maximal level, the value to be set is stored as a constant in the DD.
if (currentRowLevel == maxLevel) {
result.set(currentRowOffset, true);
} else if (bdd_isterminal(dd) || ddRowVariableIndices[currentRowLevel] < sylvan_var(dd)) {
toVectorRec(dd, result, rowOdd.getElseSuccessor(), complement, currentRowLevel + 1, maxLevel, currentRowOffset, ddRowVariableIndices);
toVectorRec(dd, result, rowOdd.getThenSuccessor(), complement, currentRowLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), ddRowVariableIndices);
} else {
// Otherwise, we compute the ODDs for both the then- and else successors.
BDD elseDdNode = sylvan_low(dd);
BDD thenDdNode = sylvan_high(dd);
// Determine whether we have to evaluate the successors as if they were complemented.
bool elseComplemented = bdd_isnegated(elseDdNode) ^ complement;
bool thenComplemented = bdd_isnegated(thenDdNode) ^ complement;
toVectorRec(bdd_regular(elseDdNode), result, rowOdd.getElseSuccessor(), elseComplemented, currentRowLevel + 1, maxLevel, currentRowOffset, ddRowVariableIndices);
toVectorRec(bdd_regular(thenDdNode), result, rowOdd.getThenSuccessor(), thenComplemented, currentRowLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), ddRowVariableIndices);
}
}
Odd InternalBdd<DdType::Sylvan>::createOdd(std::vector<uint_fast64_t> const& ddVariableIndices) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
// Prepare a unique table for each level that keeps the constructed ODD nodes unique.
std::vector<std::unordered_map<std::pair<BDD, bool>, std::shared_ptr<Odd>, HashFunctor>> uniqueTableForLevels(ddVariableIndices.size() + 1);
// Now construct the ODD structure from the BDD.
std::shared_ptr<Odd> rootOdd = createOddRec(bdd_regular(this->getSylvanBdd().GetBDD()), 0, bdd_isnegated(this->getSylvanBdd().GetBDD()), ddVariableIndices.size(), ddVariableIndices, uniqueTableForLevels);
// Return a copy of the root node to remove the shared_ptr encapsulation.
return Odd(*rootOdd);
}
std::size_t InternalBdd<DdType::Sylvan>::HashFunctor::operator()(std::pair<BDD, bool> const& key) const {
std::size_t result = 0;
boost::hash_combine(result, key.first);
boost::hash_combine(result, key.second);
return result;
}
std::shared_ptr<Odd> InternalBdd<DdType::Sylvan>::createOddRec(BDD dd, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<std::unordered_map<std::pair<BDD, bool>, std::shared_ptr<Odd>, HashFunctor>>& uniqueTableForLevels) {
// Check whether the ODD for this node has already been computed (for this level) and if so, return this instead.
auto const& iterator = uniqueTableForLevels[currentLevel].find(std::make_pair(dd, complement));
if (iterator != uniqueTableForLevels[currentLevel].end()) {
return iterator->second;
} else {
// Otherwise, we need to recursively compute the ODD.
// If we are already at the maximal level that is to be considered, we can simply create an Odd without
// successors.
if (currentLevel == maxLevel) {
uint_fast64_t elseOffset = 0;
uint_fast64_t thenOffset = 0;
// If the DD is not the zero leaf, then the then-offset is 1.
if (dd != sylvan_false) {
thenOffset = 1;
}
// If we need to complement the 'terminal' node, we need to negate its offset.
if (complement) {
thenOffset = 1 - thenOffset;
}
return std::make_shared<Odd>(nullptr, elseOffset, nullptr, thenOffset);
} else if (bdd_isterminal(dd) || ddVariableIndices[currentLevel] < sylvan_var(dd)) {
// If we skipped the level in the DD, we compute the ODD just for the else-successor and use the same
// node for the then-successor as well.
std::shared_ptr<Odd> elseNode = createOddRec(dd, currentLevel + 1, complement, maxLevel, ddVariableIndices, uniqueTableForLevels);
std::shared_ptr<Odd> thenNode = elseNode;
uint_fast64_t totalOffset = elseNode->getElseOffset() + elseNode->getThenOffset();
return std::make_shared<Odd>(elseNode, totalOffset, thenNode, totalOffset);
} else {
// Otherwise, we compute the ODDs for both the then- and else successors.
BDD thenDdNode = sylvan_high(dd);
BDD elseDdNode = sylvan_low(dd);
// Determine whether we have to evaluate the successors as if they were complemented.
bool elseComplemented = bdd_isnegated(elseDdNode) ^ complement;
bool thenComplemented = bdd_isnegated(thenDdNode) ^ complement;
std::shared_ptr<Odd> elseNode = createOddRec(bdd_regular(elseDdNode), currentLevel + 1, elseComplemented, maxLevel, ddVariableIndices, uniqueTableForLevels);
std::shared_ptr<Odd> thenNode = createOddRec(bdd_regular(thenDdNode), currentLevel + 1, thenComplemented, maxLevel, ddVariableIndices, uniqueTableForLevels);
return std::make_shared<Odd>(elseNode, elseNode->getElseOffset() + elseNode->getThenOffset(), thenNode, thenNode->getElseOffset() + thenNode->getThenOffset());
}
}
}
template<typename ValueType>
void InternalBdd<DdType::Sylvan>::filterExplicitVector(Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<ValueType> const& sourceValues, std::vector<ValueType>& targetValues) const {
STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Not yet implemented.");
uint_fast64_t currentIndex = 0;
filterExplicitVectorRec(bdd_regular(this->getSylvanBdd().GetBDD()), 0, bdd_isnegated(this->getSylvanBdd().GetBDD()), ddVariableIndices.size(), ddVariableIndices, 0, odd, targetValues, currentIndex, sourceValues);
}
template<typename ValueType>
void InternalBdd<DdType::Sylvan>::filterExplicitVectorRec(BDD dd, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, uint_fast64_t currentOffset, storm::dd::Odd const& odd, std::vector<ValueType>& result, uint_fast64_t& currentIndex, std::vector<ValueType> const& values) {
// If there are no more values to select, we can directly return.
if (dd == sylvan_false && !complement) {
return;
} else if (dd == sylvan_true && complement) {
return;
}
if (currentLevel == maxLevel) {
// If the DD is not the zero leaf, then the then-offset is 1.
bool selected = false;
if (dd != sylvan_false) {
selected = !complement;
}
if (selected) {
result[currentIndex++] = values[currentOffset];
}
} else if (bdd_isterminal(dd) || ddVariableIndices[currentLevel] < sylvan_var(dd)) {
// If we skipped a level, we need to enumerate the explicit entries for the case in which the bit is set
// and for the one in which it is not set.
filterExplicitVectorRec(dd, currentLevel + 1, complement, maxLevel, ddVariableIndices, currentOffset, odd.getElseSuccessor(), result, currentIndex, values);
filterExplicitVectorRec(dd, currentLevel + 1, complement, maxLevel, ddVariableIndices, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), result, currentIndex, values);
} else {
// Otherwise, we compute the ODDs for both the then- and else successors.
BDD thenDdNode = sylvan_high(dd);
BDD elseDdNode = sylvan_low(dd);
// Determine whether we have to evaluate the successors as if they were complemented.
bool elseComplemented = bdd_isnegated(elseDdNode) ^ complement;
bool thenComplemented = bdd_isnegated(thenDdNode) ^ complement;
filterExplicitVectorRec(bdd_regular(elseDdNode), currentLevel + 1, elseComplemented, maxLevel, ddVariableIndices, currentOffset, odd.getElseSuccessor(), result, currentIndex, values);
filterExplicitVectorRec(bdd_regular(thenDdNode), currentLevel + 1, thenComplemented, maxLevel, ddVariableIndices, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), result, currentIndex, values);
}
}
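
Note: a usage sketch for the filtering above (hedged; 'allStatesOdd' is the ODD of the full source space, and 'numberOfSelectedEntries', e.g. obtained from the BDD's non-zero count, sizes the target). It compacts an explicit vector to the entries selected by this BDD, in encoding order:

std::vector<double> target(numberOfSelectedEntries);
filterBdd.filterExplicitVector(allStatesOdd, ddVariableIndices, source, target);
// target now holds exactly the source entries whose encodings the BDD maps to true.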
template InternalBdd<DdType::Sylvan> InternalBdd<DdType::Sylvan>::fromVector(InternalDdManager<DdType::Sylvan> const* ddManager, std::vector<double> const& values, Odd const& odd, std::vector<uint_fast64_t> const& sortedDdVariableIndices, std::function<bool (double const&)> const& filter);

src/storage/dd/sylvan/InternalSylvanBdd.h (64 changed lines)

@@ -2,6 +2,7 @@
#define STORM_STORAGE_DD_SYLVAN_INTERNALSYLVANBDD_H_
#include <vector>
#include <unordered_map>
#include <functional>
#include "src/storage/dd/DdType.h"
@@ -341,11 +342,74 @@ namespace storm {
template<typename ValueType>
static BDD fromVectorRec(uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector<ValueType> const& values, Odd const& odd, std::vector<uint_fast64_t> const& ddVariableIndices, std::function<bool (ValueType const&)> const& filter);
// Declare a hash functor that is used for the unique tables in the construction process of ODDs.
class HashFunctor {
public:
std::size_t operator()(std::pair<BDD, bool> const& key) const;
};
/*!
* Recursively builds the ODD from a BDD (that potentially has complement edges).
*
* @param dd The BDD for which to build the ODD.
* @param currentLevel The currently considered level in the DD.
* @param complement A flag indicating whether or not the given node is to be considered as complemented.
* @param maxLevel The number of levels that need to be considered.
* @param ddVariableIndices The (sorted) indices of all DD variables that need to be considered.
* @param uniqueTableForLevels A vector of unique tables, one for each level to be considered, that keeps
* ODD nodes for the same DD and level unique.
* @return A pointer to the constructed ODD for the given arguments.
*/
static std::shared_ptr<Odd> createOddRec(BDD dd, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, std::vector<std::unordered_map<std::pair<BDD, bool>, std::shared_ptr<Odd>, HashFunctor>>& uniqueTableForLevels);
/*!
* Helper function to convert the DD into a bit vector.
*
* @param dd The DD to convert.
* @param result The vector that will hold the values upon successful completion.
* @param rowOdd The ODD used for the row translation.
* @param complement A flag indicating whether the result is to be interpreted as a complement.
* @param currentRowLevel The currently considered row level in the DD.
* @param maxLevel The number of levels that need to be considered.
* @param currentRowOffset The current row offset.
* @param ddRowVariableIndices The (sorted) indices of all DD row variables that need to be considered.
*/
void toVectorRec(BDD dd, storm::storage::BitVector& result, Odd const& rowOdd, bool complement, uint_fast64_t currentRowLevel, uint_fast64_t maxLevel, uint_fast64_t currentRowOffset, std::vector<uint_fast64_t> const& ddRowVariableIndices) const;
/*!
* Adds the selected values to the target vector.
*
* @param dd The current node of the DD representing the selected values.
* @param currentLevel The currently considered level in the DD.
* @param maxLevel The number of levels that need to be considered.
* @param ddVariableIndices The sorted list of variable indices to use.
* @param currentOffset The offset along the path taken in the DD representing the selected values.
* @param odd The current ODD node.
* @param result The target vector to which to write the values.
* @param currentIndex The index at which the next element is to be written.
* @param values The value vector from which to select the values.
*/
template<typename ValueType>
static void filterExplicitVectorRec(BDD dd, uint_fast64_t currentLevel, bool complement, uint_fast64_t maxLevel, std::vector<uint_fast64_t> const& ddVariableIndices, uint_fast64_t currentOffset, storm::dd::Odd const& odd, std::vector<ValueType>& result, uint_fast64_t& currentIndex, std::vector<ValueType> const& values);
/*!
* Retrieves the sylvan BDD.
*
* @return The sylvan BDD.
*/
sylvan::Bdd& getSylvanBdd();
/*!
* Retrieves the sylvan BDD.
*
* @return The sylvan BDD.
*/
sylvan::Bdd const& getSylvanBdd() const;
// The internal manager responsible for this BDD.
InternalDdManager<DdType::Sylvan> const* ddManager;
// The sylvan BDD.
sylvan::Bdd sylvanBdd;
};
}

test/functional/storage/CuddDdTest.cpp (7 changed lines)

@@ -368,10 +368,11 @@ TEST(CuddDd, BddOddTest) {
std::pair<storm::expressions::Variable, storm::expressions::Variable> x = manager->addMetaVariable("x", 1, 9);
storm::dd::Add<storm::dd::DdType::CUDD, double> dd = manager->template getIdentity<double>(x.first);
storm::dd::Bdd<storm::dd::DdType::CUDD> bdd = dd.notZero();
storm::dd::Odd odd;
ASSERT_NO_THROW(odd = dd.createOdd());
ASSERT_NO_THROW(odd = bdd.createOdd());
EXPECT_EQ(9ul, odd.getTotalOffset());
EXPECT_EQ(12ul, odd.getNodeCount());
EXPECT_EQ(5ul, odd.getNodeCount());
std::vector<double> ddAsVector;
ASSERT_NO_THROW(ddAsVector = dd.toVector());
@@ -406,4 +407,4 @@ TEST(CuddDd, BddOddTest) {
EXPECT_EQ(9ul, matrix.getRowGroupCount());
EXPECT_EQ(9ul, matrix.getColumnCount());
EXPECT_EQ(106ul, matrix.getNonzeroEntryCount());
}
}

test/functional/storage/SylvanDdTest.cpp (133 changed lines)

@@ -320,25 +320,25 @@ TEST(SylvanDd, GetSetValueTest) {
// }
// EXPECT_EQ(1ul, numberOfValuations);
//}
//
//TEST(SylvanDd, AddOddTest) {
// std::shared_ptr<storm::dd::DdManager<storm::dd::DdType::Sylvan>> manager(new storm::dd::DdManager<storm::dd::DdType::Sylvan>());
// std::pair<storm::expressions::Variable, storm::expressions::Variable> a = manager->addMetaVariable("a");
// std::pair<storm::expressions::Variable, storm::expressions::Variable> x = manager->addMetaVariable("x", 1, 9);
//
// storm::dd::Add<storm::dd::DdType::Sylvan, double> dd = manager->template getIdentity<double>(x.first);
// storm::dd::Odd odd;
// ASSERT_NO_THROW(odd = dd.createOdd());
// EXPECT_EQ(9ul, odd.getTotalOffset());
// EXPECT_EQ(12ul, odd.getNodeCount());
//
// std::vector<double> ddAsVector;
// ASSERT_NO_THROW(ddAsVector = dd.toVector());
// EXPECT_EQ(9ul, ddAsVector.size());
// for (uint_fast64_t i = 0; i < ddAsVector.size(); ++i) {
// EXPECT_TRUE(i+1 == ddAsVector[i]);
// }
//
TEST(SylvanDd, AddOddTest) {
std::shared_ptr<storm::dd::DdManager<storm::dd::DdType::Sylvan>> manager(new storm::dd::DdManager<storm::dd::DdType::Sylvan>());
std::pair<storm::expressions::Variable, storm::expressions::Variable> a = manager->addMetaVariable("a");
std::pair<storm::expressions::Variable, storm::expressions::Variable> x = manager->addMetaVariable("x", 1, 9);
storm::dd::Add<storm::dd::DdType::Sylvan, double> dd = manager->template getIdentity<double>(x.first);
storm::dd::Odd odd;
ASSERT_NO_THROW(odd = dd.createOdd());
EXPECT_EQ(9ul, odd.getTotalOffset());
EXPECT_EQ(12ul, odd.getNodeCount());
std::vector<double> ddAsVector;
ASSERT_NO_THROW(ddAsVector = dd.toVector());
EXPECT_EQ(9ul, ddAsVector.size());
for (uint_fast64_t i = 0; i < ddAsVector.size(); ++i) {
EXPECT_TRUE(i+1 == ddAsVector[i]);
}
// // Create a non-trivial matrix.
// dd = manager->template getIdentity<double>(x.first).equals(manager->template getIdentity<double>(x.second)) * manager->getRange(x.first).template toAdd<double>();
// dd += manager->getEncoding(x.first, 1).template toAdd<double>() * manager->getRange(x.second).template toAdd<double>() + manager->getEncoding(x.second, 1).template toAdd<double>() * manager->getRange(x.first).template toAdd<double>();
@@ -363,50 +363,51 @@ TEST(SylvanDd, GetSetValueTest) {
// EXPECT_EQ(9ul, matrix.getRowGroupCount());
// EXPECT_EQ(9ul, matrix.getColumnCount());
// EXPECT_EQ(106ul, matrix.getNonzeroEntryCount());
//}
//
//TEST(SylvanDd, BddOddTest) {
// std::shared_ptr<storm::dd::DdManager<storm::dd::DdType::Sylvan>> manager(new storm::dd::DdManager<storm::dd::DdType::Sylvan>());
// std::pair<storm::expressions::Variable, storm::expressions::Variable> a = manager->addMetaVariable("a");
// std::pair<storm::expressions::Variable, storm::expressions::Variable> x = manager->addMetaVariable("x", 1, 9);
//
// storm::dd::Add<storm::dd::DdType::Sylvan, double> dd = manager->template getIdentity<double>(x.first);
// storm::dd::Odd odd;
// ASSERT_NO_THROW(odd = dd.createOdd());
// EXPECT_EQ(9ul, odd.getTotalOffset());
// EXPECT_EQ(12ul, odd.getNodeCount());
//
// std::vector<double> ddAsVector;
// ASSERT_NO_THROW(ddAsVector = dd.toVector());
// EXPECT_EQ(9ul, ddAsVector.size());
// for (uint_fast64_t i = 0; i < ddAsVector.size(); ++i) {
// EXPECT_TRUE(i+1 == ddAsVector[i]);
// }
//
// storm::dd::Add<storm::dd::DdType::Sylvan, double> vectorAdd = storm::dd::Add<storm::dd::DdType::Sylvan, double>::fromVector(manager, ddAsVector, odd, {x.first});
//
// // Create a non-trivial matrix.
// dd = manager->template getIdentity<double>(x.first).equals(manager->template getIdentity<double>(x.second)) * manager->getRange(x.first).template toAdd<double>();
// dd += manager->getEncoding(x.first, 1).template toAdd<double>() * manager->getRange(x.second).template toAdd<double>() + manager->getEncoding(x.second, 1).template toAdd<double>() * manager->getRange(x.first).template toAdd<double>();
//
// // Create the ODDs.
// storm::dd::Odd rowOdd;
// ASSERT_NO_THROW(rowOdd = manager->getRange(x.first).createOdd());
// storm::dd::Odd columnOdd;
// ASSERT_NO_THROW(columnOdd = manager->getRange(x.second).createOdd());
//
// // Try to translate the matrix.
// storm::storage::SparseMatrix<double> matrix;
// ASSERT_NO_THROW(matrix = dd.toMatrix({x.first}, {x.second}, rowOdd, columnOdd));
//
// EXPECT_EQ(9ul, matrix.getRowCount());
// EXPECT_EQ(9ul, matrix.getColumnCount());
// EXPECT_EQ(25ul, matrix.getNonzeroEntryCount());
//
// dd = manager->getRange(x.first).template toAdd<double>() * manager->getRange(x.second).template toAdd<double>() * manager->getEncoding(a.first, 0).template toAdd<double>().ite(dd, dd + manager->template getConstant<double>(1));
// ASSERT_NO_THROW(matrix = dd.toMatrix({a.first}, rowOdd, columnOdd));
// EXPECT_EQ(18ul, matrix.getRowCount());
// EXPECT_EQ(9ul, matrix.getRowGroupCount());
// EXPECT_EQ(9ul, matrix.getColumnCount());
// EXPECT_EQ(106ul, matrix.getNonzeroEntryCount());
//}
}
TEST(SylvanDd, BddOddTest) {
std::shared_ptr<storm::dd::DdManager<storm::dd::DdType::Sylvan>> manager(new storm::dd::DdManager<storm::dd::DdType::Sylvan>());
std::pair<storm::expressions::Variable, storm::expressions::Variable> a = manager->addMetaVariable("a");
std::pair<storm::expressions::Variable, storm::expressions::Variable> x = manager->addMetaVariable("x", 1, 9);
storm::dd::Add<storm::dd::DdType::Sylvan, double> dd = manager->template getIdentity<double>(x.first);
storm::dd::Bdd<storm::dd::DdType::Sylvan> bdd = dd.notZero();
storm::dd::Odd odd;
ASSERT_NO_THROW(odd = bdd.createOdd());
EXPECT_EQ(9ul, odd.getTotalOffset());
EXPECT_EQ(5ul, odd.getNodeCount());
std::vector<double> ddAsVector;
ASSERT_NO_THROW(ddAsVector = dd.toVector());
EXPECT_EQ(9ul, ddAsVector.size());
for (uint_fast64_t i = 0; i < ddAsVector.size(); ++i) {
EXPECT_TRUE(i+1 == ddAsVector[i]);
}
storm::dd::Add<storm::dd::DdType::Sylvan, double> vectorAdd = storm::dd::Add<storm::dd::DdType::Sylvan, double>::fromVector(manager, ddAsVector, odd, {x.first});
// Create a non-trivial matrix.
dd = manager->template getIdentity<double>(x.first).equals(manager->template getIdentity<double>(x.second)) * manager->getRange(x.first).template toAdd<double>();
dd += manager->getEncoding(x.first, 1).template toAdd<double>() * manager->getRange(x.second).template toAdd<double>() + manager->getEncoding(x.second, 1).template toAdd<double>() * manager->getRange(x.first).template toAdd<double>();
// Create the ODDs.
storm::dd::Odd rowOdd;
ASSERT_NO_THROW(rowOdd = manager->getRange(x.first).createOdd());
storm::dd::Odd columnOdd;
ASSERT_NO_THROW(columnOdd = manager->getRange(x.second).createOdd());
// Try to translate the matrix.
storm::storage::SparseMatrix<double> matrix;
ASSERT_NO_THROW(matrix = dd.toMatrix({x.first}, {x.second}, rowOdd, columnOdd));
EXPECT_EQ(9ul, matrix.getRowCount());
EXPECT_EQ(9ul, matrix.getColumnCount());
EXPECT_EQ(25ul, matrix.getNonzeroEntryCount());
dd = manager->getRange(x.first).template toAdd<double>() * manager->getRange(x.second).template toAdd<double>() * manager->getEncoding(a.first, 0).template toAdd<double>().ite(dd, dd + manager->template getConstant<double>(1));
ASSERT_NO_THROW(matrix = dd.toMatrix({a.first}, rowOdd, columnOdd));
EXPECT_EQ(18ul, matrix.getRowCount());
EXPECT_EQ(9ul, matrix.getRowGroupCount());
EXPECT_EQ(9ul, matrix.getColumnCount());
EXPECT_EQ(106ul, matrix.getNonzeroEntryCount());
}