#include "src/storage/dd/cudd/InternalCuddAdd.h"

#include "src/storage/dd/cudd/CuddOdd.h"
#include "src/storage/dd/cudd/InternalCuddDdManager.h"
#include "src/storage/dd/cudd/InternalCuddBdd.h"

#include "src/storage/SparseMatrix.h"

#include "src/utility/constants.h"

namespace storm {
    namespace dd {
        // NOTE(review): the template headers and all template-argument lists in this file had been
        // stripped (leaving invalid C++); they are restored below for the partial specialization
        // InternalAdd<DdType::CUDD, ValueType> declared in InternalCuddAdd.h.

        // ADDs in CUDD are canonical, so comparing the underlying nodes decides semantic equality.
        template<typename ValueType>
        bool InternalAdd<DdType::CUDD, ValueType>::operator==(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return this->getCuddAdd() == other.getCuddAdd();
        }
        
        template<typename ValueType>
        bool InternalAdd<DdType::CUDD, ValueType>::operator!=(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return !(*this == other);
        }
        
        // If-then-else: yields thenDd wherever this (0/1) ADD is one and elseDd wherever it is zero.
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::ite(InternalAdd<DdType::CUDD, ValueType> const& thenDd, InternalAdd<DdType::CUDD, ValueType> const& elseDd) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().Ite(thenDd.getCuddAdd(), elseDd.getCuddAdd()));
        }
        
        // Complement of a 0/1 ADD (CUDD's operator~ on ADDs).
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::operator!() const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, ~this->getCuddAdd());
        }
        
        // Disjunction of 0/1 ADDs.
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::operator||(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd() | other.getCuddAdd());
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType>& InternalAdd<DdType::CUDD, ValueType>::operator|=(InternalAdd<DdType::CUDD, ValueType> const& other) {
            this->cuddAdd = this->getCuddAdd() | other.getCuddAdd();
            return *this;
        }
        
        // Pointwise arithmetic on the terminal values.
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::operator+(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd() + other.getCuddAdd());
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType>& InternalAdd<DdType::CUDD, ValueType>::operator+=(InternalAdd<DdType::CUDD, ValueType> const& other) {
            this->cuddAdd = this->getCuddAdd() + other.getCuddAdd();
            return *this;
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::operator*(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd() * other.getCuddAdd());
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType>& InternalAdd<DdType::CUDD, ValueType>::operator*=(InternalAdd<DdType::CUDD, ValueType> const& other) {
            this->cuddAdd = this->getCuddAdd() * other.getCuddAdd();
            return *this;
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::operator-(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd() - other.getCuddAdd());
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType>& InternalAdd<DdType::CUDD, ValueType>::operator-=(InternalAdd<DdType::CUDD, ValueType> const& other) {
            this->cuddAdd = this->getCuddAdd() - other.getCuddAdd();
            return *this;
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::operator/(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().Divide(other.getCuddAdd()));
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType>& InternalAdd<DdType::CUDD, ValueType>::operator/=(InternalAdd<DdType::CUDD, ValueType> const& other) {
            this->cuddAdd = this->getCuddAdd().Divide(other.getCuddAdd());
            return *this;
        }
        
        // Pointwise comparisons; each yields a 0/1 ADD.
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::equals(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().Equals(other.getCuddAdd()));
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::notEquals(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().NotEquals(other.getCuddAdd()));
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::less(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().LessThan(other.getCuddAdd()));
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::lessOrEqual(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().LessThanOrEqual(other.getCuddAdd()));
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::greater(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().GreaterThan(other.getCuddAdd()));
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::greaterOrEqual(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().GreaterThanOrEqual(other.getCuddAdd()));
        }
        
        // Pointwise x^y on terminal values.
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::pow(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().Pow(other.getCuddAdd()));
        }
        
        // Pointwise x mod y on terminal values.
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::mod(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().Mod(other.getCuddAdd()));
        }
        
        // Pointwise log_y(x) on terminal values.
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::logxy(InternalAdd<DdType::CUDD, ValueType> const& other) const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().LogXY(other.getCuddAdd()));
        }
        
        // Pointwise rounding of terminal values.
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::floor() const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().Floor());
        }
        
        template<typename ValueType>
        InternalAdd<DdType::CUDD, ValueType> InternalAdd<DdType::CUDD, ValueType>::ceil() const {
            return InternalAdd<DdType::CUDD, ValueType>(ddManager, this->getCuddAdd().Ceil());
        }
template InternalAdd InternalAdd::minimum(InternalAdd const& other) const { return InternalAdd(ddManager, this->getCuddAdd().Minimum(other.getCuddAdd())); } template InternalAdd InternalAdd::maximum(InternalAdd const& other) const { return InternalAdd(ddManager, this->getCuddAdd().Maximum(other.getCuddAdd())); } template InternalAdd InternalAdd::sumAbstract(InternalBdd const& cube) const { return InternalAdd(ddManager, this->getCuddAdd().ExistAbstract(cube.toAdd().getCuddAdd())); } template InternalAdd InternalAdd::minAbstract(InternalBdd const& cube) const { return InternalAdd(ddManager, this->getCuddAdd().MinAbstract(cube.toAdd().getCuddAdd())); } template InternalAdd InternalAdd::maxAbstract(InternalBdd const& cube) const { return InternalAdd(ddManager, this->getCuddAdd().MaxAbstract(cube.toAdd().getCuddAdd())); } template bool InternalAdd::equalModuloPrecision(InternalAdd const& other, double precision, bool relative) const { if (relative) { return this->getCuddAdd().EqualSupNormRel(other.getCuddAdd(), precision); } else { return this->getCuddAdd().EqualSupNorm(other.getCuddAdd(), precision); } }; template InternalAdd InternalAdd::swapVariables(std::vector> const& from, std::vector> const& to) const { std::vector fromAdd; std::vector toAdd; for (auto it1 = from.begin(), ite1 = from.end(), it2 = to.begin(); it1 != ite1; ++it1, ++it2) { fromAdd.push_back(it1->getCuddAdd()); toAdd.push_back(it2->getCuddAdd()); } return InternalAdd(ddManager, this->getCuddAdd().SwapVariables(fromAdd, toAdd)); } template InternalAdd InternalAdd::multiplyMatrix(InternalAdd const& otherMatrix, std::vector> const& summationDdVariables) const { // Create the CUDD summation variables. 
std::vector summationAdds; for (auto const& ddVariable : summationDdVariables) { summationAdds.push_back(ddVariable.getCuddAdd()); } return InternalAdd(ddManager, this->getCuddAdd().MatrixMultiply(otherMatrix.getCuddAdd(), summationAdds)); } template InternalBdd InternalAdd::greater(ValueType const& value) const { return InternalBdd(ddManager, this->getCuddAdd().BddStrictThreshold(value)); } template InternalBdd InternalAdd::greaterOrEqual(ValueType const& value) const { return InternalBdd(ddManager, this->getCuddAdd().BddThreshold(value)); } template InternalBdd InternalAdd::less(ValueType const& value) const { return InternalBdd(ddManager, ~this->getCuddAdd().BddThreshold(value)); } template InternalBdd InternalAdd::lessOrEqual(ValueType const& value) const { return InternalBdd(ddManager, ~this->getCuddAdd().BddStrictThreshold(value)); } template InternalBdd InternalAdd::notZero() const { return this->toBdd(); } template InternalAdd InternalAdd::constrain(InternalAdd const& constraint) const { return InternalAdd(ddManager, this->getCuddAdd().Constrain(constraint.getCuddAdd())); } template InternalAdd InternalAdd::restrict(InternalAdd const& constraint) const { return InternalAdd(ddManager, this->getCuddAdd().Restrict(constraint.getCuddAdd())); } template InternalBdd InternalAdd::getSupport() const { return InternalBdd(ddManager, this->getCuddAdd().Support()); } template uint_fast64_t InternalAdd::getNonZeroCount(uint_fast64_t numberOfDdVariables) const { return static_cast(this->getCuddAdd().CountMinterm(static_cast(numberOfDdVariables))); } template uint_fast64_t InternalAdd::getLeafCount() const { return static_cast(this->getCuddAdd().CountLeaves()); } template uint_fast64_t InternalAdd::getNodeCount() const { return static_cast(this->getCuddAdd().nodeCount()); } template ValueType InternalAdd::getMin() const { ADD constantMinAdd = this->getCuddAdd().FindMin(); return static_cast(Cudd_V(constantMinAdd.getNode())); } template ValueType InternalAdd::getMax() 
const { ADD constantMaxAdd = this->getCuddAdd().FindMax(); return static_cast(Cudd_V(constantMaxAdd.getNode())); } template InternalBdd InternalAdd::toBdd() const { return InternalBdd(ddManager, this->getCuddAdd().BddPattern()); } template bool InternalAdd::isOne() const { return this->getCuddAdd().IsOne(); } template bool InternalAdd::isZero() const { return this->getCuddAdd().IsZero(); } template bool InternalAdd::isConstant() const { return Cudd_IsConstant(this->getCuddAdd().getNode()); } template uint_fast64_t InternalAdd::getIndex() const { return static_cast(this->getCuddAdd().NodeReadIndex()); } template void InternalAdd::exportToDot(std::string const& filename, std::vector const& ddVariableNamesAsStrings) const { // Build the name input of the DD. std::vector ddNames; std::string ddName("f"); ddNames.push_back(new char[ddName.size() + 1]); std::copy(ddName.c_str(), ddName.c_str() + 2, ddNames.back()); // Now build the variables names. std::vector ddVariableNames; for (auto const& element : ddVariableNamesAsStrings) { ddVariableNames.push_back(new char[element.size() + 1]); std::copy(element.c_str(), element.c_str() + element.size() + 1, ddVariableNames.back()); } // Open the file, dump the DD and close it again. FILE* filePointer = fopen(filename.c_str() , "w"); std::vector cuddAddVector = { this->getCuddAdd() }; ddManager->getCuddManager().DumpDot(cuddAddVector, &ddVariableNames[0], &ddNames[0], filePointer); fclose(filePointer); // Finally, delete the names. 
for (char* element : ddNames) { delete[] element; } for (char* element : ddVariableNames) { delete[] element; } } template AddIterator InternalAdd::begin(std::shared_ptr const> fullDdManager, std::set const& metaVariables, bool enumerateDontCareMetaVariables) const { int* cube; double value; DdGen* generator = this->getCuddAdd().FirstCube(&cube, &value); return AddIterator(fullDdManager, generator, cube, value, (Cudd_IsGenEmpty(generator) != 0), &metaVariables, enumerateDontCareMetaVariables); } template AddIterator InternalAdd::end(std::shared_ptr const> fullDdManager, bool enumerateDontCareMetaVariables) const { return AddIterator(fullDdManager, nullptr, nullptr, 0, true, nullptr, enumerateDontCareMetaVariables); } template ADD InternalAdd::getCuddAdd() const { return this->cuddAdd; } template DdNode* InternalAdd::getCuddDdNode() const { return this->getCuddAdd().getNode(); } template std::vector InternalAdd::toVector(std::vector const& ddGroupVariableIndices, storm::dd::Odd const& rowOdd, std::vector const& ddRowVariableIndices, std::vector const& groupOffsets) const { // Start by splitting the symbolic vector into groups. std::vector> groups; splitGroupsRec(this->getCuddDdNode(), groups, ddGroupVariableIndices, 0, ddGroupVariableIndices.size()); // Now iterate over the groups and add them to the resulting vector. 
std::vector result(groupOffsets.back(), storm::utility::zero()); for (uint_fast64_t i = 0; i < groups.size(); ++i) { toVectorRec(groups[i].getCuddDdNode(), result, groupOffsets, rowOdd, 0, ddRowVariableIndices.size(), 0, ddRowVariableIndices); } return result; } template void InternalAdd::addToExplicitVector(storm::dd::Odd const& odd, std::vector const& ddVariableIndices, std::vector& targetVector) const { composeVector(odd, ddVariableIndices, targetVector, std::plus()); } template void InternalAdd::composeVector(storm::dd::Odd const& odd, std::vector const& ddVariableIndices, std::vector& targetVector, std::function const& function) const { composeVectorRec(this->getCuddDdNode(), 0, ddVariableIndices.size(), 0, odd, ddVariableIndices, targetVector, function); } template void InternalAdd::composeVectorRec(DdNode const* dd, uint_fast64_t currentLevel, uint_fast64_t maxLevel, uint_fast64_t currentOffset, Odd const& odd, std::vector const& ddVariableIndices, std::vector& targetVector, std::function const& function) const { // For the empty DD, we do not need to add any entries. if (dd == Cudd_ReadZero(this->getDdManager()->getCuddManager().getManager())) { return; } // If we are at the maximal level, the value to be set is stored as a constant in the DD. if (currentLevel == maxLevel) { targetVector[currentOffset] = function(targetVector[currentOffset], Cudd_V(dd)); } else if (ddVariableIndices[currentLevel] < dd->index) { // If we skipped a level, we need to enumerate the explicit entries for the case in which the bit is set // and for the one in which it is not set. composeVectorRec(dd, currentLevel + 1, maxLevel, currentOffset, odd.getElseSuccessor(), ddVariableIndices, targetVector, function); composeVectorRec(dd, currentLevel + 1, maxLevel, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), ddVariableIndices, targetVector, function); } else { // Otherwise, we simply recursively call the function for both (different) cases. 
composeVectorRec(Cudd_E(dd), currentLevel + 1, maxLevel, currentOffset, odd.getElseSuccessor(), ddVariableIndices, targetVector, function); composeVectorRec(Cudd_T(dd), currentLevel + 1, maxLevel, currentOffset + odd.getElseOffset(), odd.getThenSuccessor(), ddVariableIndices, targetVector, function); } } template void InternalAdd::toVectorRec(DdNode const* dd, std::vector& result, std::vector const& rowGroupOffsets, Odd const& rowOdd, uint_fast64_t currentRowLevel, uint_fast64_t maxLevel, uint_fast64_t currentRowOffset, std::vector const& ddRowVariableIndices) const { // For the empty DD, we do not need to add any entries. if (dd == Cudd_ReadZero(ddManager->getCuddManager().getManager())) { return; } // If we are at the maximal level, the value to be set is stored as a constant in the DD. if (currentRowLevel == maxLevel) { result[rowGroupOffsets[currentRowOffset]] = Cudd_V(dd); } else if (ddRowVariableIndices[currentRowLevel] < dd->index) { toVectorRec(dd, result, rowGroupOffsets, rowOdd.getElseSuccessor(), currentRowLevel + 1, maxLevel, currentRowOffset, ddRowVariableIndices); toVectorRec(dd, result, rowGroupOffsets, rowOdd.getThenSuccessor(), currentRowLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), ddRowVariableIndices); } else { toVectorRec(Cudd_E(dd), result, rowGroupOffsets, rowOdd.getElseSuccessor(), currentRowLevel + 1, maxLevel, currentRowOffset, ddRowVariableIndices); toVectorRec(Cudd_T(dd), result, rowGroupOffsets, rowOdd.getThenSuccessor(), currentRowLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), ddRowVariableIndices); } } template storm::storage::SparseMatrix InternalAdd::toMatrix(uint_fast64_t numberOfDdVariables, storm::dd::Odd const& rowOdd, std::vector const& ddRowVariableIndices, storm::dd::Odd const& columnOdd, std::vector const& ddColumnVariableIndices) const { // Prepare the vectors that represent the matrix. 
std::vector rowIndications(rowOdd.getTotalOffset() + 1); std::vector> columnsAndValues(this->getNonZeroCount(numberOfDdVariables)); // Create a trivial row grouping. std::vector trivialRowGroupIndices(rowIndications.size()); uint_fast64_t i = 0; for (auto& entry : trivialRowGroupIndices) { entry = i; ++i; } // Use the toMatrixRec function to compute the number of elements in each row. Using the flag, we prevent // it from actually generating the entries in the entry vector. toMatrixRec(this->getCuddDdNode(), rowIndications, columnsAndValues, trivialRowGroupIndices, rowOdd, columnOdd, 0, 0, ddRowVariableIndices.size() + ddColumnVariableIndices.size(), 0, 0, ddRowVariableIndices, ddColumnVariableIndices, false); // TODO: counting might be faster by just summing over the primed variables and then using the ODD to convert // the resulting (DD) vector to an explicit vector. // Now that we computed the number of entries in each row, compute the corresponding offsets in the entry vector. uint_fast64_t tmp = 0; uint_fast64_t tmp2 = 0; for (uint_fast64_t i = 1; i < rowIndications.size(); ++i) { tmp2 = rowIndications[i]; rowIndications[i] = rowIndications[i - 1] + tmp; std::swap(tmp, tmp2); } rowIndications[0] = 0; // Now actually fill the entry vector. toMatrixRec(this->getCuddDdNode(), rowIndications, columnsAndValues, trivialRowGroupIndices, rowOdd, columnOdd, 0, 0, ddRowVariableIndices.size() + ddColumnVariableIndices.size(), 0, 0, ddRowVariableIndices, ddColumnVariableIndices, true); // Since the last call to toMatrixRec modified the rowIndications, we need to restore the correct values. for (uint_fast64_t i = rowIndications.size() - 1; i > 0; --i) { rowIndications[i] = rowIndications[i - 1]; } rowIndications[0] = 0; // Construct matrix and return result. 
return storm::storage::SparseMatrix(columnOdd.getTotalOffset(), std::move(rowIndications), std::move(columnsAndValues), std::move(trivialRowGroupIndices), false); } template void InternalAdd::toMatrixRec(DdNode const* dd, std::vector& rowIndications, std::vector>& columnsAndValues, std::vector const& rowGroupOffsets, Odd const& rowOdd, Odd const& columnOdd, uint_fast64_t currentRowLevel, uint_fast64_t currentColumnLevel, uint_fast64_t maxLevel, uint_fast64_t currentRowOffset, uint_fast64_t currentColumnOffset, std::vector const& ddRowVariableIndices, std::vector const& ddColumnVariableIndices, bool generateValues) const { // For the empty DD, we do not need to add any entries. if (dd == Cudd_ReadZero(ddManager->getCuddManager().getManager())) { return; } // If we are at the maximal level, the value to be set is stored as a constant in the DD. if (currentRowLevel + currentColumnLevel == maxLevel) { if (generateValues) { columnsAndValues[rowIndications[rowGroupOffsets[currentRowOffset]]] = storm::storage::MatrixEntry(currentColumnOffset, Cudd_V(dd)); } ++rowIndications[rowGroupOffsets[currentRowOffset]]; } else { DdNode const* elseElse; DdNode const* elseThen; DdNode const* thenElse; DdNode const* thenThen; if (ddColumnVariableIndices[currentColumnLevel] < dd->index) { elseElse = elseThen = thenElse = thenThen = dd; } else if (ddRowVariableIndices[currentColumnLevel] < dd->index) { elseElse = thenElse = Cudd_E(dd); elseThen = thenThen = Cudd_T(dd); } else { DdNode const* elseNode = Cudd_E(dd); if (ddColumnVariableIndices[currentColumnLevel] < elseNode->index) { elseElse = elseThen = elseNode; } else { elseElse = Cudd_E(elseNode); elseThen = Cudd_T(elseNode); } DdNode const* thenNode = Cudd_T(dd); if (ddColumnVariableIndices[currentColumnLevel] < thenNode->index) { thenElse = thenThen = thenNode; } else { thenElse = Cudd_E(thenNode); thenThen = Cudd_T(thenNode); } } // Visit else-else. 
toMatrixRec(elseElse, rowIndications, columnsAndValues, rowGroupOffsets, rowOdd.getElseSuccessor(), columnOdd.getElseSuccessor(), currentRowLevel + 1, currentColumnLevel + 1, maxLevel, currentRowOffset, currentColumnOffset, ddRowVariableIndices, ddColumnVariableIndices, generateValues); // Visit else-then. toMatrixRec(elseThen, rowIndications, columnsAndValues, rowGroupOffsets, rowOdd.getElseSuccessor(), columnOdd.getThenSuccessor(), currentRowLevel + 1, currentColumnLevel + 1, maxLevel, currentRowOffset, currentColumnOffset + columnOdd.getElseOffset(), ddRowVariableIndices, ddColumnVariableIndices, generateValues); // Visit then-else. toMatrixRec(thenElse, rowIndications, columnsAndValues, rowGroupOffsets, rowOdd.getThenSuccessor(), columnOdd.getElseSuccessor(), currentRowLevel + 1, currentColumnLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), currentColumnOffset, ddRowVariableIndices, ddColumnVariableIndices, generateValues); // Visit then-then. toMatrixRec(thenThen, rowIndications, columnsAndValues, rowGroupOffsets, rowOdd.getThenSuccessor(), columnOdd.getThenSuccessor(), currentRowLevel + 1, currentColumnLevel + 1, maxLevel, currentRowOffset + rowOdd.getElseOffset(), currentColumnOffset + columnOdd.getElseOffset(), ddRowVariableIndices, ddColumnVariableIndices, generateValues); } } template storm::storage::SparseMatrix InternalAdd::toMatrix(std::vector const& ddGroupVariableIndices, InternalBdd const& groupVariableCube, storm::dd::Odd const& rowOdd, std::vector const& ddRowVariableIndices, storm::dd::Odd const& columnOdd, std::vector const& ddColumnVariableIndices, InternalBdd const& columnVariableCube) const { // Start by computing the offsets (in terms of rows) for each row group. 
InternalAdd stateToNumberOfChoices = this->notZero().existsAbstract(columnVariableCube).template toAdd().sumAbstract(groupVariableCube); std::vector rowGroupIndices = stateToNumberOfChoices.toVector(rowOdd); rowGroupIndices.resize(rowGroupIndices.size() + 1); uint_fast64_t tmp = 0; uint_fast64_t tmp2 = 0; for (uint_fast64_t i = 1; i < rowGroupIndices.size(); ++i) { tmp2 = rowGroupIndices[i]; rowGroupIndices[i] = rowGroupIndices[i - 1] + tmp; std::swap(tmp, tmp2); } rowGroupIndices[0] = 0; // Next, we split the matrix into one for each group. This only works if the group variables are at the very // top. std::vector> groups; splitGroupsRec(this->getCuddDdNode(), groups, ddGroupVariableIndices, 0, ddGroupVariableIndices.size()); // Create the actual storage for the non-zero entries. std::vector> columnsAndValues(this->getNonZeroCount()); // Now compute the indices at which the individual rows start. std::vector rowIndications(rowGroupIndices.back() + 1); std::vector> statesWithGroupEnabled(groups.size()); InternalAdd stateToRowGroupCount = this->getDdManager()->getAddZero(); for (uint_fast64_t i = 0; i < groups.size(); ++i) { auto const& dd = groups[i]; toMatrixRec(dd.getCuddDdNode(), rowIndications, columnsAndValues, rowGroupIndices, rowOdd, columnOdd, 0, 0, ddRowVariableIndices.size() + ddColumnVariableIndices.size(), 0, 0, ddRowVariableIndices, ddColumnVariableIndices, false); statesWithGroupEnabled[i] = dd.notZero().existsAbstract(columnVariableCube).toAdd(); stateToRowGroupCount += statesWithGroupEnabled[i]; statesWithGroupEnabled[i].addToVector(rowOdd, ddRowVariableIndices, rowGroupIndices); } // Since we modified the rowGroupIndices, we need to restore the correct values. 
std::function fct = [] (uint_fast64_t const& a, double const& b) -> uint_fast64_t { return a - static_cast(b); }; composeVectorRec(stateToRowGroupCount.getCuddDdNode(), 0, ddRowVariableIndices.size(), 0, rowOdd, ddRowVariableIndices, rowGroupIndices, fct); // Now that we computed the number of entries in each row, compute the corresponding offsets in the entry vector. tmp = 0; tmp2 = 0; for (uint_fast64_t i = 1; i < rowIndications.size(); ++i) { tmp2 = rowIndications[i]; rowIndications[i] = rowIndications[i - 1] + tmp; std::swap(tmp, tmp2); } rowIndications[0] = 0; // Now actually fill the entry vector. for (uint_fast64_t i = 0; i < groups.size(); ++i) { auto const& dd = groups[i]; toMatrixRec(dd.getCuddDdNode(), rowIndications, columnsAndValues, rowGroupIndices, rowOdd, columnOdd, 0, 0, ddRowVariableIndices.size() + ddColumnVariableIndices.size(), 0, 0, ddRowVariableIndices, ddColumnVariableIndices, true); statesWithGroupEnabled[i].addToVector(rowOdd, ddRowVariableIndices, rowGroupIndices); } // Since we modified the rowGroupIndices, we need to restore the correct values. composeVectorRec(stateToRowGroupCount.getCuddDdNode(), 0, ddRowVariableIndices.size(), 0, rowOdd, ddRowVariableIndices, rowGroupIndices, fct); // Since the last call to toMatrixRec modified the rowIndications, we need to restore the correct values. 
for (uint_fast64_t i = rowIndications.size() - 1; i > 0; --i) { rowIndications[i] = rowIndications[i - 1]; } rowIndications[0] = 0; return storm::storage::SparseMatrix(columnOdd.getTotalOffset(), std::move(rowIndications), std::move(columnsAndValues), std::move(rowGroupIndices), true); } template std::pair, std::vector> InternalAdd::toMatrixVector(InternalAdd const& vector, std::vector const& ddGroupVariableIndices, std::vector&& rowGroupIndices, storm::dd::Odd const& rowOdd, std::vector const& ddRowVariableIndices, storm::dd::Odd const& columnOdd, std::vector const& ddColumnVariableIndices, InternalBdd const& columnVariableCube) const { // Transform the row group sizes to the actual row group indices. rowGroupIndices.resize(rowGroupIndices.size() + 1); uint_fast64_t tmp = 0; uint_fast64_t tmp2 = 0; for (uint_fast64_t i = 1; i < rowGroupIndices.size(); ++i) { tmp2 = rowGroupIndices[i]; rowGroupIndices[i] = rowGroupIndices[i - 1] + tmp; std::swap(tmp, tmp2); } rowGroupIndices[0] = 0; // Create the explicit vector we need to fill later. std::vector explicitVector(rowGroupIndices.back()); // Next, we split the matrix into one for each group. This only works if the group variables are at the very top. std::vector, InternalAdd>> groups; splitGroupsRec(this->getCuddDdNode(), vector.getCuddDdNode(), groups, ddGroupVariableIndices, 0, ddGroupVariableIndices.size()); // Create the actual storage for the non-zero entries. std::vector> columnsAndValues(this->getNonZeroCount()); // Now compute the indices at which the individual rows start. 
std::vector rowIndications(rowGroupIndices.back() + 1); std::vector> statesWithGroupEnabled(groups.size()); storm::dd::InternalAdd stateToRowGroupCount = this->getDdManager()->getAddZero(); for (uint_fast64_t i = 0; i < groups.size(); ++i) { std::pair, storm::dd::InternalAdd> ddPair = groups[i]; toMatrixRec(ddPair.first.getCuddDdNode(), rowIndications, columnsAndValues, rowGroupIndices, rowOdd, columnOdd, 0, 0, ddRowVariableIndices.size() + ddColumnVariableIndices.size(), 0, 0, ddRowVariableIndices, ddColumnVariableIndices, false); toVectorRec(ddPair.second.getCuddDdNode(), explicitVector, rowGroupIndices, rowOdd, 0, ddRowVariableIndices.size(), 0, ddRowVariableIndices); statesWithGroupEnabled[i] = (ddPair.first.notZero().existsAbstract(columnVariableCube) || ddPair.second.notZero()).toAdd(); stateToRowGroupCount += statesWithGroupEnabled[i]; statesWithGroupEnabled[i].addToVector(rowOdd, ddRowVariableIndices, rowGroupIndices); } // Since we modified the rowGroupIndices, we need to restore the correct values. std::function fct = [] (uint_fast64_t const& a, double const& b) -> uint_fast64_t { return a - static_cast(b); }; composeVectorRec(stateToRowGroupCount.getCuddDdNode(), 0, ddRowVariableIndices.size(), 0, rowOdd, ddRowVariableIndices, rowGroupIndices, fct); // Now that we computed the number of entries in each row, compute the corresponding offsets in the entry vector. tmp = 0; tmp2 = 0; for (uint_fast64_t i = 1; i < rowIndications.size(); ++i) { tmp2 = rowIndications[i]; rowIndications[i] = rowIndications[i - 1] + tmp; std::swap(tmp, tmp2); } rowIndications[0] = 0; // Now actually fill the entry vector. 
for (uint_fast64_t i = 0; i < groups.size(); ++i) { auto const& dd = groups[i].first; toMatrixRec(dd.getCuddDdNode(), rowIndications, columnsAndValues, rowGroupIndices, rowOdd, columnOdd, 0, 0, ddRowVariableIndices.size() + ddColumnVariableIndices.size(), 0, 0, ddRowVariableIndices, ddColumnVariableIndices, true); statesWithGroupEnabled[i].addToVector(rowOdd, ddRowVariableIndices, rowGroupIndices); } // Since we modified the rowGroupIndices, we need to restore the correct values. composeVectorRec(stateToRowGroupCount.getCuddDdNode(), 0, ddRowVariableIndices.size(), 0, rowOdd, ddRowVariableIndices, rowGroupIndices, fct); // Since the last call to toMatrixRec modified the rowIndications, we need to restore the correct values. for (uint_fast64_t i = rowIndications.size() - 1; i > 0; --i) { rowIndications[i] = rowIndications[i - 1]; } rowIndications[0] = 0; return std::make_pair(storm::storage::SparseMatrix(columnOdd.getTotalOffset(), std::move(rowIndications), std::move(columnsAndValues), std::move(rowGroupIndices), true), std::move(explicitVector)); } template void InternalAdd::splitGroupsRec(DdNode* dd, std::vector>& groups, std::vector const& ddGroupVariableIndices, uint_fast64_t currentLevel, uint_fast64_t maxLevel) const { // For the empty DD, we do not need to create a group. 
if (dd == Cudd_ReadZero(this->getDdManager()->getCuddManager().getManager())) { return; } if (currentLevel == maxLevel) { groups.push_back(InternalAdd(ddManager, ADD(ddManager->getCuddManager(), dd))); } else if (ddGroupVariableIndices[currentLevel] < dd->index) { splitGroupsRec(dd, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); splitGroupsRec(dd, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); } else { splitGroupsRec(Cudd_E(dd), groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); splitGroupsRec(Cudd_T(dd), groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); } } template void InternalAdd::splitGroupsRec(DdNode* dd1, DdNode* dd2, std::vector, InternalAdd>>& groups, std::vector const& ddGroupVariableIndices, uint_fast64_t currentLevel, uint_fast64_t maxLevel) const { // For the empty DD, we do not need to create a group. if (dd1 == Cudd_ReadZero(ddManager->getCuddManager().getManager()) && dd2 == Cudd_ReadZero(ddManager->getCuddManager().getManager())) { return; } if (currentLevel == maxLevel) { groups.push_back(std::make_pair(InternalAdd(ddManager, ADD(ddManager->getCuddManager(), dd1)), InternalAdd(ddManager, ADD(ddManager->getCuddManager(), dd2)))); } else if (ddGroupVariableIndices[currentLevel] < dd1->index) { if (ddGroupVariableIndices[currentLevel] < dd2->index) { splitGroupsRec(dd1, dd2, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); splitGroupsRec(dd1, dd2, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); } else { splitGroupsRec(dd1, Cudd_T(dd2), groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); splitGroupsRec(dd1, Cudd_E(dd2), groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); } } else if (ddGroupVariableIndices[currentLevel] < dd2->index) { splitGroupsRec(Cudd_T(dd1), dd2, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); splitGroupsRec(Cudd_E(dd1), dd2, groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); } else { splitGroupsRec(Cudd_T(dd1), 
Cudd_T(dd2), groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); splitGroupsRec(Cudd_E(dd1), Cudd_E(dd2), groups, ddGroupVariableIndices, currentLevel + 1, maxLevel); } } template InternalAdd InternalAdd::fromVector(InternalDdManager const* ddManager, std::vector const& values, storm::dd::Odd const& odd, std::vector const& ddVariableIndices) { uint_fast64_t offset = 0; return InternalAdd(ddManager, ADD(ddManager->getCuddManager(), fromVectorRec(ddManager->getCuddManager().getManager(), offset, 0, ddVariableIndices.size(), values, odd, ddVariableIndices))); } template DdNode* InternalAdd::fromVectorRec(::DdManager* manager, uint_fast64_t& currentOffset, uint_fast64_t currentLevel, uint_fast64_t maxLevel, std::vector const& values, Odd const& odd, std::vector const& ddVariableIndices) { if (currentLevel == maxLevel) { // If we are in a terminal node of the ODD, we need to check whether the then-offset of the ODD is one // (meaning the encoding is a valid one) or zero (meaning the encoding is not valid). Consequently, we // need to copy the next value of the vector iff the then-offset is greater than zero. if (odd.getThenOffset() > 0) { return Cudd_addConst(manager, values[currentOffset++]); } else { return Cudd_ReadZero(manager); } } else { // If the total offset is zero, we can just return the constant zero DD. if (odd.getThenOffset() + odd.getElseOffset() == 0) { return Cudd_ReadZero(manager); } // Determine the new else-successor. DdNode* elseSuccessor = nullptr; if (odd.getElseOffset() > 0) { elseSuccessor = fromVectorRec(manager, currentOffset, currentLevel + 1, maxLevel, values, odd.getElseSuccessor(), ddVariableIndices); } else { elseSuccessor = Cudd_ReadZero(manager); } Cudd_Ref(elseSuccessor); // Determine the new then-successor. 
DdNode* thenSuccessor = nullptr; if (odd.getThenOffset() > 0) { thenSuccessor = fromVectorRec(manager, currentOffset, currentLevel + 1, maxLevel, values, odd.getThenSuccessor(), ddVariableIndices); } else { thenSuccessor = Cudd_ReadZero(manager); } Cudd_Ref(thenSuccessor); // Create a node representing ITE(currentVar, thenSuccessor, elseSuccessor); DdNode* result = Cudd_addIthVar(manager, static_cast(ddVariableIndices[currentLevel])); Cudd_Ref(result); DdNode* newResult = Cudd_addIte(manager, result, thenSuccessor, elseSuccessor); Cudd_Ref(newResult); // Dispose of the intermediate results Cudd_RecursiveDeref(manager, result); Cudd_RecursiveDeref(manager, thenSuccessor); Cudd_RecursiveDeref(manager, elseSuccessor); // Before returning, we remove the protection imposed by the previous call to Cudd_Ref. Cudd_Deref(newResult); return newResult; } } template class InternalAdd; } }