diff --git a/src/builder/DftExplorationHeuristic.cpp b/src/builder/DftExplorationHeuristic.cpp index 727c29c37..40d00115f 100644 --- a/src/builder/DftExplorationHeuristic.cpp +++ b/src/builder/DftExplorationHeuristic.cpp @@ -3,85 +3,73 @@ #include "src/utility/macros.h" #include "src/utility/constants.h" #include "src/exceptions/NotImplementedException.h" -#include "src/storage/dft/DFTState.h" - -#include namespace storm { namespace builder { template - DFTExplorationHeuristic::DFTExplorationHeuristic() : skip(true), depth(std::numeric_limits::max()), rate(storm::utility::zero()), exitRate(storm::utility::zero()) { + DFTExplorationHeuristic::DFTExplorationHeuristic(size_t id) : id(id), expand(true), lowerBound(storm::utility::zero()), upperBound(storm::utility::infinity()), depth(0), probability(storm::utility::one()) { // Intentionally left empty } template - bool DFTExplorationHeuristic::isSkip(double approximationThreshold, ApproximationHeuristic heuristic) const { - if (!skip) { - return false; - } - switch (heuristic) { - case ApproximationHeuristic::NONE: - return false; - case ApproximationHeuristic::DEPTH: - return depth > approximationThreshold; - case ApproximationHeuristic::RATERATIO: - // TODO Matthias: implement - STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Heuristic rate ration does not work."); - } + DFTExplorationHeuristic::DFTExplorationHeuristic(size_t id, DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate) : id(id), expand(false), lowerBound(storm::utility::zero()), upperBound(storm::utility::zero()), depth(predecessor.depth + 1) { + STORM_LOG_ASSERT(storm::utility::zero() < exitRate, "Exit rate is 0"); + probability = predecessor.probability * rate/exitRate; } template - void DFTExplorationHeuristic::setNotSkip() { - skip = false; + void DFTExplorationHeuristic::setBounds(ValueType lowerBound, ValueType upperBound) { + this->lowerBound = lowerBound; + this->upperBound = upperBound; } - - template - size_t DFTExplorationHeuristic::getDepth() const { - return depth; + template<> + bool DFTExplorationHeuristicProbability::updateHeuristicValues(DFTExplorationHeuristic const& predecessor, double rate, double exitRate) { + STORM_LOG_ASSERT(exitRate > 0, "Exit rate is 0"); + probability += predecessor.getProbability() * rate/exitRate; + return true; } - template - void DFTExplorationHeuristic::setHeuristicValues(size_t depth, ValueType rate, ValueType exitRate) { - this->depth = depth; - this->rate = rate; - this->exitRate = exitRate; + template<> + bool DFTExplorationHeuristicProbability::updateHeuristicValues(DFTExplorationHeuristic const& predecessor, storm::RationalFunction rate, storm::RationalFunction exitRate) { + STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Heuristic probability does not work for rational functions."); + return false; } - template - double DFTExplorationHeuristic::getPriority() const { - STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Approximation works only for double."); + template<> + double DFTExplorationHeuristicProbability::getPriority() const { + return probability; } template<> - double DFTExplorationHeuristic::getPriority() const { - // TODO Matthias: change according to heuristic - //return rate/exitRate; - return depth; + double DFTExplorationHeuristicProbability::getPriority() const { + STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Heuristic probability does not work for rational functions."); } template<> - double 
DFTExplorationHeuristic::getPriority() const { - STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Approximation works only for double."); - /*std::cout << (rate / exitRate) << " < " << threshold << ": " << (number < threshold) << std::endl; - std::map mapping; - storm::RationalFunction eval(number.evaluate(mapping)); - std::cout << "Evaluated: " << eval << std::endl; - return eval < threshold;*/ + double DFTExplorationHeuristicBoundDifference::getPriority() const { + return upperBound / lowerBound; } - template - bool compareDepth(std::shared_ptr> stateA, std::shared_ptr> stateB) { - return stateA->getPriority() > stateB->getPriority(); + template<> + double DFTExplorationHeuristicBoundDifference::getPriority() const { + STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Heuristic bound difference does not work for rational functions."); } + // Instantiate templates. template class DFTExplorationHeuristic; - template bool compareDepth(std::shared_ptr>, std::shared_ptr>); + template class DFTExplorationHeuristicNone; + template class DFTExplorationHeuristicDepth; + template class DFTExplorationHeuristicProbability; + template class DFTExplorationHeuristicBoundDifference; #ifdef STORM_HAVE_CARL template class DFTExplorationHeuristic; - template bool compareDepth(std::shared_ptr>, std::shared_ptr>); + template class DFTExplorationHeuristicNone; + template class DFTExplorationHeuristicDepth; + template class DFTExplorationHeuristicProbability; + template class DFTExplorationHeuristicBoundDifference; #endif } } diff --git a/src/builder/DftExplorationHeuristic.h b/src/builder/DftExplorationHeuristic.h index a9958443a..8feb0fefa 100644 --- a/src/builder/DftExplorationHeuristic.h +++ b/src/builder/DftExplorationHeuristic.h @@ -2,50 +2,189 @@ #define STORM_BUILDER_DFTEXPLORATIONHEURISTIC_H_ #include -#include namespace storm { - // Forward declaration - namespace storage { - template - class DFTState; - } - namespace builder { /*! * Enum representing the heuristic used for deciding which states to expand. */ - enum class ApproximationHeuristic { NONE, DEPTH, RATERATIO }; + enum class ApproximationHeuristic { NONE, DEPTH, PROBABILITY }; + + /*! + * General super class for approximation heuristics. 
+ */ template class DFTExplorationHeuristic { public: - DFTExplorationHeuristic(); + DFTExplorationHeuristic(size_t id); - void setHeuristicValues(size_t depth, ValueType rate, ValueType exitRate); + DFTExplorationHeuristic(size_t id, DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate); - bool isSkip(double approximationThreshold, ApproximationHeuristic heuristic) const; + void setBounds(ValueType lowerBound, ValueType upperBound); - void setNotSkip(); + virtual bool updateHeuristicValues(DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate) = 0; - size_t getDepth() const; + virtual double getPriority() const = 0; - double getPriority() const; - - private: + virtual bool isSkip(double approximationThreshold) const = 0; + + void markExpand() { + expand = true; + } + + size_t getId() const { + return id; + } + + bool isExpand() { + return expand; + } + + size_t getDepth() const { + return depth; + } + + ValueType getProbability() const { + return probability; + } - bool skip; + ValueType getLowerBound() const { + return lowerBound; + } + + ValueType getUpperBound() const { + return upperBound; + } + + protected: + size_t id; + bool expand; + ValueType lowerBound; + ValueType upperBound; size_t depth; - ValueType rate; - ValueType exitRate; + ValueType probability; + }; + + template + class DFTExplorationHeuristicNone : public DFTExplorationHeuristic { + public: + DFTExplorationHeuristicNone(size_t id) : DFTExplorationHeuristic(id) { + // Intentionally left empty + } + + DFTExplorationHeuristicNone(size_t id, DFTExplorationHeuristicNone const& predecessor, ValueType rate, ValueType exitRate) : DFTExplorationHeuristic(id, predecessor, rate, exitRate) { + // Intentionally left empty + } + + bool updateHeuristicValues(DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate) override { + return false; + } + + double getPriority() const override { + return 0; + } + + bool isSkip(double approximationThreshold) const override { + return false; + } + + bool operator<(DFTExplorationHeuristicNone const& other) const { + return this->id > other.id; + } + }; + + template + class DFTExplorationHeuristicDepth : public DFTExplorationHeuristic { + public: + DFTExplorationHeuristicDepth(size_t id) : DFTExplorationHeuristic(id) { + // Intentionally left empty + } + + DFTExplorationHeuristicDepth(size_t id, DFTExplorationHeuristicDepth const& predecessor, ValueType rate, ValueType exitRate) : DFTExplorationHeuristic(id, predecessor, rate, exitRate) { + // Intentionally left empty + } + + bool updateHeuristicValues(DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate) override { + if (predecessor.getDepth() < this->depth) { + this->depth = predecessor.getDepth(); + return true; + } + return false; + } + + + double getPriority() const override { + return this->depth; + } + + bool isSkip(double approximationThreshold) const override { + return !this->expand && this->depth > approximationThreshold; + } + + bool operator<(DFTExplorationHeuristicDepth const& other) const { + return this->depth > other.depth; + } + }; + + template + class DFTExplorationHeuristicProbability : public DFTExplorationHeuristic { + public: + DFTExplorationHeuristicProbability(size_t id) : DFTExplorationHeuristic(id) { + // Intentionally left empty + } + + DFTExplorationHeuristicProbability(size_t id, DFTExplorationHeuristicProbability const& predecessor, ValueType rate, ValueType exitRate) : DFTExplorationHeuristic(id, predecessor, rate, 
exitRate) { + // Intentionally left empty + } + + bool updateHeuristicValues(DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate) override; + + double getPriority() const override; + + bool isSkip(double approximationThreshold) const override { + return !this->expand && this->getPriority() < approximationThreshold; + } + bool operator<(DFTExplorationHeuristicProbability const& other) const { + return this->getPriority() < other.getPriority(); + } }; template - bool compareDepth(std::shared_ptr> stateA, std::shared_ptr> stateB); + class DFTExplorationHeuristicBoundDifference : public DFTExplorationHeuristic { + public: + DFTExplorationHeuristicBoundDifference(size_t id) : DFTExplorationHeuristic(id) { + // Intentionally left empty + } + + DFTExplorationHeuristicBoundDifference(size_t id, DFTExplorationHeuristicBoundDifference const& predecessor, ValueType rate, ValueType exitRate) : DFTExplorationHeuristic(id, predecessor, rate, exitRate) { + // Intentionally left empty + } + + bool updateHeuristicValues(DFTExplorationHeuristic const& predecessor, ValueType rate, ValueType exitRate) override { + return false; + } + + double getPriority() const override; + + bool isSkip(double approximationThreshold) const override { + return !this->expand && this->getPriority() < approximationThreshold; + } + + bool operator<(DFTExplorationHeuristicBoundDifference const& other) const { + return this->getPriority() < other.getPriority(); + } + + private: + ValueType lowerBound; + ValueType upperBound; + }; + + } } diff --git a/src/builder/DftSmtBuilder.cpp b/src/builder/DftSmtBuilder.cpp index 1d60b2c74..c1226531c 100644 --- a/src/builder/DftSmtBuilder.cpp +++ b/src/builder/DftSmtBuilder.cpp @@ -1,4 +1,4 @@ -#include "src/builder/DFTSMTBuilder.h" +#include "src/builder/DftSmtBuilder.h" #include "src/exceptions/NotImplementedException.h" namespace storm { diff --git a/src/builder/ExplicitDFTModelBuilder.cpp b/src/builder/ExplicitDFTModelBuilder.cpp index 9481e3392..4a2f56b5f 100644 --- a/src/builder/ExplicitDFTModelBuilder.cpp +++ b/src/builder/ExplicitDFTModelBuilder.cpp @@ -58,6 +58,7 @@ namespace storm { STORM_LOG_ASSERT(mStates.getValue(pseudoStatePair.second) >= OFFSET_PSEUDO_STATE, "State is no pseudo state."); STORM_LOG_TRACE("Create pseudo state from bit vector " << pseudoStatePair.second); DFTStatePointer pseudoState = std::make_shared>(pseudoStatePair.second, mDft, *mStateGenerationInfo, newIndex); + pseudoState->construct(); STORM_LOG_ASSERT(pseudoStatePair.second == pseudoState->status(), "Pseudo states do not coincide."); STORM_LOG_TRACE("Explore pseudo state " << mDft.getStateString(pseudoState) << " with id " << pseudoState->getId()); auto exploreResult = exploreStates(pseudoState, rowOffset, transitionMatrixBuilder, tmpMarkovianStates, modelComponents.exitRates); diff --git a/src/builder/ExplicitDFTModelBuilderApprox.cpp b/src/builder/ExplicitDFTModelBuilderApprox.cpp index cb04f3135..93a7b282e 100644 --- a/src/builder/ExplicitDFTModelBuilderApprox.cpp +++ b/src/builder/ExplicitDFTModelBuilderApprox.cpp @@ -3,6 +3,7 @@ #include #include #include +#include "src/utility/bitoperations.h" #include #include "src/settings/modules/DFTSettings.h" #include "src/settings/SettingsManager.h" @@ -31,18 +32,75 @@ namespace storm { generator(dft, *stateGenerationInfo, enableDC, mergeFailedStates), matrixBuilder(!generator.isDeterministicModel()), stateStorage(((dft.stateVectorSize() / 64) + 1) * 64), - explorationQueue([this](StateType idA, StateType idB) { - return 
isPriorityGreater(idA, idB); - }) + // TODO Matthias: make choosable + //explorationQueue(dft.nrElements()+1, 0, 1) + explorationQueue(200, 0, 0.9) { // Intentionally left empty. + // TODO Matthias: remove again + heuristic = storm::builder::ApproximationHeuristic::PROBABILITY; + + // Compute independent subtrees + if (dft.topLevelType() == storm::storage::DFTElementType::OR) { + // We only support this for approximation with top level element OR + for (auto const& child : dft.getGate(dft.getTopLevelIndex())->children()) { + // Consider all children of the top level gate + std::vector isubdft; + if (child->nrParents() > 1 || child->hasOutgoingDependencies()) { + STORM_LOG_TRACE("child " << child->name() << "does not allow modularisation."); + isubdft.clear(); + } else if (dft.isGate(child->id())) { + isubdft = dft.getGate(child->id())->independentSubDft(false); + } else { + STORM_LOG_ASSERT(dft.isBasicElement(child->id()), "Child is no BE."); + if(dft.getBasicElement(child->id())->hasIngoingDependencies()) { + STORM_LOG_TRACE("child " << child->name() << "does not allow modularisation."); + isubdft.clear(); + } else { + isubdft = {child->id()}; + } + } + if(isubdft.empty()) { + subtreeBEs.clear(); + break; + } else { + // Add new independent subtree + std::vector subtree; + for (size_t id : isubdft) { + if (dft.isBasicElement(id)) { + subtree.push_back(id); + } + } + subtreeBEs.push_back(subtree); + } + } + } + if (subtreeBEs.empty()) { + // Consider complete tree + std::vector subtree; + for (size_t id = 0; id < dft.nrElements(); ++id) { + if (dft.isBasicElement(id)) { + subtree.push_back(id); + } + } + subtreeBEs.push_back(subtree); + } + + for (auto tree : subtreeBEs) { + std::stringstream stream; + stream << "Subtree: "; + for (size_t id : tree) { + stream << id << ", "; + } + STORM_LOG_TRACE(stream.str()); + } } template - void ExplicitDFTModelBuilderApprox::buildModel(LabelOptions const& labelOpts, bool firstTime, double approximationThreshold) { + void ExplicitDFTModelBuilderApprox::buildModel(LabelOptions const& labelOpts, size_t iteration, double approximationThreshold) { STORM_LOG_TRACE("Generating DFT state space"); - if (firstTime) { + if (iteration < 1) { // Initialize modelComponents.markovianStates = storm::storage::BitVector(INITIAL_BITVECTOR_SIZE); @@ -75,11 +133,28 @@ namespace storm { STORM_LOG_ASSERT(stateStorage.initialStateIndices.size() == 1, "Only one initial state assumed."); initialStateIndex = stateStorage.initialStateIndices[0]; STORM_LOG_TRACE("Initial state: " << initialStateIndex); - + // Initialize heuristic values for inital state + STORM_LOG_ASSERT(!statesNotExplored.at(initialStateIndex).second, "Heuristic for initial state is already initialized"); + ExplorationHeuristicPointer heuristic = std::make_shared(initialStateIndex); + heuristic->markExpand(); + statesNotExplored[initialStateIndex].second = heuristic; + explorationQueue.push(heuristic); } else { initializeNextIteration(); } + switch (heuristic) { + case storm::builder::ApproximationHeuristic::NONE: + // Do not change anything + approximationThreshold = dft.nrElements()+10; + break; + case storm::builder::ApproximationHeuristic::DEPTH: + approximationThreshold = iteration; + break; + case storm::builder::ApproximationHeuristic::PROBABILITY: + approximationThreshold = 10 * std::pow(2, iteration); + break; + } exploreStateSpace(approximationThreshold); size_t stateSize = stateStorage.getNumberOfStates() + (mergeFailedStates ? 
1 : 0); @@ -94,8 +169,7 @@ namespace storm { STORM_LOG_TRACE("State remapping: " << matrixBuilder.stateRemapping); STORM_LOG_TRACE("Markovian states: " << modelComponents.markovianStates); - STORM_LOG_DEBUG("Generated " << stateSize << " states"); - STORM_LOG_DEBUG("Skipped " << skippedStates.size() << " states"); + STORM_LOG_DEBUG("Model has " << stateSize << " states"); STORM_LOG_DEBUG("Model is " << (generator.isDeterministicModel() ? "deterministic" : "non-deterministic")); // Build transition matrix @@ -117,8 +191,8 @@ namespace storm { // Push skipped states to explore queue // TODO Matthias: remove for (auto const& skippedState : skippedStates) { - statesNotExplored[skippedState.second->getId()] = skippedState.second; - explorationQueue.push(skippedState.second->getId()); + statesNotExplored[skippedState.second.first->getId()] = skippedState.second; + explorationQueue.push(skippedState.second.second); } // Initialize matrix builder again @@ -134,7 +208,8 @@ namespace storm { auto iterSkipped = skippedStates.begin(); size_t skippedBefore = 0; for (size_t i = 0; i < indexRemapping.size(); ++i) { - while (iterSkipped->first <= i) { + while (iterSkipped != skippedStates.end() && iterSkipped->first <= i) { + STORM_LOG_ASSERT(iterSkipped->first < indexRemapping.size(), "Skipped is too high."); ++skippedBefore; ++iterSkipped; } @@ -146,7 +221,7 @@ namespace storm { matrixBuilder.mappingOffset = nrStates; STORM_LOG_TRACE("# expanded states: " << nrExpandedStates); StateType skippedIndex = nrExpandedStates; - std::map skippedStatesNew; + std::map> skippedStatesNew; for (size_t id = 0; id < matrixBuilder.stateRemapping.size(); ++id) { StateType index = matrixBuilder.stateRemapping[id]; auto itFind = skippedStates.find(index); @@ -193,7 +268,7 @@ namespace storm { auto itFind = skippedStates.find(itEntry->getColumn()); if (itFind != skippedStates.end()) { // Set id for skipped states as we remap it later - matrixBuilder.addTransition(matrixBuilder.mappingOffset + itFind->second->getId(), itEntry->getValue()); + matrixBuilder.addTransition(matrixBuilder.mappingOffset + itFind->second.first->getId(), itEntry->getValue()); } else { // Set newly remapped index for expanded states matrixBuilder.addTransition(indexRemapping[itEntry->getColumn()], itEntry->getValue()); @@ -212,13 +287,17 @@ namespace storm { template void ExplicitDFTModelBuilderApprox::exploreStateSpace(double approximationThreshold) { + size_t nrExpandedStates = 0; + size_t nrSkippedStates = 0; // TODO Matthias: do not empty queue every time but break before while (!explorationQueue.empty()) { // Get the first state in the queue - StateType currentId = explorationQueue.popTop(); + ExplorationHeuristicPointer currentExplorationHeuristic = explorationQueue.popTop(); + StateType currentId = currentExplorationHeuristic->getId(); auto itFind = statesNotExplored.find(currentId); STORM_LOG_ASSERT(itFind != statesNotExplored.end(), "Id " << currentId << " not found"); - DFTStatePointer currentState = itFind->second; + DFTStatePointer currentState = itFind->second.first; + STORM_LOG_ASSERT(currentExplorationHeuristic == itFind->second.second, "Exploration heuristics do not match"); STORM_LOG_ASSERT(currentState->getId() == currentId, "Ids do not match"); // Remove it from the list of not explored states statesNotExplored.erase(itFind); @@ -240,18 +319,21 @@ namespace storm { // Try to explore the next state generator.load(currentState); - if (currentState->isSkip(approximationThreshold, heuristic)) { + if (nrExpandedStates > 
approximationThreshold && !currentExplorationHeuristic->isExpand()) { + //if (currentExplorationHeuristic->isSkip(approximationThreshold)) { // Skip the current state + ++nrSkippedStates; STORM_LOG_TRACE("Skip expansion of state: " << dft.getStateString(currentState)); setMarkovian(true); // Add transition to target state with temporary value 0 // TODO Matthias: what to do when there is no unique target state? matrixBuilder.addTransition(failedStateId, storm::utility::zero()); // Remember skipped state - skippedStates[matrixBuilder.getCurrentRowGroup() - 1] = currentState; + skippedStates[matrixBuilder.getCurrentRowGroup() - 1] = std::make_pair(currentState, currentExplorationHeuristic); matrixBuilder.finishRow(); } else { // Explore the current state + ++nrExpandedStates; storm::generator::StateBehavior behavior = generator.expand(std::bind(&ExplicitDFTModelBuilderApprox::getOrAddStateIndex, this, std::placeholders::_1)); STORM_LOG_ASSERT(!behavior.empty(), "Behavior is empty."); setMarkovian(behavior.begin()->isMarkovian()); @@ -260,18 +342,40 @@ namespace storm { for (auto const& choice : behavior) { // Add the probabilistic behavior to the matrix. for (auto const& stateProbabilityPair : choice) { + STORM_LOG_ASSERT(!storm::utility::isZero(stateProbabilityPair.second), "Probability zero."); // Set transition to state id + offset. This helps in only remapping all previously skipped states. matrixBuilder.addTransition(matrixBuilder.mappingOffset + stateProbabilityPair.first, stateProbabilityPair.second); - // TODO Matthias: set heuristic values here + // Set heuristic values for reached states + auto iter = statesNotExplored.find(stateProbabilityPair.first); + if (iter != statesNotExplored.end()) { + // Update heuristic values + DFTStatePointer state = iter->second.first; + if (!iter->second.second) { + // Initialize heuristic values + ExplorationHeuristicPointer heuristic = std::make_shared(stateProbabilityPair.first, *currentExplorationHeuristic, stateProbabilityPair.second, choice.getTotalMass()); + iter->second.second = heuristic; + if (state->hasFailed(dft.getTopLevelIndex()) || state->isFailsafe(dft.getTopLevelIndex()) || state->nrFailableDependencies() > 0 || (state->nrFailableDependencies() == 0 && state->nrFailableBEs() == 0)) { + // Do not skip absorbing state or if reached by dependencies + iter->second.second->markExpand(); + } + explorationQueue.push(heuristic); + } else if (!iter->second.second->isExpand()) { + double oldPriority = iter->second.second->getPriority(); + if (iter->second.second->updateHeuristicValues(*currentExplorationHeuristic, stateProbabilityPair.second, choice.getTotalMass())) { + // Update priority queue + explorationQueue.update(iter->second.second, oldPriority); + } + } + } } matrixBuilder.finishRow(); } } - - // Update priority queue - // TODO Matthias: only when necessary - explorationQueue.fix(); } // end exploration + + STORM_LOG_INFO("Expanded " << nrExpandedStates << " states"); + STORM_LOG_INFO("Skipped " << nrSkippedStates << " states"); + STORM_LOG_ASSERT(nrSkippedStates == skippedStates.size(), "Nr skipped states is wrong"); } template @@ -328,11 +432,7 @@ namespace storm { template std::shared_ptr> ExplicitDFTModelBuilderApprox::getModelApproximation(bool lowerBound) { // TODO Matthias: handle case with no skipped states - if (lowerBound) { - changeMatrixLowerBound(modelComponents.transitionMatrix); - } else { - changeMatrixUpperBound(modelComponents.transitionMatrix); - } + changeMatrixBound(modelComponents.transitionMatrix, lowerBound); 
return createModel(true); } @@ -388,42 +488,148 @@ namespace storm { } template - void ExplicitDFTModelBuilderApprox::changeMatrixLowerBound(storm::storage::SparseMatrix & matrix) const { + void ExplicitDFTModelBuilderApprox::changeMatrixBound(storm::storage::SparseMatrix & matrix, bool lowerBound) const { // Set lower bound for skipped states for (auto it = skippedStates.begin(); it != skippedStates.end(); ++it) { auto matrixEntry = matrix.getRow(it->first, 0).begin(); STORM_LOG_ASSERT(matrixEntry->getColumn() == failedStateId, "Transition has wrong target state."); - // Get the lower bound by considering the failure of all possible BEs - ValueType newRate = storm::utility::zero(); - for (size_t index = 0; index < it->second->nrFailableBEs(); ++index) { - newRate += it->second->getFailableBERate(index); + STORM_LOG_ASSERT(!it->second.first->isPseudoState(), "State is still pseudo state."); + + ExplorationHeuristicPointer heuristic = it->second.second; + if (storm::utility::isZero(heuristic->getUpperBound())) { + // Initialize bounds + ValueType lowerBound = getLowerBound(it->second.first); + ValueType upperBound = getUpperBound(it->second.first); + heuristic->setBounds(lowerBound, upperBound); } - for (size_t index = 0; index < it->second->nrNotFailableBEs(); ++index) { - newRate += it->second->getNotFailableBERate(index); + + // Change bound + if (lowerBound) { + matrixEntry->setValue(it->second.second->getLowerBound()); + } else { + matrixEntry->setValue(it->second.second->getUpperBound()); } - matrixEntry->setValue(newRate); } } template - void ExplicitDFTModelBuilderApprox::changeMatrixUpperBound(storm::storage::SparseMatrix & matrix) const { - // Set uppper bound for skipped states - for (auto it = skippedStates.begin(); it != skippedStates.end(); ++it) { - auto matrixEntry = matrix.getRow(it->first, 0).begin(); - STORM_LOG_ASSERT(matrixEntry->getColumn() == failedStateId, "Transition has wrong target state."); - // Get the upper bound by considering the failure of all BEs - // The used formula for the rate is 1/( 1/a + 1/b + ...) 
- // TODO Matthias: improve by using closed formula for AND of all BEs - ValueType newRate = storm::utility::zero(); - for (size_t index = 0; index < it->second->nrFailableBEs(); ++index) { - newRate += storm::utility::one() / it->second->getFailableBERate(index); + ValueType ExplicitDFTModelBuilderApprox::getLowerBound(DFTStatePointer const& state) const { + // Get the lower bound by considering the failure of all possible BEs + ValueType lowerBound = storm::utility::zero(); + for (size_t index = 0; index < state->nrFailableBEs(); ++index) { + lowerBound += state->getFailableBERate(index); + } + return lowerBound; + } + + template + ValueType ExplicitDFTModelBuilderApprox::getUpperBound(DFTStatePointer const& state) const { + // Get the upper bound by considering the failure of all BEs + ValueType upperBound = storm::utility::one(); + ValueType rateSum = storm::utility::zero(); + + // Compute for each independent subtree + for (std::vector const& subtree : subtreeBEs) { + // Get all possible rates + std::vector rates; + storm::storage::BitVector coldBEs(subtree.size(), false); + for (size_t i = 0; i < subtree.size(); ++i) { + size_t id = subtree[i]; + if (state->isOperational(id)) { + // Get BE rate + ValueType rate = state->getBERate(id); + if (storm::utility::isZero(rate)) { + // Get active failure rate for cold BE + rate = dft.getBasicElement(id)->activeFailureRate(); + // Mark BE as cold + coldBEs.set(i, true); + } + rates.push_back(rate); + rateSum += rate; + } + } + + STORM_LOG_ASSERT(rates.size() > 0, "No rates failable"); + + // Sort rates + std::sort(rates.begin(), rates.end()); + std::vector rateCount(rates.size(), 0); + size_t lastIndex = 0; + for (size_t i = 0; i < rates.size(); ++i) { + if (rates[lastIndex] != rates[i]) { + lastIndex = i; + } + ++rateCount[lastIndex]; } - for (size_t index = 0; index < it->second->nrNotFailableBEs(); ++index) { - newRate += storm::utility::one() / it->second->getNotFailableBERate(index); + + for (size_t i = 0; i < rates.size(); ++i) { + // Cold BEs cannot fail in the first step + if (!coldBEs.get(i) && rateCount[i] > 0) { + // Compute AND MTTF of subtree without current rate and scale with current rate + upperBound += rates.back() * rateCount[i] * computeMTTFAnd(rates, rates.size() - 1); + // Swap here to avoid swapping back + std::iter_swap(rates.begin() + i, rates.end() - 1); + } } - newRate = storm::utility::one() / newRate; - matrixEntry->setValue(newRate); } + + STORM_LOG_TRACE("Upper bound is " << (rateSum / upperBound) << " for state " << state->getId()); + STORM_LOG_ASSERT(!storm::utility::isZero(upperBound), "Upper bound is 0"); + STORM_LOG_ASSERT(!storm::utility::isZero(rateSum), "State is absorbing"); + return rateSum / upperBound; + } + + template + ValueType ExplicitDFTModelBuilderApprox::computeMTTFAnd(std::vector const& rates, size_t size) const { + ValueType result = storm::utility::zero(); + if (size == 0) { + return result; + } + + // TODO Matthias: works only for <64 BEs! 
+ // WARNING: this code produces wrong results for more than 32 BEs + /*for (size_t i = 1; i < 4 && i <= rates.size(); ++i) { + size_t permutation = smallestIntWithNBitsSet(static_cast(i)); + ValueType sum = storm::utility::zero(); + do { + ValueType permResult = storm::utility::zero(); + for(size_t j = 0; j < rates.size(); ++j) { + if(permutation & static_cast(1 << static_cast(j))) { + // WARNING: if the first bit is set, it also recognizes the 32nd bit as set + // TODO: fix + permResult += rates[j]; + } + } + permutation = nextBitPermutation(permutation); + STORM_LOG_ASSERT(!storm::utility::isZero(permResult), "PermResult is 0"); + sum += storm::utility::one() / permResult; + } while(permutation < (static_cast(1) << rates.size()) && permutation != 0); + if (i % 2 == 0) { + result -= sum; + } else { + result += sum; + } + }*/ + + // Compute result with permutations of size <= 3 + ValueType one = storm::utility::one(); + for (size_t i1 = 0; i1 < size; ++i1) { + // + 1/a + ValueType sum = rates[i1]; + result += one / sum; + for (size_t i2 = 0; i2 < i1; ++i2) { + // - 1/(a+b) + ValueType sum2 = sum + rates[i2]; + result -= one / sum2; + for (size_t i3 = 0; i3 < i2; ++i3) { + // + 1/(a+b+c) + result += one / (sum2 + rates[i3]); + } + } + } + + STORM_LOG_ASSERT(!storm::utility::isZero(result), "UpperBound is 0"); + return result; } template @@ -446,14 +652,14 @@ namespace storm { // Check if state is pseudo state // If state is explored already the possible pseudo state was already constructed auto iter = statesNotExplored.find(stateId); - if (iter != statesNotExplored.end() && iter->second->isPseudoState()) { + if (iter != statesNotExplored.end() && iter->second.first->isPseudoState()) { // Create pseudo state now - STORM_LOG_ASSERT(iter->second->getId() == stateId, "Ids do not match."); - STORM_LOG_ASSERT(iter->second->status() == state->status(), "Pseudo states do not coincide."); + STORM_LOG_ASSERT(iter->second.first->getId() == stateId, "Ids do not match."); + STORM_LOG_ASSERT(iter->second.first->status() == state->status(), "Pseudo states do not coincide."); state->setId(stateId); // Update mapping to map to concrete state now - statesNotExplored[stateId] = state; - // TODO Matthias: copy explorationHeuristic + // TODO Matthias: just change pointer? + statesNotExplored[stateId] = std::make_pair(state, iter->second.second); // We do not push the new state on the exploration queue as the pseudo state was already pushed STORM_LOG_TRACE("Created pseudo state " << dft.getStateString(state)); } @@ -466,8 +672,8 @@ namespace storm { stateId = stateStorage.stateToId.findOrAdd(state->status(), state->getId()); STORM_LOG_ASSERT(stateId == state->getId(), "Ids do not match."); // Insert state as not yet explored - statesNotExplored[stateId] = state; - explorationQueue.push(stateId); + ExplorationHeuristicPointer nullHeuristic; + statesNotExplored[stateId] = std::make_pair(state, nullHeuristic); // Reserve one slot for the new state in the remapping matrixBuilder.stateRemapping.push_back(0); STORM_LOG_TRACE("New " << (state->isPseudoState() ? 
"pseudo" : "concrete") << " state: " << dft.getStateString(state)); @@ -485,9 +691,11 @@ namespace storm { } template - bool ExplicitDFTModelBuilderApprox::isPriorityGreater(StateType idA, StateType idB) const { - // TODO Matthias: compare directly and according to heuristic - return storm::builder::compareDepth(statesNotExplored.at(idA), statesNotExplored.at(idB)); + void ExplicitDFTModelBuilderApprox::printNotExplored() const { + std::cout << "states not explored:" << std::endl; + for (auto it : statesNotExplored) { + std::cout << it.first << " -> " << dft.getStateString(it.second.first) << std::endl; + } } diff --git a/src/builder/ExplicitDFTModelBuilderApprox.h b/src/builder/ExplicitDFTModelBuilderApprox.h index 5e841fddc..933cd3277 100644 --- a/src/builder/ExplicitDFTModelBuilderApprox.h +++ b/src/builder/ExplicitDFTModelBuilderApprox.h @@ -10,7 +10,7 @@ #include "src/storage/sparse/StateStorage.h" #include "src/storage/dft/DFT.h" #include "src/storage/dft/SymmetricUnits.h" -#include "src/storage/DynamicPriorityQueue.h" +#include "src/storage/BucketPriorityQueue.h" #include #include #include @@ -26,11 +26,10 @@ namespace storm { template class ExplicitDFTModelBuilderApprox { - using DFTElementPointer = std::shared_ptr>; - using DFTElementCPointer = std::shared_ptr const>; - using DFTGatePointer = std::shared_ptr>; using DFTStatePointer = std::shared_ptr>; - using DFTRestrictionPointer = std::shared_ptr>; + // TODO Matthias: make choosable + using ExplorationHeuristic = DFTExplorationHeuristicProbability; + using ExplorationHeuristicPointer = std::shared_ptr; // A structure holding the individual components of a model. @@ -159,10 +158,10 @@ namespace storm { * Build model from DFT. * * @param labelOpts Options for labeling. - * @param firstTime Flag indicating if the model is built for the first time or rebuilt. + * @param iteration Current number of iteration. * @param approximationThreshold Threshold determining when to skip exploring states. */ - void buildModel(LabelOptions const& labelOpts, bool firstTime, double approximationThreshold = 0.0); + void buildModel(LabelOptions const& labelOpts, size_t iteration, double approximationThreshold = 0.0); /*! * Get the built model. @@ -218,18 +217,36 @@ namespace storm { void setMarkovian(bool markovian); /** - * Change matrix to reflect the lower approximation bound. + * Change matrix to reflect the lower or upper approximation bound. * - * @param matrix Matrix to change. The change are reflected here. + * @param matrix Matrix to change. The change are reflected here. + * @param lowerBound Flag indicating if the lower bound should be used. Otherwise the upper bound is used. */ - void changeMatrixLowerBound(storm::storage::SparseMatrix & matrix) const; + void changeMatrixBound(storm::storage::SparseMatrix & matrix, bool lowerBound) const; /*! - * Change matrix to reflect the upper approximation bound. + * Get lower bound approximation for state. * - * @param matrix Matrix to change. The change are reflected here. + * @return Lower bound approximation. */ - void changeMatrixUpperBound(storm::storage::SparseMatrix & matrix) const; + ValueType getLowerBound(DFTStatePointer const& state) const; + + /*! + * Get upper bound approximation for state. + * + * @return Upper bound approximation. + */ + ValueType getUpperBound(DFTStatePointer const& state) const; + + /*! + * Compute the MTTF of an AND gate via a closed formula. + * The used formula is 1/( 1/a + 1/b + 1/c + ... - 1/(a+b) - 1/(a+c) - ... + 1/(a+b+c) + ... - ...) 
+ * + * @param rates List of rates of children of AND. + * @param size Only indices < size are considered in the vector. + * @return MTTF. + */ + ValueType computeMTTFAnd(std::vector const& rates, size_t size) const; /*! * Compares the priority of two states. @@ -241,6 +258,8 @@ namespace storm { */ bool isPriorityGreater(StateType idA, StateType idB) const; + void printNotExplored() const; + /*! * Create the model model from the model components. * @@ -294,16 +313,20 @@ namespace storm { storm::storage::sparse::StateStorage stateStorage; // A priority queue of states that still need to be explored. - storm::storage::DynamicPriorityQueue, std::function> explorationQueue; + storm::storage::BucketPriorityQueue explorationQueue; + //storm::storage::DynamicPriorityQueue, std::function> explorationQueue; - // A mapping of not yet explored states from the id to the state object. - std::map statesNotExplored; + // A mapping of not yet explored states from the id to the tuple (state object, heuristic values). + std::map> statesNotExplored; // Holds all skipped states which were not yet expanded. More concretely it is a mapping from matrix indices // to the corresponding skipped states. // Notice that we need an ordered map here to easily iterate in increasing order over state ids. // TODO remove again - std::map skippedStates; + std::map> skippedStates; + + // List of independent subtrees and the BEs contained in them. + std::vector> subtreeBEs; }; } diff --git a/src/generator/DftNextStateGenerator.cpp b/src/generator/DftNextStateGenerator.cpp index e61e8e331..69fca22b4 100644 --- a/src/generator/DftNextStateGenerator.cpp +++ b/src/generator/DftNextStateGenerator.cpp @@ -20,7 +20,6 @@ namespace storm { template std::vector DftNextStateGenerator::getInitialStates(StateToIdCallback const& stateToIdCallback) { DFTStatePointer initialState = std::make_shared>(mDft, mStateGenerationInfo, 0); - initialState->setHeuristicValues(0, storm::utility::zero(), storm::utility::zero()); // Register initial state StateType id = stateToIdCallback(initialState); @@ -158,10 +157,8 @@ namespace storm { ValueType remainingProbability = storm::utility::one() - probability; choice.addProbability(unsuccessfulStateId, remainingProbability); STORM_LOG_TRACE("Added transition to " << unsuccessfulStateId << " with remaining probability " << remainingProbability); - unsuccessfulState->setHeuristicValues(state, remainingProbability, storm::utility::one()); } result.addChoice(std::move(choice)); - newState->setHeuristicValues(state, probability, storm::utility::one()); } else { // Failure is due to "normal" BE failure // Set failure rate according to activation @@ -174,7 +171,6 @@ namespace storm { STORM_LOG_ASSERT(!storm::utility::isZero(rate), "Rate is 0."); choice.addProbability(newStateId, rate); STORM_LOG_TRACE("Added transition to " << newStateId << " with " << (isActive ? 
"active" : "passive") << " failure rate " << rate); - newState->setHeuristicValues(state, rate, choice.getTotalMass()); } ++currentFailable; diff --git a/src/modelchecker/dft/DFTModelChecker.cpp b/src/modelchecker/dft/DFTModelChecker.cpp index 717ace4c3..5eee17e3e 100644 --- a/src/modelchecker/dft/DFTModelChecker.cpp +++ b/src/modelchecker/dft/DFTModelChecker.cpp @@ -17,13 +17,13 @@ namespace storm { template void DFTModelChecker::check(storm::storage::DFT const& origDft, std::shared_ptr const& formula, bool symred, bool allowModularisation, bool enableDC, double approximationError) { // Initialize - this->buildingTime = std::chrono::duration::zero(); this->explorationTime = std::chrono::duration::zero(); + this->buildingTime = std::chrono::duration::zero(); this->bisimulationTime = std::chrono::duration::zero(); this->modelCheckingTime = std::chrono::duration::zero(); this->totalTime = std::chrono::duration::zero(); this->approximationError = approximationError; - std::chrono::high_resolution_clock::time_point totalStart = std::chrono::high_resolution_clock::now(); + totalStart = std::chrono::high_resolution_clock::now(); // Optimizing DFT storm::storage::DFT dft = origDft.optimize(); @@ -100,6 +100,8 @@ namespace storm { ValueType result = storm::utility::zero(); int limK = invResults ? -1 : nrM+1; int chK = invResults ? -1 : 1; + // WARNING: there is a bug for computing permutations with more than 32 elements + STORM_LOG_ASSERT(res.size() < 32, "Permutations work only for < 32 elements"); for(int cK = nrK; cK != limK; cK += chK ) { STORM_LOG_ASSERT(cK >= 0, "ck negative."); size_t permutation = smallestIntWithNBitsSet(static_cast(cK)); @@ -132,7 +134,7 @@ namespace storm { template typename DFTModelChecker::dft_result DFTModelChecker::checkDFT(storm::storage::DFT const& dft, std::shared_ptr const& formula, bool symred, bool enableDC, double approximationError) { - std::chrono::high_resolution_clock::time_point buildingStart = std::chrono::high_resolution_clock::now(); + std::chrono::high_resolution_clock::time_point checkingStart = std::chrono::high_resolution_clock::now(); // Find symmetries std::map>> emptySymmetry; @@ -143,35 +145,38 @@ namespace storm { STORM_LOG_INFO("Found " << symmetries.groups.size() << " symmetries."); STORM_LOG_TRACE("Symmetries: " << std::endl << symmetries); } - std::chrono::high_resolution_clock::time_point buildingEnd = std::chrono::high_resolution_clock::now(); - buildingTime += buildingEnd - buildingStart; if (approximationError > 0.0) { // Comparator for checking the error of the approximation storm::utility::ConstantsComparator comparator; // Build approximate Markov Automata for lower and upper bound - double currentApproximationError = approximationError; approximation_result approxResult = std::make_pair(storm::utility::zero(), storm::utility::zero()); - std::chrono::high_resolution_clock::time_point explorationStart; std::shared_ptr> model; storm::builder::ExplicitDFTModelBuilderApprox builder(dft, symmetries, enableDC); typename storm::builder::ExplicitDFTModelBuilderApprox::LabelOptions labeloptions; // TODO initialize this with the formula + bool probabilityFormula = formula->isProbabilityOperatorFormula(); + STORM_LOG_ASSERT((formula->isTimeOperatorFormula() && !probabilityFormula) || (!formula->isTimeOperatorFormula() && probabilityFormula), "Probability formula not initialized correctly"); size_t iteration = 0; do { // Iteratively build finer models - explorationStart = std::chrono::high_resolution_clock::now(); + 
std::chrono::high_resolution_clock::time_point explorationStart = std::chrono::high_resolution_clock::now(); STORM_LOG_INFO("Building model..."); // TODO Matthias refine model using existing model and MC results - currentApproximationError = pow(0.1, iteration) * approximationError; - builder.buildModel(labeloptions, iteration < 1, iteration); + builder.buildModel(labeloptions, iteration, approximationError); + std::chrono::high_resolution_clock::time_point explorationEnd = std::chrono::high_resolution_clock::now(); + explorationTime = explorationEnd - explorationStart; // TODO Matthias: possible to do bisimulation on approximated model and not on concrete one? // Build model for lower bound STORM_LOG_INFO("Getting model for lower bound..."); - model = builder.getModelApproximation(true); - explorationTime += std::chrono::high_resolution_clock::now() - explorationStart; + model = builder.getModelApproximation(probabilityFormula ? false : true); + // We only output the info from the lower bound as the info for the upper bound is the same + STORM_LOG_INFO("No. states: " << model->getNumberOfStates()); + STORM_LOG_INFO("No. transitions: " << model->getNumberOfTransitions()); + buildingTime += std::chrono::high_resolution_clock::now() - explorationEnd; + // Check lower bound std::unique_ptr result = checkModel(model, formula); result->filter(storm::modelchecker::ExplicitQualitativeCheckResult(model->getInitialStates())); @@ -181,9 +186,9 @@ namespace storm { // Build model for upper bound STORM_LOG_INFO("Getting model for upper bound..."); - explorationStart = std::chrono::high_resolution_clock::now(); - model = builder.getModelApproximation(false); - explorationTime += std::chrono::high_resolution_clock::now() - explorationStart; + explorationEnd = std::chrono::high_resolution_clock::now(); + model = builder.getModelApproximation(probabilityFormula ? true : false); + buildingTime += std::chrono::high_resolution_clock::now() - explorationEnd; // Check upper bound result = checkModel(model, formula); result->filter(storm::modelchecker::ExplicitQualitativeCheckResult(model->getInitialStates())); @@ -193,8 +198,10 @@ namespace storm { ++iteration; STORM_LOG_INFO("Result after iteration " << iteration << ": (" << std::setprecision(10) << approxResult.first << ", " << approxResult.second << ")"); + totalTime = std::chrono::high_resolution_clock::now() - totalStart; + printTimings(); STORM_LOG_THROW(!storm::utility::isInfinity(approxResult.first) && !storm::utility::isInfinity(approxResult.second), storm::exceptions::NotSupportedException, "Approximation does not work if result might be infinity."); - } while (!isApproximationSufficient(approxResult.first, approxResult.second, approximationError)); + } while (!isApproximationSufficient(approxResult.first, approxResult.second, approximationError, probabilityFormula)); STORM_LOG_INFO("Finished approximation after " << iteration << " iteration" << (iteration > 1 ? "s." 
: ".")); return approxResult; @@ -203,10 +210,10 @@ namespace storm { STORM_LOG_INFO("Building Model..."); std::shared_ptr> model; // TODO Matthias: use only one builder if everything works again - if (approximationError >= 0.0) { + if (storm::settings::getModule().isApproximationErrorSet()) { storm::builder::ExplicitDFTModelBuilderApprox builder(dft, symmetries, enableDC); typename storm::builder::ExplicitDFTModelBuilderApprox::LabelOptions labeloptions; // TODO initialize this with the formula - builder.buildModel(labeloptions, true); + builder.buildModel(labeloptions, 0); model = builder.getModel(); } else { storm::builder::ExplicitDFTModelBuilder builder(dft, symmetries, enableDC); @@ -216,7 +223,7 @@ namespace storm { //model->printModelInformationToStream(std::cout); STORM_LOG_INFO("No. states (Explored): " << model->getNumberOfStates()); STORM_LOG_INFO("No. transitions (Explored): " << model->getNumberOfTransitions()); - explorationTime += std::chrono::high_resolution_clock::now() - buildingEnd; + explorationTime += std::chrono::high_resolution_clock::now() - checkingStart; // Model checking std::unique_ptr result = checkModel(model, formula); @@ -248,20 +255,25 @@ namespace storm { } template - bool DFTModelChecker::isApproximationSufficient(ValueType lowerBound, ValueType upperBound, double approximationError) { + bool DFTModelChecker::isApproximationSufficient(ValueType lowerBound, ValueType upperBound, double approximationError, bool relative) { STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Approximation works only for double."); } template<> - bool DFTModelChecker::isApproximationSufficient(double lowerBound, double upperBound, double approximationError) { - return upperBound - lowerBound <= approximationError * (lowerBound + upperBound) / 2; + bool DFTModelChecker::isApproximationSufficient(double lowerBound, double upperBound, double approximationError, bool relative) { + STORM_LOG_THROW(!std::isnan(lowerBound) && !std::isnan(upperBound), storm::exceptions::NotSupportedException, "Approximation does not work if result is NaN."); + if (relative) { + return upperBound - lowerBound <= approximationError; + } else { + return upperBound - lowerBound <= approximationError * (lowerBound + upperBound) / 2; + } } template void DFTModelChecker::printTimings(std::ostream& os) { os << "Times:" << std::endl; - os << "Building:\t" << buildingTime.count() << std::endl; os << "Exploration:\t" << explorationTime.count() << std::endl; + os << "Building:\t" << buildingTime.count() << std::endl; os << "Bisimulation:\t" << bisimulationTime.count() << std::endl; os << "Modelchecking:\t" << modelCheckingTime.count() << std::endl; os << "Total:\t\t" << totalTime.count() << std::endl; diff --git a/src/modelchecker/dft/DFTModelChecker.h b/src/modelchecker/dft/DFTModelChecker.h index 83df59bfa..543bf97fd 100644 --- a/src/modelchecker/dft/DFTModelChecker.h +++ b/src/modelchecker/dft/DFTModelChecker.h @@ -61,6 +61,7 @@ namespace storm { std::chrono::duration bisimulationTime = std::chrono::duration::zero(); std::chrono::duration modelCheckingTime = std::chrono::duration::zero(); std::chrono::duration totalTime = std::chrono::duration::zero(); + std::chrono::high_resolution_clock::time_point totalStart; // Model checking result dft_result checkResult; @@ -107,15 +108,17 @@ namespace storm { /*! * Checks if the computed approximation is sufficient, i.e. - * upperBound - lowerBound <= approximationError * mean(upperBound, lowerBound). 
+ * upperBound - lowerBound <= approximationError * mean(lowerBound, upperBound). * * @param lowerBound The lower bound on the result. * @param upperBound The upper bound on the result. * @param approximationError The allowed error for approximating. + * @param relative Flag indicating if the error should be relative to 1 or + to the mean of lower and upper bound. * * @return True, if the approximation is sufficient. */ - bool isApproximationSufficient(ValueType lowerBound, ValueType upperBound, double approximationError); + bool isApproximationSufficient(ValueType lowerBound, ValueType upperBound, double approximationError, bool relative); }; } diff --git a/src/settings/modules/DFTSettings.cpp b/src/settings/modules/DFTSettings.cpp index 0dcb49f1d..ab707b5a4 100644 --- a/src/settings/modules/DFTSettings.cpp +++ b/src/settings/modules/DFTSettings.cpp @@ -40,7 +40,7 @@ namespace storm { this->addOption(storm::settings::OptionBuilder(moduleName, modularisationOptionName, false, "Use modularisation (not applicable for expected time).").build()); this->addOption(storm::settings::OptionBuilder(moduleName, disableDCOptionName, false, "Disable Dont Care propagation.").build()); this->addOption(storm::settings::OptionBuilder(moduleName, approximationErrorOptionName, false, "Approximation error allowed.").setShortName(approximationErrorOptionShortName).addArgument(storm::settings::ArgumentBuilder::createDoubleArgument("error", "The relative approximation error to use.").addValidationFunctionDouble(storm::settings::ArgumentValidators::doubleGreaterValidatorIncluding(0.0)).build()).build()); - this->addOption(storm::settings::OptionBuilder(moduleName, approximationHeuristicOptionName, false, "Set the heuristic used for approximation.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("heuristic", "Sets which heuristic is used for approximation. Must be in {depth, rateratio}. Default is").setDefaultValueString("depth").addValidationFunctionString(storm::settings::ArgumentValidators::stringInListValidator({"depth", "rateratio"})).build()).build()); + this->addOption(storm::settings::OptionBuilder(moduleName, approximationHeuristicOptionName, false, "Set the heuristic used for approximation.").addArgument(storm::settings::ArgumentBuilder::createStringArgument("heuristic", "Sets which heuristic is used for approximation. Must be in {depth, probability}. 
Default is").setDefaultValueString("depth").addValidationFunctionString(storm::settings::ArgumentValidators::stringInListValidator({"depth", "rateratio"})).build()).build()); this->addOption(storm::settings::OptionBuilder(moduleName, propExpectedTimeOptionName, false, "Compute expected time of system failure.").setShortName(propExpectedTimeOptionShortName).build()); this->addOption(storm::settings::OptionBuilder(moduleName, propProbabilityOptionName, false, "Compute probability of system failure.").build()); this->addOption(storm::settings::OptionBuilder(moduleName, propTimeBoundOptionName, false, "Compute probability of system failure up to given timebound.").addArgument(storm::settings::ArgumentBuilder::createDoubleArgument("time", "The timebound to use.").addValidationFunctionDouble(storm::settings::ArgumentValidators::doubleGreaterValidatorExcluding(0.0)).build()).build()); @@ -87,8 +87,8 @@ namespace storm { std::string heuristicAsString = this->getOption(approximationHeuristicOptionName).getArgumentByName("heuristic").getValueAsString(); if (heuristicAsString == "depth") { return storm::builder::ApproximationHeuristic::DEPTH; - } else if (heuristicAsString == "rateratio") { - return storm::builder::ApproximationHeuristic::RATERATIO; + } else if (heuristicAsString == "probability") { + return storm::builder::ApproximationHeuristic::PROBABILITY; } STORM_LOG_THROW(false, storm::exceptions::IllegalArgumentValueException, "Illegal value '" << heuristicAsString << "' set as heuristic for approximation."); } diff --git a/src/storage/BucketPriorityQueue.cpp b/src/storage/BucketPriorityQueue.cpp new file mode 100644 index 000000000..224b14cf6 --- /dev/null +++ b/src/storage/BucketPriorityQueue.cpp @@ -0,0 +1,203 @@ +#include "src/storage/BucketPriorityQueue.h" +#include "src/utility/macros.h" +#include "src/adapters/CarlAdapter.h" + +#include + +namespace storm { + namespace storage { + + template + BucketPriorityQueue::BucketPriorityQueue(size_t nrBuckets, double lowerValue, double ratio) : lowerValue(lowerValue), logBase(std::log(ratio)), nrBuckets(nrBuckets), nrUnsortedItems(0), buckets(nrBuckets), currentBucket(nrBuckets) { + compare = ([this](HeuristicPointer a, HeuristicPointer b) { + return *a < *b; + }); + } + + template + void BucketPriorityQueue::fix() { + if (currentBucket < nrBuckets && nrUnsortedItems > buckets[currentBucket].size() / 10) { + // Fix current bucket + std::make_heap(buckets[currentBucket].begin(), buckets[currentBucket].end(), compare); + nrUnsortedItems = 0; + } + } + + template + bool BucketPriorityQueue::empty() const { + return currentBucket == nrBuckets && immediateBucket.empty(); + } + + template + std::size_t BucketPriorityQueue::size() const { + size_t size = immediateBucket.size(); + for (size_t i = currentBucket; currentBucket < nrBuckets; ++i) { + size += buckets[i].size(); + } + return size; + } + + template + typename BucketPriorityQueue::HeuristicPointer const& BucketPriorityQueue::top() const { + if (!immediateBucket.empty()) { + return immediateBucket.back(); + } + STORM_LOG_ASSERT(!empty(), "BucketPriorityQueue is empty"); + return buckets[currentBucket].front(); + } + + template + void BucketPriorityQueue::push(HeuristicPointer const& item) { + if (item->isExpand()) { + immediateBucket.push_back(item); + return; + } + size_t bucket = getBucket(item->getPriority()); + if (bucket < currentBucket) { + currentBucket = bucket; + nrUnsortedItems = 0; + } + buckets[bucket].push_back(item); + if (bucket == currentBucket) { + // Insert in first bucket + if 
(AUTOSORT) { + std::push_heap(buckets[currentBucket].begin(), buckets[currentBucket].end(), compare); + } else { + ++nrUnsortedItems; + } + } + } + + template + void BucketPriorityQueue::update(HeuristicPointer const& item, double oldPriority) { + STORM_LOG_ASSERT(!item->isExpand(), "Item is marked for expansion"); + size_t newBucket = getBucket(item->getPriority()); + size_t oldBucket = getBucket(oldPriority); + + if (oldBucket == newBucket) { + if (currentBucket < newBucket) { + // No change as the bucket is not sorted yet + } else { + if (AUTOSORT) { + // Sort first bucket + fix(); + } else { + ++nrUnsortedItems; + } + } + } else { + // Move to new bucket + STORM_LOG_ASSERT(newBucket < oldBucket, "Will update to higher bucket"); + if (newBucket < currentBucket) { + currentBucket = newBucket; + nrUnsortedItems = 0; + } + // Remove old entry by swap-and-pop + if (buckets[oldBucket].size() >= 2) { + // Find old index by linear search + // Notice: using a map to rememeber index was not efficient + size_t oldIndex = 0; + for ( ; oldIndex < buckets[oldBucket].size(); ++oldIndex) { + if (buckets[oldBucket][oldIndex]->getId() == item->getId()) { + break; + } + } + STORM_LOG_ASSERT(oldIndex < buckets[oldBucket].size(), "Id " << item->getId() << " not found"); + std::iter_swap(buckets[oldBucket].begin() + oldIndex, buckets[oldBucket].end() - 1); + } + buckets[oldBucket].pop_back(); + // Insert new element + buckets[newBucket].push_back(item); + if (newBucket == currentBucket) { + if (AUTOSORT) { + // Sort in first bucket + std::push_heap(buckets[currentBucket].begin(), buckets[currentBucket].end(), compare); + } else { + ++nrUnsortedItems; + } + } + } + } + + + template + void BucketPriorityQueue::pop() { + if (!immediateBucket.empty()) { + immediateBucket.pop_back(); + return; + } + STORM_LOG_ASSERT(!empty(), "BucketPriorityQueue is empty"); + std::pop_heap(buckets[currentBucket].begin(), buckets[currentBucket].end(), compare); + buckets[currentBucket].pop_back(); + if (buckets[currentBucket].empty()) { + // Find next bucket with elements + for ( ; currentBucket < nrBuckets; ++currentBucket) { + if (!buckets[currentBucket].empty()) { + nrUnsortedItems = buckets[currentBucket].size(); + if (AUTOSORT) { + fix(); + } + break; + } + } + } + } + + template + typename BucketPriorityQueue::HeuristicPointer BucketPriorityQueue::popTop() { + HeuristicPointer item = top(); + pop(); + return item; + } + + template + size_t BucketPriorityQueue::getBucket(double priority) const { + STORM_LOG_ASSERT(priority >= lowerValue, "Priority " << priority << " is too low"); + size_t newBucket = std::log(priority - lowerValue) / logBase; + if (newBucket >= nrBuckets) { + newBucket = nrBuckets - 1; + } + if (!HIGHER) { + newBucket = nrBuckets-1 - newBucket; + } + //std::cout << "get Bucket: " << priority << ", " << newBucket << std::endl; + STORM_LOG_ASSERT(newBucket < nrBuckets, "Priority " << priority << " is too high"); + return newBucket; + } + + template + void BucketPriorityQueue::print(std::ostream& out) const { + out << "Bucket priority queue with size " << buckets.size() << ", lower value: " << lowerValue << " and logBase: " << logBase << std::endl; + out << "Immediate bucket: "; + for (HeuristicPointer heuristic : immediateBucket) { + out << heuristic->getId() << ", "; + } + out << std::endl; + out << "Current bucket (" << currentBucket << ") has " << nrUnsortedItems << " unsorted items" << std::endl; + for (size_t bucket = 0; bucket < buckets.size(); ++bucket) { + if (!buckets[bucket].empty()) { + out << 
"Bucket " << bucket << ":" << std::endl; + for (HeuristicPointer heuristic : buckets[bucket]) { + out << "\t" << heuristic->getId() << ": " << heuristic->getPriority() << std::endl; + } + } + } + } + + template + void BucketPriorityQueue::printSizes(std::ostream& out) const { + out << "Bucket sizes: " << immediateBucket.size() << " | "; + for (size_t bucket = 0; bucket < buckets.size(); ++bucket) { + out << buckets[bucket].size() << " "; + } + std::cout << std::endl; + } + + // Template instantiations + template class BucketPriorityQueue; + +#ifdef STORM_HAVE_CARL + template class BucketPriorityQueue; +#endif + } +} diff --git a/src/storage/BucketPriorityQueue.h b/src/storage/BucketPriorityQueue.h new file mode 100644 index 000000000..1719c7124 --- /dev/null +++ b/src/storage/BucketPriorityQueue.h @@ -0,0 +1,73 @@ +#ifndef STORM_STORAGE_BUCKETPRIORITYQUEUE_H_ +#define STORM_STORAGE_BUCKETPRIORITYQUEUE_H_ + +#include "src/builder/DftExplorationHeuristic.h" +#include +#include +#include +#include + +namespace storm { + namespace storage { + + template + class BucketPriorityQueue { + + using HeuristicPointer = std::shared_ptr>; + + public: + explicit BucketPriorityQueue(size_t nrBuckets, double lowerValue, double ratio); + + void fix(); + + bool empty() const; + + std::size_t size() const; + + HeuristicPointer const& top() const; + + void push(HeuristicPointer const& item); + + void update(HeuristicPointer const& item, double oldPriority); + + void pop(); + + HeuristicPointer popTop(); + + void print(std::ostream& out) const; + + void printSizes(std::ostream& out) const; + + private: + + size_t getBucket(double priority) const; + + const double lowerValue; + + const bool HIGHER = true; + + const bool AUTOSORT = false; + + const double logBase; + + const size_t nrBuckets; + + size_t nrUnsortedItems; + + // List of buckets + std::vector> buckets; + + // Bucket containing all items which should be considered immediately + std::vector immediateBucket; + + // Index of first bucket which contains items + size_t currentBucket; + + std::function compare; + + }; + + } +} + +#endif // STORM_STORAGE_BUCKETPRIORITYQUEUE_H_ diff --git a/src/storage/bisimulation/BisimulationDecomposition.cpp b/src/storage/bisimulation/BisimulationDecomposition.cpp index 4b31551f9..33d4cffcc 100644 --- a/src/storage/bisimulation/BisimulationDecomposition.cpp +++ b/src/storage/bisimulation/BisimulationDecomposition.cpp @@ -230,25 +230,25 @@ namespace storm { template void BisimulationDecomposition::performPartitionRefinement() { - // Insert all blocks into the splitter queue as a (potential) splitter. - std::deque*> splitterQueue; - std::for_each(partition.getBlocks().begin(), partition.getBlocks().end(), [&] (std::unique_ptr> const& block) { block->data().setSplitter(); splitterQueue.push_back(block.get()); } ); + // Insert all blocks into the splitter vector as a (potential) splitter. + std::vector*> splitterVector; + std::for_each(partition.getBlocks().begin(), partition.getBlocks().end(), [&] (std::unique_ptr> const& block) { block->data().setSplitter(); splitterVector.push_back(block.get()); } ); // Then perform the actual splitting until there are no more splitters. uint_fast64_t iterations = 0; - while (!splitterQueue.empty()) { + while (!splitterVector.empty()) { ++iterations; // Get and prepare the next splitter. // Sort the splitters according to their sizes to prefer small splitters. That is just a heuristic, but // tends to work well. 
- std::sort(splitterQueue.begin(), splitterQueue.end(), [] (Block const* b1, Block const* b2) { return b1->getNumberOfStates() < b2->getNumberOfStates(); } ); - Block* splitter = splitterQueue.front(); - splitterQueue.pop_front(); + std::sort(splitterVector.begin(), splitterVector.end(), [] (Block const* b1, Block const* b2) { return b1->getNumberOfStates() > b2->getNumberOfStates(); } ); + Block* splitter = splitterVector.back(); + splitterVector.pop_back(); splitter->data().setSplitter(false); // Now refine the partition using the current splitter. - refinePartitionBasedOnSplitter(*splitter, splitterQueue); + refinePartitionBasedOnSplitter(*splitter, splitterVector); } } diff --git a/src/storage/bisimulation/BisimulationDecomposition.h b/src/storage/bisimulation/BisimulationDecomposition.h index 6d86f2df9..df951bce3 100644 --- a/src/storage/bisimulation/BisimulationDecomposition.h +++ b/src/storage/bisimulation/BisimulationDecomposition.h @@ -1,7 +1,7 @@ #ifndef STORM_STORAGE_BISIMULATIONDECOMPOSITION_H_ #define STORM_STORAGE_BISIMULATIONDECOMPOSITION_H_ -#include +#include #include "src/settings/SettingsManager.h" #include "src/settings/modules/BisimulationSettings.h" @@ -219,12 +219,12 @@ namespace storm { /*! * Refines the partition by considering the given splitter. All blocks that become potential splitters - * because of this refinement, are marked as splitters and inserted into the splitter queue. + * because of this refinement, are marked as splitters and inserted into the splitter vector. * * @param splitter The splitter to use. - * @param splitterQueue The queue into which to insert the newly discovered potential splitters. + * @param splitterVector The vector into which to insert the newly discovered potential splitters. */ - virtual void refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::deque*>& splitterQueue) = 0; + virtual void refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::vector*>& splitterVector) = 0; /*! 
* Builds the quotient model based on the previously computed equivalence classes (stored in the blocks diff --git a/src/storage/bisimulation/DeterministicModelBisimulationDecomposition.cpp b/src/storage/bisimulation/DeterministicModelBisimulationDecomposition.cpp index e968c98cd..dcc12fbfb 100644 --- a/src/storage/bisimulation/DeterministicModelBisimulationDecomposition.cpp +++ b/src/storage/bisimulation/DeterministicModelBisimulationDecomposition.cpp @@ -157,7 +157,7 @@ namespace storm { } template - void DeterministicModelBisimulationDecomposition::refinePredecessorBlocksOfSplitterStrong(std::list*> const& predecessorBlocks, std::deque*>& splitterQueue) { + void DeterministicModelBisimulationDecomposition::refinePredecessorBlocksOfSplitterStrong(std::list*> const& predecessorBlocks, std::vector*>& splitterVector) { for (auto block : predecessorBlocks) { STORM_LOG_TRACE("Refining predecessor " << block->getId() << " of splitter"); @@ -176,15 +176,15 @@ namespace storm { [this] (storm::storage::sparse::state_type state1, storm::storage::sparse::state_type state2) { return this->comparator.isLess(getProbabilityToSplitter(state1), getProbabilityToSplitter(state2)); }, - [&splitterQueue] (Block& block) { - splitterQueue.emplace_back(&block); block.data().setSplitter(); + [&splitterVector] (Block& block) { + splitterVector.emplace_back(&block); block.data().setSplitter(); }); - // If the predecessor block was split, we need to insert it into the splitter queue if it is not already + // If the predecessor block was split, we need to insert it into the splitter vector if it is not already // marked as a splitter. if (split && !blockToRefineProbabilistically->data().splitter()) { - splitterQueue.emplace_back(blockToRefineProbabilistically); + splitterVector.emplace_back(blockToRefineProbabilistically); blockToRefineProbabilistically->data().setSplitter(); } @@ -378,7 +378,7 @@ namespace storm { } template - void DeterministicModelBisimulationDecomposition::refinePredecessorBlockOfSplitterWeak(bisimulation::Block& block, std::deque*>& splitterQueue) { + void DeterministicModelBisimulationDecomposition::refinePredecessorBlockOfSplitterWeak(bisimulation::Block& block, std::vector*>& splitterVector) { // First, we need to turn the one-step probabilities to go to the splitter to the conditional probabilities // for all non-silent states. computeConditionalProbabilitiesForNonSilentStates(block); @@ -395,12 +395,12 @@ namespace storm { [&weakStateLabels,&block,originalBlockIndex,this] (storm::storage::sparse::state_type state1, storm::storage::sparse::state_type state2) { return weakStateLabels[this->partition.getPosition(state1) - originalBlockIndex] < weakStateLabels[this->partition.getPosition(state2) - originalBlockIndex]; }, - [this, &splitterQueue] (bisimulation::Block& block) { + [this, &splitterVector] (bisimulation::Block& block) { updateSilentProbabilitiesBasedOnTransitions(block); // Insert the new block as a splitter. block.data().setSplitter(); - splitterQueue.emplace_back(&block); + splitterVector.emplace_back(&block); }); // If the block was split, we also update the silent probabilities. @@ -410,16 +410,16 @@ namespace storm { if (!block.data().splitter()) { // Insert the new block as a splitter. 
block.data().setSplitter(); - splitterQueue.emplace_back(&block); + splitterVector.emplace_back(&block); } } } template - void DeterministicModelBisimulationDecomposition::refinePredecessorBlocksOfSplitterWeak(bisimulation::Block& splitter, std::list*> const& predecessorBlocks, std::deque*>& splitterQueue) { + void DeterministicModelBisimulationDecomposition::refinePredecessorBlocksOfSplitterWeak(bisimulation::Block& splitter, std::list*> const& predecessorBlocks, std::vector*>& splitterVector) { for (auto block : predecessorBlocks) { if (*block != splitter) { - refinePredecessorBlockOfSplitterWeak(*block, splitterQueue); + refinePredecessorBlockOfSplitterWeak(*block, splitterVector); } else { // If the block to split is the splitter itself, we must not do any splitting here. } @@ -439,7 +439,7 @@ namespace storm { } template - void DeterministicModelBisimulationDecomposition::refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::deque*>& splitterQueue) { + void DeterministicModelBisimulationDecomposition::refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::vector*>& splitterVector) { STORM_LOG_TRACE("Refining partition based on splitter " << splitter.getId()); // The outline of the refinement is as follows. @@ -513,7 +513,7 @@ namespace storm { if (this->options.getType() == BisimulationType::Strong || this->model.getType() == storm::models::ModelType::Ctmc) { // In the case of CTMCs and weak bisimulation, we still call the "splitStrong" method, but we already have // taken care of not adding the splitter to the predecessor blocks, so this is safe. - refinePredecessorBlocksOfSplitterStrong(predecessorBlocks, splitterQueue); + refinePredecessorBlocksOfSplitterStrong(predecessorBlocks, splitterVector); } else { // If the splitter is a predecessor of we can use the computed probabilities to update the silent // probabilities. @@ -521,7 +521,7 @@ namespace storm { updateSilentProbabilitiesBasedOnProbabilitiesToSplitter(splitter); } - refinePredecessorBlocksOfSplitterWeak(splitter, predecessorBlocks, splitterQueue); + refinePredecessorBlocksOfSplitterWeak(splitter, predecessorBlocks, splitterVector); } } diff --git a/src/storage/bisimulation/DeterministicModelBisimulationDecomposition.h b/src/storage/bisimulation/DeterministicModelBisimulationDecomposition.h index b9f2a2a5a..b460dfa5d 100644 --- a/src/storage/bisimulation/DeterministicModelBisimulationDecomposition.h +++ b/src/storage/bisimulation/DeterministicModelBisimulationDecomposition.h @@ -39,11 +39,11 @@ namespace storm { virtual void buildQuotient() override; - virtual void refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::deque*>& splitterQueue) override; + virtual void refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::vector*>& splitterVector) override; private: // Refines the predecessor blocks wrt. strong bisimulation. - void refinePredecessorBlocksOfSplitterStrong(std::list*> const& predecessorBlocks, std::deque*>& splitterQueue); + void refinePredecessorBlocksOfSplitterStrong(std::list*> const& predecessorBlocks, std::vector*>& splitterVector); /*! * Performs the necessary steps to compute a weak bisimulation on a DTMC. @@ -99,10 +99,10 @@ namespace storm { void updateSilentProbabilitiesBasedOnTransitions(bisimulation::Block& block); // Refines the predecessor blocks of the splitter wrt. weak bisimulation in DTMCs. 
- void refinePredecessorBlocksOfSplitterWeak(bisimulation::Block& splitter, std::list*> const& predecessorBlocks, std::deque*>& splitterQueue); + void refinePredecessorBlocksOfSplitterWeak(bisimulation::Block& splitter, std::list*> const& predecessorBlocks, std::vector*>& splitterVector); // Refines the given block wrt to weak bisimulation in DTMCs. - void refinePredecessorBlockOfSplitterWeak(bisimulation::Block& block, std::deque*>& splitterQueue); + void refinePredecessorBlockOfSplitterWeak(bisimulation::Block& block, std::vector*>& splitterVector); // Converts the one-step probabilities of going into the splitter into the conditional probabilities needed // for weak bisimulation (on DTMCs). @@ -127,4 +127,4 @@ namespace storm { } } -#endif /* STORM_STORAGE_BISIMULATION_DETERMINISTICMODELBISIMULATIONDECOMPOSITION_H_ */ \ No newline at end of file +#endif /* STORM_STORAGE_BISIMULATION_DETERMINISTICMODELBISIMULATIONDECOMPOSITION_H_ */ diff --git a/src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.cpp b/src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.cpp index 2e7d54113..fe53c6602 100644 --- a/src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.cpp +++ b/src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.cpp @@ -198,7 +198,7 @@ namespace storm { } template - void NondeterministicModelBisimulationDecomposition::updateQuotientDistributionsOfPredecessors(Block const& newBlock, Block const& oldBlock, std::deque*>& splitterQueue) { + void NondeterministicModelBisimulationDecomposition::updateQuotientDistributionsOfPredecessors(Block const& newBlock, Block const& oldBlock, std::vector*>& splitterVector) { uint_fast64_t lastState = 0; bool lastStateInitialized = false; @@ -220,7 +220,7 @@ namespace storm { // If the predecessor block is not marked as to-refined, we do so now. if (!predecessorBlock.data().splitter()) { predecessorBlock.data().setSplitter(); - splitterQueue.push_back(&predecessorBlock); + splitterVector.push_back(&predecessorBlock); } if (lastStateInitialized) { @@ -332,7 +332,7 @@ namespace storm { } template - bool NondeterministicModelBisimulationDecomposition::splitBlockAccordingToCurrentQuotientDistributions(Block& block, std::deque*>& splitterQueue) { + bool NondeterministicModelBisimulationDecomposition::splitBlockAccordingToCurrentQuotientDistributions(Block& block, std::vector*>& splitterVector) { std::list*> newBlocks; bool split = this->partition.splitBlock(block, [this] (storm::storage::sparse::state_type state1, storm::storage::sparse::state_type state2) { @@ -340,7 +340,7 @@ namespace storm { // std::cout << state1 << " is " << (!result ? "not" : "") << " smaller than " << state2 << std::endl; return result; }, - [this, &block, &splitterQueue, &newBlocks] (Block& newBlock) { + [this, &block, &splitterVector, &newBlocks] (Block& newBlock) { newBlocks.push_back(&newBlock); // this->checkBlockStable(newBlock); @@ -357,7 +357,7 @@ namespace storm { // defer updating the quotient distributions until *after* all splits, because // it otherwise influences the subsequent splits! 
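// Illustrative note (not part of the original patch): quotientDistributionsLess() compares the
// predecessors' current quotient distributions, so updating them while blocks are still being split
// would already change the outcome of later comparisons. Collecting the new blocks first and calling
// updateQuotientDistributionsOfPredecessors only afterwards keeps every split of this round based on
// one consistent snapshot of the distributions.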
for (auto el : newBlocks) { - this->updateQuotientDistributionsOfPredecessors(*el, block, splitterQueue); + this->updateQuotientDistributionsOfPredecessors(*el, block, splitterVector); } // this->checkQuotientDistributions(); @@ -405,14 +405,14 @@ namespace storm { } template - void NondeterministicModelBisimulationDecomposition::refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::deque*>& splitterQueue) { + void NondeterministicModelBisimulationDecomposition::refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::vector*>& splitterVector) { if (!possiblyNeedsRefinement(splitter)) { return; } STORM_LOG_TRACE("Refining block " << splitter.getId()); - splitBlockAccordingToCurrentQuotientDistributions(splitter, splitterQueue); + splitBlockAccordingToCurrentQuotientDistributions(splitter, splitterVector); } template class NondeterministicModelBisimulationDecomposition>; diff --git a/src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h b/src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h index 18a6e8e58..38feaf36a 100644 --- a/src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h +++ b/src/storage/bisimulation/NondeterministicModelBisimulationDecomposition.h @@ -37,7 +37,7 @@ namespace storm { virtual void buildQuotient() override; - virtual void refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::deque*>& splitterQueue) override; + virtual void refinePartitionBasedOnSplitter(bisimulation::Block& splitter, std::vector*>& splitterVector) override; virtual void initialize() override; @@ -52,7 +52,7 @@ namespace storm { bool possiblyNeedsRefinement(bisimulation::Block const& block) const; // Splits the given block according to the current quotient distributions. - bool splitBlockAccordingToCurrentQuotientDistributions(bisimulation::Block& block, std::deque*>& splitterQueue); + bool splitBlockAccordingToCurrentQuotientDistributions(bisimulation::Block& block, std::vector*>& splitterVector); // Retrieves whether the quotient distributions of state 1 are considered to be less than the ones of state 2. bool quotientDistributionsLess(storm::storage::sparse::state_type state1, storm::storage::sparse::state_type state2) const; @@ -62,7 +62,7 @@ namespace storm { // Updates the quotient distributions of the predecessors of the new block by taking the probability mass // away from the old block. 
- void updateQuotientDistributionsOfPredecessors(bisimulation::Block const& newBlock, bisimulation::Block const& oldBlock, std::deque*>& splitterQueue); + void updateQuotientDistributionsOfPredecessors(bisimulation::Block const& newBlock, bisimulation::Block const& oldBlock, std::vector*>& splitterVector); bool checkQuotientDistributions() const; bool checkBlockStable(bisimulation::Block const& newBlock) const; @@ -81,4 +81,4 @@ namespace storm { } } -#endif /* STORM_STORAGE_BISIMULATION_NONDETERMINISTICMODELBISIMULATIONDECOMPOSITION_H_ */ \ No newline at end of file +#endif /* STORM_STORAGE_BISIMULATION_NONDETERMINISTICMODELBISIMULATIONDECOMPOSITION_H_ */ diff --git a/src/storage/dft/DFTState.cpp b/src/storage/dft/DFTState.cpp index 6e2b80035..cc0eaabec 100644 --- a/src/storage/dft/DFTState.cpp +++ b/src/storage/dft/DFTState.cpp @@ -6,7 +6,7 @@ namespace storm { namespace storage { template - DFTState::DFTState(DFT const& dft, DFTStateGenerationInfo const& stateGenerationInfo, size_t id) : mStatus(dft.stateVectorSize()), mId(id), mPseudoState(false), mDft(dft), mStateGenerationInfo(stateGenerationInfo), exploreHeuristic() { + DFTState::DFTState(DFT const& dft, DFTStateGenerationInfo const& stateGenerationInfo, size_t id) : mStatus(dft.stateVectorSize()), mId(id), mPseudoState(false), mDft(dft), mStateGenerationInfo(stateGenerationInfo) { // TODO Matthias: use construct() // Initialize uses @@ -17,27 +17,15 @@ namespace storm { this->setUses(spareId, elem->children()[0]->id()); } - for (auto elem : mDft.getBasicElements()) { - mCurrentlyNotFailableBE.push_back(elem->id()); - } - // Initialize activation propagateActivation(mDft.getTopLevelIndex()); std::vector alwaysActiveBEs = mDft.nonColdBEs(); mCurrentlyFailableBE.insert(mCurrentlyFailableBE.end(), alwaysActiveBEs.begin(), alwaysActiveBEs.end()); - // Remove always active BEs from currently not failable BEs - for (size_t id : alwaysActiveBEs) { - auto it = std::find(mCurrentlyNotFailableBE.begin(), mCurrentlyNotFailableBE.end(), id); - STORM_LOG_ASSERT(it != mCurrentlyNotFailableBE.end(), "Id not found."); - mCurrentlyNotFailableBE.erase(it); - } - - sortFailableBEs(); } template - DFTState::DFTState(storm::storage::BitVector const& status, DFT const& dft, DFTStateGenerationInfo const& stateGenerationInfo, size_t id) : mStatus(status), mId(id), mPseudoState(true), mDft(dft), mStateGenerationInfo(stateGenerationInfo), exploreHeuristic() { + DFTState::DFTState(storm::storage::BitVector const& status, DFT const& dft, DFTStateGenerationInfo const& stateGenerationInfo, size_t id) : mStatus(status), mId(id), mPseudoState(true), mDft(dft), mStateGenerationInfo(stateGenerationInfo) { // Intentionally left empty } @@ -46,7 +34,6 @@ namespace storm { STORM_LOG_TRACE("Construct concrete state from pseudo state " << mDft.getStateString(mStatus, mStateGenerationInfo, mId)); // Clear information from pseudo state mCurrentlyFailableBE.clear(); - mCurrentlyNotFailableBE.clear(); mFailableDependencies.clear(); mUsedRepresentants.clear(); STORM_LOG_ASSERT(mPseudoState, "Only pseudo states can be constructed."); @@ -57,10 +44,6 @@ namespace storm { if ((!be->isColdBasicElement() && be->canFail()) || !mDft.hasRepresentant(index) || isActive(mDft.getRepresentant(index))) { mCurrentlyFailableBE.push_back(index); STORM_LOG_TRACE("Currently failable: " << mDft.getBasicElement(index)->toString()); - } else { - // BE currently is not failable - mCurrentlyNotFailableBE.push_back(index); - STORM_LOG_TRACE("Currently not failable: " << 
mDft.getBasicElement(index)->toString()); } } else if (mDft.getElement(index)->isSpareGate()) { // Initialize used representants @@ -69,8 +52,7 @@ namespace storm { STORM_LOG_TRACE("Spare " << index << " uses " << useId); } } - sortFailableBEs(); - + // Initialize failable dependencies for (size_t dependencyId : mDft.getDependencies()) { std::shared_ptr const> dependency = mDft.getDependency(dependencyId); @@ -85,9 +67,7 @@ namespace storm { template std::shared_ptr> DFTState::copy() const { - std::shared_ptr> stateCopy = std::make_shared>(*this); - stateCopy->exploreHeuristic = storm::builder::DFTExplorationHeuristic(); - return stateCopy; + return std::make_shared>(*this); } template @@ -205,7 +185,7 @@ namespace storm { template void DFTState::beNoLongerFailable(size_t id) { auto it = std::find(mCurrentlyFailableBE.begin(), mCurrentlyFailableBE.end(), id); - if(it != mCurrentlyFailableBE.end()) { + if (it != mCurrentlyFailableBE.end()) { mCurrentlyFailableBE.erase(it); } } @@ -239,11 +219,14 @@ namespace storm { } template - ValueType DFTState::getBERate(size_t id, bool considerPassive) const { + ValueType DFTState::getBERate(size_t id) const { STORM_LOG_ASSERT(mDft.isBasicElement(id), "Element is no BE."); - if (considerPassive && mDft.hasRepresentant(id) && !isActive(mDft.getRepresentant(id))) { + + if (mDft.hasRepresentant(id) && !isActive(mDft.getRepresentant(id))) { + // Return passive failure rate return mDft.getBasicElement(id)->passiveFailureRate(); } else { + // Return active failure rate return mDft.getBasicElement(id)->activeFailureRate(); } } @@ -251,16 +234,7 @@ namespace storm { template ValueType DFTState::getFailableBERate(size_t index) const { STORM_LOG_ASSERT(index < nrFailableBEs(), "Index invalid."); - return getBERate(mCurrentlyFailableBE[index], true); - } - - template - ValueType DFTState::getNotFailableBERate(size_t index) const { - STORM_LOG_ASSERT(index < nrNotFailableBEs(), "Index invalid."); - STORM_LOG_ASSERT(storm::utility::isZero(mDft.getBasicElement(mCurrentlyNotFailableBE[index])->activeFailureRate()) || - (mDft.hasRepresentant(mCurrentlyNotFailableBE[index]) && !isActive(mDft.getRepresentant(mCurrentlyNotFailableBE[index]))), "BE " << mCurrentlyNotFailableBE[index] << " can fail"); - // Use active failure rate as passive failure rate is 0. - return getBERate(mCurrentlyNotFailableBE[index], false); + return getBERate(mCurrentlyFailableBE[index]); } template @@ -317,16 +291,11 @@ namespace storm { if (be->isColdBasicElement() && be->canFail()) { // Add to failable BEs mCurrentlyFailableBE.push_back(elem); - // Remove from not failable BEs - auto it = std::find(mCurrentlyNotFailableBE.begin(), mCurrentlyNotFailableBE.end(), elem); - STORM_LOG_ASSERT(it != mCurrentlyNotFailableBE.end(), "Element " << mDft.getElement(elem)->name() << " not found."); - mCurrentlyNotFailableBE.erase(it); } } else if (mDft.getElement(elem)->isSpareGate() && !isActive(uses(elem))) { propagateActivation(uses(elem)); } } - sortFailableBEs(); } template @@ -424,14 +393,6 @@ namespace storm { } return changed; } - - template - void DFTState::sortFailableBEs() { - std::sort(mCurrentlyFailableBE.begin( ), mCurrentlyFailableBE.end( ), [&](size_t const& lhs, size_t const& rhs) { - // Sort decreasing - return getBERate(rhs, true) < getBERate(lhs, true); - }); - } // Explicitly instantiate the class. 
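For illustration (not part of the patch): the case distinction that the reworked getBERate(id) performs, written out with simplified stand-in types; the real code queries mDft for the basic element and its spare representative.

// Simplified stand-in; the real code uses the DFT's basic-element type.
struct BasicElement {
    double activeFailureRate;
    double passiveFailureRate;
};

// A BE fails with its passive rate only while it has a (spare) representative that is not yet
// activated; otherwise, including the case without any representative, the active rate applies.
double currentFailureRate(BasicElement const& be, bool hasRepresentative, bool representativeActive) {
    if (hasRepresentative && !representativeActive) {
        return be.passiveFailureRate;
    }
    return be.activeFailureRate;
}

getFailableBERate(index) then simply forwards the id of the index-th currently failable BE to this lookup, so callers no longer have to pass a considerPassive flag.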
diff --git a/src/storage/dft/DFTState.h b/src/storage/dft/DFTState.h index b2fb803da..b077b58b0 100644 --- a/src/storage/dft/DFTState.h +++ b/src/storage/dft/DFTState.h @@ -27,15 +27,13 @@ namespace storm { storm::storage::BitVector mStatus; size_t mId; std::vector mCurrentlyFailableBE; - std::vector mCurrentlyNotFailableBE; std::vector mFailableDependencies; std::vector mUsedRepresentants; bool mPseudoState; bool mValid = true; const DFT& mDft; const DFTStateGenerationInfo& mStateGenerationInfo; - storm::builder::DFTExplorationHeuristic exploreHeuristic; - + public: /** * Construct the initial state. @@ -123,26 +121,6 @@ namespace storm { return mPseudoState; } - void setHeuristicValues(size_t depth, ValueType rate, ValueType exitRate) { - exploreHeuristic.setHeuristicValues(depth, rate, exitRate); - } - - void setHeuristicValues(std::shared_ptr> oldState, ValueType rate, ValueType exitRate) { - if (hasFailed(mDft.getTopLevelIndex()) || isFailsafe(mDft.getTopLevelIndex()) || nrFailableDependencies() > 0 || (nrFailableDependencies() == 0 && nrFailableBEs() == 0)) { - // Do not skip absorbing state or if reached by dependencies - exploreHeuristic.setNotSkip(); - } - exploreHeuristic.setHeuristicValues(oldState->exploreHeuristic.getDepth() + 1, rate, exitRate); - } - - bool isSkip(double approximationThreshold, storm::builder::ApproximationHeuristic heuristic) { - return exploreHeuristic.isSkip(approximationThreshold, heuristic); - } - - double getPriority() const { - return exploreHeuristic.getPriority(); - } - storm::storage::BitVector const& status() const { return mStatus; } @@ -202,15 +180,6 @@ namespace storm { return mCurrentlyFailableBE.size(); } - /** - * Get number of currently not failable BEs. These are cold BE which can fail in the future. - * - * @return Number of not failable BEs. - */ - size_t nrNotFailableBEs() const { - return mCurrentlyNotFailableBE.size(); - } - /** * Get the failure rate of the currently failable BE on the given index. * @@ -221,13 +190,13 @@ namespace storm { ValueType getFailableBERate(size_t index) const; /** - * Get the failure rate of the currently not failable BE on the given index. + * Get the current failure rate of the given BE. * - * @param index Index of BE in list of currently not failable BEs. + * @param id Id of BE. * * @return Failure rate of the BE. */ - ValueType getNotFailableBERate(size_t index) const; + ValueType getBERate(size_t id) const; /** Get number of currently failable dependencies. * @@ -323,22 +292,6 @@ namespace storm { private: void propagateActivation(size_t representativeId); - /** - * Get the failure rate of the given BE. - * - * @param id Id of BE. - * @param considerPassive Flag indicating if the passive failure rate should be returned if - * the BE currently is passive. - * - * @return Failure rate of the BE. - */ - ValueType getBERate(size_t id, bool considerPassive) const; - - /*! - * Sort failable BEs in decreasing order of their active failure rate. 
- */ - void sortFailableBEs(); - }; } diff --git a/src/storm-dyftee.cpp b/src/storm-dyftee.cpp index 2c7418075..5b040a3b0 100644 --- a/src/storm-dyftee.cpp +++ b/src/storm-dyftee.cpp @@ -120,6 +120,7 @@ int main(const int argc, const char** argv) { parametric = generalSettings.isParametricSet(); #endif +#ifdef STORM_HAVE_Z3 if (dftSettings.solveWithSMT()) { // Solve with SMT if (parametric) { @@ -130,6 +131,7 @@ int main(const int argc, const char** argv) { storm::utility::cleanUp(); return 0; } +#endif // Set min or max bool minimal = true; diff --git a/src/utility/bitoperations.h b/src/utility/bitoperations.h index bec6a6414..18b42a1cd 100644 --- a/src/utility/bitoperations.h +++ b/src/utility/bitoperations.h @@ -10,6 +10,6 @@ inline size_t smallestIntWithNBitsSet(size_t n) { inline size_t nextBitPermutation(size_t v) { if (v==0) return static_cast(0); // From https://graphics.stanford.edu/~seander/bithacks.html#NextBitPermutation - unsigned int t = (v | (v - 1)) + 1; + size_t t = (v | (v - 1)) + 1; return t | ((((t & -t) / (v & -v)) >> 1) - 1); }
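As a usage illustration for the two helpers in this header (a sketch, not part of the patch; the width and the printing are arbitrary choices): starting from smallestIntWithNBitsSet(n) and repeatedly applying nextBitPermutation enumerates every mask with a fixed number of set bits in increasing order, which is the usual way to walk all subsets of a given size.

#include "src/utility/bitoperations.h"
#include <cstddef>
#include <iostream>

// Print all bit masks over 'width' bits (width < 64) that have exactly n bits set, for 1 <= n <= width.
// smallestIntWithNBitsSet(n) yields the first such mask; nextBitPermutation produces the next larger
// one, so the loop stops as soon as the permutation no longer fits into 'width' bits.
void printMasksWithNBitsSet(size_t width, size_t n) {
    size_t const limit = static_cast<size_t>(1) << width;
    for (size_t mask = smallestIntWithNBitsSet(n); mask != 0 && mask < limit; mask = nextBitPermutation(mask)) {
        std::cout << mask << std::endl;
    }
}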