@@ -9,9 +9,6 @@
 namespace storm {
     namespace storage {
-        
-        template<class ValueType, class Hash>
-        const std::vector<std::size_t> BitVectorHashMap<ValueType, Hash>::sizes = {5, 13, 31, 79, 163, 277, 499, 1021, 2029, 3989, 8059, 16001, 32099, 64301, 127921, 256499, 511111, 1024901, 2048003, 4096891, 8192411, 15485863, 32142191, 64285127, 128572517, 257148523, 514299959, 1028599919, 2057199839, 4114399697, 8228799419, 16457598791, 32915197603, 65830395223};
         
         template<class ValueType, class Hash>
         BitVectorHashMap<ValueType, Hash>::BitVectorHashMapIterator::BitVectorHashMapIterator(BitVectorHashMap const& map, BitVector::const_iterator indexIt) : map(map), indexIt(indexIt) {
             // Intentionally left empty.
@@ -45,19 +42,20 @@ namespace storm {
         }
         
         template<class ValueType, class Hash>
-        BitVectorHashMap<ValueType, Hash>::BitVectorHashMap(uint64_t bucketSize, uint64_t initialSize, double loadFactor) : loadFactor(loadFactor), bucketSize(bucketSize), numberOfElements(0) {
+        BitVectorHashMap<ValueType, Hash>::BitVectorHashMap(uint64_t bucketSize, uint64_t initialSize, double loadFactor) : loadFactor(loadFactor), bucketSize(bucketSize), currentSize(1), numberOfElements(0) {
             STORM_LOG_ASSERT(bucketSize % 64 == 0, "Bucket size must be a multiple of 64.");
-            currentSizeIterator = std::find_if(sizes.begin(), sizes.end(), [=] (uint64_t value) { return value > initialSize; } );
+            while (initialSize > 0) {
+                ++currentSize;
+                initialSize >>= 1;
+            }
             
             // Create the underlying containers.
-            buckets = storm::storage::BitVector(bucketSize * *currentSizeIterator);
-            occupied = storm::storage::BitVector(*currentSizeIterator);
-            values = std::vector<ValueType>(*currentSizeIterator);
+            buckets = storm::storage::BitVector(bucketSize * (1ull << currentSize));
+            occupied = storm::storage::BitVector(1ull << currentSize);
+            values = std::vector<ValueType>(1ull << currentSize);
             
 #ifndef NDEBUG
-            for (uint64_t i = 0; i < sizes.size() - 1; ++i) {
-                STORM_LOG_ASSERT(sizes[i] < sizes[i + 1], "Expected stricly increasing sizes.");
-            }
             numberOfInsertions = 0;
             numberOfInsertionProbingSteps = 0;
             numberOfFinds = 0;
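
For orientation, not part of the patch: the constructor no longer looks up the next prime in the removed sizes table. Instead it derives the exponent currentSize from initialSize, so the capacity becomes the power of two 1ull << currentSize, strictly larger than the requested initial size. A minimal standalone sketch of that sizing rule, assuming nothing beyond the loop shown above (powerOfTwoCapacity is a hypothetical helper, not a member of BitVectorHashMap):

    #include <cstdint>
    #include <iostream>

    // Mirrors the constructor's sizing loop: currentSize starts at 1 and is
    // incremented once per bit of initialSize, so 2^currentSize > initialSize.
    uint64_t powerOfTwoCapacity(uint64_t initialSize) {
        uint64_t currentSize = 1;
        while (initialSize > 0) {
            ++currentSize;
            initialSize >>= 1;
        }
        return 1ull << currentSize;
    }

    int main() {
        std::cout << powerOfTwoCapacity(0) << "\n";    // 2
        std::cout << powerOfTwoCapacity(1000) << "\n"; // 2048
        std::cout << powerOfTwoCapacity(4096) << "\n"; // 16384
    }
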
@@ -77,60 +75,32 @@ namespace storm {
         
         template<class ValueType, class Hash>
         std::size_t BitVectorHashMap<ValueType, Hash>::capacity() const {
-            return *currentSizeIterator;
+            return 1ull << currentSize;
         }
         
         template<class ValueType, class Hash>
         void BitVectorHashMap<ValueType, Hash>::increaseSize() {
-            ++currentSizeIterator;
-            STORM_LOG_ASSERT(currentSizeIterator != sizes.end(), "Hash map became to big.");
+            ++currentSize;
 #ifndef NDEBUG
-            STORM_LOG_TRACE("Increasing size of hash map from " << *(currentSizeIterator - 1) << " to " << *currentSizeIterator << ". Stats: " << numberOfFinds << " finds (avg. " << (numberOfFindProbingSteps / static_cast<double>(numberOfFinds)) << " probing steps), " << numberOfInsertions << " insertions (avg. " << (numberOfInsertionProbingSteps / static_cast<double>(numberOfInsertions)) << " probing steps).");
+            STORM_LOG_TRACE("Increasing size of hash map from " << (1ull << (currentSize - 1)) << " to " << (1ull << currentSize) << ". Stats: " << numberOfFinds << " finds (avg. " << (numberOfFindProbingSteps / static_cast<double>(numberOfFinds)) << " probing steps), " << numberOfInsertions << " insertions (avg. " << (numberOfInsertionProbingSteps / static_cast<double>(numberOfInsertions)) << " probing steps).");
 #else
-            STORM_LOG_TRACE("Increasing size of hash map from " << *(currentSizeIterator - 1) << " to " << *currentSizeIterator << ".");
+            STORM_LOG_TRACE("Increasing size of hash map from " << (1ull << (currentSize - 1)) << " to " << (1ull << currentSize) << ".");
 #endif
             
             // Create new containers and swap them with the old ones.
             numberOfElements = 0;
-            storm::storage::BitVector oldBuckets(bucketSize * *currentSizeIterator);
+            storm::storage::BitVector oldBuckets(bucketSize * (1ull << currentSize));
             std::swap(oldBuckets, buckets);
-            storm::storage::BitVector oldOccupied = storm::storage::BitVector(*currentSizeIterator);
+            storm::storage::BitVector oldOccupied = storm::storage::BitVector(1ull << currentSize);
             std::swap(oldOccupied, occupied);
-            std::vector<ValueType> oldValues = std::vector<ValueType>(*currentSizeIterator);
+            std::vector<ValueType> oldValues = std::vector<ValueType>(1ull << currentSize);
             std::swap(oldValues, values);
             
             // Now iterate through the elements and reinsert them in the new storage.
             bool fail = false;
             for (auto bucketIndex : oldOccupied) {
                 fail = !this->insertWithoutIncreasingSize(oldBuckets.get(bucketIndex * bucketSize, bucketSize), oldValues[bucketIndex]);
-                
-                // If we failed to insert just one element, we have to redo the procedure with a larger container size.
-                if (fail) {
-                    break;
-                }
-            }
-            
-            uint_fast64_t failCount = 0;
-            while (fail) {
-                ++failCount;
-                STORM_LOG_THROW(failCount < 2, storm::exceptions::InternalException, "Increasing size failed too often.");
-                
-                ++currentSizeIterator;
-                STORM_LOG_THROW(currentSizeIterator != sizes.end(), storm::exceptions::InternalException, "Hash map became to big.");
-                
-                numberOfElements = 0;
-                buckets = storm::storage::BitVector(bucketSize * *currentSizeIterator);
-                occupied = storm::storage::BitVector(*currentSizeIterator);
-                values = std::vector<ValueType>(*currentSizeIterator);
-                
-                for (auto bucketIndex : oldOccupied) {
-                    fail = !this->insertWithoutIncreasingSize(oldBuckets.get(bucketIndex * bucketSize, bucketSize), oldValues[bucketIndex]);
-                    
-                    // If we failed to insert just one element, we have to redo the procedure with a larger container size.
-                    if (fail) {
-                        break;
-                    }
-                }
                 STORM_LOG_ASSERT(!fail, "Expected to be able to insert all elements.");
             }
         }
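
For orientation, not part of the patch: increaseSize() now grows by doubling (one increment of currentSize), allocates fresh containers, and reinserts every occupied bucket in a single pass. The old retry loop disappears because, with the probing scheme further below, an insertion can only fail once every bucket is occupied, which cannot happen immediately after the capacity has doubled; hence the single assert. A simplified sketch of such a grow-and-rehash step over a plain std::vector (grow and the optional-based table are illustrative stand-ins, not the map's actual members):

    #include <cstdint>
    #include <functional>
    #include <optional>
    #include <vector>

    // Double the capacity, then reinsert every occupied slot into the fresh
    // table using linear probing. At a load factor below 1 the doubled table
    // cannot run out of free slots during this pass.
    void grow(std::vector<std::optional<uint64_t>>& table) {
        std::vector<std::optional<uint64_t>> old(table.size() * 2);
        std::swap(old, table);  // 'table' is now the empty, doubled storage
        for (auto const& slot : old) {
            if (slot.has_value()) {
                uint64_t bucket = std::hash<uint64_t>()(*slot) % table.size();
                while (table[bucket].has_value()) {
                    bucket = (bucket + 1) % table.size();  // linear probing
                }
                table[bucket] = *slot;
            }
        }
    }
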
@@ -166,7 +136,7 @@ namespace storm {
         template<class ValueType, class Hash>
         std::pair<ValueType, std::size_t> BitVectorHashMap<ValueType, Hash>::findOrAddAndGetBucket(storm::storage::BitVector const& key, ValueType const& value) {
             // If the load of the map is too high, we increase the size.
-            if (numberOfElements >= loadFactor * *currentSizeIterator) {
+            if (numberOfElements >= loadFactor * (1ull << currentSize)) {
                 this->increaseSize();
             }
@@ -187,7 +157,7 @@ namespace storm {
         template<class ValueType, class Hash>
         std::size_t BitVectorHashMap<ValueType, Hash>::setOrAddAndGetBucket(storm::storage::BitVector const& key, ValueType const& value) {
             // If the load of the map is too high, we increase the size.
-            if (numberOfElements >= loadFactor * *currentSizeIterator) {
+            if (numberOfElements >= loadFactor * (1ull << currentSize)) {
                 this->increaseSize();
             }
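
Both findOrAddAndGetBucket and setOrAddAndGetBucket keep the same growth trigger; only the capacity expression changes: the map grows as soon as numberOfElements reaches loadFactor times the current capacity. As a purely illustrative example (0.75 is not necessarily the map's default), a load factor of 0.75 with currentSize = 11, i.e. capacity 2048, would trigger growth at 1536 stored elements.
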
@@ -230,17 +200,12 @@ namespace storm {
             return const_iterator(*this, occupied.end());
         }
         
-        template<class ValueType, class Hash>
-        uint_fast64_t BitVectorHashMap<ValueType, Hash>::getNextBucketInProbingSequence(uint_fast64_t, uint_fast64_t currentValue, uint_fast64_t step) const {
-            return (currentValue + step + step*step) % *currentSizeIterator;
-        }
-        
         template<class ValueType, class Hash>
         std::pair<bool, std::size_t> BitVectorHashMap<ValueType, Hash>::findBucket(storm::storage::BitVector const& key) const {
 #ifndef NDEBUG
             ++numberOfFinds;
 #endif
-            uint_fast64_t initialHash = hasher(key) % *currentSizeIterator;
+            uint_fast64_t initialHash = hasher(key) % (1ull << currentSize);
             uint_fast64_t bucket = initialHash;
             
             uint_fast64_t i = 0;
@@ -252,8 +217,11 @@ namespace storm {
                 if (buckets.matches(bucket * bucketSize, key)) {
                     return std::make_pair(true, bucket);
                 }
-                bucket = getNextBucketInProbingSequence(initialHash, bucket, i);
+                bucket += 1;
+                if (bucket == (1ull << currentSize)) {
+                    bucket = 0;
+                }
                 
                 if (bucket == initialHash) {
                     return std::make_pair(false, bucket);
                 }
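
For orientation, not part of the patch: both probing loops now step linearly through the buckets and wrap around at the end of the table, instead of calling the removed quadratic getNextBucketInProbingSequence helper; arriving back at initialHash means the whole table has been scanned. A self-contained sketch of such a linear-probing lookup over a plain std::vector, where findSlot and the optional-based table are hypothetical stand-ins for the map's bit-vector machinery:

    #include <cstdint>
    #include <functional>
    #include <optional>
    #include <vector>

    // Linear probing with wrap-around over a non-empty table.
    // Returns the index of the slot holding 'key', or nullopt if absent.
    std::optional<uint64_t> findSlot(std::vector<std::optional<uint64_t>> const& table, uint64_t key) {
        uint64_t capacity = table.size();
        uint64_t initialHash = std::hash<uint64_t>()(key) % capacity;
        uint64_t bucket = initialHash;
        while (true) {
            if (!table[bucket].has_value()) {
                return std::nullopt;            // empty slot: the key is not present
            }
            if (*table[bucket] == key) {
                return bucket;                  // occupied slot with a matching key
            }
            bucket += 1;                        // step to the next slot ...
            if (bucket == capacity) {
                bucket = 0;                     // ... wrapping around at the end
            }
            if (bucket == initialHash) {
                return std::nullopt;            // scanned the entire table
            }
        }
    }
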
@@ -268,7 +236,7 @@ namespace storm {
 #ifndef NDEBUG
             ++numberOfInsertions;
 #endif
-            uint_fast64_t initialHash = hasher(key) % *currentSizeIterator;
+            uint_fast64_t initialHash = hasher(key) % (1ull << currentSize);
             uint_fast64_t bucket = initialHash;
             
             uint64_t i = 0;
@@ -280,12 +248,15 @@ namespace storm {
                 if (buckets.matches(bucket * bucketSize, key)) {
                     return std::make_tuple(true, bucket, false);
                 }
-                bucket = getNextBucketInProbingSequence(initialHash, bucket, i);
+                bucket += 1;
+                if (bucket == (1ull << currentSize)) {
+                    bucket = 0;
+                }
                 
                 if (bucket == initialHash) {
                     if (increaseStorage) {
                         this->increaseSize();
-                        bucket = initialHash = hasher(key) % *currentSizeIterator;
+                        bucket = initialHash = hasher(key) % (1ull << currentSize);
                     } else {
                         return std::make_tuple(false, bucket, true);
                     }