From 4c3a409961db152f514af5dd27f3ee8487c96534 Mon Sep 17 00:00:00 2001 From: dehnert Date: Mon, 24 Jul 2017 23:02:43 +0200 Subject: [PATCH] readd sparsepp in new version --- resources/3rdparty/sparsepp/.gitignore | 47 + resources/3rdparty/sparsepp/.travis.yml | 14 + resources/3rdparty/sparsepp/CHANGELOG.md | 16 + resources/3rdparty/sparsepp/LICENSE | 0 resources/3rdparty/sparsepp/README.md | 14 +- resources/3rdparty/sparsepp/bench.md | 0 resources/3rdparty/sparsepp/docs/.gitignore | 0 .../3rdparty/sparsepp/examples/emplace.cc | 128 + .../3rdparty/sparsepp/examples/hash_std.cc | 47 + resources/3rdparty/sparsepp/examples/makefile | 18 + .../sparsepp/examples/serialize_file.cc | 82 + .../sparsepp/examples/serialize_large.cc | 97 + .../sparsepp/examples/serialize_stream.cc | 64 + .../vsprojects/serialize_stream.vcxproj | 172 + .../serialize_stream.vcxproj.filters | 13 + .../examples/vsprojects/spp_examples.sln | 28 + resources/3rdparty/sparsepp/sparsepp/spp.h | 4347 +++++++++++++++++ .../3rdparty/sparsepp/sparsepp/spp_config.h | 781 +++ .../3rdparty/sparsepp/sparsepp/spp_dlalloc.h | 4023 +++++++++++++++ .../3rdparty/sparsepp/sparsepp/spp_memory.h | 121 + .../3rdparty/sparsepp/sparsepp/spp_smartptr.h | 76 + .../3rdparty/sparsepp/sparsepp/spp_stdint.h | 16 + .../3rdparty/sparsepp/sparsepp/spp_timer.h | 58 + .../3rdparty/sparsepp/sparsepp/spp_traits.h | 122 + .../3rdparty/sparsepp/sparsepp/spp_utils.h | 447 ++ resources/3rdparty/sparsepp/spp.natvis | 41 + resources/3rdparty/sparsepp/tests/makefile | 27 + .../3rdparty/sparsepp/tests/perftest1.cc | 162 + .../3rdparty/sparsepp/tests/spp_alloc_test.cc | 189 + .../sparsepp/tests/spp_bitset_test.cc | 284 ++ resources/3rdparty/sparsepp/tests/spp_test.cc | 2988 +++++++++++ .../sparsepp/tests/vsprojects/spp.sln | 38 + .../tests/vsprojects/spp_alloc_test.vcxproj | 176 + .../vsprojects/spp_alloc_test.vcxproj.filters | 28 + .../tests/vsprojects/spp_test.vcxproj | 175 + .../tests/vsprojects/spp_test.vcxproj.filters | 32 + 36 files changed, 14869 insertions(+), 2 deletions(-) create mode 100755 resources/3rdparty/sparsepp/.gitignore create mode 100755 resources/3rdparty/sparsepp/.travis.yml create mode 100755 resources/3rdparty/sparsepp/CHANGELOG.md mode change 100644 => 100755 resources/3rdparty/sparsepp/LICENSE mode change 100644 => 100755 resources/3rdparty/sparsepp/README.md mode change 100644 => 100755 resources/3rdparty/sparsepp/bench.md mode change 100644 => 100755 resources/3rdparty/sparsepp/docs/.gitignore create mode 100755 resources/3rdparty/sparsepp/examples/emplace.cc create mode 100755 resources/3rdparty/sparsepp/examples/hash_std.cc create mode 100755 resources/3rdparty/sparsepp/examples/makefile create mode 100755 resources/3rdparty/sparsepp/examples/serialize_file.cc create mode 100755 resources/3rdparty/sparsepp/examples/serialize_large.cc create mode 100755 resources/3rdparty/sparsepp/examples/serialize_stream.cc create mode 100755 resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj create mode 100755 resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj.filters create mode 100755 resources/3rdparty/sparsepp/examples/vsprojects/spp_examples.sln create mode 100755 resources/3rdparty/sparsepp/sparsepp/spp.h create mode 100755 resources/3rdparty/sparsepp/sparsepp/spp_config.h create mode 100755 resources/3rdparty/sparsepp/sparsepp/spp_dlalloc.h create mode 100755 resources/3rdparty/sparsepp/sparsepp/spp_memory.h create mode 100755 resources/3rdparty/sparsepp/sparsepp/spp_smartptr.h create mode 100755 
resources/3rdparty/sparsepp/sparsepp/spp_stdint.h create mode 100755 resources/3rdparty/sparsepp/sparsepp/spp_timer.h create mode 100755 resources/3rdparty/sparsepp/sparsepp/spp_traits.h create mode 100755 resources/3rdparty/sparsepp/sparsepp/spp_utils.h create mode 100755 resources/3rdparty/sparsepp/spp.natvis create mode 100755 resources/3rdparty/sparsepp/tests/makefile create mode 100755 resources/3rdparty/sparsepp/tests/perftest1.cc create mode 100755 resources/3rdparty/sparsepp/tests/spp_alloc_test.cc create mode 100755 resources/3rdparty/sparsepp/tests/spp_bitset_test.cc create mode 100755 resources/3rdparty/sparsepp/tests/spp_test.cc create mode 100755 resources/3rdparty/sparsepp/tests/vsprojects/spp.sln create mode 100755 resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj create mode 100755 resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj.filters create mode 100755 resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj create mode 100755 resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj.filters diff --git a/resources/3rdparty/sparsepp/.gitignore b/resources/3rdparty/sparsepp/.gitignore new file mode 100755 index 000000000..cd2946ad7 --- /dev/null +++ b/resources/3rdparty/sparsepp/.gitignore @@ -0,0 +1,47 @@ +# Windows image file caches +Thumbs.db +ehthumbs.db + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# ========================= +# Operating System Files +# ========================= + +# OSX +# ========================= + +.DS_Store +.AppleDouble +.LSOverride + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk diff --git a/resources/3rdparty/sparsepp/.travis.yml b/resources/3rdparty/sparsepp/.travis.yml new file mode 100755 index 000000000..c8d240b98 --- /dev/null +++ b/resources/3rdparty/sparsepp/.travis.yml @@ -0,0 +1,14 @@ +language: cpp + +os: + - linux + - osx + +compiler: + - clang + - gcc + +dist: trusty +sudo: false + +script: cd tests && make && make test diff --git a/resources/3rdparty/sparsepp/CHANGELOG.md b/resources/3rdparty/sparsepp/CHANGELOG.md new file mode 100755 index 000000000..c491ed950 --- /dev/null +++ b/resources/3rdparty/sparsepp/CHANGELOG.md @@ -0,0 +1,16 @@ +# 0.95 + +* not single header anymore (this was just too much of a hassle). +* custom allocator not quite ready yet. Checked in, but still using old allocator (easy to toggle - line 15 of spp_config.h) + + +# 0.90 + +* stable release (single header) +* known issues: + - memory usage can be excessive in Windows + + sparsepp has a very simple default allocator based on the system malloc/realloc/free implementation, + and the default Windows realloc() appears to fragment the memory, causing significantly higher + memory usage than on linux. To solve this issue, I am working on a new allocator which will + remedy the problem. 
diff --git a/resources/3rdparty/sparsepp/LICENSE b/resources/3rdparty/sparsepp/LICENSE old mode 100644 new mode 100755 diff --git a/resources/3rdparty/sparsepp/README.md b/resources/3rdparty/sparsepp/README.md old mode 100644 new mode 100755 index 224bb5175..7cf36b83e --- a/resources/3rdparty/sparsepp/README.md +++ b/resources/3rdparty/sparsepp/README.md @@ -98,6 +98,16 @@ These classes provide the same interface as std::unordered_map and std::unordere - Since items are not grouped into buckets, Bucket APIs have been adapted: `max_bucket_count` is equivalent to `max_size`, and `bucket_count` returns the sparsetable size, which is normally at least twice the number of items inserted into the hash_map. +## Memory allocator on Windows (when building with Visual Studio) + +When building with the Microsoft compiler, we provide a custom allocator because the default one (from the Visual C++ runtime) fragments memory when reallocating. + +This is desirable *only* when creating large sparsepp hash maps. If you create lots of small hash_maps, memory usage may increase instead of decreasing as expected. The reason is that, for each instance of a hash_map, the custom memory allocator creates a new memory space to allocate from, which is typically 4K, so it may be a big waste if just a few items are allocated. + +In order to use the custom spp allocator, define the following preprocessor variable before including ``: + +`#define SPP_USE_SPP_ALLOC 1` + ## Integer keys, and other hash function considerations. 1. For basic integer types, sparsepp provides a default hash function which does some mixing of the bits of the keys (see [Integer Hashing](http://burtleburtle.net/bob/hash/integer.html)). This prevents a pathological case where inserted keys are sequential (1, 2, 3, 4, ...), and the lookup on non-present keys becomes very slow. @@ -229,7 +239,7 @@ This support is implemented in the following APIs: bool unserialize(Serializer serializer, INPUT *stream); ``` -The following example demontrates how a simple sparse_hash_map can be written to a file, and then read back. The serializer we use read and writes to a file using the stdio APIs, but it would be equally simple to write a serialized using the stream APIS: +The following example demonstrates how a simple sparse_hash_map can be written to a file, and then read back. The serializer we use read and writes to a file using the stdio APIs, but it would be equally simple to write a serialized using the stream APIS: ```c++ #include @@ -319,7 +329,7 @@ int main(int argc, char* argv[]) ## Thread safety -Sparsepp follows the trade safety rules of the Standard C++ library. In Particular: +Sparsepp follows the thread safety rules of the Standard C++ library. In Particular: - A single sparsepp hash table is thread safe for reading from multiple threads. For example, given a hash table A, it is safe to read A from thread 1 and from thread 2 simultaneously. 
diff --git a/resources/3rdparty/sparsepp/bench.md b/resources/3rdparty/sparsepp/bench.md old mode 100644 new mode 100755 diff --git a/resources/3rdparty/sparsepp/docs/.gitignore b/resources/3rdparty/sparsepp/docs/.gitignore old mode 100644 new mode 100755 diff --git a/resources/3rdparty/sparsepp/examples/emplace.cc b/resources/3rdparty/sparsepp/examples/emplace.cc new file mode 100755 index 000000000..e4c04dcfd --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/emplace.cc @@ -0,0 +1,128 @@ +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace patch +{ + template std::string to_string(const T& n) + { + std::ostringstream stm; + stm << n; + return stm.str(); + } +} + +#if defined(SPP_NO_CXX11_RVALUE_REFERENCES) + #warning "problem: we expect spp will detect we have rvalue support" +#endif + +template +using milliseconds = std::chrono::duration; + +class custom_type +{ + std::string one = "one"; + std::string two = "two"; + std::uint32_t three = 3; + std::uint64_t four = 4; + std::uint64_t five = 5; +public: + custom_type() = default; + // Make object movable and non-copyable + custom_type(custom_type &&) = default; + custom_type& operator=(custom_type &&) = default; + // should be automatically deleted per http://www.slideshare.net/ripplelabs/howard-hinnant-accu2014 + //custom_type(custom_type const&) = delete; + //custom_type& operator=(custom_type const&) = delete; +}; + +void test(std::size_t iterations, std::size_t container_size) +{ + std::clog << "bench: iterations: " << iterations << " / container_size: " << container_size << "\n"; + { + std::size_t count = 0; + auto t1 = std::chrono::high_resolution_clock::now(); + for (std::size_t i=0; i m; + m.reserve(container_size); + for (std::size_t j=0; j(t2 - t1).count(); + if (count != iterations*container_size) + std::clog << " invalid count: " << count << "\n"; + std::clog << " std::unordered_map: " << std::fixed << int(elapsed) << " ms\n"; + } + + { + std::size_t count = 0; + auto t1 = std::chrono::high_resolution_clock::now(); + for (std::size_t i=0; i m; + for (std::size_t j=0; j(t2 - t1).count(); + if (count != iterations*container_size) + std::clog << " invalid count: " << count << "\n"; + std::clog << " std::map: " << std::fixed << int(elapsed) << " ms\n"; + } + + { + std::size_t count = 0; + auto t1 = std::chrono::high_resolution_clock::now(); + for (std::size_t i=0; i> m; + m.reserve(container_size); + for (std::size_t j=0; j(t2 - t1).count(); + if (count != iterations*container_size) + std::clog << " invalid count: " << count << "\n"; + std::clog << " std::vector: " << std::fixed << int(elapsed) << " ms\n"; + } + + { + std::size_t count = 0; + auto t1 = std::chrono::high_resolution_clock::now(); + for (std::size_t i=0; i m; + m.reserve(container_size); + for (std::size_t j=0; j(t2 - t1).count(); + if (count != iterations*container_size) + std::clog << " invalid count: " << count << "\n"; + std::clog << " spp::sparse_hash_map: " << std::fixed << int(elapsed) << " ms\n"; + } + +} + +int main() +{ + std::size_t iterations = 100000; + + test(iterations,1); + test(iterations,10); + test(iterations,50); +} diff --git a/resources/3rdparty/sparsepp/examples/hash_std.cc b/resources/3rdparty/sparsepp/examples/hash_std.cc new file mode 100755 index 000000000..e0738df58 --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/hash_std.cc @@ -0,0 +1,47 @@ +#include +#include +#include + +using std::string; + +struct Person +{ + bool operator==(const Person &o) const + { + return _first == o._first 
&& _last == o._last; + } + + string _first; + string _last; +}; + +namespace std +{ +// inject specialization of std::hash for Person into namespace std +// ---------------------------------------------------------------- +template<> +struct hash +{ + std::size_t operator()(Person const &p) const + { + std::size_t seed = 0; + spp::hash_combine(seed, p._first); + spp::hash_combine(seed, p._last); + return seed; + } +}; +} + +int main() +{ + // As we have defined a specialization of std::hash() for Person, + // we can now create sparse_hash_set or sparse_hash_map of Persons + // ---------------------------------------------------------------- + spp::sparse_hash_set persons = + { { "John", "Galt" }, + { "Jane", "Doe" } + }; + + for (auto& p: persons) + std::cout << p._first << ' ' << p._last << '\n'; +} diff --git a/resources/3rdparty/sparsepp/examples/makefile b/resources/3rdparty/sparsepp/examples/makefile new file mode 100755 index 000000000..979a10fb9 --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/makefile @@ -0,0 +1,18 @@ +CXXFLAGS = -O2 -std=c++11 -I.. +CXXFLAGS += -Wall -pedantic -Wextra -D_XOPEN_SOURCE=700 +SPP_DEPS_1 = spp.h spp_utils.h spp_dlalloc.h spp_traits.h spp_config.h +SPP_DEPS = $(addprefix ../sparsepp/,$(SPP_DEPS_1)) +TARGETS = emplace hash_std serialize_file serialize_stream serialize_large + +ifeq ($(OS),Windows_NT) + LDFLAGS = -lpsapi +endif + +all: $(TARGETS) + +clean: + rm -f $(TARGETS) ages.dmp data.dat vsprojects/x64/* vsprojects/x86/* + +%: %.cc $(SPP_DEPS) makefile + $(CXX) $(CXXFLAGS) -DNDEBUG $< -o $@ $(LDFLAGS) + diff --git a/resources/3rdparty/sparsepp/examples/serialize_file.cc b/resources/3rdparty/sparsepp/examples/serialize_file.cc new file mode 100755 index 000000000..b682b6f9e --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/serialize_file.cc @@ -0,0 +1,82 @@ +#include +#include + +using spp::sparse_hash_map; +using namespace std; + +class FileSerializer +{ +public: + // serialize basic types to FILE + // ----------------------------- + template + bool operator()(FILE *fp, const T& value) + { + return fwrite((const void *)&value, sizeof(value), 1, fp) == 1; + } + + template + bool operator()(FILE *fp, T* value) + { + return fread((void *)value, sizeof(*value), 1, fp) == 1; + } + + // serialize std::string to FILE + // ----------------------------- + bool operator()(FILE *fp, const string& value) + { + const size_t size = value.size(); + return (*this)(fp, size) && fwrite(value.c_str(), size, 1, fp) == 1; + } + + bool operator()(FILE *fp, string* value) + { + size_t size; + if (!(*this)(fp, &size)) + return false; + char* buf = new char[size]; + if (fread(buf, size, 1, fp) != 1) + { + delete [] buf; + return false; + } + new (value) string(buf, (size_t)size); + delete[] buf; + return true; + } + + // serialize std::pair to FILE - needed for maps + // --------------------------------------------------------- + template + bool operator()(FILE *fp, const std::pair& value) + { + return (*this)(fp, value.first) && (*this)(fp, value.second); + } + + template + bool operator()(FILE *fp, std::pair *value) + { + return (*this)(fp, (A *)&value->first) && (*this)(fp, &value->second); + } +}; + +int main(int, char* []) +{ + sparse_hash_map age{ { "John", 12 }, {"Jane", 13 }, { "Fred", 8 } }; + + // serialize age hash_map to "ages.dmp" file + FILE *out = fopen("ages.dmp", "wb"); + age.serialize(FileSerializer(), out); + fclose(out); + + sparse_hash_map age_read; + + // read from "ages.dmp" file into age_read hash_map + FILE *input = fopen("ages.dmp", 
"rb"); + age_read.unserialize(FileSerializer(), input); + fclose(input); + + // print out contents of age_read to verify correct serialization + for (auto& v : age_read) + printf("age_read: %s -> %d\n", v.first.c_str(), v.second); +} diff --git a/resources/3rdparty/sparsepp/examples/serialize_large.cc b/resources/3rdparty/sparsepp/examples/serialize_large.cc new file mode 100755 index 000000000..574d34b13 --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/serialize_large.cc @@ -0,0 +1,97 @@ +#include +#include +#include +#include +#include +#include +#include + +using spp::sparse_hash_map; +using namespace std; + +class FileSerializer +{ +public: + // serialize basic types to FILE + // ----------------------------- + template + bool operator()(FILE *fp, const T& value) + { + return fwrite((const void *)&value, sizeof(value), 1, fp) == 1; + } + + template + bool operator()(FILE *fp, T* value) + { + return fread((void *)value, sizeof(*value), 1, fp) == 1; + } + + // serialize std::string to FILE + // ----------------------------- + bool operator()(FILE *fp, const string& value) + { + const size_t size = value.size(); + return (*this)(fp, size) && fwrite(value.c_str(), size, 1, fp) == 1; + } + + bool operator()(FILE *fp, string* value) + { + size_t size; + if (!(*this)(fp, &size)) + return false; + char* buf = new char[size]; + if (fread(buf, size, 1, fp) != 1) + { + delete [] buf; + return false; + } + new (value) string(buf, (size_t)size); + delete[] buf; + return true; + } + + // serialize std::pair to FILE - needed for maps + // --------------------------------------------------------- + template + bool operator()(FILE *fp, const std::pair& value) + { + return (*this)(fp, value.first) && (*this)(fp, value.second); + } + + template + bool operator()(FILE *fp, std::pair *value) + { + return (*this)(fp, (A *)&value->first) && (*this)(fp, &value->second); + } +}; + +float _to_gb(uint64_t m) { return (float)((double)m / (1024 * 1024 * 1024)); } + +int main(int, char* []) +{ + sparse_hash_map age; + + for (size_t i=0; i<10000000; ++i) + { + char buff[20]; + sprintf(buff, "%zu", i); + age.insert(std::make_pair(std::string(buff), i)); + } + + printf("before serialize(): mem_usage %4.1f GB\n", _to_gb(spp::GetProcessMemoryUsed())); + // serialize age hash_map to "ages.dmp" file + FILE *out = fopen("ages.dmp", "wb"); + age.serialize(FileSerializer(), out); + fclose(out); + + printf("before clear(): mem_usage %4.1f GB\n", _to_gb(spp::GetProcessMemoryUsed())); + age.clear(); + printf("after clear(): mem_usage %4.1f GB\n", _to_gb(spp::GetProcessMemoryUsed())); + + + // read from "ages.dmp" file into age_read hash_map + FILE *input = fopen("ages.dmp", "rb"); + age.unserialize(FileSerializer(), input); + fclose(input); + printf("after unserialize(): mem_usage %4.1f GB\n", _to_gb(spp::GetProcessMemoryUsed())); +} diff --git a/resources/3rdparty/sparsepp/examples/serialize_stream.cc b/resources/3rdparty/sparsepp/examples/serialize_stream.cc new file mode 100755 index 000000000..db65e456e --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/serialize_stream.cc @@ -0,0 +1,64 @@ +#include +#include +#include + +#include +using spp::sparse_hash_map; + +using namespace std; + +struct StringToIntSerializer +{ + bool operator()(std::ofstream* stream, const std::pair& value) const + { + size_t sizeSecond = sizeof(value.second); + size_t sizeFirst = value.first.size(); + stream->write((char*)&sizeFirst, sizeof(sizeFirst)); + stream->write(value.first.c_str(), sizeFirst); + 
stream->write((char*)&value.second, sizeSecond); + return true; + } + + bool operator()(std::ifstream* istream, std::pair* value) const + { + // Read key + size_t size = 0; + istream->read((char*)&size, sizeof(size)); + char * first = new char[size]; + istream->read(first, size); + new (const_cast(&value->first)) string(first, size); + + // Read value + istream->read((char *)&value->second, sizeof(value->second)); + return true; + } +}; + +int main(int , char* []) +{ + sparse_hash_map users; + + users["John"] = 12345; + users["Bob"] = 553; + users["Alice"] = 82200; + + // Write users to file "data.dat" + // ------------------------------ + std::ofstream* stream = new std::ofstream("data.dat", + std::ios::out | std::ios::trunc | std::ios::binary); + users.serialize(StringToIntSerializer(), stream); + stream->close(); + delete stream; + + // Read from file "data.dat" into users2 + // ------------------------------------- + sparse_hash_map users2; + std::ifstream* istream = new std::ifstream("data.dat"); + users2.unserialize(StringToIntSerializer(), istream); + istream->close(); + delete istream; + + for (sparse_hash_map::iterator it = users2.begin(); it != users2.end(); ++it) + printf("users2: %s -> %d\n", it->first.c_str(), it->second); + +} diff --git a/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj b/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj new file mode 100755 index 000000000..63b159bcb --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj @@ -0,0 +1,172 @@ + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + + + + + + + {19BC4240-15ED-4C76-BC57-34BB70FE163B} + Win32Proj + 8.1 + serialize_stream + + + + Application + v140 + MultiByte + + + Application + MultiByte + v140 + + + Application + v140 + MultiByte + + + Application + v140 + MultiByte + + + + + + + + + + + + + + + + + + + <_ProjectFileVersion>14.0.23107.0 + + + None + + + $(SolutionDir)$(Configuration)\ + $(Configuration)\ + true + + + true + + + $(SolutionDir)$(Configuration)\ + $(Configuration)\ + false + + + false + + + + Disabled + WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + true + EnableFastChecks + MultiThreadedDebug + + Level3 + ProgramDatabase + ../.. + + + $(OutDir)spp_alloc_test.exe + true + $(OutDir)spp_alloc_test.pdb + Console + MachineX86 + + + + + Disabled + WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + EnableFastChecks + MultiThreadedDebug + + + Level3 + ProgramDatabase + ../.. + + + $(OutDir)spp_alloc_test.exe + true + $(OutDir)spp_alloc_test.pdb + Console + + + + + WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreaded + + Level3 + ProgramDatabase + ../.. + + + $(OutDir)spp_alloc_test.exe + true + Console + true + true + MachineX86 + true + + + + + WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreaded + + + Level3 + ProgramDatabase + ../.. 
+ + + $(OutDir)spp_alloc_test.exe + true + Console + true + true + true + + + + + + \ No newline at end of file diff --git a/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj.filters b/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj.filters new file mode 100755 index 000000000..39ecd7689 --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/vsprojects/serialize_stream.vcxproj.filters @@ -0,0 +1,13 @@ + + + + + {ba5fa1b8-1783-4b3b-9a41-31d363b52841} + + + + + Header Files + + + \ No newline at end of file diff --git a/resources/3rdparty/sparsepp/examples/vsprojects/spp_examples.sln b/resources/3rdparty/sparsepp/examples/vsprojects/spp_examples.sln new file mode 100755 index 000000000..a37d41277 --- /dev/null +++ b/resources/3rdparty/sparsepp/examples/vsprojects/spp_examples.sln @@ -0,0 +1,28 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "serialize_stream", "serialize_stream.vcxproj", "{19BC4240-15ED-4C76-BC57-34BB70FE163B}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x64.ActiveCfg = Debug|x64 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x64.Build.0 = Debug|x64 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x86.ActiveCfg = Debug|Win32 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x86.Build.0 = Debug|Win32 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x64.ActiveCfg = Release|x64 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x64.Build.0 = Release|x64 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x86.ActiveCfg = Release|Win32 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/resources/3rdparty/sparsepp/sparsepp/spp.h b/resources/3rdparty/sparsepp/sparsepp/spp.h new file mode 100755 index 000000000..abd4295e9 --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp.h @@ -0,0 +1,4347 @@ +#if !defined(sparsepp_h_guard_) +#define sparsepp_h_guard_ + + +// ---------------------------------------------------------------------- +// Copyright (c) 2016, Gregory Popovitch - greg7mdp@gmail.com +// All rights reserved. +// +// This work is derived from Google's sparsehash library +// +// Copyright (c) 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// ---------------------------------------------------------------------- + + +// some macros for portability +// --------------------------- +// includes +// -------- +#include +#include +#include +#include // for numeric_limits +#include // For swap(), eg +#include // for iterator tags +#include // for equal_to<>, select1st<>, std::unary_function, etc +#include // for alloc, uninitialized_copy, uninitialized_fill +#include // for malloc/realloc/free +#include // for ptrdiff_t +#include // for placement new +#include // For length_error +#include // for pair<> +#include +#include +#include + +#include // includes spp_config.h +#include +#include + +#ifdef SPP_INCLUDE_SPP_ALLOC + #include +#endif + +#if !defined(SPP_NO_CXX11_HDR_INITIALIZER_LIST) + #include +#endif + +#if (SPP_GROUP_SIZE == 32) + #define SPP_SHIFT_ 5 + #define SPP_MASK_ 0x1F + typedef uint32_t group_bm_type; +#elif (SPP_GROUP_SIZE == 64) + #define SPP_SHIFT_ 6 + #define SPP_MASK_ 0x3F + typedef uint64_t group_bm_type; +#else + #error "SPP_GROUP_SIZE must be either 32 or 64" +#endif + +namespace spp_ { + +// ---------------------------------------------------------------------- +// U T I L F U N C T I O N S +// ---------------------------------------------------------------------- +template +inline void throw_exception(const E& exception) +{ +#if !defined(SPP_NO_EXCEPTIONS) + throw exception; +#else + assert(0); + abort(); +#endif +} + +// ---------------------------------------------------------------------- +// M U T A B L E P A I R H A C K +// turn std::pair into mutable std::pair +// ---------------------------------------------------------------------- +template +struct cvt +{ + typedef T type; +}; + +template +struct cvt > +{ + typedef std::pair type; +}; + +template +struct cvt > +{ + typedef const std::pair type; +}; + +// ---------------------------------------------------------------------- +// M O V E I T E R A T O R +// ---------------------------------------------------------------------- +#ifdef SPP_NO_CXX11_RVALUE_REFERENCES + #define MK_MOVE_IT(p) (p) +#else + #define MK_MOVE_IT(p) std::make_move_iterator(p) +#endif + + +// ---------------------------------------------------------------------- +// I N T E R N A L S T U F F +// ---------------------------------------------------------------------- +#ifdef SPP_NO_CXX11_STATIC_ASSERT + template struct SppCompileAssert { }; + #define SPP_COMPILE_ASSERT(expr, msg) \ + SPP_ATTRIBUTE_UNUSED typedef SppCompileAssert<(bool(expr))> spp_bogus_[bool(expr) ? 1 : -1] +#else + #define SPP_COMPILE_ASSERT static_assert +#endif + +namespace sparsehash_internal +{ + +// Adaptor methods for reading/writing data from an INPUT or OUPTUT +// variable passed to serialize() or unserialize(). 
For now we +// have implemented INPUT/OUTPUT for FILE*, istream*/ostream* (note +// they are pointers, unlike typical use), or else a pointer to +// something that supports a Read()/Write() method. +// +// For technical reasons, we implement read_data/write_data in two +// stages. The actual work is done in *_data_internal, which takes +// the stream argument twice: once as a template type, and once with +// normal type information. (We only use the second version.) We do +// this because of how C++ picks what function overload to use. If we +// implemented this the naive way: +// bool read_data(istream* is, const void* data, size_t length); +// template read_data(T* fp, const void* data, size_t length); +// C++ would prefer the second version for every stream type except +// istream. However, we want C++ to prefer the first version for +// streams that are *subclasses* of istream, such as istringstream. +// This is not possible given the way template types are resolved. So +// we split the stream argument in two, one of which is templated and +// one of which is not. The specialized functions (like the istream +// version above) ignore the template arg and use the second, 'type' +// arg, getting subclass matching as normal. The 'catch-all' +// functions (the second version above) use the template arg to deduce +// the type, and use a second, void* arg to achieve the desired +// 'catch-all' semantics. + + // ----- low-level I/O for FILE* ---- + + template + inline bool read_data_internal(Ignored* /*unused*/, FILE* fp, + void* data, size_t length) + { + return fread(data, length, 1, fp) == 1; + } + + template + inline bool write_data_internal(Ignored* /*unused*/, FILE* fp, + const void* data, size_t length) + { + return fwrite(data, length, 1, fp) == 1; + } + + // ----- low-level I/O for iostream ---- + + // We want the caller to be responsible for #including , not + // us, because iostream is a big header! According to the standard, + // it's only legal to delay the instantiation the way we want to if + // the istream/ostream is a template type. So we jump through hoops. + template + inline bool read_data_internal_for_istream(ISTREAM* fp, + void* data, size_t length) + { + return fp->read(reinterpret_cast(data), + static_cast(length)).good(); + } + template + inline bool read_data_internal(Ignored* /*unused*/, std::istream* fp, + void* data, size_t length) + { + return read_data_internal_for_istream(fp, data, length); + } + + template + inline bool write_data_internal_for_ostream(OSTREAM* fp, + const void* data, size_t length) + { + return fp->write(reinterpret_cast(data), + static_cast(length)).good(); + } + template + inline bool write_data_internal(Ignored* /*unused*/, std::ostream* fp, + const void* data, size_t length) + { + return write_data_internal_for_ostream(fp, data, length); + } + + // ----- low-level I/O for custom streams ---- + + // The INPUT type needs to support a Read() method that takes a + // buffer and a length and returns the number of bytes read. + template + inline bool read_data_internal(INPUT* fp, void* /*unused*/, + void* data, size_t length) + { + return static_cast(fp->Read(data, length)) == length; + } + + // The OUTPUT type needs to support a Write() operation that takes + // a buffer and a length and returns the number of bytes written. 
+ template + inline bool write_data_internal(OUTPUT* fp, void* /*unused*/, + const void* data, size_t length) + { + return static_cast(fp->Write(data, length)) == length; + } + + // ----- low-level I/O: the public API ---- + + template + inline bool read_data(INPUT* fp, void* data, size_t length) + { + return read_data_internal(fp, fp, data, length); + } + + template + inline bool write_data(OUTPUT* fp, const void* data, size_t length) + { + return write_data_internal(fp, fp, data, length); + } + + // Uses read_data() and write_data() to read/write an integer. + // length is the number of bytes to read/write (which may differ + // from sizeof(IntType), allowing us to save on a 32-bit system + // and load on a 64-bit system). Excess bytes are taken to be 0. + // INPUT and OUTPUT must match legal inputs to read/write_data (above). + // -------------------------------------------------------------------- + template + bool read_bigendian_number(INPUT* fp, IntType* value, size_t length) + { + *value = 0; + unsigned char byte; + // We require IntType to be unsigned or else the shifting gets all screwy. + SPP_COMPILE_ASSERT(static_cast(-1) > static_cast(0), "serializing_int_requires_an_unsigned_type"); + for (size_t i = 0; i < length; ++i) + { + if (!read_data(fp, &byte, sizeof(byte))) + return false; + *value |= static_cast(byte) << ((length - 1 - i) * 8); + } + return true; + } + + template + bool write_bigendian_number(OUTPUT* fp, IntType value, size_t length) + { + unsigned char byte; + // We require IntType to be unsigned or else the shifting gets all screwy. + SPP_COMPILE_ASSERT(static_cast(-1) > static_cast(0), "serializing_int_requires_an_unsigned_type"); + for (size_t i = 0; i < length; ++i) + { + byte = (sizeof(value) <= length-1 - i) + ? static_cast(0) : static_cast((value >> ((length-1 - i) * 8)) & 255); + if (!write_data(fp, &byte, sizeof(byte))) return false; + } + return true; + } + + // If your keys and values are simple enough, you can pass this + // serializer to serialize()/unserialize(). "Simple enough" means + // value_type is a POD type that contains no pointers. Note, + // however, we don't try to normalize endianness. + // This is the type used for NopointerSerializer. + // --------------------------------------------------------------- + template struct pod_serializer + { + template + bool operator()(INPUT* fp, value_type* value) const + { + return read_data(fp, value, sizeof(*value)); + } + + template + bool operator()(OUTPUT* fp, const value_type& value) const + { + return write_data(fp, &value, sizeof(value)); + } + }; + + + // Settings contains parameters for growing and shrinking the table. + // It also packages zero-size functor (ie. hasher). + // + // It does some munging of the hash value for the cases where + // the original hash function is not be very good. 
+ // --------------------------------------------------------------- + template + class sh_hashtable_settings : public HashFunc + { + private: +#ifndef SPP_MIX_HASH + template struct Mixer + { + inline T operator()(T h) const { return h; } + }; +#else + template struct Mixer + { + inline T operator()(T h) const; + }; + + template struct Mixer + { + inline T operator()(T h) const + { + // from Thomas Wang - https://gist.github.com/badboy/6267743 + // --------------------------------------------------------- + h = (h ^ 61) ^ (h >> 16); + h = h + (h << 3); + h = h ^ (h >> 4); + h = h * 0x27d4eb2d; + h = h ^ (h >> 15); + return h; + } + }; + + template struct Mixer + { + inline T operator()(T h) const + { + // from Thomas Wang - https://gist.github.com/badboy/6267743 + // --------------------------------------------------------- + h = (~h) + (h << 21); // h = (h << 21) - h - 1; + h = h ^ (h >> 24); + h = (h + (h << 3)) + (h << 8); // h * 265 + h = h ^ (h >> 14); + h = (h + (h << 2)) + (h << 4); // h * 21 + h = h ^ (h >> 28); + h = h + (h << 31); + return h; + } + }; +#endif + + public: + typedef Key key_type; + typedef HashFunc hasher; + typedef SizeType size_type; + + public: + sh_hashtable_settings(const hasher& hf, + const float ht_occupancy_flt, + const float ht_empty_flt) + : hasher(hf), + enlarge_threshold_(0), + shrink_threshold_(0), + consider_shrink_(false), + num_ht_copies_(0) + { + set_enlarge_factor(ht_occupancy_flt); + set_shrink_factor(ht_empty_flt); + } + + size_t hash(const key_type& v) const + { + size_t h = hasher::operator()(v); + Mixer mixer; + + return mixer(h); + } + + float enlarge_factor() const { return enlarge_factor_; } + void set_enlarge_factor(float f) { enlarge_factor_ = f; } + float shrink_factor() const { return shrink_factor_; } + void set_shrink_factor(float f) { shrink_factor_ = f; } + + size_type enlarge_threshold() const { return enlarge_threshold_; } + void set_enlarge_threshold(size_type t) { enlarge_threshold_ = t; } + size_type shrink_threshold() const { return shrink_threshold_; } + void set_shrink_threshold(size_type t) { shrink_threshold_ = t; } + + size_type enlarge_size(size_type x) const { return static_cast(x * enlarge_factor_); } + size_type shrink_size(size_type x) const { return static_cast(x * shrink_factor_); } + + bool consider_shrink() const { return consider_shrink_; } + void set_consider_shrink(bool t) { consider_shrink_ = t; } + + unsigned int num_ht_copies() const { return num_ht_copies_; } + void inc_num_ht_copies() { ++num_ht_copies_; } + + // Reset the enlarge and shrink thresholds + void reset_thresholds(size_type num_buckets) + { + set_enlarge_threshold(enlarge_size(num_buckets)); + set_shrink_threshold(shrink_size(num_buckets)); + // whatever caused us to reset already considered + set_consider_shrink(false); + } + + // Caller is resposible for calling reset_threshold right after + // set_resizing_parameters. 
+ // ------------------------------------------------------------ + void set_resizing_parameters(float shrink, float grow) + { + assert(shrink >= 0); + assert(grow <= 1); + if (shrink > grow/2.0f) + shrink = grow / 2.0f; // otherwise we thrash hashtable size + set_shrink_factor(shrink); + set_enlarge_factor(grow); + } + + // This is the smallest size a hashtable can be without being too crowded + // If you like, you can give a min #buckets as well as a min #elts + // ---------------------------------------------------------------------- + size_type min_buckets(size_type num_elts, size_type min_buckets_wanted) + { + float enlarge = enlarge_factor(); + size_type sz = HT_MIN_BUCKETS; // min buckets allowed + while (sz < min_buckets_wanted || + num_elts >= static_cast(sz * enlarge)) + { + // This just prevents overflowing size_type, since sz can exceed + // max_size() here. + // ------------------------------------------------------------- + if (static_cast(sz * 2) < sz) + throw_exception(std::length_error("resize overflow")); // protect against overflow + sz *= 2; + } + return sz; + } + + private: + size_type enlarge_threshold_; // table.size() * enlarge_factor + size_type shrink_threshold_; // table.size() * shrink_factor + float enlarge_factor_; // how full before resize + float shrink_factor_; // how empty before resize + bool consider_shrink_; // if we should try to shrink before next insert + + unsigned int num_ht_copies_; // num_ht_copies is a counter incremented every Copy/Move + }; + +} // namespace sparsehash_internal + +#undef SPP_COMPILE_ASSERT + +// ---------------------------------------------------------------------- +// S P A R S E T A B L E +// ---------------------------------------------------------------------- +// +// A sparsetable is a random container that implements a sparse array, +// that is, an array that uses very little memory to store unassigned +// indices (in this case, between 1-2 bits per unassigned index). For +// instance, if you allocate an array of size 5 and assign a[2] = , then a[2] will take up a lot of memory but a[0], a[1], +// a[3], and a[4] will not. Array elements that have a value are +// called "assigned". Array elements that have no value yet, or have +// had their value cleared using erase() or clear(), are called +// "unassigned". +// +// Unassigned values seem to have the default value of T (see below). +// Nevertheless, there is a difference between an unassigned index and +// one explicitly assigned the value of T(). The latter is considered +// assigned. +// +// Access to an array element is constant time, as is insertion and +// deletion. Insertion and deletion may be fairly slow, however: +// because of this container's memory economy, each insert and delete +// causes a memory reallocation. +// +// NOTE: You should not test(), get(), or set() any index that is +// greater than sparsetable.size(). If you need to do that, call +// resize() first. +// +// --- Template parameters +// PARAMETER DESCRIPTION DEFAULT +// T The value of the array: the type of -- +// object that is stored in the array. +// +// Alloc: Allocator to use to allocate memory. +// +// --- Model of +// Random Access Container +// +// --- Type requirements +// T must be Copy Constructible. It need not be Assignable. +// +// --- Public base classes +// None. +// +// --- Members +// +// [*] All iterators are const in a sparsetable (though nonempty_iterators +// may not be). Use get() and set() to assign values, not iterators. 
+// +// [+] iterators are random-access iterators. nonempty_iterators are +// bidirectional iterators. + +// [*] If you shrink a sparsetable using resize(), assigned elements +// past the end of the table are removed using erase(). If you grow +// a sparsetable, new unassigned indices are created. +// +// [+] Note that operator[] returns a const reference. You must use +// set() to change the value of a table element. +// +// [!] Unassignment also calls the destructor. +// +// Iterators are invalidated whenever an item is inserted or +// deleted (ie set() or erase() is used) or when the size of +// the table changes (ie resize() or clear() is used). + + + +// --------------------------------------------------------------------------- +// Our iterator as simple as iterators can be: basically it's just +// the index into our table. Dereference, the only complicated +// thing, we punt to the table class. This just goes to show how +// much machinery STL requires to do even the most trivial tasks. +// +// A NOTE ON ASSIGNING: +// A sparse table does not actually allocate memory for entries +// that are not filled. Because of this, it becomes complicated +// to have a non-const iterator: we don't know, if the iterator points +// to a not-filled bucket, whether you plan to fill it with something +// or whether you plan to read its value (in which case you'll get +// the default bucket value). Therefore, while we can define const +// operations in a pretty 'normal' way, for non-const operations, we +// define something that returns a helper object with operator= and +// operator& that allocate a bucket lazily. We use this for table[] +// and also for regular table iterators. + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- +// Our iterator as simple as iterators can be: basically it's just +// the index into our table. Dereference, the only complicated +// thing, we punt to the table class. This just goes to show how +// much machinery STL requires to do even the most trivial tasks. +// +// By templatizing over tabletype, we have one iterator type which +// we can use for both sparsetables and sparsebins. In fact it +// works on any class that allows size() and operator[] (eg vector), +// as long as it does the standard STL typedefs too (eg value_type). + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- +template +class table_iterator +{ +public: + typedef table_iterator iterator; + + typedef std::random_access_iterator_tag iterator_category; + typedef typename tabletype::value_type value_type; + typedef typename tabletype::difference_type difference_type; + typedef typename tabletype::size_type size_type; + + explicit table_iterator(tabletype *tbl = 0, size_type p = 0) : + table(tbl), pos(p) + { } + + // Helper function to assert things are ok; eg pos is still in range + void check() const + { + assert(table); + assert(pos <= table->size()); + } + + // Arithmetic: we just do arithmetic on pos. We don't even need to + // do bounds checking, since STL doesn't consider that its job. 
:-) + iterator& operator+=(size_type t) { pos += t; check(); return *this; } + iterator& operator-=(size_type t) { pos -= t; check(); return *this; } + iterator& operator++() { ++pos; check(); return *this; } + iterator& operator--() { --pos; check(); return *this; } + iterator operator++(int) + { + iterator tmp(*this); // for x++ + ++pos; check(); return tmp; + } + + iterator operator--(int) + { + iterator tmp(*this); // for x-- + --pos; check(); return tmp; + } + + iterator operator+(difference_type i) const + { + iterator tmp(*this); + tmp += i; return tmp; + } + + iterator operator-(difference_type i) const + { + iterator tmp(*this); + tmp -= i; return tmp; + } + + difference_type operator-(iterator it) const + { + // for "x = it2 - it" + assert(table == it.table); + return pos - it.pos; + } + + // Comparisons. + bool operator==(const iterator& it) const + { + return table == it.table && pos == it.pos; + } + + bool operator<(const iterator& it) const + { + assert(table == it.table); // life is bad bad bad otherwise + return pos < it.pos; + } + + bool operator!=(const iterator& it) const { return !(*this == it); } + bool operator<=(const iterator& it) const { return !(it < *this); } + bool operator>(const iterator& it) const { return it < *this; } + bool operator>=(const iterator& it) const { return !(*this < it); } + + // Here's the info we actually need to be an iterator + tabletype *table; // so we can dereference and bounds-check + size_type pos; // index into the table +}; + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- +template +class const_table_iterator +{ +public: + typedef table_iterator iterator; + typedef const_table_iterator const_iterator; + + typedef std::random_access_iterator_tag iterator_category; + typedef typename tabletype::value_type value_type; + typedef typename tabletype::difference_type difference_type; + typedef typename tabletype::size_type size_type; + typedef typename tabletype::const_reference reference; // we're const-only + typedef typename tabletype::const_pointer pointer; + + // The "real" constructor + const_table_iterator(const tabletype *tbl, size_type p) + : table(tbl), pos(p) { } + + // The default constructor, used when I define vars of type table::iterator + const_table_iterator() : table(NULL), pos(0) { } + + // The copy constructor, for when I say table::iterator foo = tbl.begin() + // Also converts normal iterators to const iterators // not explicit on purpose + const_table_iterator(const iterator &from) + : table(from.table), pos(from.pos) { } + + // The default destructor is fine; we don't define one + // The default operator= is fine; we don't define one + + // The main thing our iterator does is dereference. If the table entry + // we point to is empty, we return the default value type. + reference operator*() const { return (*table)[pos]; } + pointer operator->() const { return &(operator*()); } + + // Helper function to assert things are ok; eg pos is still in range + void check() const + { + assert(table); + assert(pos <= table->size()); + } + + // Arithmetic: we just do arithmetic on pos. We don't even need to + // do bounds checking, since STL doesn't consider that its job. 
:-) + const_iterator& operator+=(size_type t) { pos += t; check(); return *this; } + const_iterator& operator-=(size_type t) { pos -= t; check(); return *this; } + const_iterator& operator++() { ++pos; check(); return *this; } + const_iterator& operator--() { --pos; check(); return *this; } + const_iterator operator++(int) + { + const_iterator tmp(*this); // for x++ + ++pos; check(); + return tmp; + } + const_iterator operator--(int) + { + const_iterator tmp(*this); // for x-- + --pos; check(); + return tmp; + } + const_iterator operator+(difference_type i) const + { + const_iterator tmp(*this); + tmp += i; + return tmp; + } + const_iterator operator-(difference_type i) const + { + const_iterator tmp(*this); + tmp -= i; + return tmp; + } + difference_type operator-(const_iterator it) const + { + // for "x = it2 - it" + assert(table == it.table); + return pos - it.pos; + } + reference operator[](difference_type n) const + { + return *(*this + n); // simple though not totally efficient + } + + // Comparisons. + bool operator==(const const_iterator& it) const + { + return table == it.table && pos == it.pos; + } + + bool operator<(const const_iterator& it) const + { + assert(table == it.table); // life is bad bad bad otherwise + return pos < it.pos; + } + bool operator!=(const const_iterator& it) const { return !(*this == it); } + bool operator<=(const const_iterator& it) const { return !(it < *this); } + bool operator>(const const_iterator& it) const { return it < *this; } + bool operator>=(const const_iterator& it) const { return !(*this < it); } + + // Here's the info we actually need to be an iterator + const tabletype *table; // so we can dereference and bounds-check + size_type pos; // index into the table +}; + +// --------------------------------------------------------------------------- +// This is a 2-D iterator. You specify a begin and end over a list +// of *containers*. We iterate over each container by iterating over +// it. It's actually simple: +// VECTOR.begin() VECTOR[0].begin() --------> VECTOR[0].end() ---, +// | ________________________________________________/ +// | \_> VECTOR[1].begin() --------> VECTOR[1].end() -, +// | ___________________________________________________/ +// v \_> ...... +// VECTOR.end() +// +// It's impossible to do random access on one of these things in constant +// time, so it's just a bidirectional iterator. +// +// Unfortunately, because we need to use this for a non-empty iterator, +// we use ne_begin() and ne_end() instead of begin() and end() +// (though only going across, not down). 
+// --------------------------------------------------------------------------- + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- +template +class Two_d_iterator : public std::iterator +{ +public: + typedef Two_d_iterator iterator; + typedef T value_type; + + explicit Two_d_iterator(row_it curr) : row_current(curr), col_current(0) + { + if (row_current && !row_current->is_marked()) + { + col_current = row_current->ne_begin(); + advance_past_end(); // in case cur->begin() == cur->end() + } + } + + explicit Two_d_iterator(row_it curr, col_it col) : row_current(curr), col_current(col) + { + assert(col); + } + + // The default constructor + Two_d_iterator() : row_current(0), col_current(0) { } + + // Need this explicitly so we can convert normal iterators <=> const iterators + // not explicit on purpose + // --------------------------------------------------------------------------- + template + Two_d_iterator(const Two_d_iterator& it) : + row_current (*(row_it *)&it.row_current), + col_current (*(col_it *)&it.col_current) + { } + + // The default destructor is fine; we don't define one + // The default operator= is fine; we don't define one + + value_type& operator*() const { return *(col_current); } + value_type* operator->() const { return &(operator*()); } + + // Arithmetic: we just do arithmetic on pos. We don't even need to + // do bounds checking, since STL doesn't consider that its job. :-) + // NOTE: this is not amortized constant time! What do we do about it? + // ------------------------------------------------------------------ + void advance_past_end() + { + // used when col_current points to end() + while (col_current == row_current->ne_end()) + { + // end of current row + // ------------------ + ++row_current; // go to beginning of next + if (!row_current->is_marked()) // col is irrelevant at end + col_current = row_current->ne_begin(); + else + break; // don't go past row_end + } + } + + friend size_t operator-(iterator l, iterator f) + { + if (f.row_current->is_marked()) + return 0; + + size_t diff(0); + while (f != l) + { + ++diff; + ++f; + } + return diff; + } + + iterator& operator++() + { + // assert(!row_current->is_marked()); // how to ++ from there? + ++col_current; + advance_past_end(); // in case col_current is at end() + return *this; + } + + iterator& operator--() + { + while (row_current->is_marked() || + col_current == row_current->ne_begin()) + { + --row_current; + col_current = row_current->ne_end(); // this is 1 too far + } + --col_current; + return *this; + } + iterator operator++(int) { iterator tmp(*this); ++*this; return tmp; } + iterator operator--(int) { iterator tmp(*this); --*this; return tmp; } + + + // Comparisons. 
+ bool operator==(const iterator& it) const + { + return (row_current == it.row_current && + (!row_current || row_current->is_marked() || col_current == it.col_current)); + } + + bool operator!=(const iterator& it) const { return !(*this == it); } + + // Here's the info we actually need to be an iterator + // These need to be public so we convert from iterator to const_iterator + // --------------------------------------------------------------------- + row_it row_current; + col_it col_current; +}; + + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- +template +class Two_d_destructive_iterator : public Two_d_iterator +{ +public: + typedef Two_d_destructive_iterator iterator; + + Two_d_destructive_iterator(Alloc &alloc, row_it curr) : + _alloc(alloc) + { + this->row_current = curr; + this->col_current = 0; + if (this->row_current && !this->row_current->is_marked()) + { + this->col_current = this->row_current->ne_begin(); + advance_past_end(); // in case cur->begin() == cur->end() + } + } + + // Arithmetic: we just do arithmetic on pos. We don't even need to + // do bounds checking, since STL doesn't consider that its job. :-) + // NOTE: this is not amortized constant time! What do we do about it? + // ------------------------------------------------------------------ + void advance_past_end() + { + // used when col_current points to end() + while (this->col_current == this->row_current->ne_end()) + { + this->row_current->clear(_alloc, true); // This is what differs from non-destructive iterators above + + // end of current row + // ------------------ + ++this->row_current; // go to beginning of next + if (!this->row_current->is_marked()) // col is irrelevant at end + this->col_current = this->row_current->ne_begin(); + else + break; // don't go past row_end + } + } + + iterator& operator++() + { + // assert(!this->row_current->is_marked()); // how to ++ from there? + ++this->col_current; + advance_past_end(); // in case col_current is at end() + return *this; + } + +private: + Two_d_destructive_iterator& operator=(const Two_d_destructive_iterator &o); + + Alloc &_alloc; +}; + + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- +#if defined(SPP_POPCNT_CHECK) +static inline bool spp_popcount_check() +{ + int cpuInfo[4] = { -1 }; + spp_cpuid(cpuInfo, 1); + if (cpuInfo[2] & (1 << 23)) + return true; // means SPP_POPCNT supported + return false; +} +#endif + +#if defined(SPP_POPCNT_CHECK) && defined(SPP_POPCNT) + +static inline uint32_t spp_popcount(uint32_t i) +{ + static const bool s_ok = spp_popcount_check(); + return s_ok ? SPP_POPCNT(i) : s_spp_popcount_default(i); +} + +#else + +static inline uint32_t spp_popcount(uint32_t i) +{ +#if defined(SPP_POPCNT) + return static_cast(SPP_POPCNT(i)); +#else + return s_spp_popcount_default(i); +#endif +} + +#endif + +#if defined(SPP_POPCNT_CHECK) && defined(SPP_POPCNT64) + +static inline uint32_t spp_popcount(uint64_t i) +{ + static const bool s_ok = spp_popcount_check(); + return s_ok ? 
(uint32_t)SPP_POPCNT64(i) : s_spp_popcount_default(i); +} + +#else + +static inline uint32_t spp_popcount(uint64_t i) +{ +#if defined(SPP_POPCNT64) + return static_cast(SPP_POPCNT64(i)); +#elif 1 + return s_spp_popcount_default(i); +#endif +} + +#endif + +// --------------------------------------------------------------------------- +// SPARSE-TABLE +// ------------ +// The idea is that a table with (logically) t buckets is divided +// into t/M *groups* of M buckets each. (M is a constant, typically +// 32) Each group is stored sparsely. +// Thus, inserting into the table causes some array to grow, which is +// slow but still constant time. Lookup involves doing a +// logical-position-to-sparse-position lookup, which is also slow but +// constant time. The larger M is, the slower these operations are +// but the less overhead (slightly). +// +// To store the sparse array, we store a bitmap B, where B[i] = 1 iff +// bucket i is non-empty. Then to look up bucket i we really look up +// array[# of 1s before i in B]. This is constant time for fixed M. +// +// Terminology: the position of an item in the overall table (from +// 1 .. t) is called its "location." The logical position in a group +// (from 1 .. M) is called its "position." The actual location in +// the array (from 1 .. # of non-empty buckets in the group) is +// called its "offset." +// --------------------------------------------------------------------------- + +template +class sparsegroup +{ +public: + // Basic types + typedef T value_type; + typedef Alloc allocator_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + + typedef uint8_t size_type; // max # of buckets + + // These are our special iterators, that go over non-empty buckets in a + // group. These aren't const-only because you can change non-empty bcks. 
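A minimal stand-alone sketch of the "array[# of 1s before i in B]" lookup described in the sparse-table comment above; the helper name toy_offset is hypothetical and does not appear in sparsepp, and only standard headers are used:

#include <bitset>
#include <cstdint>

// Dense-array offset of logical position `pos` inside a group whose
// occupancy bitmap is `bm`: count the 1-bits strictly below `pos`.
inline unsigned toy_offset(uint32_t bm, unsigned pos)   // requires pos < 32
{
    uint32_t below = bm & ((1u << pos) - 1u);
    return static_cast<unsigned>(std::bitset<32>(below).count());
}

// Example: bm = 0x0B means buckets 0, 1 and 3 are occupied, so
// toy_offset(0x0B, 3) == 2 and bucket 3 is stored at array[2].

This is essentially what the pos_to_offset() member defined further down computes with spp_popcount().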
+ // --------------------------------------------------------------------- + typedef pointer ne_iterator; + typedef const_pointer const_ne_iterator; + typedef std::reverse_iterator reverse_ne_iterator; + typedef std::reverse_iterator const_reverse_ne_iterator; + + // We'll have versions for our special non-empty iterator too + // ---------------------------------------------------------- + ne_iterator ne_begin() { return reinterpret_cast(_group); } + const_ne_iterator ne_begin() const { return reinterpret_cast(_group); } + const_ne_iterator ne_cbegin() const { return reinterpret_cast(_group); } + ne_iterator ne_end() { return reinterpret_cast(_group + _num_items()); } + const_ne_iterator ne_end() const { return reinterpret_cast(_group + _num_items()); } + const_ne_iterator ne_cend() const { return reinterpret_cast(_group + _num_items()); } + reverse_ne_iterator ne_rbegin() { return reverse_ne_iterator(ne_end()); } + const_reverse_ne_iterator ne_rbegin() const { return const_reverse_ne_iterator(ne_cend()); } + const_reverse_ne_iterator ne_crbegin() const { return const_reverse_ne_iterator(ne_cend()); } + reverse_ne_iterator ne_rend() { return reverse_ne_iterator(ne_begin()); } + const_reverse_ne_iterator ne_rend() const { return const_reverse_ne_iterator(ne_cbegin()); } + const_reverse_ne_iterator ne_crend() const { return const_reverse_ne_iterator(ne_cbegin()); } + +private: + // T can be std::pair, but sometime we need to cast to a mutable type + // ------------------------------------------------------------------------------ + typedef typename spp_::cvt::type mutable_value_type; + typedef mutable_value_type * mutable_pointer; + typedef const mutable_value_type * const_mutable_pointer; + + bool _bmtest(size_type i) const { return !!(_bitmap & (static_cast(1) << i)); } + void _bmset(size_type i) { _bitmap |= static_cast(1) << i; } + void _bmclear(size_type i) { _bitmap &= ~(static_cast(1) << i); } + + bool _bme_test(size_type i) const { return !!(_bm_erased & (static_cast(1) << i)); } + void _bme_set(size_type i) { _bm_erased |= static_cast(1) << i; } + void _bme_clear(size_type i) { _bm_erased &= ~(static_cast(1) << i); } + + bool _bmtest_strict(size_type i) const + { return !!((_bitmap | _bm_erased) & (static_cast(1) << i)); } + + + static uint32_t _sizing(uint32_t n) + { +#if !defined(SPP_ALLOC_SZ) || (SPP_ALLOC_SZ == 0) + // aggressive allocation first, then decreasing as sparsegroups fill up + // -------------------------------------------------------------------- + static uint8_t s_alloc_batch_sz[SPP_GROUP_SIZE] = { 0 }; + if (!s_alloc_batch_sz[0]) + { + // 32 bit bitmap + // ........ .... .... .. .. .. .. . . . . . . . . + // 8 12 16 18 20 22 24 25 26 ... 32 + // ------------------------------------------------------ + uint8_t group_sz = SPP_GROUP_SIZE / 4; + uint8_t group_start_alloc = SPP_GROUP_SIZE / 8; //4; + uint8_t alloc_sz = group_start_alloc; + for (int i=0; i<4; ++i) + { + for (int j=0; j 2) + group_start_alloc /= 2; + alloc_sz += group_start_alloc; + } + } + + return n ? 
static_cast(s_alloc_batch_sz[n-1]) : 0; // more aggressive alloc at the beginning + +#elif (SPP_ALLOC_SZ == 1) + // use as little memory as possible - slowest insert/delete in table + // ----------------------------------------------------------------- + return n; +#else + // decent compromise when SPP_ALLOC_SZ == 2 + // ---------------------------------------- + static size_type sz_minus_1 = SPP_ALLOC_SZ - 1; + return (n + sz_minus_1) & ~sz_minus_1; +#endif + } + + pointer _allocate_group(allocator_type &alloc, uint32_t n /* , bool tight = false */) + { + // ignore tight since we don't store num_alloc + // num_alloc = (uint8_t)(tight ? n : _sizing(n)); + + uint32_t num_alloc = (uint8_t)_sizing(n); + _set_num_alloc(num_alloc); + pointer retval = alloc.allocate(static_cast(num_alloc)); + if (retval == NULL) + { + // the allocator is supposed to throw an exception if the allocation fails. + fprintf(stderr, "sparsehash FATAL ERROR: failed to allocate %d groups\n", num_alloc); + exit(1); + } + return retval; + } + + void _free_group(allocator_type &alloc, uint32_t num_alloc) + { + if (_group) + { + uint32_t num_buckets = _num_items(); + if (num_buckets) + { + mutable_pointer end_it = (mutable_pointer)(_group + num_buckets); + for (mutable_pointer p = (mutable_pointer)_group; p != end_it; ++p) + p->~mutable_value_type(); + } + alloc.deallocate(_group, (typename allocator_type::size_type)num_alloc); + _group = NULL; + } + } + + // private because should not be called - no allocator! + sparsegroup &operator=(const sparsegroup& x); + + static size_type _pos_to_offset(group_bm_type bm, size_type pos) + { + //return (size_type)((uint32_t)~((int32_t(-1) + pos) >> 31) & spp_popcount(bm << (SPP_GROUP_SIZE - pos))); + //return (size_type)(pos ? spp_popcount(bm << (SPP_GROUP_SIZE - pos)) : 0); + return static_cast(spp_popcount(bm & ((static_cast(1) << pos) - 1))); + } + +public: + + // get_iter() in sparsetable needs it + size_type pos_to_offset(size_type pos) const + { + return _pos_to_offset(_bitmap, pos); + } + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable : 4146) +#endif + + // Returns the (logical) position in the bm[] array, i, such that + // bm[i] is the offset-th set bit in the array. It is the inverse + // of pos_to_offset. get_pos() uses this function to find the index + // of an ne_iterator in the table. Bit-twiddling from + // http://hackersdelight.org/basics.pdf + // ----------------------------------------------------------------- + static size_type offset_to_pos(group_bm_type bm, size_type offset) + { + for (; offset > 0; offset--) + bm &= (bm-1); // remove right-most set bit + + // Clear all bits to the left of the rightmost bit (the &), + // and then clear the rightmost bit but set all bits to the + // right of it (the -1). 
+ // -------------------------------------------------------- + bm = (bm & -bm) - 1; + return static_cast(spp_popcount(bm)); + } + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + + size_type offset_to_pos(size_type offset) const + { + return offset_to_pos(_bitmap, offset); + } + +public: + // Constructors -- default and copy -- and destructor + explicit sparsegroup() : + _group(0), _bitmap(0), _bm_erased(0) + { + _set_num_items(0); + _set_num_alloc(0); + } + + sparsegroup(const sparsegroup& x) : + _group(0), _bitmap(x._bitmap), _bm_erased(x._bm_erased) + { + _set_num_items(0); + _set_num_alloc(0); + assert(_group == 0); if (_group) exit(1); + } + + sparsegroup(const sparsegroup& x, allocator_type& a) : + _group(0), _bitmap(x._bitmap), _bm_erased(x._bm_erased) + { + _set_num_items(0); + _set_num_alloc(0); + + uint32_t num_items = x._num_items(); + if (num_items) + { + _group = _allocate_group(a, num_items /* , true */); + _set_num_items(num_items); + std::uninitialized_copy(x._group, x._group + num_items, _group); + } + } + + ~sparsegroup() { assert(_group == 0); if (_group) exit(1); } + + void destruct(allocator_type& a) { _free_group(a, _num_alloc()); } + + // Many STL algorithms use swap instead of copy constructors + void swap(sparsegroup& x) + { + using std::swap; + + swap(_group, x._group); + swap(_bitmap, x._bitmap); + swap(_bm_erased, x._bm_erased); +#ifdef SPP_STORE_NUM_ITEMS + swap(_num_buckets, x._num_buckets); + swap(_num_allocated, x._num_allocated); +#endif + } + + // It's always nice to be able to clear a table without deallocating it + void clear(allocator_type &alloc, bool erased) + { + _free_group(alloc, _num_alloc()); + _bitmap = 0; + if (erased) + _bm_erased = 0; + _set_num_items(0); + _set_num_alloc(0); + } + + // Functions that tell you about size. Alas, these aren't so useful + // because our table is always fixed size. + size_type size() const { return static_cast(SPP_GROUP_SIZE); } + size_type max_size() const { return static_cast(SPP_GROUP_SIZE); } + + bool empty() const { return false; } + + // We also may want to know how many *used* buckets there are + size_type num_nonempty() const { return (size_type)_num_items(); } + + // TODO(csilvers): make protected + friend + // This is used by sparse_hashtable to get an element from the table + // when we know it exists. 
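The bit-twiddling in offset_to_pos() above inverts that mapping; a stand-alone sketch of the same steps (toy_pos is again a hypothetical name):

#include <bitset>
#include <cstdint>

// Logical position of the `offset`-th occupied bucket: drop `offset` low
// set bits, then count everything strictly below the next remaining bit.
inline unsigned toy_pos(uint32_t bm, unsigned offset)
{
    for (; offset > 0; --offset)
        bm &= bm - 1;                           // clear the lowest set bit
    uint32_t below = (bm & (0u - bm)) - 1u;     // ones below the lowest set bit
    return static_cast<unsigned>(std::bitset<32>(below).count());
}

// Example: toy_pos(0x0B, 2) == 3, the inverse of toy_offset(0x0B, 3) == 2
// from the sketch further up.

Writing the isolate-lowest-bit step as `bm & (0u - bm)` avoids negating an unsigned value, which is why the sketch needs no equivalent of the MSVC C4146 pragma wrapped around offset_to_pos() above.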
+ reference unsafe_get(size_type i) const + { + // assert(_bmtest(i)); + return (reference)_group[pos_to_offset(i)]; + } + + typedef std::pair SetResult; + +private: + //typedef spp_::integral_constant::value> check_relocatable; + typedef spp_::true_type realloc_ok_type; + typedef spp_::false_type realloc_not_ok_type; + + //typedef spp_::zero_type libc_reloc_type; + //typedef spp_::one_type spp_reloc_type; + //typedef spp_::two_type spp_not_reloc_type; + //typedef spp_::three_type generic_alloc_type; + +#if 1 + typedef typename if_<((spp_::is_same >::value || + spp_::is_same >::value) && + spp_::is_relocatable::value), realloc_ok_type, realloc_not_ok_type>::type + check_alloc_type; +#else + typedef typename if_ >::value, + typename if_::value, spp_reloc_type, spp_not_reloc_type>::type, + typename if_<(spp_::is_same >::value && + spp_::is_relocatable::value), libc_reloc_type, generic_alloc_type>::type >::type + check_alloc_type; +#endif + + + //typedef if_ >::value, + // libc_alloc_type, + // if_ >::value, + // spp_alloc_type, user_alloc_type> > check_alloc_type; + + //typedef spp_::integral_constant::value && + // (spp_::is_same >::value || + // spp_::is_same >::value)) > + // realloc_and_memmove_ok; + + // ------------------------- memory at *p is uninitialized => need to construct + void _init_val(mutable_value_type *p, reference val) + { +#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES) + ::new (p) value_type(std::move(val)); +#else + ::new (p) value_type(val); +#endif + } + + // ------------------------- memory at *p is uninitialized => need to construct + void _init_val(mutable_value_type *p, const_reference val) + { + ::new (p) value_type(val); + } + + // ------------------------------------------------ memory at *p is initialized + void _set_val(value_type *p, reference val) + { +#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES) + *(mutable_pointer)p = std::move(val); +#else + using std::swap; + swap(*(mutable_pointer)p, *(mutable_pointer)&val); +#endif + } + + // ------------------------------------------------ memory at *p is initialized + void _set_val(value_type *p, const_reference val) + { + *(mutable_pointer)p = *(const_mutable_pointer)&val; + } + + // Create space at _group[offset], assuming value_type is relocatable, and the + // allocator_type is the spp allocator. + // return true if the slot was constructed (i.e. contains a valid value_type + // --------------------------------------------------------------------------------- + template + void _set_aux(allocator_type &alloc, size_type offset, Val &val, realloc_ok_type) + { + //static int x=0; if (++x < 10) printf("x\n"); // check we are getting here + + uint32_t num_items = _num_items(); + uint32_t num_alloc = _sizing(num_items); + + if (num_items == num_alloc) + { + num_alloc = _sizing(num_items + 1); + _group = alloc.reallocate(_group, num_alloc); + _set_num_alloc(num_alloc); + } + + for (uint32_t i = num_items; i > offset; --i) + memcpy(_group + i, _group + i-1, sizeof(*_group)); + + _init_val((mutable_pointer)(_group + offset), val); + } + + // Create space at _group[offset], assuming value_type is *not* relocatable, and the + // allocator_type is the spp allocator. + // return true if the slot was constructed (i.e. 
contains a valid value_type + // --------------------------------------------------------------------------------- + template + void _set_aux(allocator_type &alloc, size_type offset, Val &val, realloc_not_ok_type) + { + uint32_t num_items = _num_items(); + uint32_t num_alloc = _sizing(num_items); + + //assert(num_alloc == (uint32_t)_num_allocated); + if (num_items < num_alloc) + { + // create new object at end and rotate it to position + _init_val((mutable_pointer)&_group[num_items], val); + std::rotate((mutable_pointer)(_group + offset), + (mutable_pointer)(_group + num_items), + (mutable_pointer)(_group + num_items + 1)); + return; + } + + // This is valid because 0 <= offset <= num_items + pointer p = _allocate_group(alloc, _sizing(num_items + 1)); + if (offset) + std::uninitialized_copy(MK_MOVE_IT((mutable_pointer)_group), + MK_MOVE_IT((mutable_pointer)(_group + offset)), + (mutable_pointer)p); + if (num_items > offset) + std::uninitialized_copy(MK_MOVE_IT((mutable_pointer)(_group + offset)), + MK_MOVE_IT((mutable_pointer)(_group + num_items)), + (mutable_pointer)(p + offset + 1)); + _init_val((mutable_pointer)(p + offset), val); + _free_group(alloc, num_alloc); + _group = p; + } + + // ---------------------------------------------------------------------------------- + template + void _set(allocator_type &alloc, size_type i, size_type offset, Val &val) + { + if (!_bmtest(i)) + { + _set_aux(alloc, offset, val, check_alloc_type()); + _incr_num_items(); + _bmset(i); + } + else + _set_val(&_group[offset], val); + } + +public: + + // This returns the pointer to the inserted item + // --------------------------------------------- + template + pointer set(allocator_type &alloc, size_type i, Val &val) + { + _bme_clear(i); // in case this was an "erased" location + + size_type offset = pos_to_offset(i); + _set(alloc, i, offset, val); // may change _group pointer + return (pointer)(_group + offset); + } + + // We let you see if a bucket is non-empty without retrieving it + // ------------------------------------------------------------- + bool test(size_type i) const + { + return _bmtest(i); + } + + // also tests for erased values + // ---------------------------- + bool test_strict(size_type i) const + { + return _bmtest_strict(i); + } + +private: + // Shrink the array, assuming value_type is relocatable, and the + // allocator_type is the libc allocator (supporting reallocate). + // ------------------------------------------------------------- + void _group_erase_aux(allocator_type &alloc, size_type offset, realloc_ok_type) + { + // static int x=0; if (++x < 10) printf("Y\n"); // check we are getting here + uint32_t num_items = _num_items(); + uint32_t num_alloc = _sizing(num_items); + + if (num_items == 1) + { + assert(offset == 0); + _free_group(alloc, num_alloc); + _set_num_alloc(0); + return; + } + + _group[offset].~value_type(); + + for (size_type i = offset; i < num_items - 1; ++i) + memcpy(_group + i, _group + i + 1, sizeof(*_group)); + + if (_sizing(num_items - 1) != num_alloc) + { + num_alloc = _sizing(num_items - 1); + assert(num_alloc); // because we have at least 1 item left + _set_num_alloc(num_alloc); + _group = alloc.reallocate(_group, num_alloc); + } + } + + // Shrink the array, without any special assumptions about value_type and + // allocator_type. 
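The _set_aux()/_group_erase_aux() overload pairs above are chosen by tag dispatch on check_alloc_type, i.e. on whether value_type may be shuffled around with memcpy()/reallocate(). A minimal stand-alone sketch of that idiom, using std::is_trivially_copyable as a rough stand-in for spp's is_relocatable trait (all names here are hypothetical):

#include <cstring>
#include <new>
#include <type_traits>

// Relocatable case: a raw byte copy is an acceptable "move".
template <class T>
void relocate_one(T* dst, T* src, std::true_type)
{
    std::memcpy(dst, src, sizeof(T));
}

// General case: construct in place, then destroy the source.
template <class T>
void relocate_one(T* dst, T* src, std::false_type)
{
    ::new (static_cast<void*>(dst)) T(*src);
    src->~T();
}

template <class T>
void relocate_one(T* dst, T* src)
{
    relocate_one(dst, src, std::is_trivially_copyable<T>());
}

As the check_alloc_type typedef above shows, the relocating path is additionally restricted to the spp_allocator/libc_allocator cases, since those are the allocators that provide reallocate().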
+ // -------------------------------------------------------------------------- + void _group_erase_aux(allocator_type &alloc, size_type offset, realloc_not_ok_type) + { + uint32_t num_items = _num_items(); + uint32_t num_alloc = _sizing(num_items); + + if (_sizing(num_items - 1) != num_alloc) + { + pointer p = 0; + if (num_items > 1) + { + p = _allocate_group(alloc, num_items - 1); + if (offset) + std::uninitialized_copy(MK_MOVE_IT((mutable_pointer)(_group)), + MK_MOVE_IT((mutable_pointer)(_group + offset)), + (mutable_pointer)(p)); + if (static_cast(offset + 1) < num_items) + std::uninitialized_copy(MK_MOVE_IT((mutable_pointer)(_group + offset + 1)), + MK_MOVE_IT((mutable_pointer)(_group + num_items)), + (mutable_pointer)(p + offset)); + } + else + { + assert(offset == 0); + _set_num_alloc(0); + } + _free_group(alloc, num_alloc); + _group = p; + } + else + { + std::rotate((mutable_pointer)(_group + offset), + (mutable_pointer)(_group + offset + 1), + (mutable_pointer)(_group + num_items)); + ((mutable_pointer)(_group + num_items - 1))->~mutable_value_type(); + } + } + + void _group_erase(allocator_type &alloc, size_type offset) + { + _group_erase_aux(alloc, offset, check_alloc_type()); + } + +public: + template + bool erase_ne(allocator_type &alloc, twod_iter &it) + { + assert(_group && it.col_current != ne_end()); + size_type offset = (size_type)(it.col_current - ne_begin()); + size_type pos = offset_to_pos(offset); + + if (_num_items() <= 1) + { + clear(alloc, false); + it.col_current = 0; + } + else + { + _group_erase(alloc, offset); + _decr_num_items(); + _bmclear(pos); + + // in case _group_erase reallocated the buffer + it.col_current = reinterpret_cast(_group) + offset; + } + _bme_set(pos); // remember that this position has been erased + it.advance_past_end(); + return true; + } + + + // This takes the specified elements out of the group. This is + // "undefining", rather than "clearing". + // TODO(austern): Make this exception safe: handle exceptions from + // value_type's copy constructor. + // --------------------------------------------------------------- + void erase(allocator_type &alloc, size_type i) + { + if (_bmtest(i)) + { + // trivial to erase empty bucket + if (_num_items() == 1) + clear(alloc, false); + else + { + _group_erase(alloc, pos_to_offset(i)); + _decr_num_items(); + _bmclear(i); + } + _bme_set(i); // remember that this position has been erased + } + } + + // I/O + // We support reading and writing groups to disk. We don't store + // the actual array contents (which we don't know how to store), + // just the bitmap and size. Meant to be used with table I/O. + // -------------------------------------------------------------- + template bool write_metadata(OUTPUT *fp) const + { + // warning: we write 4 or 8 bytes for the bitmap, instead of 6 in the + // original google sparsehash + // ------------------------------------------------------------------ + if (!sparsehash_internal::write_data(fp, &_bitmap, sizeof(_bitmap))) + return false; + + return true; + } + + // Reading destroys the old group contents! Returns true if all was ok. + template bool read_metadata(allocator_type &alloc, INPUT *fp) + { + clear(alloc, true); + + if (!sparsehash_internal::read_data(fp, &_bitmap, sizeof(_bitmap))) + return false; + + // We'll allocate the space, but we won't fill it: it will be + // left as uninitialized raw memory. + uint32_t num_items = spp_popcount(_bitmap); // yes, _num_buckets not set + _set_num_items(num_items); + _group = num_items ? 
_allocate_group(alloc, num_items/* , true */) : 0; + return true; + } + + // Again, only meaningful if value_type is a POD. + template bool read_nopointer_data(INPUT *fp) + { + for (ne_iterator it = ne_begin(); it != ne_end(); ++it) + if (!sparsehash_internal::read_data(fp, &(*it), sizeof(*it))) + return false; + return true; + } + + // If your keys and values are simple enough, we can write them + // to disk for you. "simple enough" means POD and no pointers. + // However, we don't try to normalize endianness. + // ------------------------------------------------------------ + template bool write_nopointer_data(OUTPUT *fp) const + { + for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it) + if (!sparsehash_internal::write_data(fp, &(*it), sizeof(*it))) + return false; + return true; + } + + + // Comparisons. We only need to define == and < -- we get + // != > <= >= via relops.h (which we happily included above). + // Note the comparisons are pretty arbitrary: we compare + // values of the first index that isn't equal (using default + // value for empty buckets). + // --------------------------------------------------------- + bool operator==(const sparsegroup& x) const + { + return (_bitmap == x._bitmap && + _bm_erased == x._bm_erased && + std::equal(_group, _group + _num_items(), x._group)); + } + + bool operator<(const sparsegroup& x) const + { + // also from + return std::lexicographical_compare(_group, _group + _num_items(), + x._group, x._group + x._num_items()); + } + + bool operator!=(const sparsegroup& x) const { return !(*this == x); } + bool operator<=(const sparsegroup& x) const { return !(x < *this); } + bool operator> (const sparsegroup& x) const { return x < *this; } + bool operator>=(const sparsegroup& x) const { return !(*this < x); } + + void mark() { _group = (value_type *)static_cast(-1); } + bool is_marked() const { return _group == (value_type *)static_cast(-1); } + +private: + // --------------------------------------------------------------------------- + template + class alloc_impl : public A + { + public: + typedef typename A::pointer pointer; + typedef typename A::size_type size_type; + + // Convert a normal allocator to one that has realloc_or_die() + explicit alloc_impl(const A& a) : A(a) { } + + // realloc_or_die should only be used when using the default + // allocator (spp::spp_allocator). + pointer realloc_or_die(pointer /*ptr*/, size_type /*n*/) + { + fprintf(stderr, "realloc_or_die is only supported for " + "spp::spp_allocator\n"); + exit(1); + return NULL; + } + }; + + // A template specialization of alloc_impl for + // spp::libc_allocator that can handle realloc_or_die. + // ----------------------------------------------------------- + template + class alloc_impl > : public spp_::libc_allocator + { + public: + typedef typename spp_::libc_allocator::pointer pointer; + typedef typename spp_::libc_allocator::size_type size_type; + + explicit alloc_impl(const spp_::libc_allocator& a) + : spp_::libc_allocator(a) + { } + + pointer realloc_or_die(pointer ptr, size_type n) + { + pointer retval = this->reallocate(ptr, n); + if (retval == NULL) + { + fprintf(stderr, "sparsehash: FATAL ERROR: failed to reallocate " + "%lu elements for ptr %p", static_cast(n), ptr); + exit(1); + } + return retval; + } + }; + + // A template specialization of alloc_impl for + // spp::spp_allocator that can handle realloc_or_die. 
+ // ----------------------------------------------------------- + template + class alloc_impl > : public spp_::spp_allocator + { + public: + typedef typename spp_::spp_allocator::pointer pointer; + typedef typename spp_::spp_allocator::size_type size_type; + + explicit alloc_impl(const spp_::spp_allocator& a) + : spp_::spp_allocator(a) + { } + + pointer realloc_or_die(pointer ptr, size_type n) + { + pointer retval = this->reallocate(ptr, n); + if (retval == NULL) + { + fprintf(stderr, "sparsehash: FATAL ERROR: failed to reallocate " + "%lu elements for ptr %p", static_cast(n), ptr); + exit(1); + } + return retval; + } + }; + + +#ifdef SPP_STORE_NUM_ITEMS + uint32_t _num_items() const { return (uint32_t)_num_buckets; } + void _set_num_items(uint32_t val) { _num_buckets = static_cast(val); } + void _incr_num_items() { ++_num_buckets; } + void _decr_num_items() { --_num_buckets; } + uint32_t _num_alloc() const { return (uint32_t)_num_allocated; } + void _set_num_alloc(uint32_t val) { _num_allocated = static_cast(val); } +#else + uint32_t _num_items() const { return spp_popcount(_bitmap); } + void _set_num_items(uint32_t ) { } + void _incr_num_items() { } + void _decr_num_items() { } + uint32_t _num_alloc() const { return _sizing(_num_items()); } + void _set_num_alloc(uint32_t val) { } +#endif + + // The actual data + // --------------- + value_type * _group; // (small) array of T's + group_bm_type _bitmap; + group_bm_type _bm_erased; // ones where items have been erased + +#ifdef SPP_STORE_NUM_ITEMS + size_type _num_buckets; + size_type _num_allocated; +#endif +}; + +// --------------------------------------------------------------------------- +// --------------------------------------------------------------------------- +template +class sparsetable +{ +public: + typedef T value_type; + typedef Alloc allocator_type; + typedef sparsegroup group_type; + +private: + typedef typename Alloc::template rebind::other group_alloc_type; + typedef typename group_alloc_type::size_type group_size_type; + +public: + // Basic types + // ----------- + typedef typename allocator_type::size_type size_type; + typedef typename allocator_type::difference_type difference_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + + typedef group_type& GroupsReference; + typedef const group_type& GroupsConstReference; + + typedef typename group_type::ne_iterator ColIterator; + typedef typename group_type::const_ne_iterator ColConstIterator; + + typedef table_iterator > iterator; // defined with index + typedef const_table_iterator > const_iterator; // defined with index + typedef std::reverse_iterator const_reverse_iterator; + typedef std::reverse_iterator reverse_iterator; + + // These are our special iterators, that go over non-empty buckets in a + // table. These aren't const only because you can change non-empty bcks. + // ---------------------------------------------------------------------- + typedef Two_d_iterator ne_iterator; + + typedef Two_d_iterator const_ne_iterator; + + // Another special iterator: it frees memory as it iterates (used to resize). 
+ // Obviously, you can only iterate over it once, which is why it's an input iterator + // --------------------------------------------------------------------------------- + typedef Two_d_destructive_iterator destructive_iterator; + + typedef std::reverse_iterator reverse_ne_iterator; + typedef std::reverse_iterator const_reverse_ne_iterator; + + + // Iterator functions + // ------------------ + iterator begin() { return iterator(this, 0); } + const_iterator begin() const { return const_iterator(this, 0); } + const_iterator cbegin() const { return const_iterator(this, 0); } + iterator end() { return iterator(this, size()); } + const_iterator end() const { return const_iterator(this, size()); } + const_iterator cend() const { return const_iterator(this, size()); } + reverse_iterator rbegin() { return reverse_iterator(end()); } + const_reverse_iterator rbegin() const { return const_reverse_iterator(cend()); } + const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); } + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rend() const { return const_reverse_iterator(cbegin()); } + const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); } + + // Versions for our special non-empty iterator + // ------------------------------------------ + ne_iterator ne_begin() { return ne_iterator (_first_group); } + const_ne_iterator ne_begin() const { return const_ne_iterator(_first_group); } + const_ne_iterator ne_cbegin() const { return const_ne_iterator(_first_group); } + ne_iterator ne_end() { return ne_iterator (_last_group); } + const_ne_iterator ne_end() const { return const_ne_iterator(_last_group); } + const_ne_iterator ne_cend() const { return const_ne_iterator(_last_group); } + + reverse_ne_iterator ne_rbegin() { return reverse_ne_iterator(ne_end()); } + const_reverse_ne_iterator ne_rbegin() const { return const_reverse_ne_iterator(ne_end()); } + const_reverse_ne_iterator ne_crbegin() const { return const_reverse_ne_iterator(ne_end()); } + reverse_ne_iterator ne_rend() { return reverse_ne_iterator(ne_begin()); } + const_reverse_ne_iterator ne_rend() const { return const_reverse_ne_iterator(ne_begin()); } + const_reverse_ne_iterator ne_crend() const { return const_reverse_ne_iterator(ne_begin()); } + + destructive_iterator destructive_begin() + { + return destructive_iterator(_alloc, _first_group); + } + + destructive_iterator destructive_end() + { + return destructive_iterator(_alloc, _last_group); + } + + // How to deal with the proper group + static group_size_type num_groups(size_type num) + { + // how many to hold num buckets + return num == 0 ? 
(group_size_type)0 : + (group_size_type)(((num-1) / SPP_GROUP_SIZE) + 1); + } + + typename group_type::size_type pos_in_group(size_type i) const + { + return static_cast(i & SPP_MASK_); + } + + size_type group_num(size_type i) const + { + return (size_type)(i >> SPP_SHIFT_); + } + + GroupsReference which_group(size_type i) + { + return _first_group[group_num(i)]; + } + + GroupsConstReference which_group(size_type i) const + { + return _first_group[group_num(i)]; + } + + void _alloc_group_array(group_size_type sz, group_type *&first, group_type *&last) + { + if (sz) + { + first = _group_alloc.allocate((size_type)(sz + 1)); // + 1 for end marker + first[sz].mark(); // for the ne_iterator + last = first + sz; + } + } + + void _free_group_array(group_type *&first, group_type *&last) + { + if (first) + { + _group_alloc.deallocate(first, (group_size_type)(last - first + 1)); // + 1 for end marker + first = last = 0; + } + } + + void _allocate_groups(size_type sz) + { + if (sz) + { + _alloc_group_array(sz, _first_group, _last_group); + std::uninitialized_fill(_first_group, _last_group, group_type()); + } + } + + void _free_groups() + { + if (_first_group) + { + for (group_type *g = _first_group; g != _last_group; ++g) + g->destruct(_alloc); + _free_group_array(_first_group, _last_group); + } + } + + void _cleanup() + { + _free_groups(); // sets _first_group = _last_group = 0 + _table_size = 0; + _num_buckets = 0; + } + + void _init() + { + _first_group = 0; + _last_group = 0; + _table_size = 0; + _num_buckets = 0; + } + + void _copy(const sparsetable &o) + { + _table_size = o._table_size; + _num_buckets = o._num_buckets; + _alloc = o._alloc; // todo - copy or move allocator according to... + _group_alloc = o._group_alloc; // http://en.cppreference.com/w/cpp/container/unordered_map/unordered_map + + group_size_type sz = (group_size_type)(o._last_group - o._first_group); + if (sz) + { + _alloc_group_array(sz, _first_group, _last_group); + for (group_size_type i=0; iswap(o); + } + + sparsetable(sparsetable&& o, const allocator_type &alloc) + { + _init(); + this->swap(o); + _alloc = alloc; // [gp todo] is this correct? + } + + sparsetable& operator=(sparsetable&& o) + { + _cleanup(); + this->swap(o); + return *this; + } +#endif + + // Many STL algorithms use swap instead of copy constructors + void swap(sparsetable& o) + { + using std::swap; + + swap(_first_group, o._first_group); + swap(_last_group, o._last_group); + swap(_table_size, o._table_size); + swap(_num_buckets, o._num_buckets); + if (_alloc != o._alloc) + swap(_alloc, o._alloc); + if (_group_alloc != o._group_alloc) + swap(_group_alloc, o._group_alloc); + } + + // It's always nice to be able to clear a table without deallocating it + void clear() + { + _free_groups(); + _num_buckets = 0; + _table_size = 0; + } + + inline allocator_type get_allocator() const + { + return _alloc; + } + + + // Functions that tell you about size. + // NOTE: empty() is non-intuitive! It does not tell you the number + // of not-empty buckets (use num_nonempty() for that). Instead + // it says whether you've allocated any buckets or not. 
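A worked example of the bucket-to-(group, position) split computed by group_num() and pos_in_group() above, assuming the typical group size of 32 mentioned in the sparse-table comment (which would make SPP_SHIFT_ == 5 and SPP_MASK_ == 31):

// Logical bucket 100 lives at position 4 of group 3:
//   group_num(100)    == 100 >> 5 == 3
//   pos_in_group(100) == 100 & 31 == 4
static_assert((100 >> 5) == 3 && (100 & 31) == 4,
              "bucket 100 -> group 3, position 4");

Within that group, the bitmap/popcount lookup sketched earlier then gives the offset into the group's dense array.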
+ // ---------------------------------------------------------------- + size_type size() const { return _table_size; } + size_type max_size() const { return _alloc.max_size(); } + bool empty() const { return _table_size == 0; } + size_type num_nonempty() const { return _num_buckets; } + + // OK, we'll let you resize one of these puppies + void resize(size_type new_size) + { + group_size_type sz = num_groups(new_size); + group_size_type old_sz = (group_size_type)(_last_group - _first_group); + + if (sz != old_sz) + { + // resize group array + // ------------------ + group_type *first = 0, *last = 0; + if (sz) + { + _alloc_group_array(sz, first, last); + memcpy(first, _first_group, sizeof(*first) * (std::min)(sz, old_sz)); + } + + if (sz < old_sz) + { + for (group_type *g = _first_group + sz; g != _last_group; ++g) + g->destruct(_alloc); + } + else + std::uninitialized_fill(first + old_sz, last, group_type()); + + _free_group_array(_first_group, _last_group); + _first_group = first; + _last_group = last; + } +#if 0 + // used only in test program + // todo: fix if sparsetable to be used directly + // -------------------------------------------- + if (new_size < _table_size) + { + // lower num_buckets, clear last group + if (pos_in_group(new_size) > 0) // need to clear inside last group + groups.back().erase(_alloc, groups.back().begin() + pos_in_group(new_size), + groups.back().end()); + _num_buckets = 0; // refigure # of used buckets + for (const group_type *group = _first_group; group != _last_group; ++group) + _num_buckets += group->num_nonempty(); + } +#endif + _table_size = new_size; + } + + // We let you see if a bucket is non-empty without retrieving it + // ------------------------------------------------------------- + bool test(size_type i) const + { + // assert(i < _table_size); + return which_group(i).test(pos_in_group(i)); + } + + // also tests for erased values + // ---------------------------- + bool test_strict(size_type i) const + { + // assert(i < _table_size); + return which_group(i).test_strict(pos_in_group(i)); + } + + friend struct GrpPos; + + struct GrpPos + { + typedef typename sparsetable::ne_iterator ne_iter; + GrpPos(const sparsetable &table, size_type i) : + grp(table.which_group(i)), pos(table.pos_in_group(i)) {} + + bool test_strict() const { return grp.test_strict(pos); } + bool test() const { return grp.test(pos); } + typename sparsetable::reference unsafe_get() const { return grp.unsafe_get(pos); } + ne_iter get_iter(typename sparsetable::reference ref) + { + return ne_iter((group_type *)&grp, &ref); + } + + void erase(sparsetable &table) // item *must* be present + { + assert(table._num_buckets); + ((group_type &)grp).erase(table._alloc, pos); + --table._num_buckets; + } + + private: + GrpPos* operator=(const GrpPos&); + + const group_type &grp; + typename group_type::size_type pos; + }; + + bool test(iterator pos) const + { + return which_group(pos.pos).test(pos_in_group(pos.pos)); + } + + bool test(const_iterator pos) const + { + return which_group(pos.pos).test(pos_in_group(pos.pos)); + } + + // TODO(csilvers): make protected + friend + // This is used by sparse_hashtable to get an element from the table + // when we know it exists (because the caller has called test(i)). + // ----------------------------------------------------------------- + reference unsafe_get(size_type i) const + { + assert(i < _table_size); + // assert(test(i)); + return which_group(i).unsafe_get(pos_in_group(i)); + } + + // Needed for hashtables, gets as a ne_iterator. 
Crashes for empty bcks + const_ne_iterator get_iter(size_type i) const + { + //assert(test(i)); // how can a ne_iterator point to an empty bucket? + + size_type grp_idx = group_num(i); + + return const_ne_iterator(_first_group + grp_idx, + (_first_group[grp_idx].ne_begin() + + _first_group[grp_idx].pos_to_offset(pos_in_group(i)))); + } + + const_ne_iterator get_iter(size_type i, ColIterator col_it) const + { + return const_ne_iterator(_first_group + group_num(i), col_it); + } + + // For nonempty we can return a non-const version + ne_iterator get_iter(size_type i) + { + //assert(test(i)); // how can a nonempty_iterator point to an empty bucket? + + size_type grp_idx = group_num(i); + + return ne_iterator(_first_group + grp_idx, + (_first_group[grp_idx].ne_begin() + + _first_group[grp_idx].pos_to_offset(pos_in_group(i)))); + } + + ne_iterator get_iter(size_type i, ColIterator col_it) + { + return ne_iterator(_first_group + group_num(i), col_it); + } + + // And the reverse transformation. + size_type get_pos(const const_ne_iterator& it) const + { + difference_type current_row = it.row_current - _first_group; + difference_type current_col = (it.col_current - _first_group[current_row].ne_begin()); + return ((current_row * SPP_GROUP_SIZE) + + _first_group[current_row].offset_to_pos(current_col)); + } + + // Val can be reference or const_reference + // --------------------------------------- + template + reference set(size_type i, Val &val) + { + assert(i < _table_size); + group_type &group = which_group(i); + typename group_type::size_type old_numbuckets = group.num_nonempty(); + pointer p(group.set(_alloc, pos_in_group(i), val)); + _num_buckets += group.num_nonempty() - old_numbuckets; + return *p; + } + + // used in _move_from (where we can move the old value instead of copying it + void move(size_type i, reference val) + { + assert(i < _table_size); + which_group(i).set(_alloc, pos_in_group(i), val); + ++_num_buckets; + } + + // This takes the specified elements out of the table. + // -------------------------------------------------- + void erase(size_type i) + { + assert(i < _table_size); + + GroupsReference grp(which_group(i)); + typename group_type::size_type old_numbuckets = grp.num_nonempty(); + grp.erase(_alloc, pos_in_group(i)); + _num_buckets += grp.num_nonempty() - old_numbuckets; + } + + void erase(iterator pos) + { + erase(pos.pos); + } + + void erase(iterator start_it, iterator end_it) + { + // This could be more efficient, but then we'd need to figure + // out if we spanned groups or not. Doesn't seem worth it. + for (; start_it != end_it; ++start_it) + erase(start_it); + } + + const_ne_iterator erase(const_ne_iterator it) + { + ne_iterator res(it); + if (res.row_current->erase_ne(_alloc, res)) + _num_buckets--; + return res; + } + + const_ne_iterator erase(const_ne_iterator f, const_ne_iterator l) + { + size_t diff = l - f; + while (diff--) + f = erase(f); + return f; + } + + // We support reading and writing tables to disk. We don't store + // the actual array contents (which we don't know how to store), + // just the groups and sizes. Returns true if all went ok. + +private: + // Every time the disk format changes, this should probably change too + typedef unsigned long MagicNumberType; + static const MagicNumberType MAGIC_NUMBER = 0x24687531; + + // Old versions of this code write all data in 32 bits. We need to + // support these files as well as having support for 64-bit systems. 
+ // So we use the following encoding scheme: for values < 2^32-1, we + // store in 4 bytes in big-endian order. For values > 2^32, we + // store 0xFFFFFFF followed by 8 bytes in big-endian order. This + // causes us to mis-read old-version code that stores exactly + // 0xFFFFFFF, but I don't think that is likely to have happened for + // these particular values. + template + static bool write_32_or_64(OUTPUT* fp, IntType value) + { + if (value < 0xFFFFFFFFULL) // fits in 4 bytes + { + if (!sparsehash_internal::write_bigendian_number(fp, value, 4)) + return false; + } + else + { + if (!sparsehash_internal::write_bigendian_number(fp, 0xFFFFFFFFUL, 4)) + return false; + if (!sparsehash_internal::write_bigendian_number(fp, value, 8)) + return false; + } + return true; + } + + template + static bool read_32_or_64(INPUT* fp, IntType *value) + { + // reads into value + MagicNumberType first4 = 0; // a convenient 32-bit unsigned type + if (!sparsehash_internal::read_bigendian_number(fp, &first4, 4)) + return false; + + if (first4 < 0xFFFFFFFFULL) + { + *value = first4; + } + else + { + if (!sparsehash_internal::read_bigendian_number(fp, value, 8)) + return false; + } + return true; + } + +public: + // read/write_metadata() and read_write/nopointer_data() are DEPRECATED. + // Use serialize() and unserialize(), below, for new code. + + template + bool write_metadata(OUTPUT *fp) const + { + if (!write_32_or_64(fp, MAGIC_NUMBER)) return false; + if (!write_32_or_64(fp, _table_size)) return false; + if (!write_32_or_64(fp, _num_buckets)) return false; + + for (const group_type *group = _first_group; group != _last_group; ++group) + if (group->write_metadata(fp) == false) + return false; + return true; + } + + // Reading destroys the old table contents! Returns true if read ok. + template + bool read_metadata(INPUT *fp) + { + size_type magic_read = 0; + if (!read_32_or_64(fp, &magic_read)) return false; + if (magic_read != MAGIC_NUMBER) + { + clear(); // just to be consistent + return false; + } + + if (!read_32_or_64(fp, &_table_size)) return false; + if (!read_32_or_64(fp, &_num_buckets)) return false; + + resize(_table_size); // so the vector's sized ok + for (group_type *group = _first_group; group != _last_group; ++group) + if (group->read_metadata(_alloc, fp) == false) + return false; + return true; + } + + // This code is identical to that for SparseGroup + // If your keys and values are simple enough, we can write them + // to disk for you. "simple enough" means no pointers. + // However, we don't try to normalize endianness + bool write_nopointer_data(FILE *fp) const + { + for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it) + if (!fwrite(&*it, sizeof(*it), 1, fp)) + return false; + return true; + } + + // When reading, we have to override the potential const-ness of *it + bool read_nopointer_data(FILE *fp) + { + for (ne_iterator it = ne_begin(); it != ne_end(); ++it) + if (!fread(reinterpret_cast(&(*it)), sizeof(*it), 1, fp)) + return false; + return true; + } + + // INPUT and OUTPUT must be either a FILE, *or* a C++ stream + // (istream, ostream, etc) *or* a class providing + // Read(void*, size_t) and Write(const void*, size_t) + // (respectively), which writes a buffer into a stream + // (which the INPUT/OUTPUT instance presumably owns). + + typedef sparsehash_internal::pod_serializer NopointerSerializer; + + // ValueSerializer: a functor. 
operator()(OUTPUT*, const value_type&) + template + bool serialize(ValueSerializer serializer, OUTPUT *fp) + { + if (!write_metadata(fp)) + return false; + for (const_ne_iterator it = ne_begin(); it != ne_end(); ++it) + if (!serializer(fp, *it)) + return false; + return true; + } + + // ValueSerializer: a functor. operator()(INPUT*, value_type*) + template + bool unserialize(ValueSerializer serializer, INPUT *fp) + { + clear(); + if (!read_metadata(fp)) + return false; + for (ne_iterator it = ne_begin(); it != ne_end(); ++it) + if (!serializer(fp, &*it)) + return false; + return true; + } + + // Comparisons. Note the comparisons are pretty arbitrary: we + // compare values of the first index that isn't equal (using default + // value for empty buckets). + bool operator==(const sparsetable& x) const + { + return (_table_size == x._table_size && + _num_buckets == x._num_buckets && + _first_group == x._first_group); + } + + bool operator<(const sparsetable& x) const + { + return std::lexicographical_compare(begin(), end(), x.begin(), x.end()); + } + bool operator!=(const sparsetable& x) const { return !(*this == x); } + bool operator<=(const sparsetable& x) const { return !(x < *this); } + bool operator>(const sparsetable& x) const { return x < *this; } + bool operator>=(const sparsetable& x) const { return !(*this < x); } + + +private: + // The actual data + // --------------- + group_type * _first_group; + group_type * _last_group; + size_type _table_size; // how many buckets they want + size_type _num_buckets; // number of non-empty buckets + group_alloc_type _group_alloc; + allocator_type _alloc; +}; + +// ---------------------------------------------------------------------- +// S P A R S E _ H A S H T A B L E +// ---------------------------------------------------------------------- +// Hashtable class, used to implement the hashed associative containers +// hash_set and hash_map. +// +// Value: what is stored in the table (each bucket is a Value). +// Key: something in a 1-to-1 correspondence to a Value, that can be used +// to search for a Value in the table (find() takes a Key). +// HashFcn: Takes a Key and returns an integer, the more unique the better. +// ExtractKey: given a Value, returns the unique Key associated with it. +// Must inherit from unary_function, or at least have a +// result_type enum indicating the return type of operator(). +// EqualKey: Given two Keys, says whether they are the same (that is, +// if they are both associated with the same Value). +// Alloc: STL allocator to use to allocate memory. +// +// ---------------------------------------------------------------------- + +// The probing method +// ------------------ +// Linear probing +// #define JUMP_(key, num_probes) ( 1 ) +// Quadratic probing +#define JUMP_(key, num_probes) ( num_probes ) + + +// ------------------------------------------------------------------- +// ------------------------------------------------------------------- +template +class sparse_hashtable +{ +public: + typedef Key key_type; + typedef Value value_type; + typedef HashFcn hasher; // user provided or spp_hash + typedef EqualKey key_equal; + typedef Alloc allocator_type; + + typedef typename allocator_type::size_type size_type; + typedef typename allocator_type::difference_type difference_type; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef value_type* pointer; + typedef const value_type* const_pointer; + + // Table is the main storage class. 
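The JUMP_ macro above selects the probe sequence used for collision resolution: with the quadratic variant, the k-th probe lands k*(k+1)/2 slots past the home bucket (offsets 0, 1, 3, 6, 10, ...), wrapped by the power-of-two bucket mask. A stand-alone sketch of that arithmetic, not the library's own probe loop (names hypothetical):

#include <cstddef>

// k-th probe position for a power-of-two table of `bucket_count` buckets.
inline std::size_t toy_probe(std::size_t home, std::size_t k, std::size_t bucket_count)
{
    return (home + k * (k + 1) / 2) & (bucket_count - 1);
}

// Example: home bucket 5 in a 16-bucket table is probed in the order
// 5, 6, 8, 11, 15, 4, ...

For power-of-two table sizes this triangular-number sequence eventually visits every bucket, which is one reason the code keeps asserting that bucket_count() stays a power of two.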
+ typedef sparsetable Table; + typedef typename Table::ne_iterator ne_it; + typedef typename Table::const_ne_iterator cne_it; + typedef typename Table::destructive_iterator dest_it; + typedef typename Table::ColIterator ColIterator; + + typedef ne_it iterator; + typedef cne_it const_iterator; + typedef dest_it destructive_iterator; + + // These come from tr1. For us they're the same as regular iterators. + // ------------------------------------------------------------------- + typedef iterator local_iterator; + typedef const_iterator const_local_iterator; + + // How full we let the table get before we resize + // ---------------------------------------------- + static const int HT_OCCUPANCY_PCT; // = 80 (out of 100); + + // How empty we let the table get before we resize lower, by default. + // (0.0 means never resize lower.) + // It should be less than OCCUPANCY_PCT / 2 or we thrash resizing + // ------------------------------------------------------------------ + static const int HT_EMPTY_PCT; // = 0.4 * HT_OCCUPANCY_PCT; + + // Minimum size we're willing to let hashtables be. + // Must be a power of two, and at least 4. + // Note, however, that for a given hashtable, the initial size is a + // function of the first constructor arg, and may be >HT_MIN_BUCKETS. + // ------------------------------------------------------------------ + static const size_type HT_MIN_BUCKETS = 4; + + // By default, if you don't specify a hashtable size at + // construction-time, we use this size. Must be a power of two, and + // at least HT_MIN_BUCKETS. + // ----------------------------------------------------------------- + static const size_type HT_DEFAULT_STARTING_BUCKETS = 32; + + // iterators + // --------- + iterator begin() { return _mk_iterator(table.ne_begin()); } + iterator end() { return _mk_iterator(table.ne_end()); } + const_iterator begin() const { return _mk_const_iterator(table.ne_cbegin()); } + const_iterator end() const { return _mk_const_iterator(table.ne_cend()); } + const_iterator cbegin() const { return _mk_const_iterator(table.ne_cbegin()); } + const_iterator cend() const { return _mk_const_iterator(table.ne_cend()); } + + // These come from tr1 unordered_map. They iterate over 'bucket' n. + // For sparsehashtable, we could consider each 'group' to be a bucket, + // I guess, but I don't really see the point. We'll just consider + // bucket n to be the n-th element of the sparsetable, if it's occupied, + // or some empty element, otherwise. + // --------------------------------------------------------------------- + local_iterator begin(size_type i) + { + return _mk_iterator(table.test(i) ? table.get_iter(i) : table.ne_end()); + } + + local_iterator end(size_type i) + { + local_iterator it = begin(i); + if (table.test(i)) + ++it; + return _mk_iterator(it); + } + + const_local_iterator begin(size_type i) const + { + return _mk_const_iterator(table.test(i) ? 
table.get_iter(i) : table.ne_cend()); + } + + const_local_iterator end(size_type i) const + { + const_local_iterator it = begin(i); + if (table.test(i)) + ++it; + return _mk_const_iterator(it); + } + + const_local_iterator cbegin(size_type i) const { return begin(i); } + const_local_iterator cend(size_type i) const { return end(i); } + + // This is used when resizing + // -------------------------- + destructive_iterator destructive_begin() { return _mk_destructive_iterator(table.destructive_begin()); } + destructive_iterator destructive_end() { return _mk_destructive_iterator(table.destructive_end()); } + + + // accessor functions for the things we templatize on, basically + // ------------------------------------------------------------- + hasher hash_funct() const { return settings; } + key_equal key_eq() const { return key_info; } + allocator_type get_allocator() const { return table.get_allocator(); } + + // Accessor function for statistics gathering. + unsigned int num_table_copies() const { return settings.num_ht_copies(); } + +private: + // This is used as a tag for the copy constructor, saying to destroy its + // arg We have two ways of destructively copying: with potentially growing + // the hashtable as we copy, and without. To make sure the outside world + // can't do a destructive copy, we make the typename private. + // ----------------------------------------------------------------------- + enum MoveDontCopyT {MoveDontCopy, MoveDontGrow}; + + void _squash_deleted() + { + // gets rid of any deleted entries we have + // --------------------------------------- + if (num_deleted) + { + // get rid of deleted before writing + sparse_hashtable tmp(MoveDontGrow, *this); + swap(tmp); // now we are tmp + } + assert(num_deleted == 0); + } + + // creating iterators from sparsetable::ne_iterators + // ------------------------------------------------- + iterator _mk_iterator(ne_it it) const { return it; } + const_iterator _mk_const_iterator(cne_it it) const { return it; } + destructive_iterator _mk_destructive_iterator(dest_it it) const { return it; } + +public: + size_type size() const { return table.num_nonempty(); } + size_type max_size() const { return table.max_size(); } + bool empty() const { return size() == 0; } + size_type bucket_count() const { return table.size(); } + size_type max_bucket_count() const { return max_size(); } + // These are tr1 methods. Their idea of 'bucket' doesn't map well to + // what we do. We just say every bucket has 0 or 1 items in it. + size_type bucket_size(size_type i) const + { + return (size_type)(begin(i) == end(i) ? 0 : 1); + } + +private: + // Because of the above, size_type(-1) is never legal; use it for errors + // --------------------------------------------------------------------- + static const size_type ILLEGAL_BUCKET = size_type(-1); + + // Used after a string of deletes. Returns true if we actually shrunk. + // TODO(csilvers): take a delta so we can take into account inserts + // done after shrinking. Maybe make part of the Settings class? + // -------------------------------------------------------------------- + bool _maybe_shrink() + { + assert((bucket_count() & (bucket_count()-1)) == 0); // is a power of two + assert(bucket_count() >= HT_MIN_BUCKETS); + bool retval = false; + + // If you construct a hashtable with < HT_DEFAULT_STARTING_BUCKETS, + // we'll never shrink until you get relatively big, and we'll never + // shrink below HT_DEFAULT_STARTING_BUCKETS. 
Otherwise, something + // like "dense_hash_set x; x.insert(4); x.erase(4);" will + // shrink us down to HT_MIN_BUCKETS buckets, which is too small. + // --------------------------------------------------------------- + const size_type num_remain = table.num_nonempty(); + const size_type shrink_threshold = settings.shrink_threshold(); + if (shrink_threshold > 0 && num_remain < shrink_threshold && + bucket_count() > HT_DEFAULT_STARTING_BUCKETS) + { + const float shrink_factor = settings.shrink_factor(); + size_type sz = (size_type)(bucket_count() / 2); // find how much we should shrink + while (sz > HT_DEFAULT_STARTING_BUCKETS && + num_remain < static_cast(sz * shrink_factor)) + { + sz /= 2; // stay a power of 2 + } + sparse_hashtable tmp(MoveDontCopy, *this, sz); + swap(tmp); // now we are tmp + retval = true; + } + settings.set_consider_shrink(false); // because we just considered it + return retval; + } + + // We'll let you resize a hashtable -- though this makes us copy all! + // When you resize, you say, "make it big enough for this many more elements" + // Returns true if we actually resized, false if size was already ok. + // -------------------------------------------------------------------------- + bool _resize_delta(size_type delta) + { + bool did_resize = false; + if (settings.consider_shrink()) + { + // see if lots of deletes happened + if (_maybe_shrink()) + did_resize = true; + } + if (table.num_nonempty() >= + (std::numeric_limits::max)() - delta) + { + throw_exception(std::length_error("resize overflow")); + } + + size_type num_occupied = (size_type)(table.num_nonempty() + num_deleted); + + if (bucket_count() >= HT_MIN_BUCKETS && + (num_occupied + delta) <= settings.enlarge_threshold()) + return did_resize; // we're ok as we are + + // Sometimes, we need to resize just to get rid of all the + // "deleted" buckets that are clogging up the hashtable. So when + // deciding whether to resize, count the deleted buckets (which + // are currently taking up room). + // ------------------------------------------------------------- + const size_type needed_size = + settings.min_buckets((size_type)(num_occupied + delta), (size_type)0); + + if (needed_size <= bucket_count()) // we have enough buckets + return did_resize; + + size_type resize_to = settings.min_buckets((size_type)(num_occupied + delta), bucket_count()); + + if (resize_to < needed_size && // may double resize_to + resize_to < (std::numeric_limits::max)() / 2) + { + // This situation means that we have enough deleted elements, + // that once we purge them, we won't actually have needed to + // grow. But we may want to grow anyway: if we just purge one + // element, say, we'll have to grow anyway next time we + // insert. Might as well grow now, since we're already going + // through the trouble of copying (in order to purge the + // deleted elements). + const size_type target = + static_cast(settings.shrink_size((size_type)(resize_to*2))); + if (table.num_nonempty() + delta >= target) + { + // Good, we won't be below the shrink threshhold even if we double. 
+ resize_to *= 2; + } + } + + sparse_hashtable tmp(MoveDontCopy, *this, resize_to); + swap(tmp); // now we are tmp + return true; + } + + // Used to actually do the rehashing when we grow/shrink a hashtable + // ----------------------------------------------------------------- + void _copy_from(const sparse_hashtable &ht, size_type min_buckets_wanted) + { + clear(); // clear table, set num_deleted to 0 + + // If we need to change the size of our table, do it now + const size_type resize_to = settings.min_buckets(ht.size(), min_buckets_wanted); + + if (resize_to > bucket_count()) + { + // we don't have enough buckets + table.resize(resize_to); // sets the number of buckets + settings.reset_thresholds(bucket_count()); + } + + // We use a normal iterator to get bcks from ht + // We could use insert() here, but since we know there are + // no duplicates, we can be more efficient + assert((bucket_count() & (bucket_count()-1)) == 0); // a power of two + for (const_iterator it = ht.begin(); it != ht.end(); ++it) + { + size_type num_probes = 0; // how many times we've probed + size_type bucknum; + const size_type bucket_count_minus_one = bucket_count() - 1; + for (bucknum = hash(get_key(*it)) & bucket_count_minus_one; + table.test(bucknum); // table.test() OK since no erase() + bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one) + { + ++num_probes; + assert(num_probes < bucket_count() + && "Hashtable is full: an error in key_equal<> or hash<>"); + } + table.set(bucknum, *it); // copies the value to here + } + settings.inc_num_ht_copies(); + } + + // Implementation is like _copy_from, but it destroys the table of the + // "from" guy by freeing sparsetable memory as we iterate. This is + // useful in resizing, since we're throwing away the "from" guy anyway. + // -------------------------------------------------------------------- + void _move_from(MoveDontCopyT mover, sparse_hashtable &ht, + size_type min_buckets_wanted) + { + clear(); + + // If we need to change the size of our table, do it now + size_type resize_to; + if (mover == MoveDontGrow) + resize_to = ht.bucket_count(); // keep same size as old ht + else // MoveDontCopy + resize_to = settings.min_buckets(ht.size(), min_buckets_wanted); + if (resize_to > bucket_count()) + { + // we don't have enough buckets + table.resize(resize_to); // sets the number of buckets + settings.reset_thresholds(bucket_count()); + } + + // We use a normal iterator to get bcks from ht + // We could use insert() here, but since we know there are + // no duplicates, we can be more efficient + assert((bucket_count() & (bucket_count()-1)) == 0); // a power of two + const size_type bucket_count_minus_one = (const size_type)(bucket_count() - 1); + + // THIS IS THE MAJOR LINE THAT DIFFERS FROM COPY_FROM(): + for (destructive_iterator it = ht.destructive_begin(); + it != ht.destructive_end(); ++it) + { + size_type num_probes = 0; + size_type bucknum; + for (bucknum = hash(get_key(*it)) & bucket_count_minus_one; + table.test(bucknum); // table.test() OK since no erase() + bucknum = (size_type)((bucknum + JUMP_(key, num_probes)) & (bucket_count()-1))) + { + ++num_probes; + assert(num_probes < bucket_count() + && "Hashtable is full: an error in key_equal<> or hash<>"); + } + table.move(bucknum, *it); // moves the value to here + } + settings.inc_num_ht_copies(); + } + + + // Required by the spec for hashed associative container +public: + // Though the docs say this should be num_buckets, I think it's much + // more useful as num_elements. 
As a special feature, calling with + // req_elements==0 will cause us to shrink if we can, saving space. + // ----------------------------------------------------------------- + void resize(size_type req_elements) + { + // resize to this or larger + if (settings.consider_shrink() || req_elements == 0) + _maybe_shrink(); + if (req_elements > table.num_nonempty()) // we only grow + _resize_delta((size_type)(req_elements - table.num_nonempty())); + } + + // Get and change the value of shrink_factor and enlarge_factor. The + // description at the beginning of this file explains how to choose + // the values. Setting the shrink parameter to 0.0 ensures that the + // table never shrinks. + // ------------------------------------------------------------------ + void get_resizing_parameters(float* shrink, float* grow) const + { + *shrink = settings.shrink_factor(); + *grow = settings.enlarge_factor(); + } + + float get_shrink_factor() const { return settings.shrink_factor(); } + float get_enlarge_factor() const { return settings.enlarge_factor(); } + + void set_resizing_parameters(float shrink, float grow) + { + settings.set_resizing_parameters(shrink, grow); + settings.reset_thresholds(bucket_count()); + } + + void set_shrink_factor(float shrink) + { + set_resizing_parameters(shrink, get_enlarge_factor()); + } + + void set_enlarge_factor(float grow) + { + set_resizing_parameters(get_shrink_factor(), grow); + } + + // CONSTRUCTORS -- as required by the specs, we take a size, + // but also let you specify a hashfunction, key comparator, + // and key extractor. We also define a copy constructor and =. + // DESTRUCTOR -- the default is fine, surprisingly. + // ------------------------------------------------------------ + explicit sparse_hashtable(size_type expected_max_items_in_table = 0, + const HashFcn& hf = HashFcn(), + const EqualKey& eql = EqualKey(), + const ExtractKey& ext = ExtractKey(), + const SetKey& set = SetKey(), + const allocator_type& alloc = allocator_type()) + : settings(hf), + key_info(ext, set, eql), + num_deleted(0), + table((expected_max_items_in_table == 0 + ? HT_DEFAULT_STARTING_BUCKETS + : settings.min_buckets(expected_max_items_in_table, 0)), + alloc) + { + settings.reset_thresholds(bucket_count()); + } + + // As a convenience for resize(), we allow an optional second argument + // which lets you make this new hashtable a different size than ht. + // We also provide a mechanism of saying you want to "move" the ht argument + // into us instead of copying. 
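The shrink and enlarge factors managed above surface on the public containers as min_load_factor() / max_load_factor(). A minimal illustrative sketch of tuning them (the include path assumes this directory layout; the values are arbitrary):

    #include <sparsepp/spp.h>

    int main()
    {
        spp::sparse_hash_map<int, int> m;
        m.max_load_factor(0.8f);  // enlarge factor: let the table get denser before growing
        m.min_load_factor(0.0f);  // shrink factor 0 => the table never shrinks automatically
        for (int i = 0; i < 1000; ++i)
            m[i] = i * i;
        return 0;
    }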
+ // ------------------------------------------------------------------------ + sparse_hashtable(const sparse_hashtable& ht, + size_type min_buckets_wanted = HT_DEFAULT_STARTING_BUCKETS) + : settings(ht.settings), + key_info(ht.key_info), + num_deleted(0), + table(0) + { + settings.reset_thresholds(bucket_count()); + _copy_from(ht, min_buckets_wanted); + } + +#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES) + + sparse_hashtable(sparse_hashtable&& o) : + settings(std::move(o.settings)), + key_info(std::move(o.key_info)), + num_deleted(o.num_deleted), + table(std::move(o.table)) + { + } + + sparse_hashtable(sparse_hashtable&& o, const allocator_type& alloc) : + settings(std::move(o.settings)), + key_info(std::move(o.key_info)), + num_deleted(o.num_deleted), + table(std::move(o.table), alloc) + { + } + + sparse_hashtable& operator=(sparse_hashtable&& o) + { + using std::swap; + + sparse_hashtable tmp(std::move(o)); + swap(tmp, *this); + return *this; + } +#endif + + sparse_hashtable(MoveDontCopyT mover, + sparse_hashtable& ht, + size_type min_buckets_wanted = HT_DEFAULT_STARTING_BUCKETS) + : settings(ht.settings), + key_info(ht.key_info), + num_deleted(0), + table(min_buckets_wanted, ht.table.get_allocator()) + //table(min_buckets_wanted) + { + settings.reset_thresholds(bucket_count()); + _move_from(mover, ht, min_buckets_wanted); + } + + sparse_hashtable& operator=(const sparse_hashtable& ht) + { + if (&ht == this) + return *this; // don't copy onto ourselves + settings = ht.settings; + key_info = ht.key_info; + num_deleted = ht.num_deleted; + + // _copy_from() calls clear and sets num_deleted to 0 too + _copy_from(ht, HT_MIN_BUCKETS); + + // we purposefully don't copy the allocator, which may not be copyable + return *this; + } + + // Many STL algorithms use swap instead of copy constructors + void swap(sparse_hashtable& ht) + { + using std::swap; + + swap(settings, ht.settings); + swap(key_info, ht.key_info); + swap(num_deleted, ht.num_deleted); + table.swap(ht.table); + settings.reset_thresholds(bucket_count()); // also resets consider_shrink + ht.settings.reset_thresholds(ht.bucket_count()); + // we purposefully don't swap the allocator, which may not be swap-able + } + + // It's always nice to be able to clear a table without deallocating it + void clear() + { + if (!empty() || num_deleted != 0) + { + table.clear(); + table = Table(HT_DEFAULT_STARTING_BUCKETS); + } + settings.reset_thresholds(bucket_count()); + num_deleted = 0; + } + + // LOOKUP ROUTINES +private: + + enum pos_type { pt_empty = 0, pt_erased, pt_full }; + // ------------------------------------------------------------------- + class Position + { + public: + + Position() : _t(pt_empty) {} + Position(pos_type t, size_type idx) : _t(t), _idx(idx) {} + + pos_type _t; + size_type _idx; + }; + + // Returns a pair: + // - 'first' is a code, 2 if key already present, 0 or 1 otherwise. 
+ // - 'second' is a position, where the key should go + // Note: because of deletions where-to-insert is not trivial: it's the + // first deleted bucket we see, as long as we don't find the key later + // ------------------------------------------------------------------- + Position _find_position(const key_type &key) const + { + size_type num_probes = 0; // how many times we've probed + const size_type bucket_count_minus_one = (const size_type)(bucket_count() - 1); + size_type bucknum = hash(key) & bucket_count_minus_one; + Position pos; + + while (1) + { + // probe until something happens + // ----------------------------- + typename Table::GrpPos grp_pos(table, bucknum); + + if (!grp_pos.test_strict()) + { + // bucket is empty => key not present + return pos._t ? pos : Position(pt_empty, bucknum); + } + else if (grp_pos.test()) + { + reference ref(grp_pos.unsafe_get()); + + if (equals(key, get_key(ref))) + return Position(pt_full, bucknum); + } + else if (pos._t == pt_empty) + { + // first erased position + pos._t = pt_erased; + pos._idx = bucknum; + } + + ++num_probes; // we're doing another probe + bucknum = (size_type)((bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one); + assert(num_probes < bucket_count() + && "Hashtable is full: an error in key_equal<> or hash<>"); + } + } + +public: + // I hate to duplicate find() like that, but it is + // significantly faster to not have the intermediate pair + // ------------------------------------------------------------------ + iterator find(const key_type& key) + { + size_type num_probes = 0; // how many times we've probed + const size_type bucket_count_minus_one = bucket_count() - 1; + size_type bucknum = hash(key) & bucket_count_minus_one; + + while (1) // probe until something happens + { + typename Table::GrpPos grp_pos(table, bucknum); + + if (!grp_pos.test_strict()) + return end(); // bucket is empty + if (grp_pos.test()) + { + reference ref(grp_pos.unsafe_get()); + + if (equals(key, get_key(ref))) + return grp_pos.get_iter(ref); + } + ++num_probes; // we're doing another probe + bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one; + assert(num_probes < bucket_count() + && "Hashtable is full: an error in key_equal<> or hash<>"); + } + } + + // Wish I could avoid the duplicate find() const and non-const. + // ------------------------------------------------------------ + const_iterator find(const key_type& key) const + { + size_type num_probes = 0; // how many times we've probed + const size_type bucket_count_minus_one = bucket_count() - 1; + size_type bucknum = hash(key) & bucket_count_minus_one; + + while (1) // probe until something happens + { + typename Table::GrpPos grp_pos(table, bucknum); + + if (!grp_pos.test_strict()) + return end(); // bucket is empty + else if (grp_pos.test()) + { + reference ref(grp_pos.unsafe_get()); + + if (equals(key, get_key(ref))) + return _mk_const_iterator(table.get_iter(bucknum, &ref)); + } + ++num_probes; // we're doing another probe + bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one; + assert(num_probes < bucket_count() + && "Hashtable is full: an error in key_equal<> or hash<>"); + } + } + + // This is a tr1 method: the bucket a given key is in, or what bucket + // it would be put in, if it were to be inserted. Shrug. + // ------------------------------------------------------------------ + size_type bucket(const key_type& key) const + { + Position pos = _find_position(key); + return pos._idx; + } + + // Counts how many elements have key key. 
For maps, it's either 0 or 1. + // --------------------------------------------------------------------- + size_type count(const key_type &key) const + { + Position pos = _find_position(key); + return (size_type)(pos._t == pt_full ? 1 : 0); + } + + // Likewise, equal_range doesn't really make sense for us. Oh well. + // ----------------------------------------------------------------- + std::pair equal_range(const key_type& key) + { + iterator pos = find(key); // either an iterator or end + if (pos == end()) + return std::pair(pos, pos); + else + { + const iterator startpos = pos++; + return std::pair(startpos, pos); + } + } + + std::pair equal_range(const key_type& key) const + { + const_iterator pos = find(key); // either an iterator or end + if (pos == end()) + return std::pair(pos, pos); + else + { + const const_iterator startpos = pos++; + return std::pair(startpos, pos); + } + } + + + // INSERTION ROUTINES +private: + // Private method used by insert_noresize and find_or_insert. + template + reference _insert_at(T& obj, size_type pos, bool erased) + { + if (size() >= max_size()) + { + throw_exception(std::length_error("insert overflow")); + } + if (erased) + { + assert(num_deleted); + --num_deleted; + } + return table.set(pos, obj); + } + + // If you know *this is big enough to hold obj, use this routine + template + std::pair _insert_noresize(T& obj) + { + Position pos = _find_position(get_key(obj)); + bool already_there = (pos._t == pt_full); + + if (!already_there) + { + reference ref(_insert_at(obj, pos._idx, pos._t == pt_erased)); + return std::pair(_mk_iterator(table.get_iter(pos._idx, &ref)), true); + } + return std::pair(_mk_iterator(table.get_iter(pos._idx)), false); + } + + // Specializations of insert(it, it) depending on the power of the iterator: + // (1) Iterator supports operator-, resize before inserting + template + void _insert(ForwardIterator f, ForwardIterator l, std::forward_iterator_tag /*unused*/) + { + int64_t dist = std::distance(f, l); + if (dist < 0 || static_cast(dist) >= (std::numeric_limits::max)()) + throw_exception(std::length_error("insert-range overflow")); + + _resize_delta(static_cast(dist)); + + for (; dist > 0; --dist, ++f) + _insert_noresize(*f); + } + + // (2) Arbitrary iterator, can't tell how much to resize + template + void _insert(InputIterator f, InputIterator l, std::input_iterator_tag /*unused*/) + { + for (; f != l; ++f) + _insert(*f); + } + +public: + +#if !defined(SPP_NO_CXX11_VARIADIC_TEMPLATES) + template + std::pair emplace(Args&&... args) + { + _resize_delta(1); + value_type obj(std::forward(args)...); + return _insert_noresize(obj); + } +#endif + + // This is the normal insert routine, used by the outside world + std::pair insert(const_reference obj) + { + _resize_delta(1); // adding an object, grow if need be + return _insert_noresize(obj); + } + +#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES) + template< class P > + std::pair insert(P &&obj) + { + _resize_delta(1); // adding an object, grow if need be + value_type val(std::forward(obj)); + return _insert_noresize(val); + } +#endif + + // When inserting a lot at a time, we specialize on the type of iterator + template + void insert(InputIterator f, InputIterator l) + { + // specializes on iterator type + _insert(f, l, + typename std::iterator_traits::iterator_category()); + } + + // DefaultValue is a functor that takes a key and returns a value_type + // representing the default value to be inserted if none is found. 
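find_or_insert() below is the routine behind sparse_hash_map::operator[]: when the key is absent, DefaultValue builds the pair to insert. A short usage sketch of the insert/lookup interface at the container level (key and value types picked arbitrarily):

    #include <string>
    #include <utility>
    #include <sparsepp/spp.h>

    int main()
    {
        spp::sparse_hash_map<std::string, int> counts;
        counts.insert(std::make_pair(std::string("apple"), 3)); // no overwrite if the key exists
        counts["pear"]  += 1;   // operator[] -> find_or_insert: mapped int starts at T() == 0
        counts["apple"] += 1;   // key present: the stored value is returned and becomes 4
        return (counts.count("apple") == 1 && counts["apple"] == 4) ? 0 : 1;
    }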
+ template + value_type& find_or_insert(const key_type& key) + { + size_type num_probes = 0; // how many times we've probed + const size_type bucket_count_minus_one = bucket_count() - 1; + size_type bucknum = hash(key) & bucket_count_minus_one; + DefaultValue default_value; + size_type erased_pos = 0; + bool erased = false; + + while (1) // probe until something happens + { + typename Table::GrpPos grp_pos(table, bucknum); + + if (!grp_pos.test_strict()) + { + // not found + if (_resize_delta(1)) + { + // needed to rehash to make room + // Since we resized, we can't use pos, so recalculate where to insert. + value_type def(default_value(key)); + return *(_insert_noresize(def).first); + } + else + { + // no need to rehash, insert right here + value_type def(default_value(key)); + return _insert_at(def, erased ? erased_pos : bucknum, erased); + } + } + if (grp_pos.test()) + { + reference ref(grp_pos.unsafe_get()); + + if (equals(key, get_key(ref))) + return ref; + } + else if (!erased) + { + // first erased position + erased_pos = bucknum; + erased = true; + } + + ++num_probes; // we're doing another probe + bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one; + assert(num_probes < bucket_count() + && "Hashtable is full: an error in key_equal<> or hash<>"); + } + } + + size_type erase(const key_type& key) + { + size_type num_probes = 0; // how many times we've probed + const size_type bucket_count_minus_one = bucket_count() - 1; + size_type bucknum = hash(key) & bucket_count_minus_one; + + while (1) // probe until something happens + { + typename Table::GrpPos grp_pos(table, bucknum); + + if (!grp_pos.test_strict()) + return 0; // bucket is empty, we deleted nothing + if (grp_pos.test()) + { + reference ref(grp_pos.unsafe_get()); + + if (equals(key, get_key(ref))) + { + grp_pos.erase(table); + ++num_deleted; + settings.set_consider_shrink(true); // will think about shrink after next insert + return 1; // because we deleted one thing + } + } + ++num_probes; // we're doing another probe + bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one; + assert(num_probes < bucket_count() + && "Hashtable is full: an error in key_equal<> or hash<>"); + } + } + + const_iterator erase(const_iterator pos) + { + if (pos == cend()) + return cend(); // sanity check + + const_iterator nextpos = table.erase(pos); + ++num_deleted; + settings.set_consider_shrink(true); + return nextpos; + } + + const_iterator erase(const_iterator f, const_iterator l) + { + if (f == cend()) + return cend(); // sanity check + + size_type num_before = table.num_nonempty(); + const_iterator nextpos = table.erase(f, l); + num_deleted += num_before - table.num_nonempty(); + settings.set_consider_shrink(true); + return nextpos; + } + + // Deleted key routines - just to keep google test framework happy + // we don't actually use the deleted key + // --------------------------------------------------------------- + void set_deleted_key(const key_type&) + { + } + + void clear_deleted_key() + { + } + + bool operator==(const sparse_hashtable& ht) const + { + if (this == &ht) + return true; + + if (size() != ht.size()) + return false; + + for (const_iterator it = begin(); it != end(); ++it) + { + const_iterator it2 = ht.find(get_key(*it)); + if ((it2 == ht.end()) || (*it != *it2)) + return false; + } + + return true; + } + + bool operator!=(const sparse_hashtable& ht) const + { + return !(*this == ht); + } + + + // I/O + // We support reading and writing hashtables to disk. 
NOTE that + // this only stores the hashtable metadata, not the stuff you've + // actually put in the hashtable! Alas, since I don't know how to + // write a hasher or key_equal, you have to make sure everything + // but the table is the same. We compact before writing. + // + // The OUTPUT type needs to support a Write() operation. File and + // OutputBuffer are appropriate types to pass in. + // + // The INPUT type needs to support a Read() operation. File and + // InputBuffer are appropriate types to pass in. + // ------------------------------------------------------------- + template + bool write_metadata(OUTPUT *fp) + { + return table.write_metadata(fp); + } + + template + bool read_metadata(INPUT *fp) + { + num_deleted = 0; // since we got rid before writing + const bool result = table.read_metadata(fp); + settings.reset_thresholds(bucket_count()); + return result; + } + + // Only meaningful if value_type is a POD. + template + bool write_nopointer_data(OUTPUT *fp) + { + return table.write_nopointer_data(fp); + } + + // Only meaningful if value_type is a POD. + template + bool read_nopointer_data(INPUT *fp) + { + return table.read_nopointer_data(fp); + } + + // INPUT and OUTPUT must be either a FILE, *or* a C++ stream + // (istream, ostream, etc) *or* a class providing + // Read(void*, size_t) and Write(const void*, size_t) + // (respectively), which writes a buffer into a stream + // (which the INPUT/OUTPUT instance presumably owns). + + typedef sparsehash_internal::pod_serializer NopointerSerializer; + + // ValueSerializer: a functor. operator()(OUTPUT*, const value_type&) + template + bool serialize(ValueSerializer serializer, OUTPUT *fp) + { + return table.serialize(serializer, fp); + } + + // ValueSerializer: a functor. operator()(INPUT*, value_type*) + template + bool unserialize(ValueSerializer serializer, INPUT *fp) + { + num_deleted = 0; // since we got rid before writing + const bool result = table.unserialize(serializer, fp); + settings.reset_thresholds(bucket_count()); + return result; + } + +private: + + // Package templated functors with the other types to eliminate memory + // needed for storing these zero-size operators. Since ExtractKey and + // hasher's operator() might have the same function signature, they + // must be packaged in different classes. + // ------------------------------------------------------------------------- + struct Settings : + sparsehash_internal::sh_hashtable_settings + { + explicit Settings(const hasher& hf) + : sparsehash_internal::sh_hashtable_settings + (hf, HT_OCCUPANCY_PCT / 100.0f, HT_EMPTY_PCT / 100.0f) {} + }; + + // KeyInfo stores delete key and packages zero-size functors: + // ExtractKey and SetKey. 
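Because the hash, equality and key-extraction functors are packaged as (usually empty) base classes, user-supplied functors add no per-instance storage. A hedged sketch of supplying a custom hash and equality to sparse_hash_set (Point, PointHash and PointEq are invented for the example):

    #include <cstddef>
    #include <functional>
    #include <sparsepp/spp.h>

    struct Point { int x, y; };

    struct PointHash {
        std::size_t operator()(const Point& p) const
        {
            // crude combine of the two member hashes; fine for a demo
            return std::hash<int>()(p.x) * 1000003u ^ std::hash<int>()(p.y);
        }
    };

    struct PointEq {
        bool operator()(const Point& a, const Point& b) const
        { return a.x == b.x && a.y == b.y; }
    };

    int main()
    {
        spp::sparse_hash_set<Point, PointHash, PointEq> pts;
        pts.insert(Point{1, 2});
        return pts.count(Point{1, 2}) == 1 ? 0 : 1;
    }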
+ // --------------------------------------------------------- + class KeyInfo : public ExtractKey, public SetKey, public EqualKey + { + public: + KeyInfo(const ExtractKey& ek, const SetKey& sk, const EqualKey& eq) + : ExtractKey(ek), SetKey(sk), EqualKey(eq) + { + } + + // We want to return the exact same type as ExtractKey: Key or const Key& + typename ExtractKey::result_type get_key(const_reference v) const + { + return ExtractKey::operator()(v); + } + + bool equals(const key_type& a, const key_type& b) const + { + return EqualKey::operator()(a, b); + } + }; + + // Utility functions to access the templated operators + size_t hash(const key_type& v) const + { + return settings.hash(v); + } + + bool equals(const key_type& a, const key_type& b) const + { + return key_info.equals(a, b); + } + + typename ExtractKey::result_type get_key(const_reference v) const + { + return key_info.get_key(v); + } + +private: + // Actual data + // ----------- + Settings settings; + KeyInfo key_info; + size_type num_deleted; + Table table; // holds num_buckets and num_elements too +}; + +#undef JUMP_ + +// ----------------------------------------------------------------------------- +template +const typename sparse_hashtable::size_type +sparse_hashtable::ILLEGAL_BUCKET; + +// How full we let the table get before we resize. Knuth says .8 is +// good -- higher causes us to probe too much, though saves memory +// ----------------------------------------------------------------------------- +template +const int sparse_hashtable::HT_OCCUPANCY_PCT = 50; + +// How empty we let the table get before we resize lower. +// It should be less than OCCUPANCY_PCT / 2 or we thrash resizing +// ----------------------------------------------------------------------------- +template +const int sparse_hashtable::HT_EMPTY_PCT += static_cast(0.4 * + sparse_hashtable::HT_OCCUPANCY_PCT); + + +// ---------------------------------------------------------------------- +// S P A R S E _ H A S H _ M A P +// ---------------------------------------------------------------------- +template , + class EqualKey = std::equal_to, + class Alloc = SPP_DEFAULT_ALLOCATOR > > +class sparse_hash_map +{ +public: + typedef typename std::pair value_type; + +private: + // Apparently select1st is not stl-standard, so we define our own + struct SelectKey + { + typedef const Key& result_type; + + inline const Key& operator()(const value_type& p) const + { + return p.first; + } + }; + + struct SetKey + { + inline void operator()(value_type* value, const Key& new_key) const + { + *const_cast(&value->first) = new_key; + } + }; + + // For operator[]. 
+ struct DefaultValue + { + inline value_type operator()(const Key& key) const + { + return std::make_pair(key, T()); + } + }; + + // The actual data + typedef sparse_hashtable ht; + +public: + typedef typename ht::key_type key_type; + typedef T data_type; + typedef T mapped_type; + typedef typename ht::hasher hasher; + typedef typename ht::key_equal key_equal; + typedef Alloc allocator_type; + + typedef typename ht::size_type size_type; + typedef typename ht::difference_type difference_type; + typedef typename ht::pointer pointer; + typedef typename ht::const_pointer const_pointer; + typedef typename ht::reference reference; + typedef typename ht::const_reference const_reference; + + typedef typename ht::iterator iterator; + typedef typename ht::const_iterator const_iterator; + typedef typename ht::local_iterator local_iterator; + typedef typename ht::const_local_iterator const_local_iterator; + + // Iterator functions + iterator begin() { return rep.begin(); } + iterator end() { return rep.end(); } + const_iterator begin() const { return rep.cbegin(); } + const_iterator end() const { return rep.cend(); } + const_iterator cbegin() const { return rep.cbegin(); } + const_iterator cend() const { return rep.cend(); } + + // These come from tr1's unordered_map. For us, a bucket has 0 or 1 elements. + local_iterator begin(size_type i) { return rep.begin(i); } + local_iterator end(size_type i) { return rep.end(i); } + const_local_iterator begin(size_type i) const { return rep.begin(i); } + const_local_iterator end(size_type i) const { return rep.end(i); } + const_local_iterator cbegin(size_type i) const { return rep.cbegin(i); } + const_local_iterator cend(size_type i) const { return rep.cend(i); } + + // Accessor functions + // ------------------ + allocator_type get_allocator() const { return rep.get_allocator(); } + hasher hash_funct() const { return rep.hash_funct(); } + hasher hash_function() const { return hash_funct(); } + key_equal key_eq() const { return rep.key_eq(); } + + + // Constructors + // ------------ + explicit sparse_hash_map(size_type n = 0, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& alloc = allocator_type()) + : rep(n, hf, eql, SelectKey(), SetKey(), alloc) + { + } + + explicit sparse_hash_map(const allocator_type& alloc) : + rep(0, hasher(), key_equal(), SelectKey(), SetKey(), alloc) + { + } + + sparse_hash_map(size_type n, const allocator_type& alloc) : + rep(n, hasher(), key_equal(), SelectKey(), SetKey(), alloc) + { + } + + sparse_hash_map(size_type n, const hasher& hf, const allocator_type& alloc) : + rep(n, hf, key_equal(), SelectKey(), SetKey(), alloc) + { + } + + template + sparse_hash_map(InputIterator f, InputIterator l, + size_type n = 0, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& alloc = allocator_type()) + : rep(n, hf, eql, SelectKey(), SetKey(), alloc) + { + rep.insert(f, l); + } + + template + sparse_hash_map(InputIterator f, InputIterator l, + size_type n, const allocator_type& alloc) + : rep(n, hasher(), key_equal(), SelectKey(), SetKey(), alloc) + { + rep.insert(f, l); + } + + template + sparse_hash_map(InputIterator f, InputIterator l, + size_type n, const hasher& hf, const allocator_type& alloc) + : rep(n, hf, key_equal(), SelectKey(), SetKey(), alloc) + { + rep.insert(f, l); + } + + sparse_hash_map(const sparse_hash_map &o) : + rep(o.rep) + {} + + sparse_hash_map(const sparse_hash_map &o, + const allocator_type& alloc) : + rep(o.rep, alloc) + {} + +#if 
!defined(SPP_NO_CXX11_RVALUE_REFERENCES) + sparse_hash_map(sparse_hash_map &&o) : + rep(std::move(o.rep)) + {} + + sparse_hash_map(sparse_hash_map &&o, + const allocator_type& alloc) : + rep(std::move(o.rep), alloc) + {} +#endif + +#if !defined(SPP_NO_CXX11_HDR_INITIALIZER_LIST) + sparse_hash_map(std::initializer_list init, + size_type n = 0, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& alloc = allocator_type()) + : rep(n, hf, eql, SelectKey(), SetKey(), alloc) + { + rep.insert(init.begin(), init.end()); + } + + sparse_hash_map(std::initializer_list init, + size_type n, const allocator_type& alloc) : + rep(n, hasher(), key_equal(), SelectKey(), SetKey(), alloc) + { + rep.insert(init.begin(), init.end()); + } + + sparse_hash_map(std::initializer_list init, + size_type n, const hasher& hf, const allocator_type& alloc) : + rep(n, hf, key_equal(), SelectKey(), SetKey(), alloc) + { + rep.insert(init.begin(), init.end()); + } + + sparse_hash_map& operator=(std::initializer_list init) + { + rep.clear(); + rep.insert(init.begin(), init.end()); + return *this; + } + + void insert(std::initializer_list init) + { + rep.insert(init.begin(), init.end()); + } +#endif + + sparse_hash_map& operator=(const sparse_hash_map &o) + { + rep = o.rep; + return *this; + } + + void clear() { rep.clear(); } + void swap(sparse_hash_map& hs) { rep.swap(hs.rep); } + + // Functions concerning size + // ------------------------- + size_type size() const { return rep.size(); } + size_type max_size() const { return rep.max_size(); } + bool empty() const { return rep.empty(); } + size_type bucket_count() const { return rep.bucket_count(); } + size_type max_bucket_count() const { return rep.max_bucket_count(); } + + size_type bucket_size(size_type i) const { return rep.bucket_size(i); } + size_type bucket(const key_type& key) const { return rep.bucket(key); } + float load_factor() const { return size() * 1.0f / bucket_count(); } + + float max_load_factor() const { return rep.get_enlarge_factor(); } + void max_load_factor(float grow) { rep.set_enlarge_factor(grow); } + + float min_load_factor() const { return rep.get_shrink_factor(); } + void min_load_factor(float shrink){ rep.set_shrink_factor(shrink); } + + void set_resizing_parameters(float shrink, float grow) + { + rep.set_resizing_parameters(shrink, grow); + } + + void resize(size_type cnt) { rep.resize(cnt); } + void rehash(size_type cnt) { resize(cnt); } // c++11 name + void reserve(size_type cnt) { resize(cnt); } // c++11 + + // Lookup + // ------ + iterator find(const key_type& key) { return rep.find(key); } + const_iterator find(const key_type& key) const { return rep.find(key); } + bool contains(const key_type& key) const { return rep.find(key) != rep.end(); } + + mapped_type& operator[](const key_type& key) + { + return rep.template find_or_insert(key).second; + } + + size_type count(const key_type& key) const { return rep.count(key); } + + std::pair + equal_range(const key_type& key) { return rep.equal_range(key); } + + std::pair + equal_range(const key_type& key) const { return rep.equal_range(key); } + + mapped_type& at(const key_type& key) + { + iterator it = rep.find(key); + if (it == rep.end()) + throw_exception(std::out_of_range("at: key not present")); + return it->second; + } + + const mapped_type& at(const key_type& key) const + { + const_iterator it = rep.find(key); + if (it == rep.cend()) + throw_exception(std::out_of_range("at: key not present")); + return it->second; + } + +#if 
!defined(SPP_NO_CXX11_VARIADIC_TEMPLATES) + template + std::pair emplace(Args&&... args) + { + return rep.emplace(std::forward(args)...); + } + + template + iterator emplace_hint(const_iterator , Args&&... args) + { + return rep.emplace(std::forward(args)...).first; + } +#endif + + // Insert + // ------ + std::pair + insert(const value_type& obj) { return rep.insert(obj); } + +#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES) + template< class P > + std::pair insert(P&& obj) { return rep.insert(std::forward
<P>
(obj)); } +#endif + + template + void insert(InputIterator f, InputIterator l) { rep.insert(f, l); } + + void insert(const_iterator f, const_iterator l) { rep.insert(f, l); } + + iterator insert(iterator /*unused*/, const value_type& obj) { return insert(obj).first; } + iterator insert(const_iterator /*unused*/, const value_type& obj) { return insert(obj).first; } + + // Deleted key routines - just to keep google test framework happy + // we don't actually use the deleted key + // --------------------------------------------------------------- + void set_deleted_key(const key_type& key) { rep.set_deleted_key(key); } + void clear_deleted_key() { rep.clear_deleted_key(); } + key_type deleted_key() const { return rep.deleted_key(); } + + // Erase + // ----- + size_type erase(const key_type& key) { return rep.erase(key); } + iterator erase(iterator it) { return rep.erase(it); } + iterator erase(iterator f, iterator l) { return rep.erase(f, l); } + iterator erase(const_iterator it) { return rep.erase(it); } + iterator erase(const_iterator f, const_iterator l){ return rep.erase(f, l); } + + // Comparison + // ---------- + bool operator==(const sparse_hash_map& hs) const { return rep == hs.rep; } + bool operator!=(const sparse_hash_map& hs) const { return rep != hs.rep; } + + + // I/O -- this is an add-on for writing metainformation to disk + // + // For maximum flexibility, this does not assume a particular + // file type (though it will probably be a FILE *). We just pass + // the fp through to rep. + + // If your keys and values are simple enough, you can pass this + // serializer to serialize()/unserialize(). "Simple enough" means + // value_type is a POD type that contains no pointers. Note, + // however, we don't try to normalize endianness. + // --------------------------------------------------------------- + typedef typename ht::NopointerSerializer NopointerSerializer; + + // serializer: a class providing operator()(OUTPUT*, const value_type&) + // (writing value_type to OUTPUT). You can specify a + // NopointerSerializer object if appropriate (see above). + // fp: either a FILE*, OR an ostream*/subclass_of_ostream*, OR a + // pointer to a class providing size_t Write(const void*, size_t), + // which writes a buffer into a stream (which fp presumably + // owns) and returns the number of bytes successfully written. + // Note basic_ostream is not currently supported. + // --------------------------------------------------------------- + template + bool serialize(ValueSerializer serializer, OUTPUT* fp) + { + return rep.serialize(serializer, fp); + } + + // serializer: a functor providing operator()(INPUT*, value_type*) + // (reading from INPUT and into value_type). You can specify a + // NopointerSerializer object if appropriate (see above). + // fp: either a FILE*, OR an istream*/subclass_of_istream*, OR a + // pointer to a class providing size_t Read(void*, size_t), + // which reads into a buffer from a stream (which fp presumably + // owns) and returns the number of bytes successfully read. + // Note basic_istream is not currently supported. + // NOTE: Since value_type is std::pair, ValueSerializer + // may need to do a const cast in order to fill in the key. + // NOTE: if Key or T are not POD types, the serializer MUST use + // placement-new to initialize their values, rather than a normal + // equals-assignment or similar. (The value_type* passed into the + // serializer points to garbage memory.) 
+ // --------------------------------------------------------------- + template + bool unserialize(ValueSerializer serializer, INPUT* fp) + { + return rep.unserialize(serializer, fp); + } + + // The four methods below are DEPRECATED. + // Use serialize() and unserialize() for new code. + // ----------------------------------------------- + template + bool write_metadata(OUTPUT *fp) { return rep.write_metadata(fp); } + + template + bool read_metadata(INPUT *fp) { return rep.read_metadata(fp); } + + template + bool write_nopointer_data(OUTPUT *fp) { return rep.write_nopointer_data(fp); } + + template + bool read_nopointer_data(INPUT *fp) { return rep.read_nopointer_data(fp); } + + +private: + // The actual data + // --------------- + ht rep; +}; + +// ---------------------------------------------------------------------- +// S P A R S E _ H A S H _ S E T +// ---------------------------------------------------------------------- + +template , + class EqualKey = std::equal_to, + class Alloc = SPP_DEFAULT_ALLOCATOR > +class sparse_hash_set +{ +private: + // Apparently identity is not stl-standard, so we define our own + struct Identity + { + typedef const Value& result_type; + inline const Value& operator()(const Value& v) const { return v; } + }; + + struct SetKey + { + inline void operator()(Value* value, const Value& new_key) const + { + *value = new_key; + } + }; + + typedef sparse_hashtable ht; + +public: + typedef typename ht::key_type key_type; + typedef typename ht::value_type value_type; + typedef typename ht::hasher hasher; + typedef typename ht::key_equal key_equal; + typedef Alloc allocator_type; + + typedef typename ht::size_type size_type; + typedef typename ht::difference_type difference_type; + typedef typename ht::const_pointer pointer; + typedef typename ht::const_pointer const_pointer; + typedef typename ht::const_reference reference; + typedef typename ht::const_reference const_reference; + + typedef typename ht::const_iterator iterator; + typedef typename ht::const_iterator const_iterator; + typedef typename ht::const_local_iterator local_iterator; + typedef typename ht::const_local_iterator const_local_iterator; + + + // Iterator functions -- recall all iterators are const + iterator begin() const { return rep.begin(); } + iterator end() const { return rep.end(); } + const_iterator cbegin() const { return rep.cbegin(); } + const_iterator cend() const { return rep.cend(); } + + // These come from tr1's unordered_set. For us, a bucket has 0 or 1 elements. 
+ local_iterator begin(size_type i) const { return rep.begin(i); } + local_iterator end(size_type i) const { return rep.end(i); } + local_iterator cbegin(size_type i) const { return rep.cbegin(i); } + local_iterator cend(size_type i) const { return rep.cend(i); } + + + // Accessor functions + // ------------------ + allocator_type get_allocator() const { return rep.get_allocator(); } + hasher hash_funct() const { return rep.hash_funct(); } + hasher hash_function() const { return hash_funct(); } // tr1 name + key_equal key_eq() const { return rep.key_eq(); } + + + // Constructors + // ------------ + explicit sparse_hash_set(size_type n = 0, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& alloc = allocator_type()) : + rep(n, hf, eql, Identity(), SetKey(), alloc) + { + } + + explicit sparse_hash_set(const allocator_type& alloc) : + rep(0, hasher(), key_equal(), Identity(), SetKey(), alloc) + { + } + + sparse_hash_set(size_type n, const allocator_type& alloc) : + rep(n, hasher(), key_equal(), Identity(), SetKey(), alloc) + { + } + + sparse_hash_set(size_type n, const hasher& hf, + const allocator_type& alloc) : + rep(n, hf, key_equal(), Identity(), SetKey(), alloc) + { + } + + template + sparse_hash_set(InputIterator f, InputIterator l, + size_type n = 0, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& alloc = allocator_type()) + : rep(n, hf, eql, Identity(), SetKey(), alloc) + { + rep.insert(f, l); + } + + template + sparse_hash_set(InputIterator f, InputIterator l, + size_type n, const allocator_type& alloc) + : rep(n, hasher(), key_equal(), Identity(), SetKey(), alloc) + { + rep.insert(f, l); + } + + template + sparse_hash_set(InputIterator f, InputIterator l, + size_type n, const hasher& hf, const allocator_type& alloc) + : rep(n, hf, key_equal(), Identity(), SetKey(), alloc) + { + rep.insert(f, l); + } + + sparse_hash_set(const sparse_hash_set &o) : + rep(o.rep) + {} + + sparse_hash_set(const sparse_hash_set &o, + const allocator_type& alloc) : + rep(o.rep, alloc) + {} + +#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES) + sparse_hash_set(sparse_hash_set &&o) : + rep(std::move(o.rep)) + {} + + sparse_hash_set(sparse_hash_set &&o, + const allocator_type& alloc) : + rep(std::move(o.rep), alloc) + {} +#endif + +#if !defined(SPP_NO_CXX11_HDR_INITIALIZER_LIST) + sparse_hash_set(std::initializer_list init, + size_type n = 0, + const hasher& hf = hasher(), + const key_equal& eql = key_equal(), + const allocator_type& alloc = allocator_type()) : + rep(n, hf, eql, Identity(), SetKey(), alloc) + { + rep.insert(init.begin(), init.end()); + } + + sparse_hash_set(std::initializer_list init, + size_type n, const allocator_type& alloc) : + rep(n, hasher(), key_equal(), Identity(), SetKey(), alloc) + { + rep.insert(init.begin(), init.end()); + } + + sparse_hash_set(std::initializer_list init, + size_type n, const hasher& hf, + const allocator_type& alloc) : + rep(n, hf, key_equal(), Identity(), SetKey(), alloc) + { + rep.insert(init.begin(), init.end()); + } + + sparse_hash_set& operator=(std::initializer_list init) + { + rep.clear(); + rep.insert(init.begin(), init.end()); + return *this; + } + + void insert(std::initializer_list init) + { + rep.insert(init.begin(), init.end()); + } + +#endif + + sparse_hash_set& operator=(const sparse_hash_set &o) + { + rep = o.rep; + return *this; + } + + void clear() { rep.clear(); } + void swap(sparse_hash_set& hs) { rep.swap(hs.rep); } + + + // Functions concerning size + // 
------------------------- + size_type size() const { return rep.size(); } + size_type max_size() const { return rep.max_size(); } + bool empty() const { return rep.empty(); } + size_type bucket_count() const { return rep.bucket_count(); } + size_type max_bucket_count() const { return rep.max_bucket_count(); } + + size_type bucket_size(size_type i) const { return rep.bucket_size(i); } + size_type bucket(const key_type& key) const { return rep.bucket(key); } + + float load_factor() const { return size() * 1.0f / bucket_count(); } + + float max_load_factor() const { return rep.get_enlarge_factor(); } + void max_load_factor(float grow) { rep.set_enlarge_factor(grow); } + + float min_load_factor() const { return rep.get_shrink_factor(); } + void min_load_factor(float shrink){ rep.set_shrink_factor(shrink); } + + void set_resizing_parameters(float shrink, float grow) + { + rep.set_resizing_parameters(shrink, grow); + } + + void resize(size_type cnt) { rep.resize(cnt); } + void rehash(size_type cnt) { resize(cnt); } // c++11 name + void reserve(size_type cnt) { resize(cnt); } // c++11 + + // Lookup + // ------ + iterator find(const key_type& key) const { return rep.find(key); } + bool contains(const key_type& key) const { return rep.find(key) != rep.end(); } + + size_type count(const key_type& key) const { return rep.count(key); } + + std::pair + equal_range(const key_type& key) const { return rep.equal_range(key); } + +#if !defined(SPP_NO_CXX11_VARIADIC_TEMPLATES) + template + std::pair emplace(Args&&... args) + { + return rep.emplace(std::forward(args)...); + } + + template + iterator emplace_hint(const_iterator , Args&&... args) + { + return rep.emplace(std::forward(args)...).first; + } +#endif + + // Insert + // ------ + std::pair insert(const value_type& obj) + { + std::pair p = rep.insert(obj); + return std::pair(p.first, p.second); // const to non-const + } + +#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES) + template + std::pair insert(P&& obj) { return rep.insert(std::forward
<P>
(obj)); } +#endif + + template + void insert(InputIterator f, InputIterator l) { rep.insert(f, l); } + + void insert(const_iterator f, const_iterator l) { rep.insert(f, l); } + + iterator insert(iterator /*unused*/, const value_type& obj) { return insert(obj).first; } + + // Deleted key - do nothing - just to keep google test framework happy + // ------------------------------------------------------------------- + void set_deleted_key(const key_type& key) { rep.set_deleted_key(key); } + void clear_deleted_key() { rep.clear_deleted_key(); } + key_type deleted_key() const { return rep.deleted_key(); } + + // Erase + // ----- + size_type erase(const key_type& key) { return rep.erase(key); } + iterator erase(iterator it) { return rep.erase(it); } + iterator erase(iterator f, iterator l) { return rep.erase(f, l); } + + // Comparison + // ---------- + bool operator==(const sparse_hash_set& hs) const { return rep == hs.rep; } + bool operator!=(const sparse_hash_set& hs) const { return rep != hs.rep; } + + + // I/O -- this is an add-on for writing metainformation to disk + // + // For maximum flexibility, this does not assume a particular + // file type (though it will probably be a FILE *). We just pass + // the fp through to rep. + + // If your keys and values are simple enough, you can pass this + // serializer to serialize()/unserialize(). "Simple enough" means + // value_type is a POD type that contains no pointers. Note, + // however, we don't try to normalize endianness. + // --------------------------------------------------------------- + typedef typename ht::NopointerSerializer NopointerSerializer; + + // serializer: a class providing operator()(OUTPUT*, const value_type&) + // (writing value_type to OUTPUT). You can specify a + // NopointerSerializer object if appropriate (see above). + // fp: either a FILE*, OR an ostream*/subclass_of_ostream*, OR a + // pointer to a class providing size_t Write(const void*, size_t), + // which writes a buffer into a stream (which fp presumably + // owns) and returns the number of bytes successfully written. + // Note basic_ostream is not currently supported. + // --------------------------------------------------------------- + template + bool serialize(ValueSerializer serializer, OUTPUT* fp) + { + return rep.serialize(serializer, fp); + } + + // serializer: a functor providing operator()(INPUT*, value_type*) + // (reading from INPUT and into value_type). You can specify a + // NopointerSerializer object if appropriate (see above). + // fp: either a FILE*, OR an istream*/subclass_of_istream*, OR a + // pointer to a class providing size_t Read(void*, size_t), + // which reads into a buffer from a stream (which fp presumably + // owns) and returns the number of bytes successfully read. + // Note basic_istream is not currently supported. + // NOTE: Since value_type is const Key, ValueSerializer + // may need to do a const cast in order to fill in the key. + // NOTE: if Key is not a POD type, the serializer MUST use + // placement-new to initialize its value, rather than a normal + // equals-assignment or similar. (The value_type* passed into + // the serializer points to garbage memory.) + // --------------------------------------------------------------- + template + bool unserialize(ValueSerializer serializer, INPUT* fp) + { + return rep.unserialize(serializer, fp); + } + + // The four methods below are DEPRECATED. + // Use serialize() and unserialize() for new code. 
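An illustrative round-trip through the recommended serialize()/unserialize() interface, using the POD NopointerSerializer and a FILE* (the file name is made up; this only applies when value_type is a pointer-free POD):

    #include <cstdio>
    #include <sparsepp/spp.h>

    typedef spp::sparse_hash_map<unsigned, unsigned> PodMap;

    int main()
    {
        PodMap out;
        out[1] = 10;
        out[2] = 20;

        if (FILE* fp = std::fopen("podmap.dmp", "wb")) {
            out.serialize(PodMap::NopointerSerializer(), fp);   // write value_types verbatim
            std::fclose(fp);
        }

        PodMap in;
        if (FILE* fp = std::fopen("podmap.dmp", "rb")) {
            in.unserialize(PodMap::NopointerSerializer(), fp);  // read them back
            std::fclose(fp);
        }
        return in == out ? 0 : 1;
    }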
+ // ----------------------------------------------- + template + bool write_metadata(OUTPUT *fp) { return rep.write_metadata(fp); } + + template + bool read_metadata(INPUT *fp) { return rep.read_metadata(fp); } + + template + bool write_nopointer_data(OUTPUT *fp) { return rep.write_nopointer_data(fp); } + + template + bool read_nopointer_data(INPUT *fp) { return rep.read_nopointer_data(fp); } + +private: + // The actual data + // --------------- + ht rep; +}; + +} // spp_ namespace + + +// We need a global swap for all our classes as well +// ------------------------------------------------- + +template +inline void swap(spp_::sparsegroup &x, spp_::sparsegroup &y) +{ + x.swap(y); +} + +template +inline void swap(spp_::sparsetable &x, spp_::sparsetable &y) +{ + x.swap(y); +} + +template +inline void swap(spp_::sparse_hashtable &x, + spp_::sparse_hashtable &y) +{ + x.swap(y); +} + +template +inline void swap(spp_::sparse_hash_map& hm1, + spp_::sparse_hash_map& hm2) +{ + hm1.swap(hm2); +} + +template +inline void swap(spp_::sparse_hash_set& hs1, + spp_::sparse_hash_set& hs2) +{ + hs1.swap(hs2); +} + +#endif // sparsepp_h_guard_ diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_config.h b/resources/3rdparty/sparsepp/sparsepp/spp_config.h new file mode 100755 index 000000000..46eeee5c2 --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp_config.h @@ -0,0 +1,781 @@ +#if !defined(spp_config_h_guard) +#define spp_config_h_guard + +// -------------------------------------------------- +// Sparsepp config macros +// some can be overriden on the command line +// -------------------------------------------------- +#ifndef SPP_NAMESPACE + #define SPP_NAMESPACE spp +#endif + +#ifndef spp_ + #define spp_ SPP_NAMESPACE +#endif + +#ifndef SPP_DEFAULT_ALLOCATOR + #if (defined(SPP_USE_SPP_ALLOC) && SPP_USE_SPP_ALLOC) && defined(_MSC_VER) + // ----------------------------------------------------------------------------- + // When building with the Microsoft compiler, we use a custom allocator because + // the default one fragments memory when reallocating. This is desirable only + // when creating large sparsepp hash maps. If you create lots of small hash_maps, + // define the following before including spp.h: + // #define SPP_DEFAULT_ALLOCATOR spp::libc_allocator + // ----------------------------------------------------------------------------- + #define SPP_DEFAULT_ALLOCATOR spp_::spp_allocator + #define SPP_INCLUDE_SPP_ALLOC + #else + #define SPP_DEFAULT_ALLOCATOR spp_::libc_allocator + #endif +#endif + +#ifndef SPP_GROUP_SIZE + // must be 32 or 64 + #define SPP_GROUP_SIZE 32 +#endif + +#ifndef SPP_ALLOC_SZ + // must be power of 2 (0 = agressive alloc, 1 = smallest memory usage, 2 = good compromise) + #define SPP_ALLOC_SZ 0 +#endif + +#ifndef SPP_STORE_NUM_ITEMS + // 1 uses a little bit more memory, but faster!! + #define SPP_STORE_NUM_ITEMS 1 +#endif + + +// --------------------------------------------------------------------------- +// Compiler detection code (SPP_ proprocessor macros) derived from Boost +// libraries. Therefore Boost software licence reproduced below. 
+// --------------------------------------------------------------------------- +// Boost Software License - Version 1.0 - August 17th, 2003 +// +// Permission is hereby granted, free of charge, to any person or organization +// obtaining a copy of the software and accompanying documentation covered by +// this license (the "Software") to use, reproduce, display, distribute, +// execute, and transmit the Software, and to prepare derivative works of the +// Software, and to permit third-parties to whom the Software is furnished to +// do so, all subject to the following: +// +// The copyright notices in the Software and this entire statement, including +// the above license grant, this restriction and the following disclaimer, +// must be included in all copies of the Software, in whole or in part, and +// all derivative works of the Software, unless such copies or derivative +// works are solely in the form of machine-executable object code generated by +// a source language processor. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// --------------------------------------------------------------------------- + +// Boost like configuration +// ------------------------ +#if defined __clang__ + + #if defined(i386) + #include + inline void spp_cpuid(int info[4], int InfoType) { + __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]); + } + #endif + + #define SPP_POPCNT __builtin_popcount + #define SPP_POPCNT64 __builtin_popcountll + + #define SPP_HAS_CSTDINT + + #ifndef __has_extension + #define __has_extension __has_feature + #endif + + #if !__has_feature(cxx_exceptions) && !defined(SPP_NO_EXCEPTIONS) + #define SPP_NO_EXCEPTIONS + #endif + + #if !__has_feature(cxx_rtti) && !defined(SPP_NO_RTTI) + #define SPP_NO_RTTI + #endif + + #if !__has_feature(cxx_rtti) && !defined(SPP_NO_TYPEID) + #define SPP_NO_TYPEID + #endif + + #if defined(__int64) && !defined(__GNUC__) + #define SPP_HAS_MS_INT64 + #endif + + #define SPP_HAS_NRVO + + // Branch prediction hints + #if defined(__has_builtin) + #if __has_builtin(__builtin_expect) + #define SPP_LIKELY(x) __builtin_expect(x, 1) + #define SPP_UNLIKELY(x) __builtin_expect(x, 0) + #endif + #endif + + // Clang supports "long long" in all compilation modes. 
+ #define SPP_HAS_LONG_LONG + + #if !__has_feature(cxx_constexpr) + #define SPP_NO_CXX11_CONSTEXPR + #endif + + #if !__has_feature(cxx_decltype) + #define SPP_NO_CXX11_DECLTYPE + #endif + + #if !__has_feature(cxx_decltype_incomplete_return_types) + #define SPP_NO_CXX11_DECLTYPE_N3276 + #endif + + #if !__has_feature(cxx_defaulted_functions) + #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS + #endif + + #if !__has_feature(cxx_deleted_functions) + #define SPP_NO_CXX11_DELETED_FUNCTIONS + #endif + + #if !__has_feature(cxx_explicit_conversions) + #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS + #endif + + #if !__has_feature(cxx_default_function_template_args) + #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS + #endif + + #if !__has_feature(cxx_generalized_initializers) + #define SPP_NO_CXX11_HDR_INITIALIZER_LIST + #endif + + #if !__has_feature(cxx_lambdas) + #define SPP_NO_CXX11_LAMBDAS + #endif + + #if !__has_feature(cxx_local_type_template_args) + #define SPP_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS + #endif + + #if !__has_feature(cxx_raw_string_literals) + #define SPP_NO_CXX11_RAW_LITERALS + #endif + + #if !__has_feature(cxx_reference_qualified_functions) + #define SPP_NO_CXX11_REF_QUALIFIERS + #endif + + #if !__has_feature(cxx_generalized_initializers) + #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX + #endif + + #if !__has_feature(cxx_rvalue_references) + #define SPP_NO_CXX11_RVALUE_REFERENCES + #endif + + #if !__has_feature(cxx_static_assert) + #define SPP_NO_CXX11_STATIC_ASSERT + #endif + + #if !__has_feature(cxx_alias_templates) + #define SPP_NO_CXX11_TEMPLATE_ALIASES + #endif + + #if !__has_feature(cxx_variadic_templates) + #define SPP_NO_CXX11_VARIADIC_TEMPLATES + #endif + + #if !__has_feature(cxx_user_literals) + #define SPP_NO_CXX11_USER_DEFINED_LITERALS + #endif + + #if !__has_feature(cxx_alignas) + #define SPP_NO_CXX11_ALIGNAS + #endif + + #if !__has_feature(cxx_trailing_return) + #define SPP_NO_CXX11_TRAILING_RESULT_TYPES + #endif + + #if !__has_feature(cxx_inline_namespaces) + #define SPP_NO_CXX11_INLINE_NAMESPACES + #endif + + #if !__has_feature(cxx_override_control) + #define SPP_NO_CXX11_FINAL + #endif + + #if !(__has_feature(__cxx_binary_literals__) || __has_extension(__cxx_binary_literals__)) + #define SPP_NO_CXX14_BINARY_LITERALS + #endif + + #if !__has_feature(__cxx_decltype_auto__) + #define SPP_NO_CXX14_DECLTYPE_AUTO + #endif + + #if !__has_feature(__cxx_init_captures__) + #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES + #endif + + #if !__has_feature(__cxx_generic_lambdas__) + #define SPP_NO_CXX14_GENERIC_LAMBDAS + #endif + + + #if !__has_feature(__cxx_generic_lambdas__) || !__has_feature(__cxx_relaxed_constexpr__) + #define SPP_NO_CXX14_CONSTEXPR + #endif + + #if !__has_feature(__cxx_return_type_deduction__) + #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION + #endif + + #if !__has_feature(__cxx_variable_templates__) + #define SPP_NO_CXX14_VARIABLE_TEMPLATES + #endif + + #if __cplusplus < 201400 + #define SPP_NO_CXX14_DIGIT_SEPARATORS + #endif + + #if defined(__has_builtin) && __has_builtin(__builtin_unreachable) + #define SPP_UNREACHABLE_RETURN(x) __builtin_unreachable(); + #endif + + #define SPP_ATTRIBUTE_UNUSED __attribute__((__unused__)) + + #ifndef SPP_COMPILER + #define SPP_COMPILER "Clang version " __clang_version__ + #endif + + #define SPP_CLANG 1 + + +#elif defined __GNUC__ + + #define SPP_GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + + // definition to expand macro then apply to pragma message + // #define VALUE_TO_STRING(x) 
#x + // #define VALUE(x) VALUE_TO_STRING(x) + // #define VAR_NAME_VALUE(var) #var "=" VALUE(var) + // #pragma message(VAR_NAME_VALUE(SPP_GCC_VERSION)) + + #if defined(i386) + #include + inline void spp_cpuid(int info[4], int InfoType) { + __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]); + } + #endif + + // __POPCNT__ defined when the compiled with popcount support + // (-mpopcnt compiler option is given for example) + #ifdef __POPCNT__ + // slower unless compiled iwith -mpopcnt + #define SPP_POPCNT __builtin_popcount + #define SPP_POPCNT64 __builtin_popcountll + #endif + + #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L) + #define SPP_GCC_CXX11 + #endif + + #if __GNUC__ == 3 + #if defined (__PATHSCALE__) + #define SPP_NO_TWO_PHASE_NAME_LOOKUP + #define SPP_NO_IS_ABSTRACT + #endif + + #if __GNUC_MINOR__ < 4 + #define SPP_NO_IS_ABSTRACT + #endif + + #define SPP_NO_CXX11_EXTERN_TEMPLATE + #endif + + #if __GNUC__ < 4 + // + // All problems to gcc-3.x and earlier here: + // + #define SPP_NO_TWO_PHASE_NAME_LOOKUP + #ifdef __OPEN64__ + #define SPP_NO_IS_ABSTRACT + #endif + #endif + + // GCC prior to 3.4 had #pragma once too but it didn't work well with filesystem links + #if SPP_GCC_VERSION >= 30400 + #define SPP_HAS_PRAGMA_ONCE + #endif + + #if SPP_GCC_VERSION < 40400 + // Previous versions of GCC did not completely implement value-initialization: + // GCC Bug 30111, "Value-initialization of POD base class doesn't initialize + // members", reported by Jonathan Wakely in 2006, + // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=30111 (fixed for GCC 4.4) + // GCC Bug 33916, "Default constructor fails to initialize array members", + // reported by Michael Elizabeth Chastain in 2007, + // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33916 (fixed for GCC 4.2.4) + // See also: http://www.boost.org/libs/utility/value_init.htm #compiler_issues + #define SPP_NO_COMPLETE_VALUE_INITIALIZATION + #endif + + #if !defined(__EXCEPTIONS) && !defined(SPP_NO_EXCEPTIONS) + #define SPP_NO_EXCEPTIONS + #endif + + // + // Threading support: Turn this on unconditionally here (except for + // those platforms where we can know for sure). It will get turned off again + // later if no threading API is detected. + // + #if !defined(__MINGW32__) && !defined(linux) && !defined(__linux) && !defined(__linux__) + #define SPP_HAS_THREADS + #endif + + // + // gcc has "long long" + // Except on Darwin with standard compliance enabled (-pedantic) + // Apple gcc helpfully defines this macro we can query + // + #if !defined(__DARWIN_NO_LONG_LONG) + #define SPP_HAS_LONG_LONG + #endif + + // + // gcc implements the named return value optimization since version 3.1 + // + #define SPP_HAS_NRVO + + // Branch prediction hints + #define SPP_LIKELY(x) __builtin_expect(x, 1) + #define SPP_UNLIKELY(x) __builtin_expect(x, 0) + + // + // Dynamic shared object (DSO) and dynamic-link library (DLL) support + // + #if __GNUC__ >= 4 + #if (defined(_WIN32) || defined(__WIN32__) || defined(WIN32)) && !defined(__CYGWIN__) + // All Win32 development environments, including 64-bit Windows and MinGW, define + // _WIN32 or one of its variant spellings. Note that Cygwin is a POSIX environment, + // so does not define _WIN32 or its variants. 
+ #define SPP_HAS_DECLSPEC + #define SPP_SYMBOL_EXPORT __attribute__((__dllexport__)) + #define SPP_SYMBOL_IMPORT __attribute__((__dllimport__)) + #else + #define SPP_SYMBOL_EXPORT __attribute__((__visibility__("default"))) + #define SPP_SYMBOL_IMPORT + #endif + + #define SPP_SYMBOL_VISIBLE __attribute__((__visibility__("default"))) + #else + // config/platform/win32.hpp will define SPP_SYMBOL_EXPORT, etc., unless already defined + #define SPP_SYMBOL_EXPORT + #endif + + // + // RTTI and typeinfo detection is possible post gcc-4.3: + // + #if SPP_GCC_VERSION > 40300 + #ifndef __GXX_RTTI + #ifndef SPP_NO_TYPEID + #define SPP_NO_TYPEID + #endif + #ifndef SPP_NO_RTTI + #define SPP_NO_RTTI + #endif + #endif + #endif + + // + // Recent GCC versions have __int128 when in 64-bit mode. + // + // We disable this if the compiler is really nvcc with C++03 as it + // doesn't actually support __int128 as of CUDA_VERSION=7500 + // even though it defines __SIZEOF_INT128__. + // See https://svn.boost.org/trac/boost/ticket/8048 + // https://svn.boost.org/trac/boost/ticket/11852 + // Only re-enable this for nvcc if you're absolutely sure + // of the circumstances under which it's supported: + // + #if defined(__CUDACC__) + #if defined(SPP_GCC_CXX11) + #define SPP_NVCC_CXX11 + #else + #define SPP_NVCC_CXX03 + #endif + #endif + + #if defined(__SIZEOF_INT128__) && !defined(SPP_NVCC_CXX03) + #define SPP_HAS_INT128 + #endif + // + // Recent GCC versions have a __float128 native type, we need to + // include a std lib header to detect this - not ideal, but we'll + // be including later anyway when we select the std lib. + // + // Nevertheless, as of CUDA 7.5, using __float128 with the host + // compiler in pre-C++11 mode is still not supported. + // See https://svn.boost.org/trac/boost/ticket/11852 + // + #ifdef __cplusplus + #include + #else + #include + #endif + + #if defined(_GLIBCXX_USE_FLOAT128) && !defined(__STRICT_ANSI__) && !defined(SPP_NVCC_CXX03) + #define SPP_HAS_FLOAT128 + #endif + + // C++0x features in 4.3.n and later + // + #if (SPP_GCC_VERSION >= 40300) && defined(SPP_GCC_CXX11) + // C++0x features are only enabled when -std=c++0x or -std=gnu++0x are + // passed on the command line, which in turn defines + // __GXX_EXPERIMENTAL_CXX0X__. 
+ #define SPP_HAS_DECLTYPE + #define SPP_HAS_RVALUE_REFS + #define SPP_HAS_STATIC_ASSERT + #define SPP_HAS_VARIADIC_TMPL + #define SPP_HAS_CSTDINT + #else + #define SPP_NO_CXX11_DECLTYPE + #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS + #define SPP_NO_CXX11_RVALUE_REFERENCES + #define SPP_NO_CXX11_STATIC_ASSERT + #endif + + // C++0x features in 4.4.n and later + // + #if (SPP_GCC_VERSION < 40400) || !defined(SPP_GCC_CXX11) + #define SPP_NO_CXX11_AUTO_DECLARATIONS + #define SPP_NO_CXX11_AUTO_MULTIDECLARATIONS + #define SPP_NO_CXX11_CHAR16_T + #define SPP_NO_CXX11_CHAR32_T + #define SPP_NO_CXX11_HDR_INITIALIZER_LIST + #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS + #define SPP_NO_CXX11_DELETED_FUNCTIONS + #define SPP_NO_CXX11_TRAILING_RESULT_TYPES + #define SPP_NO_CXX11_INLINE_NAMESPACES + #define SPP_NO_CXX11_VARIADIC_TEMPLATES + #endif + + #if SPP_GCC_VERSION < 40500 + #define SPP_NO_SFINAE_EXPR + #endif + + // GCC 4.5 forbids declaration of defaulted functions in private or protected sections + #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 5) || !defined(SPP_GCC_CXX11) + #define SPP_NO_CXX11_NON_PUBLIC_DEFAULTED_FUNCTIONS + #endif + + // C++0x features in 4.5.0 and later + // + #if (SPP_GCC_VERSION < 40500) || !defined(SPP_GCC_CXX11) + #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS + #define SPP_NO_CXX11_LAMBDAS + #define SPP_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS + #define SPP_NO_CXX11_RAW_LITERALS + #endif + + // C++0x features in 4.6.n and later + // + #if (SPP_GCC_VERSION < 40600) || !defined(SPP_GCC_CXX11) + #define SPP_NO_CXX11_CONSTEXPR + #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX + #endif + + // C++0x features in 4.7.n and later + // + #if (SPP_GCC_VERSION < 40700) || !defined(SPP_GCC_CXX11) + #define SPP_NO_CXX11_FINAL + #define SPP_NO_CXX11_TEMPLATE_ALIASES + #define SPP_NO_CXX11_USER_DEFINED_LITERALS + #define SPP_NO_CXX11_FIXED_LENGTH_VARIADIC_TEMPLATE_EXPANSION_PACKS + #endif + + // C++0x features in 4.8.n and later + // + #if (SPP_GCC_VERSION < 40800) || !defined(SPP_GCC_CXX11) + #define SPP_NO_CXX11_ALIGNAS + #endif + + // C++0x features in 4.8.1 and later + // + #if (SPP_GCC_VERSION < 40801) || !defined(SPP_GCC_CXX11) + #define SPP_NO_CXX11_DECLTYPE_N3276 + #define SPP_NO_CXX11_REF_QUALIFIERS + #define SPP_NO_CXX14_BINARY_LITERALS + #endif + + // C++14 features in 4.9.0 and later + // + #if (SPP_GCC_VERSION < 40900) || (__cplusplus < 201300) + #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION + #define SPP_NO_CXX14_GENERIC_LAMBDAS + #define SPP_NO_CXX14_DIGIT_SEPARATORS + #define SPP_NO_CXX14_DECLTYPE_AUTO + #if !((SPP_GCC_VERSION >= 40801) && (SPP_GCC_VERSION < 40900) && defined(SPP_GCC_CXX11)) + #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES + #endif + #endif + + + // C++ 14: + #if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304) + #define SPP_NO_CXX14_CONSTEXPR + #endif + #if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304) + #define SPP_NO_CXX14_VARIABLE_TEMPLATES + #endif + + // + // Unused attribute: + #if __GNUC__ >= 4 + #define SPP_ATTRIBUTE_UNUSED __attribute__((__unused__)) + #endif + // + // __builtin_unreachable: + #if SPP_GCC_VERSION >= 40800 + #define SPP_UNREACHABLE_RETURN(x) __builtin_unreachable(); + #endif + + #ifndef SPP_COMPILER + #define SPP_COMPILER "GNU C++ version " __VERSION__ + #endif + + // ConceptGCC compiler: + // http://www.generic-programming.org/software/ConceptGCC/ + #ifdef __GXX_CONCEPTS__ + #define SPP_HAS_CONCEPTS + #define SPP_COMPILER "ConceptGCC version " __VERSION__ + #endif + 
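+    // [Editor-added illustrative sketch, not part of the upstream header: how the
+    //  macros chosen in this GCC section are typically consumed. SPP_POPCNT is
+    //  optional (only defined when __POPCNT__ is set, e.g. via -mpopcnt), so callers
+    //  keep a portable fallback, and SPP_LIKELY/SPP_UNLIKELY wrap __builtin_expect.
+    //  The helper names below are hypothetical.]
+    #if 0
+    inline unsigned spp_example_popcount(unsigned v)
+    {
+    #ifdef SPP_POPCNT
+        return SPP_POPCNT(v);                     // hardware popcount when available
+    #else
+        v = v - ((v >> 1) & 0x55555555);          // portable bit-twiddling fallback
+        v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+        return (((v + (v >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
+    #endif
+    }
+
+    inline void spp_example_hint(const int *p)
+    {
+        if (SPP_UNLIKELY(p == 0))                 // hint that the null case is rare
+            return;
+        // ... common path ...
+    }
+    #endif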
+#elif defined _MSC_VER + + #include // for __popcnt() + + #define SPP_POPCNT_CHECK // slower when defined, but we have to check! + #define spp_cpuid(info, x) __cpuid(info, x) + + #define SPP_POPCNT __popcnt + #if (SPP_GROUP_SIZE == 64 && INTPTR_MAX == INT64_MAX) + #define SPP_POPCNT64 __popcnt64 + #endif + + // Attempt to suppress VC6 warnings about the length of decorated names (obsolete): + #pragma warning( disable : 4503 ) // warning: decorated name length exceeded + + #define SPP_HAS_PRAGMA_ONCE + #define SPP_HAS_CSTDINT + + // + // versions check: + // we don't support Visual C++ prior to version 7.1: + #if _MSC_VER < 1310 + #error "Antique compiler not supported" + #endif + + #if _MSC_FULL_VER < 180020827 + #define SPP_NO_FENV_H + #endif + + #if _MSC_VER < 1400 + // although a conforming signature for swprint exists in VC7.1 + // it appears not to actually work: + #define SPP_NO_SWPRINTF + + // Our extern template tests also fail for this compiler: + #define SPP_NO_CXX11_EXTERN_TEMPLATE + + // Variadic macros do not exist for VC7.1 and lower + #define SPP_NO_CXX11_VARIADIC_MACROS + #endif + + #if _MSC_VER < 1500 // 140X == VC++ 8.0 + #undef SPP_HAS_CSTDINT + #define SPP_NO_MEMBER_TEMPLATE_FRIENDS + #endif + + #if _MSC_VER < 1600 // 150X == VC++ 9.0 + // A bug in VC9: + #define SPP_NO_ADL_BARRIER + #endif + + + // MSVC (including the latest checked version) has not yet completely + // implemented value-initialization, as is reported: + // "VC++ does not value-initialize members of derived classes without + // user-declared constructor", reported in 2009 by Sylvester Hesp: + // https: //connect.microsoft.com/VisualStudio/feedback/details/484295 + // "Presence of copy constructor breaks member class initialization", + // reported in 2009 by Alex Vakulenko: + // https: //connect.microsoft.com/VisualStudio/feedback/details/499606 + // "Value-initialization in new-expression", reported in 2005 by + // Pavel Kuznetsov (MetaCommunications Engineering): + // https: //connect.microsoft.com/VisualStudio/feedback/details/100744 + // See also: http: //www.boost.org/libs/utility/value_init.htm #compiler_issues + // (Niels Dekker, LKEB, May 2010) + #define SPP_NO_COMPLETE_VALUE_INITIALIZATION + + #ifndef _NATIVE_WCHAR_T_DEFINED + #define SPP_NO_INTRINSIC_WCHAR_T + #endif + + // + // check for exception handling support: + #if !defined(_CPPUNWIND) && !defined(SPP_NO_EXCEPTIONS) + #define SPP_NO_EXCEPTIONS + #endif + + // + // __int64 support: + // + #define SPP_HAS_MS_INT64 + #if defined(_MSC_EXTENSIONS) || (_MSC_VER >= 1400) + #define SPP_HAS_LONG_LONG + #else + #define SPP_NO_LONG_LONG + #endif + + #if (_MSC_VER >= 1400) && !defined(_DEBUG) + #define SPP_HAS_NRVO + #endif + + #if _MSC_VER >= 1500 // 150X == VC++ 9.0 + #define SPP_HAS_PRAGMA_DETECT_MISMATCH + #endif + + // + // disable Win32 API's if compiler extensions are + // turned off: + // + #if !defined(_MSC_EXTENSIONS) && !defined(SPP_DISABLE_WIN32) + #define SPP_DISABLE_WIN32 + #endif + + #if !defined(_CPPRTTI) && !defined(SPP_NO_RTTI) + #define SPP_NO_RTTI + #endif + + // + // TR1 features: + // + #if _MSC_VER >= 1700 + // #define SPP_HAS_TR1_HASH // don't know if this is true yet. + // #define SPP_HAS_TR1_TYPE_TRAITS // don't know if this is true yet. 
+ #define SPP_HAS_TR1_UNORDERED_MAP + #define SPP_HAS_TR1_UNORDERED_SET + #endif + + // + // C++0x features + // + // See above for SPP_NO_LONG_LONG + + // C++ features supported by VC++ 10 (aka 2010) + // + #if _MSC_VER < 1600 + #define SPP_NO_CXX11_AUTO_DECLARATIONS + #define SPP_NO_CXX11_AUTO_MULTIDECLARATIONS + #define SPP_NO_CXX11_LAMBDAS + #define SPP_NO_CXX11_RVALUE_REFERENCES + #define SPP_NO_CXX11_STATIC_ASSERT + #define SPP_NO_CXX11_DECLTYPE + #endif // _MSC_VER < 1600 + + #if _MSC_VER >= 1600 + #define SPP_HAS_STDINT_H + #endif + + // C++11 features supported by VC++ 11 (aka 2012) + // + #if _MSC_VER < 1700 + #define SPP_NO_CXX11_FINAL + #endif // _MSC_VER < 1700 + + // C++11 features supported by VC++ 12 (aka 2013). + // + #if _MSC_FULL_VER < 180020827 + #define SPP_NO_CXX11_DEFAULTED_FUNCTIONS + #define SPP_NO_CXX11_DELETED_FUNCTIONS + #define SPP_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS + #define SPP_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS + #define SPP_NO_CXX11_RAW_LITERALS + #define SPP_NO_CXX11_TEMPLATE_ALIASES + #define SPP_NO_CXX11_TRAILING_RESULT_TYPES + #define SPP_NO_CXX11_VARIADIC_TEMPLATES + #define SPP_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX + #define SPP_NO_CXX11_DECLTYPE_N3276 + #endif + + // C++11 features supported by VC++ 14 (aka 2014) CTP1 + #if (_MSC_FULL_VER < 190021730) + #define SPP_NO_CXX11_REF_QUALIFIERS + #define SPP_NO_CXX11_USER_DEFINED_LITERALS + #define SPP_NO_CXX11_ALIGNAS + #define SPP_NO_CXX11_INLINE_NAMESPACES + #define SPP_NO_CXX14_DECLTYPE_AUTO + #define SPP_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES + #define SPP_NO_CXX14_RETURN_TYPE_DEDUCTION + #define SPP_NO_CXX11_HDR_INITIALIZER_LIST + #endif + + // C++11 features not supported by any versions + #define SPP_NO_CXX11_CHAR16_T + #define SPP_NO_CXX11_CHAR32_T + #define SPP_NO_CXX11_CONSTEXPR + #define SPP_NO_SFINAE_EXPR + #define SPP_NO_TWO_PHASE_NAME_LOOKUP + + // C++ 14: + #if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304) + #define SPP_NO_CXX14_BINARY_LITERALS + #endif + + #if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304) + #define SPP_NO_CXX14_CONSTEXPR + #endif + + #if (__cplusplus < 201304) // There's no SD6 check for this.... + #define SPP_NO_CXX14_DIGIT_SEPARATORS + #endif + + #if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304) + #define SPP_NO_CXX14_GENERIC_LAMBDAS + #endif + + #if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304) + #define SPP_NO_CXX14_VARIABLE_TEMPLATES + #endif + +#endif + +// from boost/config/suffix.hpp +// ---------------------------- +#ifndef SPP_ATTRIBUTE_UNUSED + #define SPP_ATTRIBUTE_UNUSED +#endif + +/* + Try to persuade compilers to inline. 
+*/ +#ifndef SPP_FORCEINLINE + #if defined(__GNUC__) + #define SPP_FORCEINLINE __inline __attribute__ ((always_inline)) + #elif defined(_MSC_VER) + #define SPP_FORCEINLINE __forceinline + #else + #define SPP_FORCEINLINE inline + #endif +#endif + + +#endif // spp_config_h_guard diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_dlalloc.h b/resources/3rdparty/sparsepp/sparsepp/spp_dlalloc.h new file mode 100755 index 000000000..8e063fbab --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp_dlalloc.h @@ -0,0 +1,4023 @@ +#ifndef spp_dlalloc__h_ +#define spp_dlalloc__h_ + +/* This is a C++ allocator created from Doug Lea's dlmalloc + (Version 2.8.6 Wed Aug 29 06:57:58 2012) + see: http://g.oswego.edu/dl/html/malloc.html +*/ + +#include +#include + + +#ifndef SPP_FORCEINLINE + #if defined(__GNUC__) + #define SPP_FORCEINLINE __inline __attribute__ ((always_inline)) + #elif defined(_MSC_VER) + #define SPP_FORCEINLINE __forceinline + #else + #define SPP_FORCEINLINE inline + #endif +#endif + + +#ifndef SPP_IMPL + #define SPP_IMPL SPP_FORCEINLINE +#endif + +#ifndef SPP_API + #define SPP_API static +#endif + + +namespace spp +{ + // ---------------------- allocator internal API ----------------------- + typedef void* mspace; + + /* + create_mspace creates and returns a new independent space with the + given initial capacity, or, if 0, the default granularity size. It + returns null if there is no system memory available to create the + space. If argument locked is non-zero, the space uses a separate + lock to control access. The capacity of the space will grow + dynamically as needed to service mspace_malloc requests. You can + control the sizes of incremental increases of this space by + compiling with a different SPP_DEFAULT_GRANULARITY or dynamically + setting with mallopt(M_GRANULARITY, value). 
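+
+    [Editor-added usage sketch, not from the original documentation: the C-style
+     round trip supported by the entry points declared below looks like
+
+         spp::mspace m = spp::create_mspace(0, 0);   // default capacity, no locking
+         void *p = spp::mspace_malloc(m, 100);       // carve 100 bytes out of m
+         p = spp::mspace_realloc(m, p, 200);         // grow (or move) the block
+         spp::mspace_free(m, p);                     // hand it back to the space
+         spp::destroy_mspace(m);                     // release the whole space at once
+
+     The spp_allocator class further down wraps exactly this sequence behind a
+     std-style allocate/construct/destroy/deallocate interface.]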
+ */ + SPP_API mspace create_mspace(size_t capacity, int locked); + SPP_API size_t destroy_mspace(mspace msp); + SPP_API void* mspace_malloc(mspace msp, size_t bytes); + SPP_API void mspace_free(mspace msp, void* mem); + SPP_API void* mspace_realloc(mspace msp, void* mem, size_t newsize); + +#if 0 + SPP_API mspace create_mspace_with_base(void* base, size_t capacity, int locked); + SPP_API int mspace_track_large_chunks(mspace msp, int enable); + SPP_API void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); + SPP_API void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); + SPP_API void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]); + SPP_API void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]); + SPP_API size_t mspace_footprint(mspace msp); + SPP_API size_t mspace_max_footprint(mspace msp); + SPP_API size_t mspace_usable_size(const void* mem); + SPP_API int mspace_trim(mspace msp, size_t pad); + SPP_API int mspace_mallopt(int, int); +#endif + + // ----------------------------------------------------------- + // ----------------------------------------------------------- + template + class spp_allocator + { + public: + typedef T value_type; + typedef T* pointer; + typedef ptrdiff_t difference_type; + typedef const T* const_pointer; + typedef size_t size_type; + + spp_allocator() : _space(new MSpace) {} + + void swap(spp_allocator &o) + { + std::swap(_space, o._space); + } + + pointer allocate(size_t n, const_pointer /* unused */ = 0) + { + pointer res = static_cast(mspace_malloc(_space->_sp, n * sizeof(T))); + if (!res) + throw std::bad_alloc(); + return res; + } + + void deallocate(pointer p, size_t /* unused */) + { + mspace_free(_space->_sp, p); + } + + pointer reallocate(pointer p, size_t new_size) + { + pointer res = static_cast(mspace_realloc(_space->_sp, p, new_size * sizeof(T))); + if (!res) + throw std::bad_alloc(); + return res; + } + + size_type max_size() const + { + return static_cast(-1) / sizeof(value_type); + } + + void construct(pointer p, const value_type& val) + { + new (p) value_type(val); + } + + void destroy(pointer p) { p->~value_type(); } + + template + struct rebind + { + // rebind to libc_allocator because we want to use malloc_inspect_all in destructive_iterator + // to reduce peak memory usage (we don't want mixed with value_type when + // we traverse the allocated memory). + typedef spp::spp_allocator other; + }; + + mspace space() const { return _space->_sp; } + + // check if we can clear the whole allocator memory at once => works only if the allocator + // is not be shared. 
If can_clear() returns true, we expect that the next allocator call + // will be clear() - not allocate() or deallocate() + bool can_clear() + { + assert(!_space_to_clear); + _space_to_clear.reset(); + _space_to_clear.swap(_space); + if (_space_to_clear->count() == 1) + return true; + else + _space_to_clear.swap(_space); + return false; + } + + void clear() + { + assert(!_space && _space_to_clear); + _space_to_clear.reset(); + _space = new MSpace; + } + + private: + struct MSpace : public spp_rc + { + MSpace() : + _sp(create_mspace(0, 0)) + {} + + ~MSpace() + { + destroy_mspace(_sp); + } + + mspace _sp; + }; + + spp_sptr _space; + spp_sptr _space_to_clear; + }; +} + + +// allocators are "equal" whenever memory allocated with one can be deallocated with the other +template +inline bool operator==(const spp_::spp_allocator &a, const spp_::spp_allocator &b) +{ + return a.space() == b.space(); +} + +template +inline bool operator!=(const spp_::spp_allocator &a, const spp_::spp_allocator &b) +{ + return !(a == b); +} + +namespace std +{ + template + inline void swap(spp_::spp_allocator &a, spp_::spp_allocator &b) + { + a.swap(b); + } +} + +#if !defined(SPP_EXCLUDE_IMPLEMENTATION) + +#ifndef WIN32 + #ifdef _WIN32 + #define WIN32 1 + #endif + #ifdef _WIN32_WCE + #define SPP_LACKS_FCNTL_H + #define WIN32 1 + #endif +#endif + +#ifdef WIN32 + #define WIN32_LEAN_AND_MEAN + #include + #include + #define SPP_HAVE_MMAP 1 + #define SPP_LACKS_UNISTD_H + #define SPP_LACKS_SYS_PARAM_H + #define SPP_LACKS_SYS_MMAN_H + #define SPP_LACKS_STRING_H + #define SPP_LACKS_STRINGS_H + #define SPP_LACKS_SYS_TYPES_H + #define SPP_LACKS_ERRNO_H + #define SPP_LACKS_SCHED_H + #ifndef SPP_MALLOC_FAILURE_ACTION + #define SPP_MALLOC_FAILURE_ACTION + #endif + #ifndef SPP_MMAP_CLEARS + #ifdef _WIN32_WCE /* WINCE reportedly does not clear */ + #define SPP_MMAP_CLEARS 0 + #else + #define SPP_MMAP_CLEARS 1 + #endif + #endif +#endif + +#if defined(DARWIN) || defined(_DARWIN) + #define SPP_HAVE_MMAP 1 + /* OSX allocators provide 16 byte alignment */ + #ifndef SPP_MALLOC_ALIGNMENT + #define SPP_MALLOC_ALIGNMENT ((size_t)16U) + #endif +#endif + +#ifndef SPP_LACKS_SYS_TYPES_H + #include /* For size_t */ +#endif + +#ifndef SPP_MALLOC_ALIGNMENT + #define SPP_MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) +#endif + +/* ------------------- size_t and alignment properties -------------------- */ +static const size_t spp_max_size_t = ~(size_t)0; +static const size_t spp_size_t_bitsize = sizeof(size_t) << 3; +static const size_t spp_half_max_size_t = spp_max_size_t / 2U; +static const size_t spp_chunk_align_mask = SPP_MALLOC_ALIGNMENT - 1; + +#if defined(SPP_DEBUG) || !defined(NDEBUG) +static bool spp_is_aligned(void *p) { return ((size_t)p & spp_chunk_align_mask) == 0; } +#endif + +// the number of bytes to offset an address to align it +static size_t align_offset(void *p) +{ + return (((size_t)p & spp_chunk_align_mask) == 0) ? 
0 : + ((SPP_MALLOC_ALIGNMENT - ((size_t)p & spp_chunk_align_mask)) & spp_chunk_align_mask); +} + + +#ifndef SPP_FOOTERS + #define SPP_FOOTERS 0 +#endif + +#ifndef SPP_ABORT + #define SPP_ABORT abort() +#endif + +#ifndef SPP_ABORT_ON_ASSERT_FAILURE + #define SPP_ABORT_ON_ASSERT_FAILURE 1 +#endif + +#ifndef SPP_PROCEED_ON_ERROR + #define SPP_PROCEED_ON_ERROR 0 +#endif + +#ifndef SPP_INSECURE + #define SPP_INSECURE 0 +#endif + +#ifndef SPP_MALLOC_INSPECT_ALL + #define SPP_MALLOC_INSPECT_ALL 0 +#endif + +#ifndef SPP_HAVE_MMAP + #define SPP_HAVE_MMAP 1 +#endif + +#ifndef SPP_MMAP_CLEARS + #define SPP_MMAP_CLEARS 1 +#endif + +#ifndef SPP_HAVE_MREMAP + #ifdef linux + #define SPP_HAVE_MREMAP 1 + #ifndef _GNU_SOURCE + #define _GNU_SOURCE /* Turns on mremap() definition */ + #endif + #else + #define SPP_HAVE_MREMAP 0 + #endif +#endif + +#ifndef SPP_MALLOC_FAILURE_ACTION + #define SPP_MALLOC_FAILURE_ACTION errno = ENOMEM +#endif + + +#ifndef SPP_DEFAULT_GRANULARITY + #if defined(WIN32) + #define SPP_DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ + #else + #define SPP_DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) + #endif +#endif + +#ifndef SPP_DEFAULT_TRIM_THRESHOLD + #define SPP_DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) +#endif + +#ifndef SPP_DEFAULT_MMAP_THRESHOLD + #if SPP_HAVE_MMAP + #define SPP_DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) + #else + #define SPP_DEFAULT_MMAP_THRESHOLD spp_max_size_t + #endif +#endif + +#ifndef SPP_MAX_RELEASE_CHECK_RATE + #if SPP_HAVE_MMAP + #define SPP_MAX_RELEASE_CHECK_RATE 4095 + #else + #define SPP_MAX_RELEASE_CHECK_RATE spp_max_size_t + #endif +#endif + +#ifndef SPP_USE_BUILTIN_FFS + #define SPP_USE_BUILTIN_FFS 0 +#endif + +#ifndef SPP_USE_DEV_RANDOM + #define SPP_USE_DEV_RANDOM 0 +#endif + +#ifndef SPP_NO_SEGMENT_TRAVERSAL + #define SPP_NO_SEGMENT_TRAVERSAL 0 +#endif + + + +/*------------------------------ internal #includes ---------------------- */ + +#ifdef _MSC_VER + #pragma warning( disable : 4146 ) /* no "unsigned" warnings */ +#endif +#ifndef SPP_LACKS_ERRNO_H + #include /* for SPP_MALLOC_FAILURE_ACTION */ +#endif + +#ifdef SPP_DEBUG + #if SPP_ABORT_ON_ASSERT_FAILURE + #undef assert + #define assert(x) if(!(x)) SPP_ABORT + #else + #include + #endif +#else + #ifndef assert + #define assert(x) + #endif + #define SPP_DEBUG 0 +#endif + +#if !defined(WIN32) && !defined(SPP_LACKS_TIME_H) + #include /* for magic initialization */ +#endif + +#ifndef SPP_LACKS_STDLIB_H + #include /* for abort() */ +#endif + +#ifndef SPP_LACKS_STRING_H + #include /* for memset etc */ +#endif + +#if SPP_USE_BUILTIN_FFS + #ifndef SPP_LACKS_STRINGS_H + #include /* for ffs */ + #endif +#endif + +#if SPP_HAVE_MMAP + #ifndef SPP_LACKS_SYS_MMAN_H + /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ + #if (defined(linux) && !defined(__USE_GNU)) + #define __USE_GNU 1 + #include /* for mmap */ + #undef __USE_GNU + #else + #include /* for mmap */ + #endif + #endif + #ifndef SPP_LACKS_FCNTL_H + #include + #endif +#endif + +#ifndef SPP_LACKS_UNISTD_H + #include /* for sbrk, sysconf */ +#else + #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) + extern void* sbrk(ptrdiff_t); + #endif +#endif + +#include + +namespace spp +{ + +/* Declarations for bit scanning on win32 */ +#if defined(_MSC_VER) && _MSC_VER>=1300 + #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ + extern "C" { + unsigned char _BitScanForward(unsigned long *index, unsigned long mask); + unsigned char 
_BitScanReverse(unsigned long *index, unsigned long mask); + } + + #define BitScanForward _BitScanForward + #define BitScanReverse _BitScanReverse + #pragma intrinsic(_BitScanForward) + #pragma intrinsic(_BitScanReverse) + #endif /* BitScanForward */ +#endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ + +#ifndef WIN32 + #ifndef malloc_getpagesize + #ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ + #ifndef _SC_PAGE_SIZE + #define _SC_PAGE_SIZE _SC_PAGESIZE + #endif + #endif + #ifdef _SC_PAGE_SIZE + #define malloc_getpagesize sysconf(_SC_PAGE_SIZE) + #else + #if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) + extern size_t getpagesize(); + #define malloc_getpagesize getpagesize() + #else + #ifdef WIN32 /* use supplied emulation of getpagesize */ + #define malloc_getpagesize getpagesize() + #else + #ifndef SPP_LACKS_SYS_PARAM_H + #include + #endif + #ifdef EXEC_PAGESIZE + #define malloc_getpagesize EXEC_PAGESIZE + #else + #ifdef NBPG + #ifndef CLSIZE + #define malloc_getpagesize NBPG + #else + #define malloc_getpagesize (NBPG * CLSIZE) + #endif + #else + #ifdef NBPC + #define malloc_getpagesize NBPC + #else + #ifdef PAGESIZE + #define malloc_getpagesize PAGESIZE + #else /* just guess */ + #define malloc_getpagesize ((size_t)4096U) + #endif + #endif + #endif + #endif + #endif + #endif + #endif + #endif +#endif + +/* -------------------------- MMAP preliminaries ------------------------- */ + +/* + If SPP_HAVE_MORECORE or SPP_HAVE_MMAP are false, we just define calls and + checks to fail so compiler optimizer can delete code rather than + using so many "#if"s. +*/ + + +/* MMAP must return mfail on failure */ +static void *mfail = (void*)spp_max_size_t; +static char *cmfail = (char*)mfail; + +#if SPP_HAVE_MMAP + +#ifndef WIN32 + #define SPP_MUNMAP_DEFAULT(a, s) munmap((a), (s)) + #define SPP_MMAP_PROT (PROT_READ | PROT_WRITE) + #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) + #define MAP_ANONYMOUS MAP_ANON + #endif + + #ifdef MAP_ANONYMOUS + #define SPP_MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) + #define SPP_MMAP_DEFAULT(s) mmap(0, (s), SPP_MMAP_PROT, SPP_MMAP_FLAGS, -1, 0) + #else /* MAP_ANONYMOUS */ + /* + Nearly all versions of mmap support MAP_ANONYMOUS, so the following + is unlikely to be needed, but is supplied just in case. + */ + #define SPP_MMAP_FLAGS (MAP_PRIVATE) + static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ + void SPP_MMAP_DEFAULT(size_t s) + { + if (dev_zero_fd < 0) + dev_zero_fd = open("/dev/zero", O_RDWR); + mmap(0, s, SPP_MMAP_PROT, SPP_MMAP_FLAGS, dev_zero_fd, 0); + } + #endif /* MAP_ANONYMOUS */ + + #define SPP_DIRECT_MMAP_DEFAULT(s) SPP_MMAP_DEFAULT(s) + +#else /* WIN32 */ + + /* Win32 MMAP via VirtualAlloc */ + static SPP_FORCEINLINE void* win32mmap(size_t size) + { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); + return (ptr != 0) ? ptr : mfail; + } + + /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ + static SPP_FORCEINLINE void* win32direct_mmap(size_t size) + { + void* ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, + PAGE_READWRITE); + return (ptr != 0) ? 
ptr : mfail; + } + + /* This function supports releasing coalesed segments */ + static SPP_FORCEINLINE int win32munmap(void* ptr, size_t size) + { + MEMORY_BASIC_INFORMATION minfo; + char* cptr = (char*)ptr; + while (size) + { + if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) + return -1; + if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || + minfo.State != MEM_COMMIT || minfo.RegionSize > size) + return -1; + if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) + return -1; + cptr += minfo.RegionSize; + size -= minfo.RegionSize; + } + return 0; + } + + #define SPP_MMAP_DEFAULT(s) win32mmap(s) + #define SPP_MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) + #define SPP_DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) +#endif /* WIN32 */ +#endif /* SPP_HAVE_MMAP */ + +#if SPP_HAVE_MREMAP + #ifndef WIN32 + #define SPP_MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) + #endif +#endif + +/** + * Define SPP_CALL_MMAP/SPP_CALL_MUNMAP/SPP_CALL_DIRECT_MMAP + */ +#if SPP_HAVE_MMAP + #define USE_MMAP_BIT 1 + + #ifdef SPP_MMAP + #define SPP_CALL_MMAP(s) SPP_MMAP(s) + #else + #define SPP_CALL_MMAP(s) SPP_MMAP_DEFAULT(s) + #endif + + #ifdef SPP_MUNMAP + #define SPP_CALL_MUNMAP(a, s) SPP_MUNMAP((a), (s)) + #else + #define SPP_CALL_MUNMAP(a, s) SPP_MUNMAP_DEFAULT((a), (s)) + #endif + + #ifdef SPP_DIRECT_MMAP + #define SPP_CALL_DIRECT_MMAP(s) SPP_DIRECT_MMAP(s) + #else + #define SPP_CALL_DIRECT_MMAP(s) SPP_DIRECT_MMAP_DEFAULT(s) + #endif + +#else /* SPP_HAVE_MMAP */ + #define USE_MMAP_BIT 0 + + #define SPP_MMAP(s) mfail + #define SPP_MUNMAP(a, s) (-1) + #define SPP_DIRECT_MMAP(s) mfail + #define SPP_CALL_DIRECT_MMAP(s) SPP_DIRECT_MMAP(s) + #define SPP_CALL_MMAP(s) SPP_MMAP(s) + #define SPP_CALL_MUNMAP(a, s) SPP_MUNMAP((a), (s)) +#endif + +/** + * Define SPP_CALL_MREMAP + */ +#if SPP_HAVE_MMAP && SPP_HAVE_MREMAP + #ifdef MREMAP + #define SPP_CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) + #else + #define SPP_CALL_MREMAP(addr, osz, nsz, mv) SPP_MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) + #endif +#else + #define SPP_CALL_MREMAP(addr, osz, nsz, mv) mfail +#endif + +/* mstate bit set if continguous morecore disabled or failed */ +static const unsigned USE_NONCONTIGUOUS_BIT = 4U; + +/* segment bit set in create_mspace_with_base */ +static const unsigned EXTERN_BIT = 8U; + + +/* --------------------------- flags ------------------------ */ + +static const unsigned PINUSE_BIT = 1; +static const unsigned CINUSE_BIT = 2; +static const unsigned FLAG4_BIT = 4; +static const unsigned INUSE_BITS = (PINUSE_BIT | CINUSE_BIT); +static const unsigned FLAG_BITS = (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT); + +/* ------------------- Chunks sizes and alignments ----------------------- */ + +#if SPP_FOOTERS + static const unsigned CHUNK_OVERHEAD = 2 * sizeof(size_t); +#else + static const unsigned CHUNK_OVERHEAD = sizeof(size_t); +#endif + +/* MMapped chunks need a second word of overhead ... */ +static const unsigned SPP_MMAP_CHUNK_OVERHEAD = 2 * sizeof(size_t); + +/* ... 
and additional padding for fake next-chunk at foot */ +static const unsigned SPP_MMAP_FOOT_PAD = 4 * sizeof(size_t); + +// =============================================================================== +struct malloc_chunk_header +{ + void set_size_and_pinuse_of_free_chunk(size_t s) + { + _head = s | PINUSE_BIT; + set_foot(s); + } + + void set_foot(size_t s) + { + ((malloc_chunk_header *)((char*)this + s))->_prev_foot = s; + } + + // extraction of fields from head words + bool cinuse() const { return !!(_head & CINUSE_BIT); } + bool pinuse() const { return !!(_head & PINUSE_BIT); } + bool flag4inuse() const { return !!(_head & FLAG4_BIT); } + bool is_inuse() const { return (_head & INUSE_BITS) != PINUSE_BIT; } + bool is_mmapped() const { return (_head & INUSE_BITS) == 0; } + + size_t chunksize() const { return _head & ~(FLAG_BITS); } + + void clear_pinuse() { _head &= ~PINUSE_BIT; } + void set_flag4() { _head |= FLAG4_BIT; } + void clear_flag4() { _head &= ~FLAG4_BIT; } + + // Treat space at ptr +/- offset as a chunk + malloc_chunk_header * chunk_plus_offset(size_t s) + { + return (malloc_chunk_header *)((char*)this + s); + } + malloc_chunk_header * chunk_minus_offset(size_t s) + { + return (malloc_chunk_header *)((char*)this - s); + } + + // Ptr to next or previous physical malloc_chunk. + malloc_chunk_header * next_chunk() + { + return (malloc_chunk_header *)((char*)this + (_head & ~FLAG_BITS)); + } + malloc_chunk_header * prev_chunk() + { + return (malloc_chunk_header *)((char*)this - (_prev_foot)); + } + + // extract next chunk's pinuse bit + size_t next_pinuse() { return next_chunk()->_head & PINUSE_BIT; } + + size_t _prev_foot; // Size of previous chunk (if free). + size_t _head; // Size and inuse bits. +}; + +// =============================================================================== +struct malloc_chunk : public malloc_chunk_header +{ + // Set size, pinuse bit, foot, and clear next pinuse + void set_free_with_pinuse(size_t s, malloc_chunk* n) + { + n->clear_pinuse(); + set_size_and_pinuse_of_free_chunk(s); + } + + // Get the internal overhead associated with chunk p + size_t overhead_for() { return is_mmapped() ? SPP_MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD; } + + // Return true if malloced space is not necessarily cleared + bool calloc_must_clear() + { +#if SPP_MMAP_CLEARS + return !is_mmapped(); +#else + return true; +#endif + } + + struct malloc_chunk* _fd; // double links -- used only if free. + struct malloc_chunk* _bk; +}; + +static const unsigned MCHUNK_SIZE = sizeof(malloc_chunk); + +/* The smallest size we can malloc is an aligned minimal chunk */ +static const unsigned MIN_CHUNK_SIZE = (MCHUNK_SIZE + spp_chunk_align_mask) & ~spp_chunk_align_mask; + +typedef malloc_chunk mchunk; +typedef malloc_chunk* mchunkptr; +typedef malloc_chunk_header *hchunkptr; +typedef malloc_chunk* sbinptr; // The type of bins of chunks +typedef unsigned int bindex_t; // Described below +typedef unsigned int binmap_t; // Described below +typedef unsigned int flag_t; // The type of various bit flag sets + +// conversion from malloc headers to user pointers, and back +static SPP_FORCEINLINE void *chunk2mem(const void *p) { return (void *)((char *)p + 2 * sizeof(size_t)); } +static SPP_FORCEINLINE mchunkptr mem2chunk(const void *mem) { return (mchunkptr)((char *)mem - 2 * sizeof(size_t)); } + +// chunk associated with aligned address A +static SPP_FORCEINLINE mchunkptr align_as_chunk(char *A) { return (mchunkptr)(A + align_offset(chunk2mem(A))); } + +// Bounds on request (not chunk) sizes. 
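+// [Editor-added worked example, assuming a typical LP64 build without SPP_FOOTERS,
+//  i.e. SPP_MALLOC_ALIGNMENT == 16, CHUNK_OVERHEAD == 8 and MCHUNK_SIZE == 32, so that
+//  MIN_CHUNK_SIZE == 32 and MIN_REQUEST == 23: pad_request(24) == (24 + 8 + 15) & ~15 == 32,
+//  while request2size(20) is bumped up to MIN_CHUNK_SIZE == 32 because 20 < MIN_REQUEST.
+//  The constants and helpers referred to here are defined just below.]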
+static const unsigned MAX_REQUEST = (-MIN_CHUNK_SIZE) << 2; +static const unsigned MIN_REQUEST = MIN_CHUNK_SIZE - CHUNK_OVERHEAD - 1; + +// pad request bytes into a usable size +static SPP_FORCEINLINE size_t pad_request(size_t req) +{ + return (req + CHUNK_OVERHEAD + spp_chunk_align_mask) & ~spp_chunk_align_mask; +} + +// pad request, checking for minimum (but not maximum) +static SPP_FORCEINLINE size_t request2size(size_t req) +{ + return req < MIN_REQUEST ? MIN_CHUNK_SIZE : pad_request(req); +} + + +/* ------------------ Operations on head and foot fields ----------------- */ + +/* + The head field of a chunk is or'ed with PINUSE_BIT when previous + adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in + use, unless mmapped, in which case both bits are cleared. + + FLAG4_BIT is not used by this malloc, but might be useful in extensions. +*/ + +// Head value for fenceposts +static const unsigned FENCEPOST_HEAD = INUSE_BITS | sizeof(size_t); + + +/* ---------------------- Overlaid data structures ----------------------- */ + +/* + When chunks are not in use, they are treated as nodes of either + lists or trees. + + "Small" chunks are stored in circular doubly-linked lists, and look + like this: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk in list | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space (may be 0 bytes long) . + . . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Larger chunks are kept in a form of bitwise digital trees (aka + tries) keyed on chunksizes. Because malloc_tree_chunks are only for + free chunks greater than 256 bytes, their size doesn't impose any + constraints on user chunk sizes. Each node looks like: + + chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Size of previous chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `head:' | Size of chunk, in bytes |P| + mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Forward pointer to next chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Back pointer to previous chunk of same size | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to left child (child[0]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to right child (child[1]) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Pointer to parent | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | bin index of this chunk | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Unused space . + . | +nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + `foot:' | Size of chunk, in bytes | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + Each tree holding treenodes is a tree of unique chunk sizes. 
Chunks + of the same size are arranged in a circularly-linked list, with only + the oldest chunk (the next to be used, in our FIFO ordering) + actually in the tree. (Tree members are distinguished by a non-null + parent pointer.) If a chunk with the same size an an existing node + is inserted, it is linked off the existing node using pointers that + work in the same way as fd/bk pointers of small chunks. + + Each tree contains a power of 2 sized range of chunk sizes (the + smallest is 0x100 <= x < 0x180), which is is divided in half at each + tree level, with the chunks in the smaller half of the range (0x100 + <= x < 0x140 for the top nose) in the left subtree and the larger + half (0x140 <= x < 0x180) in the right subtree. This is, of course, + done by inspecting individual bits. + + Using these rules, each node's left subtree contains all smaller + sizes than its right subtree. However, the node at the root of each + subtree has no particular ordering relationship to either. (The + dividing line between the subtree sizes is based on trie relation.) + If we remove the last chunk of a given size from the interior of the + tree, we need to replace it with a leaf node. The tree ordering + rules permit a node to be replaced by any leaf below it. + + The smallest chunk in a tree (a common operation in a best-fit + allocator) can be found by walking a path to the leftmost leaf in + the tree. Unlike a usual binary tree, where we follow left child + pointers until we reach a null, here we follow the right child + pointer any time the left one is null, until we reach a leaf with + both child pointers null. The smallest chunk in the tree will be + somewhere along that path. + + The worst case number of steps to add, find, or remove a node is + bounded by the number of bits differentiating chunks within + bins. Under current bin calculations, this ranges from 6 up to 21 + (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case + is of course much better. +*/ + +// =============================================================================== +struct malloc_tree_chunk : public malloc_chunk_header +{ + malloc_tree_chunk *leftmost_child() + { + return _child[0] ? _child[0] : _child[1]; + } + + + malloc_tree_chunk* _fd; + malloc_tree_chunk* _bk; + + malloc_tree_chunk* _child[2]; + malloc_tree_chunk* _parent; + bindex_t _index; +}; + +typedef malloc_tree_chunk tchunk; +typedef malloc_tree_chunk* tchunkptr; +typedef malloc_tree_chunk* tbinptr; // The type of bins of trees + +/* ----------------------------- Segments -------------------------------- */ + +/* + Each malloc space may include non-contiguous segments, held in a + list headed by an embedded malloc_segment record representing the + top-most space. Segments also include flags holding properties of + the space. Large chunks that are directly allocated by mmap are not + included in this list. They are instead independently created and + destroyed without otherwise keeping track of them. + + Segment management mainly comes into play for spaces allocated by + MMAP. Any call to MMAP might or might not return memory that is + adjacent to an existing segment. MORECORE normally contiguously + extends the current space, so this space is almost always adjacent, + which is simpler and faster to deal with. (This is why MORECORE is + used preferentially to MMAP when both are available -- see + sys_alloc.) 
When allocating using MMAP, we don't use any of the + hinting mechanisms (inconsistently) supported in various + implementations of unix mmap, or distinguish reserving from + committing memory. Instead, we just ask for space, and exploit + contiguity when we get it. It is probably possible to do + better than this on some systems, but no general scheme seems + to be significantly better. + + Management entails a simpler variant of the consolidation scheme + used for chunks to reduce fragmentation -- new adjacent memory is + normally prepended or appended to an existing segment. However, + there are limitations compared to chunk consolidation that mostly + reflect the fact that segment processing is relatively infrequent + (occurring only when getting memory from system) and that we + don't expect to have huge numbers of segments: + + * Segments are not indexed, so traversal requires linear scans. (It + would be possible to index these, but is not worth the extra + overhead and complexity for most programs on most platforms.) + * New segments are only appended to old ones when holding top-most + memory; if they cannot be prepended to others, they are held in + different segments. + + Except for the top-most segment of an mstate, each segment record + is kept at the tail of its segment. Segments are added by pushing + segment records onto the list headed by &mstate.seg for the + containing mstate. + + Segment flags control allocation/merge/deallocation policies: + * If EXTERN_BIT set, then we did not allocate this segment, + and so should not try to deallocate or merge with others. + (This currently holds only for the initial segment passed + into create_mspace_with_base.) + * If USE_MMAP_BIT set, the segment may be merged with + other surrounding mmapped segments and trimmed/de-allocated + using munmap. + * If neither bit is set, then the segment was obtained using + MORECORE so can be merged with surrounding MORECORE'd segments + and deallocated/trimmed using MORECORE with negative arguments. +*/ + +// =============================================================================== +struct malloc_segment +{ + bool is_mmapped_segment() { return !!(_sflags & USE_MMAP_BIT); } + bool is_extern_segment() { return !!(_sflags & EXTERN_BIT); } + + char* _base; // base address + size_t _size; // allocated size + malloc_segment* _next; // ptr to next segment + flag_t _sflags; // mmap and extern flag +}; + +typedef malloc_segment msegment; +typedef malloc_segment* msegmentptr; + +/* ------------- Malloc_params ------------------- */ + +/* + malloc_params holds global properties, including those that can be + dynamically set using mallopt. There is a single instance, mparams, + initialized in init_mparams. Note that the non-zeroness of "magic" + also serves as an initialization flag. 
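+
+  [Editor-added worked example: with a 4 KiB page size, page_align(5000) rounds up to
+   (5000 + 4095) & ~4095 == 8192; granularity_align() works the same way against
+   _granularity, which defaults to 64 KiB on non-Windows builds.]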
+*/ + +// =============================================================================== +struct malloc_params +{ + malloc_params() : _magic(0) {} + + void ensure_initialization() + { + if (!_magic) + _init(); + } + + SPP_IMPL int change(int param_number, int value); + + size_t page_align(size_t sz) + { + return (sz + (_page_size - 1)) & ~(_page_size - 1); + } + + size_t granularity_align(size_t sz) + { + return (sz + (_granularity - 1)) & ~(_granularity - 1); + } + + bool is_page_aligned(char *S) + { + return ((size_t)S & (_page_size - 1)) == 0; + } + + SPP_IMPL int _init(); + + size_t _magic; + size_t _page_size; + size_t _granularity; + size_t _mmap_threshold; + size_t _trim_threshold; + flag_t _default_mflags; +}; + +static malloc_params mparams; + +/* ---------------------------- malloc_state ----------------------------- */ + +/* + A malloc_state holds all of the bookkeeping for a space. + The main fields are: + + Top + The topmost chunk of the currently active segment. Its size is + cached in topsize. The actual size of topmost space is + topsize+TOP_FOOT_SIZE, which includes space reserved for adding + fenceposts and segment records if necessary when getting more + space from the system. The size at which to autotrim top is + cached from mparams in trim_check, except that it is disabled if + an autotrim fails. + + Designated victim (dv) + This is the preferred chunk for servicing small requests that + don't have exact fits. It is normally the chunk split off most + recently to service another small request. Its size is cached in + dvsize. The link fields of this chunk are not maintained since it + is not kept in a bin. + + SmallBins + An array of bin headers for free chunks. These bins hold chunks + with sizes less than MIN_LARGE_SIZE bytes. Each bin contains + chunks of all the same size, spaced 8 bytes apart. To simplify + use in double-linked lists, each bin header acts as a malloc_chunk + pointing to the real first node, if it exists (else pointing to + itself). This avoids special-casing for headers. But to avoid + waste, we allocate only the fd/bk pointers of bins, and then use + repositioning tricks to treat these as the fields of a chunk. + + TreeBins + Treebins are pointers to the roots of trees holding a range of + sizes. There are 2 equally spaced treebins for each power of two + from TREE_SHIFT to TREE_SHIFT+16. The last bin holds anything + larger. + + Bin maps + There is one bit map for small bins ("smallmap") and one for + treebins ("treemap). Each bin sets its bit when non-empty, and + clears the bit when empty. Bit operations are then used to avoid + bin-by-bin searching -- nearly all "search" is done without ever + looking at bins that won't be selected. The bit maps + conservatively use 32 bits per map word, even if on 64bit system. + For a good description of some of the bit-based techniques used + here, see Henry S. Warren Jr's book "Hacker's Delight" (and + supplement at http://hackersdelight.org/). Many of these are + intended to reduce the branchiness of paths through malloc etc, as + well as to reduce the number of memory locations read or written. + + Segments + A list of segments headed by an embedded malloc_segment record + representing the initial space. + + Address check support + The least_addr field is the least address ever obtained from + MORECORE or MMAP. Attempted frees and reallocs of any address less + than this are trapped (unless SPP_INSECURE is defined). 
+ + Magic tag + A cross-check field that should always hold same value as mparams._magic. + + Max allowed footprint + The maximum allowed bytes to allocate from system (zero means no limit) + + Flags + Bits recording whether to use MMAP, locks, or contiguous MORECORE + + Statistics + Each space keeps track of current and maximum system memory + obtained via MORECORE or MMAP. + + Trim support + Fields holding the amount of unused topmost memory that should trigger + trimming, and a counter to force periodic scanning to release unused + non-topmost segments. + + Extension support + A void* pointer and a size_t field that can be used to help implement + extensions to this malloc. +*/ + + +// ================================================================================ +class malloc_state +{ +public: + /* ----------------------- _malloc, _free, etc... --- */ + SPP_FORCEINLINE void* _malloc(size_t bytes); + SPP_FORCEINLINE void _free(mchunkptr p); + + + /* ------------------------ Relays to internal calls to malloc/free from realloc, memalign etc */ + void *internal_malloc(size_t b) { return mspace_malloc(this, b); } + void internal_free(void *mem) { mspace_free(this, mem); } + + /* ------------------------ ----------------------- */ + + SPP_IMPL void init_top(mchunkptr p, size_t psize); + SPP_IMPL void init_bins(); + SPP_IMPL void init(char* tbase, size_t tsize); + + /* ------------------------ System alloc/dealloc -------------------------- */ + SPP_IMPL void* sys_alloc(size_t nb); + SPP_IMPL size_t release_unused_segments(); + SPP_IMPL int sys_trim(size_t pad); + SPP_IMPL void dispose_chunk(mchunkptr p, size_t psize); + + /* ----------------------- Internal support for realloc, memalign, etc --- */ + SPP_IMPL mchunkptr try_realloc_chunk(mchunkptr p, size_t nb, int can_move); + SPP_IMPL void* internal_memalign(size_t alignment, size_t bytes); + SPP_IMPL void** ialloc(size_t n_elements, size_t* sizes, int opts, void* chunks[]); + SPP_IMPL size_t internal_bulk_free(void* array[], size_t nelem); + SPP_IMPL void internal_inspect_all(void(*handler)(void *start, void *end, + size_t used_bytes, void* callback_arg), + void* arg); + + /* -------------------------- system alloc setup (Operations on mflags) ----- */ + bool use_lock() const { return false; } + void enable_lock() {} + void set_lock(int) {} + void disable_lock() {} + + bool use_mmap() const { return !!(_mflags & USE_MMAP_BIT); } + void enable_mmap() { _mflags |= USE_MMAP_BIT; } + +#if SPP_HAVE_MMAP + void disable_mmap() { _mflags &= ~USE_MMAP_BIT; } +#else + void disable_mmap() {} +#endif + + /* ----------------------- Runtime Check Support ------------------------- */ + + /* + For security, the main invariant is that malloc/free/etc never + writes to a static address other than malloc_state, unless static + malloc_state itself has been corrupted, which cannot occur via + malloc (because of these checks). In essence this means that we + believe all pointers, sizes, maps etc held in malloc_state, but + check all of those linked or offsetted from other embedded data + structures. These checks are interspersed with main code in a way + that tends to minimize their run-time cost. + + When SPP_FOOTERS is defined, in addition to range checking, we also + verify footer fields of inuse chunks, which can be used guarantee + that the mstate controlling malloc/free is intact. 
This is a + streamlined version of the approach described by William Robertson + et al in "Run-time Detection of Heap-based Overflows" LISA'03 + http://www.usenix.org/events/lisa03/tech/robertson.html The footer + of an inuse chunk holds the xor of its mstate and a random seed, + that is checked upon calls to free() and realloc(). This is + (probabalistically) unguessable from outside the program, but can be + computed by any code successfully malloc'ing any chunk, so does not + itself provide protection against code that has already broken + security through some other means. Unlike Robertson et al, we + always dynamically check addresses of all offset chunks (previous, + next, etc). This turns out to be cheaper than relying on hashes. + */ + + +#if !SPP_INSECURE + // Check if address a is at least as high as any from MORECORE or MMAP + bool ok_address(void *a) const { return (char *)a >= _least_addr; } + + // Check if address of next chunk n is higher than base chunk p + static bool ok_next(void *p, void *n) { return p < n; } + + // Check if p has inuse status + static bool ok_inuse(mchunkptr p) { return p->is_inuse(); } + + // Check if p has its pinuse bit on + static bool ok_pinuse(mchunkptr p) { return p->pinuse(); } + + // Check if (alleged) mstate m has expected magic field + bool ok_magic() const { return _magic == mparams._magic; } + + // In gcc, use __builtin_expect to minimize impact of checks + #if defined(__GNUC__) && __GNUC__ >= 3 + static bool rtcheck(bool e) { return __builtin_expect(e, 1); } + #else + static bool rtcheck(bool e) { return e; } + #endif +#else + static bool ok_address(void *) { return true; } + static bool ok_next(void *, void *) { return true; } + static bool ok_inuse(mchunkptr) { return true; } + static bool ok_pinuse(mchunkptr) { return true; } + static bool ok_magic() { return true; } + static bool rtcheck(bool) { return true; } +#endif + + bool is_initialized() const { return _top != 0; } + + bool use_noncontiguous() const { return !!(_mflags & USE_NONCONTIGUOUS_BIT); } + void disable_contiguous() { _mflags |= USE_NONCONTIGUOUS_BIT; } + + // Return segment holding given address + msegmentptr segment_holding(char* addr) const + { + msegmentptr sp = (msegmentptr)&_seg; + for (;;) + { + if (addr >= sp->_base && addr < sp->_base + sp->_size) + return sp; + if ((sp = sp->_next) == 0) + return 0; + } + } + + // Return true if segment contains a segment link + int has_segment_link(msegmentptr ss) const + { + msegmentptr sp = (msegmentptr)&_seg; + for (;;) + { + if ((char*)sp >= ss->_base && (char*)sp < ss->_base + ss->_size) + return 1; + if ((sp = sp->_next) == 0) + return 0; + } + } + + bool should_trim(size_t s) const { return s > _trim_check; } + + /* -------------------------- Debugging setup ---------------------------- */ + +#if ! 
SPP_DEBUG + void check_free_chunk(mchunkptr) {} + void check_inuse_chunk(mchunkptr) {} + void check_malloced_chunk(void*, size_t) {} + void check_mmapped_chunk(mchunkptr) {} + void check_malloc_state() {} + void check_top_chunk(mchunkptr) {} +#else /* SPP_DEBUG */ + void check_free_chunk(mchunkptr p) { do_check_free_chunk(p); } + void check_inuse_chunk(mchunkptr p) { do_check_inuse_chunk(p); } + void check_malloced_chunk(void* p, size_t s) { do_check_malloced_chunk(p, s); } + void check_mmapped_chunk(mchunkptr p) { do_check_mmapped_chunk(p); } + void check_malloc_state() { do_check_malloc_state(); } + void check_top_chunk(mchunkptr p) { do_check_top_chunk(p); } + + void do_check_any_chunk(mchunkptr p) const; + void do_check_top_chunk(mchunkptr p) const; + void do_check_mmapped_chunk(mchunkptr p) const; + void do_check_inuse_chunk(mchunkptr p) const; + void do_check_free_chunk(mchunkptr p) const; + void do_check_malloced_chunk(void* mem, size_t s) const; + void do_check_tree(tchunkptr t); + void do_check_treebin(bindex_t i); + void do_check_smallbin(bindex_t i); + void do_check_malloc_state(); + int bin_find(mchunkptr x); + size_t traverse_and_check(); +#endif + +private: + + /* ---------------------------- Indexing Bins ---------------------------- */ + + static bool is_small(size_t s) { return (s >> SMALLBIN_SHIFT) < NSMALLBINS; } + static bindex_t small_index(size_t s) { return (bindex_t)(s >> SMALLBIN_SHIFT); } + static size_t small_index2size(size_t i) { return i << SMALLBIN_SHIFT; } + static bindex_t MIN_SMALL_INDEX() { return small_index(MIN_CHUNK_SIZE); } + + // assign tree index for size S to variable I. Use x86 asm if possible +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S) + { + unsigned int X = S >> TREEBIN_SHIFT; + if (X == 0) + return 0; + else if (X > 0xFFFF) + return NTREEBINS - 1; + + unsigned int K = (unsigned) sizeof(X) * __CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); + return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); + } + +#elif defined (__INTEL_COMPILER) + SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S) + { + size_t X = S >> TREEBIN_SHIFT; + if (X == 0) + return 0; + else if (X > 0xFFFF) + return NTREEBINS - 1; + + unsigned int K = _bit_scan_reverse(X); + return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); + } + +#elif defined(_MSC_VER) && _MSC_VER>=1300 + SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S) + { + size_t X = S >> TREEBIN_SHIFT; + if (X == 0) + return 0; + else if (X > 0xFFFF) + return NTREEBINS - 1; + + unsigned int K; + _BitScanReverse((DWORD *) &K, (DWORD) X); + return (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); + } + +#else // GNUC + SPP_FORCEINLINE static bindex_t compute_tree_index(size_t S) + { + size_t X = S >> TREEBIN_SHIFT; + if (X == 0) + return 0; + else if (X > 0xFFFF) + return NTREEBINS - 1; + + unsigned int Y = (unsigned int)X; + unsigned int N = ((Y - 0x100) >> 16) & 8; + unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4; + N += K; + N += K = (((Y <<= K) - 0x4000) >> 16) & 2; + K = 14 - N + ((Y <<= K) >> 15); + return (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); + } +#endif + + // Shift placing maximum resolved bit in a treebin at i as sign bit + static bindex_t leftshift_for_tree_index(bindex_t i) + { + return (i == NTREEBINS - 1) ? 
0 : + ((spp_size_t_bitsize - 1) - ((i >> 1) + TREEBIN_SHIFT - 2)); + } + + // The size of the smallest chunk held in bin with index i + static bindex_t minsize_for_tree_index(bindex_t i) + { + return ((size_t)1 << ((i >> 1) + TREEBIN_SHIFT)) | + (((size_t)(i & 1)) << ((i >> 1) + TREEBIN_SHIFT - 1)); + } + + + // ----------- isolate the least set bit of a bitmap + static binmap_t least_bit(binmap_t x) { return x & -x; } + + // ----------- mask with all bits to left of least bit of x on + static binmap_t left_bits(binmap_t x) { return (x << 1) | -(x << 1); } + + // index corresponding to given bit. Use x86 asm if possible +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + static bindex_t compute_bit2idx(binmap_t X) + { + unsigned int J; + J = __builtin_ctz(X); + return (bindex_t)J; + } + +#elif defined (__INTEL_COMPILER) + static bindex_t compute_bit2idx(binmap_t X) + { + unsigned int J; + J = _bit_scan_forward(X); + return (bindex_t)J; + } + +#elif defined(_MSC_VER) && _MSC_VER>=1300 + static bindex_t compute_bit2idx(binmap_t X) + { + unsigned int J; + _BitScanForward((DWORD *) &J, X); + return (bindex_t)J; + } + +#elif SPP_USE_BUILTIN_FFS + static bindex_t compute_bit2idx(binmap_t X) { return ffs(X) - 1; } + +#else + static bindex_t compute_bit2idx(binmap_t X) + { + unsigned int Y = X - 1; + unsigned int K = Y >> (16 - 4) & 16; + unsigned int N = K; Y >>= K; + N += K = Y >> (8 - 3) & 8; Y >>= K; + N += K = Y >> (4 - 2) & 4; Y >>= K; + N += K = Y >> (2 - 1) & 2; Y >>= K; + N += K = Y >> (1 - 0) & 1; Y >>= K; + return (bindex_t)(N + Y); + } +#endif + + /* ------------------------ Set up inuse chunks with or without footers ---*/ +#if !SPP_FOOTERS + void mark_inuse_foot(malloc_chunk_header *, size_t) {} +#else + //Set foot of inuse chunk to be xor of mstate and seed + void mark_inuse_foot(malloc_chunk_header *p, size_t s) + { + (((mchunkptr)((char*)p + s))->prev_foot = (size_t)this ^ mparams._magic); + } +#endif + + void set_inuse(malloc_chunk_header *p, size_t s) + { + p->_head = (p->_head & PINUSE_BIT) | s | CINUSE_BIT; + ((mchunkptr)(((char*)p) + s))->_head |= PINUSE_BIT; + mark_inuse_foot(p, s); + } + + void set_inuse_and_pinuse(malloc_chunk_header *p, size_t s) + { + p->_head = s | PINUSE_BIT | CINUSE_BIT; + ((mchunkptr)(((char*)p) + s))->_head |= PINUSE_BIT; + mark_inuse_foot(p, s); + } + + void set_size_and_pinuse_of_inuse_chunk(malloc_chunk_header *p, size_t s) + { + p->_head = s | PINUSE_BIT | CINUSE_BIT; + mark_inuse_foot(p, s); + } + + /* ------------------------ Addressing by index. 
See about smallbin repositioning --- */ + sbinptr smallbin_at(bindex_t i) const { return (sbinptr)((char*)&_smallbins[i << 1]); } + tbinptr* treebin_at(bindex_t i) { return &_treebins[i]; } + + /* ----------------------- bit corresponding to given index ---------*/ + static binmap_t idx2bit(bindex_t i) { return ((binmap_t)1 << i); } + + // --------------- Mark/Clear bits with given index + void mark_smallmap(bindex_t i) { _smallmap |= idx2bit(i); } + void clear_smallmap(bindex_t i) { _smallmap &= ~idx2bit(i); } + binmap_t smallmap_is_marked(bindex_t i) const { return _smallmap & idx2bit(i); } + + void mark_treemap(bindex_t i) { _treemap |= idx2bit(i); } + void clear_treemap(bindex_t i) { _treemap &= ~idx2bit(i); } + binmap_t treemap_is_marked(bindex_t i) const { return _treemap & idx2bit(i); } + + /* ------------------------ ----------------------- */ + SPP_FORCEINLINE void insert_small_chunk(mchunkptr P, size_t S); + SPP_FORCEINLINE void unlink_small_chunk(mchunkptr P, size_t S); + SPP_FORCEINLINE void unlink_first_small_chunk(mchunkptr B, mchunkptr P, bindex_t I); + SPP_FORCEINLINE void replace_dv(mchunkptr P, size_t S); + + /* ------------------------- Operations on trees ------------------------- */ + SPP_FORCEINLINE void insert_large_chunk(tchunkptr X, size_t S); + SPP_FORCEINLINE void unlink_large_chunk(tchunkptr X); + + /* ------------------------ Relays to large vs small bin operations */ + SPP_FORCEINLINE void insert_chunk(mchunkptr P, size_t S); + SPP_FORCEINLINE void unlink_chunk(mchunkptr P, size_t S); + + /* ----------------------- Direct-mmapping chunks ----------------------- */ + SPP_IMPL void* mmap_alloc(size_t nb); + SPP_IMPL mchunkptr mmap_resize(mchunkptr oldp, size_t nb, int flags); + + SPP_IMPL void reset_on_error(); + SPP_IMPL void* prepend_alloc(char* newbase, char* oldbase, size_t nb); + SPP_IMPL void add_segment(char* tbase, size_t tsize, flag_t mmapped); + + /* ------------------------ malloc --------------------------- */ + SPP_IMPL void* tmalloc_large(size_t nb); + SPP_IMPL void* tmalloc_small(size_t nb); + + /* ------------------------Bin types, widths and sizes -------- */ + static const size_t NSMALLBINS = 32; + static const size_t NTREEBINS = 32; + static const size_t SMALLBIN_SHIFT = 3; + static const size_t SMALLBIN_WIDTH = 1 << SMALLBIN_SHIFT; + static const size_t TREEBIN_SHIFT = 8; + static const size_t MIN_LARGE_SIZE = 1 << TREEBIN_SHIFT; + static const size_t MAX_SMALL_SIZE = (MIN_LARGE_SIZE - 1); + static const size_t MAX_SMALL_REQUEST = (MAX_SMALL_SIZE - spp_chunk_align_mask - CHUNK_OVERHEAD); + + /* ------------------------ data members --------------------------- */ + binmap_t _smallmap; + binmap_t _treemap; + size_t _dvsize; + size_t _topsize; + char* _least_addr; + mchunkptr _dv; + mchunkptr _top; + size_t _trim_check; + size_t _release_checks; + size_t _magic; + mchunkptr _smallbins[(NSMALLBINS + 1) * 2]; + tbinptr _treebins[NTREEBINS]; +public: + size_t _footprint; + size_t _max_footprint; + size_t _footprint_limit; // zero means no limit + flag_t _mflags; + + msegment _seg; + +private: + void* _extp; // Unused but available for extensions + size_t _exts; +}; + +typedef malloc_state* mstate; + +/* ------------- end malloc_state ------------------- */ + +#if SPP_FOOTERS +static malloc_state* get_mstate_for(malloc_chunk_header *p) +{ + return (malloc_state*)(((mchunkptr)((char*)(p) + + (p->chunksize())))->prev_foot ^ mparams._magic); +} +#endif + +/* -------------------------- system alloc setup ------------------------- */ + + + +// For mmap, 
use granularity alignment on windows, else page-align +#ifdef WIN32 + #define mmap_align(S) mparams.granularity_align(S) +#else + #define mmap_align(S) mparams.page_align(S) +#endif + +// True if segment S holds address A +static bool segment_holds(msegmentptr S, mchunkptr A) +{ + return (char*)A >= S->_base && (char*)A < S->_base + S->_size; +} + +/* + top_foot_size is padding at the end of a segment, including space + that may be needed to place segment records and fenceposts when new + noncontiguous segments are added. +*/ +static SPP_FORCEINLINE size_t top_foot_size() +{ + return align_offset(chunk2mem((void *)0)) + + pad_request(sizeof(struct malloc_segment)) + + MIN_CHUNK_SIZE; +} + + +// For sys_alloc, enough padding to ensure can malloc request on success +static SPP_FORCEINLINE size_t sys_alloc_padding() +{ + return top_foot_size() + SPP_MALLOC_ALIGNMENT; +} + + +#define SPP_USAGE_ERROR_ACTION(m,p) SPP_ABORT + +/* ---------------------------- setting mparams -------------------------- */ + +// Initialize mparams +int malloc_params::_init() +{ +#ifdef NEED_GLOBAL_LOCK_INIT + if (malloc_global_mutex_status <= 0) + init_malloc_global_mutex(); +#endif + + if (_magic == 0) + { + size_t magic; + size_t psize; + size_t gsize; + +#ifndef WIN32 + psize = malloc_getpagesize; + gsize = ((SPP_DEFAULT_GRANULARITY != 0) ? SPP_DEFAULT_GRANULARITY : psize); +#else + { + SYSTEM_INFO system_info; + GetSystemInfo(&system_info); + psize = system_info.dwPageSize; + gsize = ((SPP_DEFAULT_GRANULARITY != 0) ? + SPP_DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); + } +#endif + + /* Sanity-check configuration: + size_t must be unsigned and as wide as pointer type. + ints must be at least 4 bytes. + alignment must be at least 8. + Alignment, min chunk size, and page size must all be powers of 2. + */ + if ((sizeof(size_t) != sizeof(char*)) || + (spp_max_size_t < MIN_CHUNK_SIZE) || + (sizeof(int) < 4) || + (SPP_MALLOC_ALIGNMENT < (size_t)8U) || + ((SPP_MALLOC_ALIGNMENT & (SPP_MALLOC_ALIGNMENT - 1)) != 0) || + ((MCHUNK_SIZE & (MCHUNK_SIZE - 1)) != 0) || + ((gsize & (gsize - 1)) != 0) || + ((psize & (psize - 1)) != 0)) + SPP_ABORT; + _granularity = gsize; + _page_size = psize; + _mmap_threshold = SPP_DEFAULT_MMAP_THRESHOLD; + _trim_threshold = SPP_DEFAULT_TRIM_THRESHOLD; + _default_mflags = USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT; + + { +#if SPP_USE_DEV_RANDOM + int fd; + unsigned char buf[sizeof(size_t)]; + // Try to use /dev/urandom, else fall back on using time + if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 && + read(fd, buf, sizeof(buf)) == sizeof(buf)) + { + magic = *((size_t *) buf); + close(fd); + } + else +#endif + { +#ifdef WIN32 + magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); +#elif defined(SPP_LACKS_TIME_H) + magic = (size_t)&magic ^ (size_t)0x55555555U; +#else + magic = (size_t)(time(0) ^ (size_t)0x55555555U); +#endif + } + magic |= (size_t)8U; // ensure nonzero + magic &= ~(size_t)7U; // improve chances of fault for bad values + // Until memory modes commonly available, use volatile-write + (*(volatile size_t *)(&(_magic))) = magic; + } + } + + return 1; +} + +/* + mallopt tuning options. SVID/XPG defines four standard parameter + numbers for mallopt, normally defined in malloc.h. None of these + are used in this malloc, so setting them has no effect. But this + malloc does support the following options. 
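+
+  As a hedged illustration only (this call sequence is not part of the original
+  code), the options below map onto malloc_params::change(), so tuning might
+  look like:
+
+      mparams.change(m_trim_threshold, 256 * 1024);   // bytes kept before trimming top
+      mparams.change(m_granularity,    128 * 1024);   // must be >= page size and a power of 2
+      mparams.change(m_mmap_threshold, 512 * 1024);   // requests at least this large may be mmapped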
+*/ +static const int m_trim_threshold = -1; +static const int m_granularity = -2; +static const int m_mmap_threshold = -3; + +// support for mallopt +int malloc_params::change(int param_number, int value) +{ + size_t val; + ensure_initialization(); + val = (value == -1) ? spp_max_size_t : (size_t)value; + + switch (param_number) + { + case m_trim_threshold: + _trim_threshold = val; + return 1; + + case m_granularity: + if (val >= _page_size && ((val & (val - 1)) == 0)) + { + _granularity = val; + return 1; + } + else + return 0; + + case m_mmap_threshold: + _mmap_threshold = val; + return 1; + + default: + return 0; + } +} + +#if SPP_DEBUG +/* ------------------------- Debugging Support --------------------------- */ + +// Check properties of any chunk, whether free, inuse, mmapped etc +void malloc_state::do_check_any_chunk(mchunkptr p) const +{ + assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD)); + assert(ok_address(p)); +} + +// Check properties of top chunk +void malloc_state::do_check_top_chunk(mchunkptr p) const +{ + msegmentptr sp = segment_holding((char*)p); + size_t sz = p->_head & ~INUSE_BITS; // third-lowest bit can be set! + assert(sp != 0); + assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD)); + assert(ok_address(p)); + assert(sz == _topsize); + assert(sz > 0); + assert(sz == ((sp->_base + sp->_size) - (char*)p) - top_foot_size()); + assert(p->pinuse()); + assert(!p->chunk_plus_offset(sz)->pinuse()); +} + +// Check properties of (inuse) mmapped chunks +void malloc_state::do_check_mmapped_chunk(mchunkptr p) const +{ + size_t sz = p->chunksize(); + size_t len = (sz + (p->_prev_foot) + SPP_MMAP_FOOT_PAD); + assert(p->is_mmapped()); + assert(use_mmap()); + assert((spp_is_aligned(chunk2mem(p))) || (p->_head == FENCEPOST_HEAD)); + assert(ok_address(p)); + assert(!is_small(sz)); + assert((len & (mparams._page_size - 1)) == 0); + assert(p->chunk_plus_offset(sz)->_head == FENCEPOST_HEAD); + assert(p->chunk_plus_offset(sz + sizeof(size_t))->_head == 0); +} + +// Check properties of inuse chunks +void malloc_state::do_check_inuse_chunk(mchunkptr p) const +{ + do_check_any_chunk(p); + assert(p->is_inuse()); + assert(p->next_pinuse()); + // If not pinuse and not mmapped, previous chunk has OK offset + assert(p->is_mmapped() || p->pinuse() || (mchunkptr)p->prev_chunk()->next_chunk() == p); + if (p->is_mmapped()) + do_check_mmapped_chunk(p); +} + +// Check properties of free chunks +void malloc_state::do_check_free_chunk(mchunkptr p) const +{ + size_t sz = p->chunksize(); + mchunkptr next = (mchunkptr)p->chunk_plus_offset(sz); + do_check_any_chunk(p); + assert(!p->is_inuse()); + assert(!p->next_pinuse()); + assert(!p->is_mmapped()); + if (p != _dv && p != _top) + { + if (sz >= MIN_CHUNK_SIZE) + { + assert((sz & spp_chunk_align_mask) == 0); + assert(spp_is_aligned(chunk2mem(p))); + assert(next->_prev_foot == sz); + assert(p->pinuse()); + assert(next == _top || next->is_inuse()); + assert(p->_fd->_bk == p); + assert(p->_bk->_fd == p); + } + else // markers are always of size sizeof(size_t) + assert(sz == sizeof(size_t)); + } +} + +// Check properties of malloced chunks at the point they are malloced +void malloc_state::do_check_malloced_chunk(void* mem, size_t s) const +{ + if (mem != 0) + { + mchunkptr p = mem2chunk(mem); + size_t sz = p->_head & ~INUSE_BITS; + do_check_inuse_chunk(p); + assert((sz & spp_chunk_align_mask) == 0); + assert(sz >= MIN_CHUNK_SIZE); + assert(sz >= s); + // unless mmapped, size is less than MIN_CHUNK_SIZE more than request + 
assert(p->is_mmapped() || sz < (s + MIN_CHUNK_SIZE)); + } +} + +// Check a tree and its subtrees. +void malloc_state::do_check_tree(tchunkptr t) +{ + tchunkptr head = 0; + tchunkptr u = t; + bindex_t tindex = t->_index; + size_t tsize = t->chunksize(); + bindex_t idx = compute_tree_index(tsize); + assert(tindex == idx); + assert(tsize >= MIN_LARGE_SIZE); + assert(tsize >= minsize_for_tree_index(idx)); + assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index((idx + 1)))); + + do + { + // traverse through chain of same-sized nodes + do_check_any_chunk((mchunkptr)u); + assert(u->_index == tindex); + assert(u->chunksize() == tsize); + assert(!u->is_inuse()); + assert(!u->next_pinuse()); + assert(u->_fd->_bk == u); + assert(u->_bk->_fd == u); + if (u->_parent == 0) + { + assert(u->_child[0] == 0); + assert(u->_child[1] == 0); + } + else + { + assert(head == 0); // only one node on chain has parent + head = u; + assert(u->_parent != u); + assert(u->_parent->_child[0] == u || + u->_parent->_child[1] == u || + *((tbinptr*)(u->_parent)) == u); + if (u->_child[0] != 0) + { + assert(u->_child[0]->_parent == u); + assert(u->_child[0] != u); + do_check_tree(u->_child[0]); + } + if (u->_child[1] != 0) + { + assert(u->_child[1]->_parent == u); + assert(u->_child[1] != u); + do_check_tree(u->_child[1]); + } + if (u->_child[0] != 0 && u->_child[1] != 0) + assert(u->_child[0]->chunksize() < u->_child[1]->chunksize()); + } + u = u->_fd; + } + while (u != t); + assert(head != 0); +} + +// Check all the chunks in a treebin. +void malloc_state::do_check_treebin(bindex_t i) +{ + tbinptr* tb = (tbinptr*)treebin_at(i); + tchunkptr t = *tb; + int empty = (_treemap & (1U << i)) == 0; + if (t == 0) + assert(empty); + if (!empty) + do_check_tree(t); +} + +// Check all the chunks in a smallbin. +void malloc_state::do_check_smallbin(bindex_t i) +{ + sbinptr b = smallbin_at(i); + mchunkptr p = b->_bk; + unsigned int empty = (_smallmap & (1U << i)) == 0; + if (p == b) + assert(empty); + if (!empty) + { + for (; p != b; p = p->_bk) + { + size_t size = p->chunksize(); + mchunkptr q; + // each chunk claims to be free + do_check_free_chunk(p); + // chunk belongs in bin + assert(small_index(size) == i); + assert(p->_bk == b || p->_bk->chunksize() == p->chunksize()); + // chunk is followed by an inuse chunk + q = (mchunkptr)p->next_chunk(); + if (q->_head != FENCEPOST_HEAD) + do_check_inuse_chunk(q); + } + } +} + +// Find x in a bin. Used in other check functions. 
+int malloc_state::bin_find(mchunkptr x) +{ + size_t size = x->chunksize(); + if (is_small(size)) + { + bindex_t sidx = small_index(size); + sbinptr b = smallbin_at(sidx); + if (smallmap_is_marked(sidx)) + { + mchunkptr p = b; + do + { + if (p == x) + return 1; + } + while ((p = p->_fd) != b); + } + } + else + { + bindex_t tidx = compute_tree_index(size); + if (treemap_is_marked(tidx)) + { + tchunkptr t = *treebin_at(tidx); + size_t sizebits = size << leftshift_for_tree_index(tidx); + while (t != 0 && t->chunksize() != size) + { + t = t->_child[(sizebits >> (spp_size_t_bitsize - 1)) & 1]; + sizebits <<= 1; + } + if (t != 0) + { + tchunkptr u = t; + do + { + if (u == (tchunkptr)x) + return 1; + } + while ((u = u->_fd) != t); + } + } + } + return 0; +} + +// Traverse each chunk and check it; return total +size_t malloc_state::traverse_and_check() +{ + size_t sum = 0; + if (is_initialized()) + { + msegmentptr s = (msegmentptr)&_seg; + sum += _topsize + top_foot_size(); + while (s != 0) + { + mchunkptr q = align_as_chunk(s->_base); + mchunkptr lastq = 0; + assert(q->pinuse()); + while (segment_holds(s, q) && + q != _top && q->_head != FENCEPOST_HEAD) + { + sum += q->chunksize(); + if (q->is_inuse()) + { + assert(!bin_find(q)); + do_check_inuse_chunk(q); + } + else + { + assert(q == _dv || bin_find(q)); + assert(lastq == 0 || lastq->is_inuse()); // Not 2 consecutive free + do_check_free_chunk(q); + } + lastq = q; + q = (mchunkptr)q->next_chunk(); + } + s = s->_next; + } + } + return sum; +} + + +// Check all properties of malloc_state. +void malloc_state::do_check_malloc_state() +{ + bindex_t i; + size_t total; + // check bins + for (i = 0; i < NSMALLBINS; ++i) + do_check_smallbin(i); + for (i = 0; i < NTREEBINS; ++i) + do_check_treebin(i); + + if (_dvsize != 0) + { + // check dv chunk + do_check_any_chunk(_dv); + assert(_dvsize == _dv->chunksize()); + assert(_dvsize >= MIN_CHUNK_SIZE); + assert(bin_find(_dv) == 0); + } + + if (_top != 0) + { + // check top chunk + do_check_top_chunk(_top); + //assert(topsize == top->chunksize()); redundant + assert(_topsize > 0); + assert(bin_find(_top) == 0); + } + + total = traverse_and_check(); + assert(total <= _footprint); + assert(_footprint <= _max_footprint); +} +#endif // SPP_DEBUG + +/* ----------------------- Operations on smallbins ----------------------- */ + +/* + Various forms of linking and unlinking are defined as macros. Even + the ones for trees, which are very long but have very short typical + paths. This is ugly but reduces reliance on inlining support of + compilers. 
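+
+  For reference, each smallbin is a circular doubly-linked list headed by the
+  sentinel returned by smallbin_at(). A minimal sketch (illustrative only,
+  mirroring do_check_smallbin() above) of walking bin i and checking the link
+  invariant:
+
+      mchunkptr b = smallbin_at(i);                     // bin sentinel
+      for (mchunkptr p = b->_fd; p != b; p = p->_fd)
+          assert(p->_fd->_bk == p && p->_bk->_fd == p); // forward/backward links agree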
+*/ + +// Link a free chunk into a smallbin +void malloc_state::insert_small_chunk(mchunkptr p, size_t s) +{ + bindex_t I = small_index(s); + mchunkptr B = smallbin_at(I); + mchunkptr F = B; + assert(s >= MIN_CHUNK_SIZE); + if (!smallmap_is_marked(I)) + mark_smallmap(I); + else if (rtcheck(ok_address(B->_fd))) + F = B->_fd; + else + SPP_ABORT; + B->_fd = p; + F->_bk = p; + p->_fd = F; + p->_bk = B; +} + +// Unlink a chunk from a smallbin +void malloc_state::unlink_small_chunk(mchunkptr p, size_t s) +{ + mchunkptr F = p->_fd; + mchunkptr B = p->_bk; + bindex_t I = small_index(s); + assert(p != B); + assert(p != F); + assert(p->chunksize() == small_index2size(I)); + if (rtcheck(F == smallbin_at(I) || (ok_address(F) && F->_bk == p))) + { + if (B == F) + clear_smallmap(I); + else if (rtcheck(B == smallbin_at(I) || + (ok_address(B) && B->_fd == p))) + { + F->_bk = B; + B->_fd = F; + } + else + SPP_ABORT; + } + else + SPP_ABORT; +} + +// Unlink the first chunk from a smallbin +void malloc_state::unlink_first_small_chunk(mchunkptr B, mchunkptr p, bindex_t I) +{ + mchunkptr F = p->_fd; + assert(p != B); + assert(p != F); + assert(p->chunksize() == small_index2size(I)); + if (B == F) + clear_smallmap(I); + else if (rtcheck(ok_address(F) && F->_bk == p)) + { + F->_bk = B; + B->_fd = F; + } + else + SPP_ABORT; +} + +// Replace dv node, binning the old one +// Used only when dvsize known to be small +void malloc_state::replace_dv(mchunkptr p, size_t s) +{ + size_t DVS = _dvsize; + assert(is_small(DVS)); + if (DVS != 0) + { + mchunkptr DV = _dv; + insert_small_chunk(DV, DVS); + } + _dvsize = s; + _dv = p; +} + +/* ------------------------- Operations on trees ------------------------- */ + +// Insert chunk into tree +void malloc_state::insert_large_chunk(tchunkptr X, size_t s) +{ + tbinptr* H; + bindex_t I = compute_tree_index(s); + H = treebin_at(I); + X->_index = I; + X->_child[0] = X->_child[1] = 0; + if (!treemap_is_marked(I)) + { + mark_treemap(I); + *H = X; + X->_parent = (tchunkptr)H; + X->_fd = X->_bk = X; + } + else + { + tchunkptr T = *H; + size_t K = s << leftshift_for_tree_index(I); + for (;;) + { + if (T->chunksize() != s) + { + tchunkptr* C = &(T->_child[(K >> (spp_size_t_bitsize - 1)) & 1]); + K <<= 1; + if (*C != 0) + T = *C; + else if (rtcheck(ok_address(C))) + { + *C = X; + X->_parent = T; + X->_fd = X->_bk = X; + break; + } + else + { + SPP_ABORT; + break; + } + } + else + { + tchunkptr F = T->_fd; + if (rtcheck(ok_address(T) && ok_address(F))) + { + T->_fd = F->_bk = X; + X->_fd = F; + X->_bk = T; + X->_parent = 0; + break; + } + else + { + SPP_ABORT; + break; + } + } + } + } +} + +/* + Unlink steps: + + 1. If x is a chained node, unlink it from its same-sized fd/bk links + and choose its bk node as its replacement. + 2. If x was the last node of its size, but not a leaf node, it must + be replaced with a leaf node (not merely one with an open left or + right), to make sure that lefts and rights of descendents + correspond properly to bit masks. We use the rightmost descendent + of x. We could use any other leaf, but this is easy to locate and + tends to counteract removal of leftmosts elsewhere, and so keeps + paths shorter than minimally guaranteed. This doesn't loop much + because on average a node in a tree is near the bottom. + 3. If x is the base of a chain (i.e., has parent links) relink + x's parent and children to x's replacement (or null if none). 
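+
+  As an illustrative sketch of step 2 (simplified; the real code in
+  unlink_large_chunk() below also tracks the parent slot RP and validates
+  addresses with rtcheck()), locating the rightmost descendent R of a
+  non-leaf x is:
+
+      tchunkptr R = (X->_child[1] != 0) ? X->_child[1] : X->_child[0];
+      while (R->_child[1] != 0 || R->_child[0] != 0)
+          R = (R->_child[1] != 0) ? R->_child[1] : R->_child[0];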
+*/ + +void malloc_state::unlink_large_chunk(tchunkptr X) +{ + tchunkptr XP = X->_parent; + tchunkptr R; + if (X->_bk != X) + { + tchunkptr F = X->_fd; + R = X->_bk; + if (rtcheck(ok_address(F) && F->_bk == X && R->_fd == X)) + { + F->_bk = R; + R->_fd = F; + } + else + SPP_ABORT; + } + else + { + tchunkptr* RP; + if (((R = *(RP = &(X->_child[1]))) != 0) || + ((R = *(RP = &(X->_child[0]))) != 0)) + { + tchunkptr* CP; + while ((*(CP = &(R->_child[1])) != 0) || + (*(CP = &(R->_child[0])) != 0)) + R = *(RP = CP); + if (rtcheck(ok_address(RP))) + *RP = 0; + else + SPP_ABORT; + } + } + if (XP != 0) + { + tbinptr* H = treebin_at(X->_index); + if (X == *H) + { + if ((*H = R) == 0) + clear_treemap(X->_index); + } + else if (rtcheck(ok_address(XP))) + { + if (XP->_child[0] == X) + XP->_child[0] = R; + else + XP->_child[1] = R; + } + else + SPP_ABORT; + if (R != 0) + { + if (rtcheck(ok_address(R))) + { + tchunkptr C0, C1; + R->_parent = XP; + if ((C0 = X->_child[0]) != 0) + { + if (rtcheck(ok_address(C0))) + { + R->_child[0] = C0; + C0->_parent = R; + } + else + SPP_ABORT; + } + if ((C1 = X->_child[1]) != 0) + { + if (rtcheck(ok_address(C1))) + { + R->_child[1] = C1; + C1->_parent = R; + } + else + SPP_ABORT; + } + } + else + SPP_ABORT; + } + } +} + +// Relays to large vs small bin operations + +void malloc_state::insert_chunk(mchunkptr p, size_t s) +{ + if (is_small(s)) + insert_small_chunk(p, s); + else + { + tchunkptr tp = (tchunkptr)(p); + insert_large_chunk(tp, s); + } +} + +void malloc_state::unlink_chunk(mchunkptr p, size_t s) +{ + if (is_small(s)) + unlink_small_chunk(p, s); + else + { + tchunkptr tp = (tchunkptr)(p); + unlink_large_chunk(tp); + } +} + + +/* ----------------------- Direct-mmapping chunks ----------------------- */ + +/* + Directly mmapped chunks are set up with an offset to the start of + the mmapped region stored in the prev_foot field of the chunk. This + allows reconstruction of the required argument to MUNMAP when freed, + and also allows adjustment of the returned chunk to meet alignment + requirements (especially in memalign). 
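+
+  Concretely (a sketch only; the real path is in dispose_chunk() and _free()
+  below), the region to unmap is recovered from that stored offset:
+
+      size_t offset = p->_prev_foot;                       // distance back to the mmap base
+      size_t length = p->chunksize() + offset + SPP_MMAP_FOOT_PAD;
+      SPP_CALL_MUNMAP((char*)p - offset, length);          // releases the whole mapping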
+*/ + +// Malloc using mmap +void* malloc_state::mmap_alloc(size_t nb) +{ + size_t mmsize = mmap_align(nb + 6 * sizeof(size_t) + spp_chunk_align_mask); + if (_footprint_limit != 0) + { + size_t fp = _footprint + mmsize; + if (fp <= _footprint || fp > _footprint_limit) + return 0; + } + if (mmsize > nb) + { + // Check for wrap around 0 + char* mm = (char*)(SPP_CALL_DIRECT_MMAP(mmsize)); + if (mm != cmfail) + { + size_t offset = align_offset(chunk2mem(mm)); + size_t psize = mmsize - offset - SPP_MMAP_FOOT_PAD; + mchunkptr p = (mchunkptr)(mm + offset); + p->_prev_foot = offset; + p->_head = psize; + mark_inuse_foot(p, psize); + p->chunk_plus_offset(psize)->_head = FENCEPOST_HEAD; + p->chunk_plus_offset(psize + sizeof(size_t))->_head = 0; + + if (_least_addr == 0 || mm < _least_addr) + _least_addr = mm; + if ((_footprint += mmsize) > _max_footprint) + _max_footprint = _footprint; + assert(spp_is_aligned(chunk2mem(p))); + check_mmapped_chunk(p); + return chunk2mem(p); + } + } + return 0; +} + +// Realloc using mmap +mchunkptr malloc_state::mmap_resize(mchunkptr oldp, size_t nb, int flags) +{ + size_t oldsize = oldp->chunksize(); + (void)flags; // placate people compiling -Wunused + if (is_small(nb)) // Can't shrink mmap regions below small size + return 0; + + // Keep old chunk if big enough but not too big + if (oldsize >= nb + sizeof(size_t) && + (oldsize - nb) <= (mparams._granularity << 1)) + return oldp; + else + { + size_t offset = oldp->_prev_foot; + size_t oldmmsize = oldsize + offset + SPP_MMAP_FOOT_PAD; + size_t newmmsize = mmap_align(nb + 6 * sizeof(size_t) + spp_chunk_align_mask); + char* cp = (char*)SPP_CALL_MREMAP((char*)oldp - offset, + oldmmsize, newmmsize, flags); + if (cp != cmfail) + { + mchunkptr newp = (mchunkptr)(cp + offset); + size_t psize = newmmsize - offset - SPP_MMAP_FOOT_PAD; + newp->_head = psize; + mark_inuse_foot(newp, psize); + newp->chunk_plus_offset(psize)->_head = FENCEPOST_HEAD; + newp->chunk_plus_offset(psize + sizeof(size_t))->_head = 0; + + if (cp < _least_addr) + _least_addr = cp; + if ((_footprint += newmmsize - oldmmsize) > _max_footprint) + _max_footprint = _footprint; + check_mmapped_chunk(newp); + return newp; + } + } + return 0; +} + + +/* -------------------------- mspace management -------------------------- */ + +// Initialize top chunk and its size +void malloc_state::init_top(mchunkptr p, size_t psize) +{ + // Ensure alignment + size_t offset = align_offset(chunk2mem(p)); + p = (mchunkptr)((char*)p + offset); + psize -= offset; + + _top = p; + _topsize = psize; + p->_head = psize | PINUSE_BIT; + // set size of fake trailing chunk holding overhead space only once + p->chunk_plus_offset(psize)->_head = top_foot_size(); + _trim_check = mparams._trim_threshold; // reset on each update +} + +// Initialize bins for a new mstate that is otherwise zeroed out +void malloc_state::init_bins() +{ + // Establish circular links for smallbins + bindex_t i; + for (i = 0; i < NSMALLBINS; ++i) + { + sbinptr bin = smallbin_at(i); + bin->_fd = bin->_bk = bin; + } +} + +#if SPP_PROCEED_ON_ERROR + +// default corruption action +void malloc_state::reset_on_error() +{ + int i; + ++malloc_corruption_error_count; + // Reinitialize fields to forget about all memory + _smallmap = _treemap = 0; + _dvsize = _topsize = 0; + _seg._base = 0; + _seg._size = 0; + _seg._next = 0; + _top = _dv = 0; + for (i = 0; i < NTREEBINS; ++i) + *treebin_at(i) = 0; + init_bins(); +} +#endif + +/* Allocate chunk and prepend remainder with chunk in successor base. 
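+   The resulting layout is roughly (illustrative only):
+
+       newbase                                        oldbase
+       |-- p: nb bytes, inuse --|-- q: qsize, free --|-- oldfirst ... --|
+
+   where the remainder q is consolidated with oldfirst (or with the top or
+   designated-victim chunk when oldfirst happens to be one of those).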
*/ +void* malloc_state::prepend_alloc(char* newbase, char* oldbase, size_t nb) +{ + mchunkptr p = align_as_chunk(newbase); + mchunkptr oldfirst = align_as_chunk(oldbase); + size_t psize = (char*)oldfirst - (char*)p; + mchunkptr q = (mchunkptr)p->chunk_plus_offset(nb); + size_t qsize = psize - nb; + set_size_and_pinuse_of_inuse_chunk(p, nb); + + assert((char*)oldfirst > (char*)q); + assert(oldfirst->pinuse()); + assert(qsize >= MIN_CHUNK_SIZE); + + // consolidate remainder with first chunk of old base + if (oldfirst == _top) + { + size_t tsize = _topsize += qsize; + _top = q; + q->_head = tsize | PINUSE_BIT; + check_top_chunk(q); + } + else if (oldfirst == _dv) + { + size_t dsize = _dvsize += qsize; + _dv = q; + q->set_size_and_pinuse_of_free_chunk(dsize); + } + else + { + if (!oldfirst->is_inuse()) + { + size_t nsize = oldfirst->chunksize(); + unlink_chunk(oldfirst, nsize); + oldfirst = (mchunkptr)oldfirst->chunk_plus_offset(nsize); + qsize += nsize; + } + q->set_free_with_pinuse(qsize, oldfirst); + insert_chunk(q, qsize); + check_free_chunk(q); + } + + check_malloced_chunk(chunk2mem(p), nb); + return chunk2mem(p); +} + +// Add a segment to hold a new noncontiguous region +void malloc_state::add_segment(char* tbase, size_t tsize, flag_t mmapped) +{ + // Determine locations and sizes of segment, fenceposts, old top + char* old_top = (char*)_top; + msegmentptr oldsp = segment_holding(old_top); + char* old_end = oldsp->_base + oldsp->_size; + size_t ssize = pad_request(sizeof(struct malloc_segment)); + char* rawsp = old_end - (ssize + 4 * sizeof(size_t) + spp_chunk_align_mask); + size_t offset = align_offset(chunk2mem(rawsp)); + char* asp = rawsp + offset; + char* csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp; + mchunkptr sp = (mchunkptr)csp; + msegmentptr ss = (msegmentptr)(chunk2mem(sp)); + mchunkptr tnext = (mchunkptr)sp->chunk_plus_offset(ssize); + mchunkptr p = tnext; + int nfences = 0; + + // reset top to new space + init_top((mchunkptr)tbase, tsize - top_foot_size()); + + // Set up segment record + assert(spp_is_aligned(ss)); + set_size_and_pinuse_of_inuse_chunk(sp, ssize); + *ss = _seg; // Push current record + _seg._base = tbase; + _seg._size = tsize; + _seg._sflags = mmapped; + _seg._next = ss; + + // Insert trailing fenceposts + for (;;) + { + mchunkptr nextp = (mchunkptr)p->chunk_plus_offset(sizeof(size_t)); + p->_head = FENCEPOST_HEAD; + ++nfences; + if ((char*)(&(nextp->_head)) < old_end) + p = nextp; + else + break; + } + assert(nfences >= 2); + + // Insert the rest of old top into a bin as an ordinary free chunk + if (csp != old_top) + { + mchunkptr q = (mchunkptr)old_top; + size_t psize = csp - old_top; + mchunkptr tn = (mchunkptr)q->chunk_plus_offset(psize); + q->set_free_with_pinuse(psize, tn); + insert_chunk(q, psize); + } + + check_top_chunk(_top); +} + +/* -------------------------- System allocation -------------------------- */ + +// Get memory from system using MMAP +void* malloc_state::sys_alloc(size_t nb) +{ + char* tbase = cmfail; + size_t tsize = 0; + flag_t mmap_flag = 0; + size_t asize; // allocation size + + mparams.ensure_initialization(); + + // Directly map large chunks, but only if already initialized + if (use_mmap() && nb >= mparams._mmap_threshold && _topsize != 0) + { + void* mem = mmap_alloc(nb); + if (mem != 0) + return mem; + } + + asize = mparams.granularity_align(nb + sys_alloc_padding()); + if (asize <= nb) + return 0; // wraparound + if (_footprint_limit != 0) + { + size_t fp = _footprint + asize; + if (fp <= _footprint || fp > 
_footprint_limit) + return 0; + } + + /* + Try getting memory with a call to MMAP new space (disabled if not SPP_HAVE_MMAP). + We need to request enough bytes from system to ensure + we can malloc nb bytes upon success, so pad with enough space for + top_foot, plus alignment-pad to make sure we don't lose bytes if + not on boundary, and round this up to a granularity unit. + */ + + if (SPP_HAVE_MMAP && tbase == cmfail) + { + // Try MMAP + char* mp = (char*)(SPP_CALL_MMAP(asize)); + if (mp != cmfail) + { + tbase = mp; + tsize = asize; + mmap_flag = USE_MMAP_BIT; + } + } + + if (tbase != cmfail) + { + + if ((_footprint += tsize) > _max_footprint) + _max_footprint = _footprint; + + if (!is_initialized()) + { + // first-time initialization + if (_least_addr == 0 || tbase < _least_addr) + _least_addr = tbase; + _seg._base = tbase; + _seg._size = tsize; + _seg._sflags = mmap_flag; + _magic = mparams._magic; + _release_checks = SPP_MAX_RELEASE_CHECK_RATE; + init_bins(); + + // Offset top by embedded malloc_state + mchunkptr mn = (mchunkptr)mem2chunk(this)->next_chunk(); + init_top(mn, (size_t)((tbase + tsize) - (char*)mn) - top_foot_size()); + } + + else + { + // Try to merge with an existing segment + msegmentptr sp = &_seg; + // Only consider most recent segment if traversal suppressed + while (sp != 0 && tbase != sp->_base + sp->_size) + sp = (SPP_NO_SEGMENT_TRAVERSAL) ? 0 : sp->_next; + if (sp != 0 && + !sp->is_extern_segment() && + (sp->_sflags & USE_MMAP_BIT) == mmap_flag && + segment_holds(sp, _top)) + { + // append + sp->_size += tsize; + init_top(_top, _topsize + tsize); + } + else + { + if (tbase < _least_addr) + _least_addr = tbase; + sp = &_seg; + while (sp != 0 && sp->_base != tbase + tsize) + sp = (SPP_NO_SEGMENT_TRAVERSAL) ? 0 : sp->_next; + if (sp != 0 && + !sp->is_extern_segment() && + (sp->_sflags & USE_MMAP_BIT) == mmap_flag) + { + char* oldbase = sp->_base; + sp->_base = tbase; + sp->_size += tsize; + return prepend_alloc(tbase, oldbase, nb); + } + else + add_segment(tbase, tsize, mmap_flag); + } + } + + if (nb < _topsize) + { + // Allocate from new or extended top space + size_t rsize = _topsize -= nb; + mchunkptr p = _top; + mchunkptr r = _top = (mchunkptr)p->chunk_plus_offset(nb); + r->_head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(p, nb); + check_top_chunk(_top); + check_malloced_chunk(chunk2mem(p), nb); + return chunk2mem(p); + } + } + + SPP_MALLOC_FAILURE_ACTION; + return 0; +} + +/* ----------------------- system deallocation -------------------------- */ + +// Unmap and unlink any mmapped segments that don't contain used chunks +size_t malloc_state::release_unused_segments() +{ + size_t released = 0; + int nsegs = 0; + msegmentptr pred = &_seg; + msegmentptr sp = pred->_next; + while (sp != 0) + { + char* base = sp->_base; + size_t size = sp->_size; + msegmentptr next = sp->_next; + ++nsegs; + if (sp->is_mmapped_segment() && !sp->is_extern_segment()) + { + mchunkptr p = align_as_chunk(base); + size_t psize = p->chunksize(); + // Can unmap if first chunk holds entire segment and not pinned + if (!p->is_inuse() && (char*)p + psize >= base + size - top_foot_size()) + { + tchunkptr tp = (tchunkptr)p; + assert(segment_holds(sp, p)); + if (p == _dv) + { + _dv = 0; + _dvsize = 0; + } + else + unlink_large_chunk(tp); + if (SPP_CALL_MUNMAP(base, size) == 0) + { + released += size; + _footprint -= size; + // unlink obsoleted record + sp = pred; + sp->_next = next; + } + else + { + // back out if cannot unmap + insert_large_chunk(tp, psize); + } + } + } + if 
(SPP_NO_SEGMENT_TRAVERSAL) // scan only first segment + break; + pred = sp; + sp = next; + } + // Reset check counter + _release_checks = (((size_t) nsegs > (size_t) SPP_MAX_RELEASE_CHECK_RATE) ? + (size_t) nsegs : (size_t) SPP_MAX_RELEASE_CHECK_RATE); + return released; +} + +int malloc_state::sys_trim(size_t pad) +{ + size_t released = 0; + mparams.ensure_initialization(); + if (pad < MAX_REQUEST && is_initialized()) + { + pad += top_foot_size(); // ensure enough room for segment overhead + + if (_topsize > pad) + { + // Shrink top space in _granularity - size units, keeping at least one + size_t unit = mparams._granularity; + size_t extra = ((_topsize - pad + (unit - 1)) / unit - + 1) * unit; + msegmentptr sp = segment_holding((char*)_top); + + if (!sp->is_extern_segment()) + { + if (sp->is_mmapped_segment()) + { + if (SPP_HAVE_MMAP && + sp->_size >= extra && + !has_segment_link(sp)) + { + // can't shrink if pinned + size_t newsize = sp->_size - extra; + (void)newsize; // placate people compiling -Wunused-variable + // Prefer mremap, fall back to munmap + if ((SPP_CALL_MREMAP(sp->_base, sp->_size, newsize, 0) != mfail) || + (SPP_CALL_MUNMAP(sp->_base + newsize, extra) == 0)) + released = extra; + } + } + } + + if (released != 0) + { + sp->_size -= released; + _footprint -= released; + init_top(_top, _topsize - released); + check_top_chunk(_top); + } + } + + // Unmap any unused mmapped segments + if (SPP_HAVE_MMAP) + released += release_unused_segments(); + + // On failure, disable autotrim to avoid repeated failed future calls + if (released == 0 && _topsize > _trim_check) + _trim_check = spp_max_size_t; + } + + return (released != 0) ? 1 : 0; +} + +/* Consolidate and bin a chunk. Differs from exported versions + of free mainly in that the chunk need not be marked as inuse. 
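+
+   For example (a hedged sketch, not part of the original code), the realloc
+   and memalign paths below use it to re-bin a split-off remainder:
+
+       mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb); // tail left over after keeping nb
+       set_inuse(r, rsize);
+       dispose_chunk(r, rsize);   // consolidates with free neighbours, then bins it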
+*/ +void malloc_state::dispose_chunk(mchunkptr p, size_t psize) +{ + mchunkptr next = (mchunkptr)p->chunk_plus_offset(psize); + if (!p->pinuse()) + { + mchunkptr prev; + size_t prevsize = p->_prev_foot; + if (p->is_mmapped()) + { + psize += prevsize + SPP_MMAP_FOOT_PAD; + if (SPP_CALL_MUNMAP((char*)p - prevsize, psize) == 0) + _footprint -= psize; + return; + } + prev = (mchunkptr)p->chunk_minus_offset(prevsize); + psize += prevsize; + p = prev; + if (rtcheck(ok_address(prev))) + { + // consolidate backward + if (p != _dv) + unlink_chunk(p, prevsize); + else if ((next->_head & INUSE_BITS) == INUSE_BITS) + { + _dvsize = psize; + p->set_free_with_pinuse(psize, next); + return; + } + } + else + { + SPP_ABORT; + return; + } + } + if (rtcheck(ok_address(next))) + { + if (!next->cinuse()) + { + // consolidate forward + if (next == _top) + { + size_t tsize = _topsize += psize; + _top = p; + p->_head = tsize | PINUSE_BIT; + if (p == _dv) + { + _dv = 0; + _dvsize = 0; + } + return; + } + else if (next == _dv) + { + size_t dsize = _dvsize += psize; + _dv = p; + p->set_size_and_pinuse_of_free_chunk(dsize); + return; + } + else + { + size_t nsize = next->chunksize(); + psize += nsize; + unlink_chunk(next, nsize); + p->set_size_and_pinuse_of_free_chunk(psize); + if (p == _dv) + { + _dvsize = psize; + return; + } + } + } + else + p->set_free_with_pinuse(psize, next); + insert_chunk(p, psize); + } + else + SPP_ABORT; +} + +/* ---------------------------- malloc --------------------------- */ + +// allocate a large request from the best fitting chunk in a treebin +void* malloc_state::tmalloc_large(size_t nb) +{ + tchunkptr v = 0; + size_t rsize = -nb; // Unsigned negation + tchunkptr t; + bindex_t idx = compute_tree_index(nb); + if ((t = *treebin_at(idx)) != 0) + { + // Traverse tree for this bin looking for node with size == nb + size_t sizebits = nb << leftshift_for_tree_index(idx); + tchunkptr rst = 0; // The deepest untaken right subtree + for (;;) + { + tchunkptr rt; + size_t trem = t->chunksize() - nb; + if (trem < rsize) + { + v = t; + if ((rsize = trem) == 0) + break; + } + rt = t->_child[1]; + t = t->_child[(sizebits >> (spp_size_t_bitsize - 1)) & 1]; + if (rt != 0 && rt != t) + rst = rt; + if (t == 0) + { + t = rst; // set t to least subtree holding sizes > nb + break; + } + sizebits <<= 1; + } + } + if (t == 0 && v == 0) + { + // set t to root of next non-empty treebin + binmap_t leftbits = left_bits(idx2bit(idx)) & _treemap; + if (leftbits != 0) + { + binmap_t leastbit = least_bit(leftbits); + bindex_t i = compute_bit2idx(leastbit); + t = *treebin_at(i); + } + } + + while (t != 0) + { + // find smallest of tree or subtree + size_t trem = t->chunksize() - nb; + if (trem < rsize) + { + rsize = trem; + v = t; + } + t = t->leftmost_child(); + } + + // If dv is a better fit, return 0 so malloc will use it + if (v != 0 && rsize < (size_t)(_dvsize - nb)) + { + if (rtcheck(ok_address(v))) + { + // split + mchunkptr r = (mchunkptr)v->chunk_plus_offset(nb); + assert(v->chunksize() == rsize + nb); + if (rtcheck(ok_next(v, r))) + { + unlink_large_chunk(v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(v, (rsize + nb)); + else + { + set_size_and_pinuse_of_inuse_chunk(v, nb); + r->set_size_and_pinuse_of_free_chunk(rsize); + insert_chunk(r, rsize); + } + return chunk2mem(v); + } + } + SPP_ABORT; + } + return 0; +} + +// allocate a small request from the best fitting chunk in a treebin +void* malloc_state::tmalloc_small(size_t nb) +{ + tchunkptr t, v; + size_t rsize; + binmap_t leastbit = 
least_bit(_treemap); + bindex_t i = compute_bit2idx(leastbit); + v = t = *treebin_at(i); + rsize = t->chunksize() - nb; + + while ((t = t->leftmost_child()) != 0) + { + size_t trem = t->chunksize() - nb; + if (trem < rsize) + { + rsize = trem; + v = t; + } + } + + if (rtcheck(ok_address(v))) + { + mchunkptr r = (mchunkptr)v->chunk_plus_offset(nb); + assert(v->chunksize() == rsize + nb); + if (rtcheck(ok_next(v, r))) + { + unlink_large_chunk(v); + if (rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(v, (rsize + nb)); + else + { + set_size_and_pinuse_of_inuse_chunk(v, nb); + r->set_size_and_pinuse_of_free_chunk(rsize); + replace_dv(r, rsize); + } + return chunk2mem(v); + } + } + + SPP_ABORT; + return 0; +} + +/* ---------------------------- malloc --------------------------- */ + +void* malloc_state::_malloc(size_t bytes) +{ + if (1) + { + void* mem; + size_t nb; + if (bytes <= MAX_SMALL_REQUEST) + { + bindex_t idx; + binmap_t smallbits; + nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes); + idx = small_index(nb); + smallbits = _smallmap >> idx; + + if ((smallbits & 0x3U) != 0) + { + // Remainderless fit to a smallbin. + mchunkptr b, p; + idx += ~smallbits & 1; // Uses next bin if idx empty + b = smallbin_at(idx); + p = b->_fd; + assert(p->chunksize() == small_index2size(idx)); + unlink_first_small_chunk(b, p, idx); + set_inuse_and_pinuse(p, small_index2size(idx)); + mem = chunk2mem(p); + check_malloced_chunk(mem, nb); + goto postaction; + } + + else if (nb > _dvsize) + { + if (smallbits != 0) + { + // Use chunk in next nonempty smallbin + mchunkptr b, p, r; + size_t rsize; + binmap_t leftbits = (smallbits << idx) & left_bits(malloc_state::idx2bit(idx)); + binmap_t leastbit = least_bit(leftbits); + bindex_t i = compute_bit2idx(leastbit); + b = smallbin_at(i); + p = b->_fd; + assert(p->chunksize() == small_index2size(i)); + unlink_first_small_chunk(b, p, i); + rsize = small_index2size(i) - nb; + // Fit here cannot be remainderless if 4byte sizes + if (sizeof(size_t) != 4 && rsize < MIN_CHUNK_SIZE) + set_inuse_and_pinuse(p, small_index2size(i)); + else + { + set_size_and_pinuse_of_inuse_chunk(p, nb); + r = (mchunkptr)p->chunk_plus_offset(nb); + r->set_size_and_pinuse_of_free_chunk(rsize); + replace_dv(r, rsize); + } + mem = chunk2mem(p); + check_malloced_chunk(mem, nb); + goto postaction; + } + + else if (_treemap != 0 && (mem = tmalloc_small(nb)) != 0) + { + check_malloced_chunk(mem, nb); + goto postaction; + } + } + } + else if (bytes >= MAX_REQUEST) + nb = spp_max_size_t; // Too big to allocate. 
Force failure (in sys alloc) + else + { + nb = pad_request(bytes); + if (_treemap != 0 && (mem = tmalloc_large(nb)) != 0) + { + check_malloced_chunk(mem, nb); + goto postaction; + } + } + + if (nb <= _dvsize) + { + size_t rsize = _dvsize - nb; + mchunkptr p = _dv; + if (rsize >= MIN_CHUNK_SIZE) + { + // split dv + mchunkptr r = _dv = (mchunkptr)p->chunk_plus_offset(nb); + _dvsize = rsize; + r->set_size_and_pinuse_of_free_chunk(rsize); + set_size_and_pinuse_of_inuse_chunk(p, nb); + } + else // exhaust dv + { + size_t dvs = _dvsize; + _dvsize = 0; + _dv = 0; + set_inuse_and_pinuse(p, dvs); + } + mem = chunk2mem(p); + check_malloced_chunk(mem, nb); + goto postaction; + } + + else if (nb < _topsize) + { + // Split top + size_t rsize = _topsize -= nb; + mchunkptr p = _top; + mchunkptr r = _top = (mchunkptr)p->chunk_plus_offset(nb); + r->_head = rsize | PINUSE_BIT; + set_size_and_pinuse_of_inuse_chunk(p, nb); + mem = chunk2mem(p); + check_top_chunk(_top); + check_malloced_chunk(mem, nb); + goto postaction; + } + + mem = sys_alloc(nb); + +postaction: + return mem; + } + + return 0; +} + +/* ---------------------------- free --------------------------- */ + +void malloc_state::_free(mchunkptr p) +{ + if (1) + { + check_inuse_chunk(p); + if (rtcheck(ok_address(p) && ok_inuse(p))) + { + size_t psize = p->chunksize(); + mchunkptr next = (mchunkptr)p->chunk_plus_offset(psize); + if (!p->pinuse()) + { + size_t prevsize = p->_prev_foot; + if (p->is_mmapped()) + { + psize += prevsize + SPP_MMAP_FOOT_PAD; + if (SPP_CALL_MUNMAP((char*)p - prevsize, psize) == 0) + _footprint -= psize; + goto postaction; + } + else + { + mchunkptr prev = (mchunkptr)p->chunk_minus_offset(prevsize); + psize += prevsize; + p = prev; + if (rtcheck(ok_address(prev))) + { + // consolidate backward + if (p != _dv) + unlink_chunk(p, prevsize); + else if ((next->_head & INUSE_BITS) == INUSE_BITS) + { + _dvsize = psize; + p->set_free_with_pinuse(psize, next); + goto postaction; + } + } + else + goto erroraction; + } + } + + if (rtcheck(ok_next(p, next) && ok_pinuse(next))) + { + if (!next->cinuse()) + { + // consolidate forward + if (next == _top) + { + size_t tsize = _topsize += psize; + _top = p; + p->_head = tsize | PINUSE_BIT; + if (p == _dv) + { + _dv = 0; + _dvsize = 0; + } + if (should_trim(tsize)) + sys_trim(0); + goto postaction; + } + else if (next == _dv) + { + size_t dsize = _dvsize += psize; + _dv = p; + p->set_size_and_pinuse_of_free_chunk(dsize); + goto postaction; + } + else + { + size_t nsize = next->chunksize(); + psize += nsize; + unlink_chunk(next, nsize); + p->set_size_and_pinuse_of_free_chunk(psize); + if (p == _dv) + { + _dvsize = psize; + goto postaction; + } + } + } + else + p->set_free_with_pinuse(psize, next); + + if (is_small(psize)) + { + insert_small_chunk(p, psize); + check_free_chunk(p); + } + else + { + tchunkptr tp = (tchunkptr)p; + insert_large_chunk(tp, psize); + check_free_chunk(p); + if (--_release_checks == 0) + release_unused_segments(); + } + goto postaction; + } + } +erroraction: + SPP_USAGE_ERROR_ACTION(this, p); +postaction: + ; + } +} + +/* ------------ Internal support for realloc, memalign, etc -------------- */ + +// Try to realloc; only in-place unless can_move true +mchunkptr malloc_state::try_realloc_chunk(mchunkptr p, size_t nb, int can_move) +{ + mchunkptr newp = 0; + size_t oldsize = p->chunksize(); + mchunkptr next = (mchunkptr)p->chunk_plus_offset(oldsize); + if (rtcheck(ok_address(p) && ok_inuse(p) && + ok_next(p, next) && ok_pinuse(next))) + { + if (p->is_mmapped()) + newp = 
mmap_resize(p, nb, can_move); + else if (oldsize >= nb) + { + // already big enough + size_t rsize = oldsize - nb; + if (rsize >= MIN_CHUNK_SIZE) + { + // split off remainder + mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb); + set_inuse(p, nb); + set_inuse(r, rsize); + dispose_chunk(r, rsize); + } + newp = p; + } + else if (next == _top) + { + // extend into top + if (oldsize + _topsize > nb) + { + size_t newsize = oldsize + _topsize; + size_t newtopsize = newsize - nb; + mchunkptr newtop = (mchunkptr)p->chunk_plus_offset(nb); + set_inuse(p, nb); + newtop->_head = newtopsize | PINUSE_BIT; + _top = newtop; + _topsize = newtopsize; + newp = p; + } + } + else if (next == _dv) + { + // extend into dv + size_t dvs = _dvsize; + if (oldsize + dvs >= nb) + { + size_t dsize = oldsize + dvs - nb; + if (dsize >= MIN_CHUNK_SIZE) + { + mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb); + mchunkptr n = (mchunkptr)r->chunk_plus_offset(dsize); + set_inuse(p, nb); + r->set_size_and_pinuse_of_free_chunk(dsize); + n->clear_pinuse(); + _dvsize = dsize; + _dv = r; + } + else + { + // exhaust dv + size_t newsize = oldsize + dvs; + set_inuse(p, newsize); + _dvsize = 0; + _dv = 0; + } + newp = p; + } + } + else if (!next->cinuse()) + { + // extend into next free chunk + size_t nextsize = next->chunksize(); + if (oldsize + nextsize >= nb) + { + size_t rsize = oldsize + nextsize - nb; + unlink_chunk(next, nextsize); + if (rsize < MIN_CHUNK_SIZE) + { + size_t newsize = oldsize + nextsize; + set_inuse(p, newsize); + } + else + { + mchunkptr r = (mchunkptr)p->chunk_plus_offset(nb); + set_inuse(p, nb); + set_inuse(r, rsize); + dispose_chunk(r, rsize); + } + newp = p; + } + } + } + else + SPP_USAGE_ERROR_ACTION(m, chunk2mem(p)); + return newp; +} + +void* malloc_state::internal_memalign(size_t alignment, size_t bytes) +{ + void* mem = 0; + if (alignment < MIN_CHUNK_SIZE) // must be at least a minimum chunk size + alignment = MIN_CHUNK_SIZE; + if ((alignment & (alignment - 1)) != 0) + { + // Ensure a power of 2 + size_t a = SPP_MALLOC_ALIGNMENT << 1; + while (a < alignment) + a <<= 1; + alignment = a; + } + if (bytes >= MAX_REQUEST - alignment) + SPP_MALLOC_FAILURE_ACTION; + else + { + size_t nb = request2size(bytes); + size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; + mem = internal_malloc(req); + if (mem != 0) + { + mchunkptr p = mem2chunk(mem); + if ((((size_t)(mem)) & (alignment - 1)) != 0) + { + // misaligned + /* + Find an aligned spot inside chunk. Since we need to give + back leading space in a chunk of at least MIN_CHUNK_SIZE, if + the first calculation places us at a spot with less than + MIN_CHUNK_SIZE leader, we can move to the next aligned spot. + We've allocated enough total room so that this is always + possible. + */ + char* br = (char*)mem2chunk((void *)(((size_t)((char*)mem + alignment - 1)) & + -alignment)); + char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE) ? 
+ br : br + alignment; + mchunkptr newp = (mchunkptr)pos; + size_t leadsize = pos - (char*)(p); + size_t newsize = p->chunksize() - leadsize; + + if (p->is_mmapped()) + { + // For mmapped chunks, just adjust offset + newp->_prev_foot = p->_prev_foot + leadsize; + newp->_head = newsize; + } + else + { + // Otherwise, give back leader, use the rest + set_inuse(newp, newsize); + set_inuse(p, leadsize); + dispose_chunk(p, leadsize); + } + p = newp; + } + + // Give back spare room at the end + if (!p->is_mmapped()) + { + size_t size = p->chunksize(); + if (size > nb + MIN_CHUNK_SIZE) + { + size_t remainder_size = size - nb; + mchunkptr remainder = (mchunkptr)p->chunk_plus_offset(nb); + set_inuse(p, nb); + set_inuse(remainder, remainder_size); + dispose_chunk(remainder, remainder_size); + } + } + + mem = chunk2mem(p); + assert(p->chunksize() >= nb); + assert(((size_t)mem & (alignment - 1)) == 0); + check_inuse_chunk(p); + } + } + return mem; +} + +/* + Common support for independent_X routines, handling + all of the combinations that can result. + The opts arg has: + bit 0 set if all elements are same size (using sizes[0]) + bit 1 set if elements should be zeroed +*/ +void** malloc_state::ialloc(size_t n_elements, size_t* sizes, int opts, + void* chunks[]) +{ + + size_t element_size; // chunksize of each element, if all same + size_t contents_size; // total size of elements + size_t array_size; // request size of pointer array + void* mem; // malloced aggregate space + mchunkptr p; // corresponding chunk + size_t remainder_size; // remaining bytes while splitting + void** marray; // either "chunks" or malloced ptr array + mchunkptr array_chunk; // chunk for malloced ptr array + flag_t was_enabled; // to disable mmap + size_t size; + size_t i; + + mparams.ensure_initialization(); + // compute array length, if needed + if (chunks != 0) + { + if (n_elements == 0) + return chunks; // nothing to do + marray = chunks; + array_size = 0; + } + else + { + // if empty req, must still return chunk representing empty array + if (n_elements == 0) + return (void**)internal_malloc(0); + marray = 0; + array_size = request2size(n_elements * (sizeof(void*))); + } + + // compute total element size + if (opts & 0x1) + { + // all-same-size + element_size = request2size(*sizes); + contents_size = n_elements * element_size; + } + else + { + // add up all the sizes + element_size = 0; + contents_size = 0; + for (i = 0; i != n_elements; ++i) + contents_size += request2size(sizes[i]); + } + + size = contents_size + array_size; + + /* + Allocate the aggregate chunk. First disable direct-mmapping so + malloc won't use it, since we would not be able to later + free/realloc space internal to a segregated mmap region. 
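+
+    For orientation (illustrative only; these calls mirror the currently
+    disabled mspace_independent_calloc / mspace_independent_comalloc wrappers
+    later in this file), the opts bits are used as follows:
+
+        size_t sz = elem_size;
+        void** a = ms->ialloc(n_elements, &sz,   3, chunks); // same-size elements, zeroed
+        void** b = ms->ialloc(n_elements, sizes, 0, chunks); // per-element sizes, not zeroed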
+ */ + was_enabled = use_mmap(); + disable_mmap(); + mem = internal_malloc(size - CHUNK_OVERHEAD); + if (was_enabled) + enable_mmap(); + if (mem == 0) + return 0; + + p = mem2chunk(mem); + remainder_size = p->chunksize(); + + assert(!p->is_mmapped()); + + if (opts & 0x2) + { + // optionally clear the elements + memset((size_t*)mem, 0, remainder_size - sizeof(size_t) - array_size); + } + + // If not provided, allocate the pointer array as final part of chunk + if (marray == 0) + { + size_t array_chunk_size; + array_chunk = (mchunkptr)p->chunk_plus_offset(contents_size); + array_chunk_size = remainder_size - contents_size; + marray = (void**)(chunk2mem(array_chunk)); + set_size_and_pinuse_of_inuse_chunk(array_chunk, array_chunk_size); + remainder_size = contents_size; + } + + // split out elements + for (i = 0; ; ++i) + { + marray[i] = chunk2mem(p); + if (i != n_elements - 1) + { + if (element_size != 0) + size = element_size; + else + size = request2size(sizes[i]); + remainder_size -= size; + set_size_and_pinuse_of_inuse_chunk(p, size); + p = (mchunkptr)p->chunk_plus_offset(size); + } + else + { + // the final element absorbs any overallocation slop + set_size_and_pinuse_of_inuse_chunk(p, remainder_size); + break; + } + } + +#if SPP_DEBUG + if (marray != chunks) + { + // final element must have exactly exhausted chunk + if (element_size != 0) + assert(remainder_size == element_size); + else + assert(remainder_size == request2size(sizes[i])); + check_inuse_chunk(mem2chunk(marray)); + } + for (i = 0; i != n_elements; ++i) + check_inuse_chunk(mem2chunk(marray[i])); + +#endif + + return marray; +} + +/* Try to free all pointers in the given array. + Note: this could be made faster, by delaying consolidation, + at the price of disabling some user integrity checks, We + still optimize some consolidations by combining adjacent + chunks before freeing, which will occur often if allocated + with ialloc or the array is sorted. 
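+
+   A minimal usage sketch (illustrative only, assuming a valid mspace msp):
+
+       void* ptrs[2] = { mspace_malloc(msp, 16), mspace_malloc(msp, 32) };
+       size_t unfreed = mspace_bulk_free(msp, ptrs, 2);  // count of pointers it could not free
+       assert(unfreed == 0);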
+*/ +size_t malloc_state::internal_bulk_free(void* array[], size_t nelem) +{ + size_t unfreed = 0; + if (1) + { + void** a; + void** fence = &(array[nelem]); + for (a = array; a != fence; ++a) + { + void* mem = *a; + if (mem != 0) + { + mchunkptr p = mem2chunk(mem); + size_t psize = p->chunksize(); +#if SPP_FOOTERS + if (get_mstate_for(p) != m) + { + ++unfreed; + continue; + } +#endif + check_inuse_chunk(p); + *a = 0; + if (rtcheck(ok_address(p) && ok_inuse(p))) + { + void ** b = a + 1; // try to merge with next chunk + mchunkptr next = (mchunkptr)p->next_chunk(); + if (b != fence && *b == chunk2mem(next)) + { + size_t newsize = next->chunksize() + psize; + set_inuse(p, newsize); + *b = chunk2mem(p); + } + else + dispose_chunk(p, psize); + } + else + { + SPP_ABORT; + break; + } + } + } + if (should_trim(_topsize)) + sys_trim(0); + } + return unfreed; +} + +void malloc_state::init(char* tbase, size_t tsize) +{ + _seg._base = _least_addr = tbase; + _seg._size = _footprint = _max_footprint = tsize; + _magic = mparams._magic; + _release_checks = SPP_MAX_RELEASE_CHECK_RATE; + _mflags = mparams._default_mflags; + _extp = 0; + _exts = 0; + disable_contiguous(); + init_bins(); + mchunkptr mn = (mchunkptr)mem2chunk(this)->next_chunk(); + init_top(mn, (size_t)((tbase + tsize) - (char*)mn) - top_foot_size()); + check_top_chunk(_top); +} + +/* Traversal */ +#if SPP_MALLOC_INSPECT_ALL +void malloc_state::internal_inspect_all(void(*handler)(void *start, void *end, + size_t used_bytes, + void* callback_arg), + void* arg) +{ + if (is_initialized()) + { + mchunkptr top = top; + msegmentptr s; + for (s = &seg; s != 0; s = s->next) + { + mchunkptr q = align_as_chunk(s->base); + while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) + { + mchunkptr next = (mchunkptr)q->next_chunk(); + size_t sz = q->chunksize(); + size_t used; + void* start; + if (q->is_inuse()) + { + used = sz - CHUNK_OVERHEAD; // must not be mmapped + start = chunk2mem(q); + } + else + { + used = 0; + if (is_small(sz)) + { + // offset by possible bookkeeping + start = (void*)((char*)q + sizeof(struct malloc_chunk)); + } + else + start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); + } + if (start < (void*)next) // skip if all space is bookkeeping + handler(start, next, used, arg); + if (q == top) + break; + q = next; + } + } + } +} +#endif // SPP_MALLOC_INSPECT_ALL + + + +/* ----------------------------- user mspaces ---------------------------- */ + +static mstate init_user_mstate(char* tbase, size_t tsize) +{ + size_t msize = pad_request(sizeof(malloc_state)); + mchunkptr msp = align_as_chunk(tbase); + mstate m = (mstate)(chunk2mem(msp)); + memset(m, 0, msize); + msp->_head = (msize | INUSE_BITS); + m->init(tbase, tsize); + return m; +} + +SPP_API mspace create_mspace(size_t capacity, int locked) +{ + mstate m = 0; + size_t msize; + mparams.ensure_initialization(); + msize = pad_request(sizeof(malloc_state)); + if (capacity < (size_t) - (msize + top_foot_size() + mparams._page_size)) + { + size_t rs = ((capacity == 0) ? 
mparams._granularity : + (capacity + top_foot_size() + msize)); + size_t tsize = mparams.granularity_align(rs); + char* tbase = (char*)(SPP_CALL_MMAP(tsize)); + if (tbase != cmfail) + { + m = init_user_mstate(tbase, tsize); + m->_seg._sflags = USE_MMAP_BIT; + m->set_lock(locked); + } + } + return (mspace)m; +} + +SPP_API size_t destroy_mspace(mspace msp) +{ + size_t freed = 0; + mstate ms = (mstate)msp; + if (ms->ok_magic()) + { + msegmentptr sp = &ms->_seg; + while (sp != 0) + { + char* base = sp->_base; + size_t size = sp->_size; + flag_t flag = sp->_sflags; + (void)base; // placate people compiling -Wunused-variable + sp = sp->_next; + if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && + SPP_CALL_MUNMAP(base, size) == 0) + freed += size; + } + } + else + SPP_USAGE_ERROR_ACTION(ms, ms); + return freed; +} + +/* ---------------------------- mspace versions of malloc/calloc/free routines -------------------- */ +SPP_API void* mspace_malloc(mspace msp, size_t bytes) +{ + mstate ms = (mstate)msp; + if (!ms->ok_magic()) + { + SPP_USAGE_ERROR_ACTION(ms, ms); + return 0; + } + return ms->_malloc(bytes); +} + +SPP_API void mspace_free(mspace msp, void* mem) +{ + if (mem != 0) + { + mchunkptr p = mem2chunk(mem); +#if SPP_FOOTERS + mstate fm = get_mstate_for(p); + (void)msp; // placate people compiling -Wunused +#else + mstate fm = (mstate)msp; +#endif + if (!fm->ok_magic()) + { + SPP_USAGE_ERROR_ACTION(fm, p); + return; + } + fm->_free(p); + } +} + +SPP_API void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) +{ + void* mem; + size_t req = 0; + mstate ms = (mstate)msp; + if (!ms->ok_magic()) + { + SPP_USAGE_ERROR_ACTION(ms, ms); + return 0; + } + if (n_elements != 0) + { + req = n_elements * elem_size; + if (((n_elements | elem_size) & ~(size_t)0xffff) && + (req / n_elements != elem_size)) + req = spp_max_size_t; // force downstream failure on overflow + } + mem = ms->internal_malloc(req); + if (mem != 0 && mem2chunk(mem)->calloc_must_clear()) + memset(mem, 0, req); + return mem; +} + +SPP_API void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) +{ + void* mem = 0; + if (oldmem == 0) + mem = mspace_malloc(msp, bytes); + else if (bytes >= MAX_REQUEST) + SPP_MALLOC_FAILURE_ACTION; +#ifdef REALLOC_ZERO_BYTES_FREES + else if (bytes == 0) + mspace_free(msp, oldmem); +#endif + else + { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! SPP_FOOTERS + mstate m = (mstate)msp; +#else + mstate m = get_mstate_for(oldp); + if (!m->ok_magic()) + { + SPP_USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif + if (1) + { + mchunkptr newp = m->try_realloc_chunk(oldp, nb, 1); + if (newp != 0) + { + m->check_inuse_chunk(newp); + mem = chunk2mem(newp); + } + else + { + mem = mspace_malloc(m, bytes); + if (mem != 0) + { + size_t oc = oldp->chunksize() - oldp->overhead_for(); + memcpy(mem, oldmem, (oc < bytes) ? 
oc : bytes); + mspace_free(m, oldmem); + } + } + } + } + return mem; +} + +#if 0 + +SPP_API mspace create_mspace_with_base(void* base, size_t capacity, int locked) +{ + mstate m = 0; + size_t msize; + mparams.ensure_initialization(); + msize = pad_request(sizeof(malloc_state)); + if (capacity > msize + top_foot_size() && + capacity < (size_t) - (msize + top_foot_size() + mparams._page_size)) + { + m = init_user_mstate((char*)base, capacity); + m->_seg._sflags = EXTERN_BIT; + m->set_lock(locked); + } + return (mspace)m; +} + +SPP_API int mspace_track_large_chunks(mspace msp, int enable) +{ + int ret = 0; + mstate ms = (mstate)msp; + if (1) + { + if (!ms->use_mmap()) + ret = 1; + if (!enable) + ms->enable_mmap(); + else + ms->disable_mmap(); + } + return ret; +} + +SPP_API void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) +{ + void* mem = 0; + if (oldmem != 0) + { + if (bytes >= MAX_REQUEST) + SPP_MALLOC_FAILURE_ACTION; + else + { + size_t nb = request2size(bytes); + mchunkptr oldp = mem2chunk(oldmem); +#if ! SPP_FOOTERS + mstate m = (mstate)msp; +#else + mstate m = get_mstate_for(oldp); + (void)msp; // placate people compiling -Wunused + if (!m->ok_magic()) + { + SPP_USAGE_ERROR_ACTION(m, oldmem); + return 0; + } +#endif + if (1) + { + mchunkptr newp = m->try_realloc_chunk(oldp, nb, 0); + if (newp == oldp) + { + m->check_inuse_chunk(newp); + mem = oldmem; + } + } + } + } + return mem; +} + +SPP_API void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) +{ + mstate ms = (mstate)msp; + if (!ms->ok_magic()) + { + SPP_USAGE_ERROR_ACTION(ms, ms); + return 0; + } + if (alignment <= SPP_MALLOC_ALIGNMENT) + return mspace_malloc(msp, bytes); + return ms->internal_memalign(alignment, bytes); +} + +SPP_API void** mspace_independent_calloc(mspace msp, size_t n_elements, + size_t elem_size, void* chunks[]) +{ + size_t sz = elem_size; // serves as 1-element array + mstate ms = (mstate)msp; + if (!ms->ok_magic()) + { + SPP_USAGE_ERROR_ACTION(ms, ms); + return 0; + } + return ms->ialloc(n_elements, &sz, 3, chunks); +} + +SPP_API void** mspace_independent_comalloc(mspace msp, size_t n_elements, + size_t sizes[], void* chunks[]) +{ + mstate ms = (mstate)msp; + if (!ms->ok_magic()) + { + SPP_USAGE_ERROR_ACTION(ms, ms); + return 0; + } + return ms->ialloc(n_elements, sizes, 0, chunks); +} + +#endif + +SPP_API size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) +{ + return ((mstate)msp)->internal_bulk_free(array, nelem); +} + +#if SPP_MALLOC_INSPECT_ALL +SPP_API void mspace_inspect_all(mspace msp, + void(*handler)(void *start, + void *end, + size_t used_bytes, + void* callback_arg), + void* arg) +{ + mstate ms = (mstate)msp; + if (ms->ok_magic()) + internal_inspect_all(ms, handler, arg); + else + SPP_USAGE_ERROR_ACTION(ms, ms); +} +#endif + +SPP_API int mspace_trim(mspace msp, size_t pad) +{ + int result = 0; + mstate ms = (mstate)msp; + if (ms->ok_magic()) + result = ms->sys_trim(pad); + else + SPP_USAGE_ERROR_ACTION(ms, ms); + return result; +} + +SPP_API size_t mspace_footprint(mspace msp) +{ + size_t result = 0; + mstate ms = (mstate)msp; + if (ms->ok_magic()) + result = ms->_footprint; + else + SPP_USAGE_ERROR_ACTION(ms, ms); + return result; +} + +SPP_API size_t mspace_max_footprint(mspace msp) +{ + size_t result = 0; + mstate ms = (mstate)msp; + if (ms->ok_magic()) + result = ms->_max_footprint; + else + SPP_USAGE_ERROR_ACTION(ms, ms); + return result; +} + +SPP_API size_t mspace_footprint_limit(mspace msp) +{ + size_t result = 0; + mstate ms = (mstate)msp; + 
if (ms->ok_magic()) + { + size_t maf = ms->_footprint_limit; + result = (maf == 0) ? spp_max_size_t : maf; + } + else + SPP_USAGE_ERROR_ACTION(ms, ms); + return result; +} + +SPP_API size_t mspace_set_footprint_limit(mspace msp, size_t bytes) +{ + size_t result = 0; + mstate ms = (mstate)msp; + if (ms->ok_magic()) + { + if (bytes == 0) + result = mparams.granularity_align(1); // Use minimal size + if (bytes == spp_max_size_t) + result = 0; // disable + else + result = mparams.granularity_align(bytes); + ms->_footprint_limit = result; + } + else + SPP_USAGE_ERROR_ACTION(ms, ms); + return result; +} + +SPP_API size_t mspace_usable_size(const void* mem) +{ + if (mem != 0) + { + mchunkptr p = mem2chunk(mem); + if (p->is_inuse()) + return p->chunksize() - p->overhead_for(); + } + return 0; +} + +SPP_API int mspace_mallopt(int param_number, int value) +{ + return mparams.change(param_number, value); +} + +} // spp_ namespace + + +#endif // SPP_EXCLUDE_IMPLEMENTATION + +#endif // spp_dlalloc__h_ diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_memory.h b/resources/3rdparty/sparsepp/sparsepp/spp_memory.h new file mode 100755 index 000000000..f208e73cb --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp_memory.h @@ -0,0 +1,121 @@ +#if !defined(spp_memory_h_guard) +#define spp_memory_h_guard + +#include +#include +#include + +#if defined(_WIN32) || defined( __CYGWIN__) + #define SPP_WIN +#endif + +#ifdef SPP_WIN + #include + #include + #undef min + #undef max +#else + #include + #include +#endif + +namespace spp +{ + uint64_t GetSystemMemory() + { +#ifdef SPP_WIN + MEMORYSTATUSEX memInfo; + memInfo.dwLength = sizeof(MEMORYSTATUSEX); + GlobalMemoryStatusEx(&memInfo); + return static_cast(memInfo.ullTotalPageFile); +#else + struct sysinfo memInfo; + sysinfo (&memInfo); + auto totalVirtualMem = memInfo.totalram; + + totalVirtualMem += memInfo.totalswap; + totalVirtualMem *= memInfo.mem_unit; + return static_cast(totalVirtualMem); +#endif + } + + uint64_t GetTotalMemoryUsed() + { +#ifdef SPP_WIN + MEMORYSTATUSEX memInfo; + memInfo.dwLength = sizeof(MEMORYSTATUSEX); + GlobalMemoryStatusEx(&memInfo); + return static_cast(memInfo.ullTotalPageFile - memInfo.ullAvailPageFile); +#else + struct sysinfo memInfo; + sysinfo(&memInfo); + auto virtualMemUsed = memInfo.totalram - memInfo.freeram; + + virtualMemUsed += memInfo.totalswap - memInfo.freeswap; + virtualMemUsed *= memInfo.mem_unit; + + return static_cast(virtualMemUsed); +#endif + } + + uint64_t GetProcessMemoryUsed() + { +#ifdef SPP_WIN + PROCESS_MEMORY_COUNTERS_EX pmc; + GetProcessMemoryInfo(GetCurrentProcess(), reinterpret_cast(&pmc), sizeof(pmc)); + return static_cast(pmc.PrivateUsage); +#else + auto parseLine = + [](char* line)->int + { + auto i = strlen(line); + + while(*line < '0' || *line > '9') + { + line++; + } + + line[i-3] = '\0'; + i = atoi(line); + return i; + }; + + auto file = fopen("/proc/self/status", "r"); + auto result = -1; + char line[128]; + + while(fgets(line, 128, file) != nullptr) + { + if(strncmp(line, "VmSize:", 7) == 0) + { + result = parseLine(line); + break; + } + } + + fclose(file); + return static_cast(result) * 1024; +#endif + } + + uint64_t GetPhysicalMemory() + { +#ifdef SPP_WIN + MEMORYSTATUSEX memInfo; + memInfo.dwLength = sizeof(MEMORYSTATUSEX); + GlobalMemoryStatusEx(&memInfo); + return static_cast(memInfo.ullTotalPhys); +#else + struct sysinfo memInfo; + sysinfo(&memInfo); + + auto totalPhysMem = memInfo.totalram; + + totalPhysMem *= memInfo.mem_unit; + return static_cast(totalPhysMem); +#endif + } + 
+} + +#endif // spp_memory_h_guard diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_smartptr.h b/resources/3rdparty/sparsepp/sparsepp/spp_smartptr.h new file mode 100755 index 000000000..28c4588e4 --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp_smartptr.h @@ -0,0 +1,76 @@ +#if !defined(spp_smartptr_h_guard) +#define spp_smartptr_h_guard + + +/* ----------------------------------------------------------------------------------------------- + * quick version of intrusive_ptr + * ----------------------------------------------------------------------------------------------- + */ + +#include +#include + +// ------------------------------------------------------------------------ +class spp_rc +{ +public: + spp_rc() : _cnt(0) {} + spp_rc(const spp_rc &) : _cnt(0) {} + void increment() const { ++_cnt; } + void decrement() const { assert(_cnt); if (--_cnt == 0) delete this; } + unsigned count() const { return _cnt; } + +protected: + virtual ~spp_rc() {} + +private: + mutable unsigned _cnt; +}; + +// ------------------------------------------------------------------------ +template +class spp_sptr +{ +public: + spp_sptr() : _p(0) {} + spp_sptr(T *p) : _p(p) { if (_p) _p->increment(); } + spp_sptr(const spp_sptr &o) : _p(o._p) { if (_p) _p->increment(); } +#ifndef SPP_NO_CXX11_RVALUE_REFERENCES + spp_sptr(spp_sptr &&o) : _p(o._p) { o._p = (T *)0; } + spp_sptr& operator=(spp_sptr &&o) + { + if (_p) _p->decrement(); + _p = o._p; + o._p = (T *)0; + } +#endif + ~spp_sptr() { if (_p) _p->decrement(); } + spp_sptr& operator=(const spp_sptr &o) { reset(o._p); return *this; } + T* get() const { return _p; } + void swap(spp_sptr &o) { T *tmp = _p; _p = o._p; o._p = tmp; } + void reset(const T *p = 0) + { + if (p == _p) + return; + if (_p) _p->decrement(); + _p = (T *)p; + if (_p) _p->increment(); + } + T* operator->() const { return const_cast(_p); } + bool operator!() const { return _p == 0; } + +private: + T *_p; +}; + +// ------------------------------------------------------------------------ +namespace std +{ + template + inline void swap(spp_sptr &a, spp_sptr &b) + { + a.swap(b); + } +} + +#endif // spp_smartptr_h_guard diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_stdint.h b/resources/3rdparty/sparsepp/sparsepp/spp_stdint.h new file mode 100755 index 000000000..500d3d35b --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp_stdint.h @@ -0,0 +1,16 @@ +#if !defined(spp_stdint_h_guard) +#define spp_stdint_h_guard + +#include + +#if defined(SPP_HAS_CSTDINT) && (__cplusplus >= 201103) + #include +#else + #if defined(__FreeBSD__) || defined(__IBMCPP__) || defined(_AIX) + #include + #else + #include + #endif +#endif + +#endif // spp_stdint_h_guard diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_timer.h b/resources/3rdparty/sparsepp/sparsepp/spp_timer.h new file mode 100755 index 000000000..48180f4d0 --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp_timer.h @@ -0,0 +1,58 @@ +/** + Copyright (c) 2016 Mariano Gonzalez + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial 
portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. +*/ + +#ifndef spp_timer_h_guard +#define spp_timer_h_guard + +#include + +namespace spp +{ + template + class Timer + { + public: + Timer() { reset(); } + void reset() { _start = _snap = clock::now(); } + void snap() { _snap = clock::now(); } + + float get_total() const { return get_diff(_start, clock::now()); } + float get_delta() const { return get_diff(_snap, clock::now()); } + + private: + using clock = std::chrono::high_resolution_clock; + using point = std::chrono::time_point; + + template + static T get_diff(const point& start, const point& end) + { + using duration_t = std::chrono::duration; + + return std::chrono::duration_cast(end - start).count(); + } + + point _start; + point _snap; + }; +} + +#endif // spp_timer_h_guard diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_traits.h b/resources/3rdparty/sparsepp/sparsepp/spp_traits.h new file mode 100755 index 000000000..bd105093f --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp_traits.h @@ -0,0 +1,122 @@ +#if !defined(spp_traits_h_guard) +#define spp_traits_h_guard + +#include + +template class HashObject; // for Google's benchmark, not in spp namespace! + +namespace spp_ +{ + +// --------------------------------------------------------------------------- +// type_traits we need +// --------------------------------------------------------------------------- +template +struct integral_constant { static const T value = v; }; + +template const T integral_constant::value; + +typedef integral_constant true_type; +typedef integral_constant false_type; + +typedef integral_constant zero_type; +typedef integral_constant one_type; +typedef integral_constant two_type; +typedef integral_constant three_type; + +template struct is_same : public false_type { }; +template struct is_same : public true_type { }; + +template struct remove_const { typedef T type; }; +template struct remove_const { typedef T type; }; + +template struct remove_volatile { typedef T type; }; +template struct remove_volatile { typedef T type; }; + +template struct remove_cv +{ + typedef typename remove_const::type>::type type; +}; + +// ---------------- is_integral ---------------------------------------- +template struct is_integral; +template struct is_integral : false_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +template<> struct is_integral : true_type { }; +#ifdef SPP_HAS_LONG_LONG + template<> struct is_integral : true_type { }; + template<> struct is_integral : true_type { }; +#endif +template struct is_integral : is_integral { }; +template struct is_integral : is_integral { }; +template struct is_integral : is_integral { }; + +// ---------------- is_floating_point 
---------------------------------------- +template struct is_floating_point; +template struct is_floating_point : false_type { }; +template<> struct is_floating_point : true_type { }; +template<> struct is_floating_point : true_type { }; +template<> struct is_floating_point : true_type { }; +template struct is_floating_point : is_floating_point { }; +template struct is_floating_point : is_floating_point { }; +template struct is_floating_point : is_floating_point { }; + +// ---------------- is_pointer ---------------------------------------- +template struct is_pointer; +template struct is_pointer : false_type { }; +template struct is_pointer : true_type { }; +template struct is_pointer : is_pointer { }; +template struct is_pointer : is_pointer { }; +template struct is_pointer : is_pointer { }; + +// ---------------- is_reference ---------------------------------------- +template struct is_reference; +template struct is_reference : false_type {}; +template struct is_reference : true_type {}; + +// ---------------- is_relocatable ---------------------------------------- +// relocatable values can be moved around in memory using memcpy and remain +// correct. Most types are relocatable, an example of a type who is not would +// be a struct which contains a pointer to a buffer inside itself - this is the +// case for std::string in gcc 5. +// ------------------------------------------------------------------------ +template struct is_relocatable; +template struct is_relocatable : + integral_constant::value || is_floating_point::value)> +{ }; + +template struct is_relocatable > : true_type { }; + +template struct is_relocatable : is_relocatable { }; +template struct is_relocatable : is_relocatable { }; +template struct is_relocatable : is_relocatable { }; +template struct is_relocatable : is_relocatable { }; +template struct is_relocatable > : + integral_constant::value && is_relocatable::value)> +{ }; + +// A template helper used to select A or B based on a condition. +// ------------------------------------------------------------ +template +struct if_ +{ + typedef A type; +}; + +template +struct if_ +{ + typedef B type; +}; + +} // spp_ namespace + +#endif // spp_traits_h_guard diff --git a/resources/3rdparty/sparsepp/sparsepp/spp_utils.h b/resources/3rdparty/sparsepp/sparsepp/spp_utils.h new file mode 100755 index 000000000..743ab7bca --- /dev/null +++ b/resources/3rdparty/sparsepp/sparsepp/spp_utils.h @@ -0,0 +1,447 @@ +// ---------------------------------------------------------------------- +// Copyright (c) 2016, Steven Gregory Popovitch - greg7mdp@gmail.com +// All rights reserved. +// +// Code derived derived from Boost libraries. +// Boost software licence reproduced below. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * The name of Steven Gregory Popovitch may not be used to +// endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// ---------------------------------------------------------------------- + +// --------------------------------------------------------------------------- +// Boost Software License - Version 1.0 - August 17th, 2003 +// +// Permission is hereby granted, free of charge, to any person or organization +// obtaining a copy of the software and accompanying documentation covered by +// this license (the "Software") to use, reproduce, display, distribute, +// execute, and transmit the Software, and to prepare derivative works of the +// Software, and to permit third-parties to whom the Software is furnished to +// do so, all subject to the following: +// +// The copyright notices in the Software and this entire statement, including +// the above license grant, this restriction and the following disclaimer, +// must be included in all copies of the Software, in whole or in part, and +// all derivative works of the Software, unless such copies or derivative +// works are solely in the form of machine-executable object code generated by +// a source language processor. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. +// --------------------------------------------------------------------------- + +// ---------------------------------------------------------------------- +// H A S H F U N C T I O N S +// ---------------------------- +// +// Implements spp::spp_hash() and spp::hash_combine() +// ---------------------------------------------------------------------- + +#if !defined(spp_utils_h_guard_) +#define spp_utils_h_guard_ + +#if defined(_MSC_VER) + #if (_MSC_VER >= 1600 ) // vs2010 (1900 is vs2015) + #include + #define SPP_HASH_CLASS std::hash + #else + #include + #define SPP_HASH_CLASS stdext::hash_compare + #endif + #if (_MSC_FULL_VER < 190021730) + #define SPP_NO_CXX11_NOEXCEPT + #endif +#elif defined __clang__ + #if __has_feature(cxx_noexcept) // what to use here? 
+ #include + #define SPP_HASH_CLASS std::hash + #else + #include + #define SPP_HASH_CLASS std::tr1::hash + #endif + + #if !__has_feature(cxx_noexcept) + #define SPP_NO_CXX11_NOEXCEPT + #endif +#elif defined(__GNUC__) + #if defined(__GXX_EXPERIMENTAL_CXX0X__) || (__cplusplus >= 201103L) + #include + #define SPP_HASH_CLASS std::hash + + #if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) < 40600 + #define SPP_NO_CXX11_NOEXCEPT + #endif + #else + #include + #define SPP_HASH_CLASS std::tr1::hash + #define SPP_NO_CXX11_NOEXCEPT + #endif +#else + #include + #define SPP_HASH_CLASS std::hash +#endif + +#ifdef SPP_NO_CXX11_NOEXCEPT + #define SPP_NOEXCEPT +#else + #define SPP_NOEXCEPT noexcept +#endif + +#ifdef SPP_NO_CXX11_CONSTEXPR + #define SPP_CONSTEXPR +#else + #define SPP_CONSTEXPR constexpr +#endif + +#define SPP_INLINE + +#ifndef spp_ + #define spp_ spp +#endif + +namespace spp_ +{ + +template T spp_min(T a, T b) { return a < b ? a : b; } +template T spp_max(T a, T b) { return a >= b ? a : b; } + +template +struct spp_hash +{ + SPP_INLINE size_t operator()(const T &__v) const SPP_NOEXCEPT + { + SPP_HASH_CLASS hasher; + return hasher(__v); + } +}; + +template +struct spp_hash +{ + static size_t spp_log2 (size_t val) SPP_NOEXCEPT + { + size_t res = 0; + while (val > 1) + { + val >>= 1; + res++; + } + return res; + } + + SPP_INLINE size_t operator()(const T *__v) const SPP_NOEXCEPT + { + static const size_t shift = 3; // spp_log2(1 + sizeof(T)); // T might be incomplete! + const uintptr_t i = (const uintptr_t)__v; + return static_cast(i >> shift); + } +}; + +// from http://burtleburtle.net/bob/hash/integer.html +// fast and efficient for power of two table sizes where we always +// consider the last bits. +// --------------------------------------------------------------- +inline size_t spp_mix_32(uint32_t a) +{ + a = a ^ (a >> 4); + a = (a ^ 0xdeadbeef) + (a << 5); + a = a ^ (a >> 11); + return static_cast(a); +} + +// Maybe we should do a more thorough scrambling as described in +// https://gist.github.com/badboy/6267743 +// ------------------------------------------------------------- +inline size_t spp_mix_64(uint64_t a) +{ + a = a ^ (a >> 4); + a = (a ^ 0xdeadbeef) + (a << 5); + a = a ^ (a >> 11); + return (size_t)a; +} + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(bool __v) const SPP_NOEXCEPT + { return static_cast(__v); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(char __v) const SPP_NOEXCEPT + { return static_cast(__v); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(signed char __v) const SPP_NOEXCEPT + { return static_cast(__v); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(unsigned char __v) const SPP_NOEXCEPT + { return static_cast(__v); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(wchar_t __v) const SPP_NOEXCEPT + { return static_cast(__v); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(int16_t __v) const SPP_NOEXCEPT + { return spp_mix_32(static_cast(__v)); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(uint16_t __v) const SPP_NOEXCEPT + { return spp_mix_32(static_cast(__v)); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(int32_t __v) const SPP_NOEXCEPT + { return 
spp_mix_32(static_cast(__v)); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(uint32_t __v) const SPP_NOEXCEPT + { return spp_mix_32(static_cast(__v)); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(int64_t __v) const SPP_NOEXCEPT + { return spp_mix_64(static_cast(__v)); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(uint64_t __v) const SPP_NOEXCEPT + { return spp_mix_64(static_cast(__v)); } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(float __v) const SPP_NOEXCEPT + { + // -0.0 and 0.0 should return same hash + uint32_t *as_int = reinterpret_cast(&__v); + return (__v == 0) ? static_cast(0) : spp_mix_32(*as_int); + } +}; + +template <> +struct spp_hash : public std::unary_function +{ + SPP_INLINE size_t operator()(double __v) const SPP_NOEXCEPT + { + // -0.0 and 0.0 should return same hash + uint64_t *as_int = reinterpret_cast(&__v); + return (__v == 0) ? static_cast(0) : spp_mix_64(*as_int); + } +}; + +template struct Combiner +{ + inline void operator()(T& seed, T value); +}; + +template struct Combiner +{ + inline void operator()(T& seed, T value) + { + seed ^= value + 0x9e3779b9 + (seed << 6) + (seed >> 2); + } +}; + +template struct Combiner +{ + inline void operator()(T& seed, T value) + { + seed ^= value + T(0xc6a4a7935bd1e995) + (seed << 6) + (seed >> 2); + } +}; + +template +inline void hash_combine(std::size_t& seed, T const& v) +{ + spp_::spp_hash hasher; + Combiner combiner; + + combiner(seed, hasher(v)); +} + +static inline uint32_t s_spp_popcount_default(uint32_t i) SPP_NOEXCEPT +{ + i = i - ((i >> 1) & 0x55555555); + i = (i & 0x33333333) + ((i >> 2) & 0x33333333); + return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; +} + +static inline uint32_t s_spp_popcount_default(uint64_t x) SPP_NOEXCEPT +{ + const uint64_t m1 = uint64_t(0x5555555555555555); // binary: 0101... + const uint64_t m2 = uint64_t(0x3333333333333333); // binary: 00110011.. + const uint64_t m4 = uint64_t(0x0f0f0f0f0f0f0f0f); // binary: 4 zeros, 4 ones ... + const uint64_t h01 = uint64_t(0x0101010101010101); // the sum of 256 to the power of 0,1,2,3... + + x -= (x >> 1) & m1; // put count of each 2 bits into those 2 bits + x = (x & m2) + ((x >> 2) & m2); // put count of each 4 bits into those 4 bits + x = (x + (x >> 4)) & m4; // put count of each 8 bits into those 8 bits + return (x * h01)>>56; // returns left 8 bits of x + (x<<8) + (x<<16) + (x<<24)+... +} + +#ifdef __APPLE__ + static inline uint32_t count_trailing_zeroes(size_t v) SPP_NOEXCEPT + { + size_t x = (v & -v) - 1; + // sadly sizeof() required to build on macos + return sizeof(size_t) == 8 ? s_spp_popcount_default((uint64_t)x) : s_spp_popcount_default((uint32_t)x); + } + + static inline uint32_t s_popcount(size_t v) SPP_NOEXCEPT + { + // sadly sizeof() required to build on macos + return sizeof(size_t) == 8 ? 
s_spp_popcount_default((uint64_t)v) : s_spp_popcount_default((uint32_t)v); + } +#else + static inline uint32_t count_trailing_zeroes(size_t v) SPP_NOEXCEPT + { + return s_spp_popcount_default((v & -(intptr_t)v) - 1); + } + + static inline uint32_t s_popcount(size_t v) SPP_NOEXCEPT + { + return s_spp_popcount_default(v); + } +#endif + +// ----------------------------------------------------------- +// ----------------------------------------------------------- +template +class libc_allocator +{ +public: + typedef T value_type; + typedef T* pointer; + typedef ptrdiff_t difference_type; + typedef const T* const_pointer; + typedef size_t size_type; + + libc_allocator() {} + libc_allocator(const libc_allocator &) {} + libc_allocator& operator=(const libc_allocator &) { return *this; } + +#ifndef SPP_NO_CXX11_RVALUE_REFERENCES + libc_allocator(libc_allocator &&) {} + libc_allocator& operator=(libc_allocator &&) { return *this; } +#endif + + pointer allocate(size_t n, const_pointer /* unused */= 0) + { + return static_cast(malloc(n * sizeof(T))); + } + + void deallocate(pointer p, size_t /* unused */) + { + free(p); + } + + pointer reallocate(pointer p, size_t new_size) + { + return static_cast(realloc(p, new_size * sizeof(T))); + } + + // extra API to match spp_allocator interface + pointer reallocate(pointer p, size_t /* old_size */, size_t new_size) + { + return static_cast(realloc(p, new_size * sizeof(T))); + } + + size_type max_size() const + { + return static_cast(-1) / sizeof(value_type); + } + + void construct(pointer p, const value_type& val) + { + new(p) value_type(val); + } + + void destroy(pointer p) { p->~value_type(); } + + template + struct rebind + { + typedef spp_::libc_allocator other; + }; + +}; + +// forward declaration +// ------------------- +template +class spp_allocator; + +} + +template +inline bool operator==(const spp_::libc_allocator &, const spp_::libc_allocator &) +{ + return true; +} + +template +inline bool operator!=(const spp_::libc_allocator &, const spp_::libc_allocator &) +{ + return false; +} + +#endif // spp_utils_h_guard_ + diff --git a/resources/3rdparty/sparsepp/spp.natvis b/resources/3rdparty/sparsepp/spp.natvis new file mode 100755 index 000000000..1ca15df6f --- /dev/null +++ b/resources/3rdparty/sparsepp/spp.natvis @@ -0,0 +1,41 @@ + + + + + + + {{size = {rep.table._num_buckets}}} + + + + + + + + rep.table._num_buckets + + + item_ptr = grp->_group + cnt = grp->_num_buckets + + + item_ptr,na + item_ptr++ + cnt-- + + ++grp + + + + + + + end() + end() + {*col_current} + + *col_current + + + + diff --git a/resources/3rdparty/sparsepp/tests/makefile b/resources/3rdparty/sparsepp/tests/makefile new file mode 100755 index 000000000..df4eb6f6c --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/makefile @@ -0,0 +1,27 @@ +CXXFLAGS = -O2 -std=c++11 -I.. 
+CXXFLAGS += -Wall -pedantic -Wextra -D_XOPEN_SOURCE=700 +SPP_DEPS_1 = spp.h spp_utils.h spp_dlalloc.h spp_traits.h spp_config.h +SPP_DEPS = $(addprefix ../sparsepp/,$(SPP_DEPS_1)) +TARGETS = spp_test spp_alloc_test spp_bitset_test perftest1 bench + + +ifeq ($(OS),Windows_NT) + LDFLAGS = -lpsapi +endif + +def: spp_test + +all: $(TARGETS) + +clean: + rm -rf $(TARGETS) vsprojects/x64/* vsprojects/x86/* + +test: + ./spp_test + +spp_test: spp_test.cc $(SPP_DEPS) makefile + $(CXX) $(CXXFLAGS) -D_CRT_SECURE_NO_WARNINGS spp_test.cc -o spp_test + +%: %.cc $(SPP_DEPS) makefile + $(CXX) $(CXXFLAGS) -DNDEBUG $< -o $@ $(LDFLAGS) + diff --git a/resources/3rdparty/sparsepp/tests/perftest1.cc b/resources/3rdparty/sparsepp/tests/perftest1.cc new file mode 100755 index 000000000..ae8609e79 --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/perftest1.cc @@ -0,0 +1,162 @@ +// compile on linux with: g++ -std=c++11 -O2 perftest1.cc -o perftest1 +// ----------------------------------------------------------------------- +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define SPP 1 +#define DENSE 0 +#define SPARSE 0 +#define STD 0 + +#if SPP + #include +#elif DENSE + #include +#elif SPARSE + #include +#elif STD + #include +#endif + +using std::make_pair; + +template +void test(T &s, int count) +{ + spp::Timer timer; + + timer.snap(); + srand(0); + for (int i = 0; i < count; ++i) + s.insert(make_pair(rand(), i)); + + printf("%d random inserts in %5.2f seconds\n", count, timer.get_delta() / 1000); + + timer.snap(); + srand(0); + for (int i = 0; i < count; ++i) + s.find(rand()); + + printf("%d random finds in %5.2f seconds\n", count, timer.get_delta() / 1000); + + timer.snap(); + srand(1); + for (int i = 0; i < count; ++i) + s.find(rand()); + printf("%d random not-finds in %5.2f seconds\n", count, timer.get_delta() / 1000); + + s.clear(); + timer.snap(); + srand(0); + for (int i = 0; i < count; ++i) + s.insert(make_pair(i, i)); + printf("%d sequential inserts in %5.2f seconds\n", count, timer.get_delta() / 1000); + + timer.snap(); + srand(0); + for (int i = 0; i < count; ++i) + s.find(i); + + printf("%d sequential finds in %5.2f seconds\n", count, timer.get_delta() / 1000); + + timer.snap(); + srand(1); + for (int i = 0; i < count; ++i) + { + int x = rand(); + s.find(x); + } + printf("%d random not-finds in %5.2f seconds\n", count, timer.get_delta() / 1000); + + s.clear(); + timer.snap(); + srand(0); + for (int i = 0; i < count; ++i) + s.insert(make_pair(-i, -i)); + + printf("%d neg sequential inserts in %5.2f seconds\n", count, timer.get_delta() / 1000); + + timer.snap(); + srand(0); + for (int i = 0; i < count; ++i) + s.find(-i); + + printf("%d neg sequential finds in %5.2f seconds\n", count, timer.get_delta() / 1000); + + timer.snap(); + srand(1); + for (int i = 0; i < count; ++i) + s.find(rand()); + printf("%d random not-finds in %5.2f seconds\n", count, timer.get_delta() / 1000); + + s.clear(); +} + + +struct Hasher64 { + size_t operator()(uint64_t k) const { return (k ^ 14695981039346656037ULL) * 1099511628211ULL; } +}; + +struct Hasher32 { + size_t operator()(uint32_t k) const { return (k ^ 2166136261U) * 16777619UL; } +}; + +struct Hasheri32 { + size_t operator()(int k) const + { + return (k ^ 2166136261U) * 16777619UL; + } +}; + +struct Hasher_32 { + size_t operator()(int k) const + { + uint32_t a = (uint32_t)k; +#if 0 + a = (a ^ 61) ^ (a >> 16); + a = a + (a << 3); + a = a ^ (a >> 4); + a = a * 0x27d4eb2d; + a = a ^ (a >> 15); + return a; +#else + a = a ^ 
(a >> 4); + a = (a ^ 0xdeadbeef) + (a << 5); + a = a ^ (a >> 11); + return a; +#endif + } +}; + +int main() +{ +#if SPP + spp::sparse_hash_map s; + printf ("Testing spp::sparse_hash_map\n"); +#elif DENSE + google::dense_hash_map s; + s.set_empty_key(-INT_MAX); + s.set_deleted_key(-(INT_MAX - 1)); + printf ("Testing google::dense_hash_map\n"); +#elif SPARSE + google::sparse_hash_map s; + s.set_deleted_key(-INT_MAX); + printf ("Testing google::sparse_hash_map\n"); +#elif STD + std::unordered_map s; + printf ("Testing std::unordered_map\n"); +#endif + printf ("------------------------------\n"); + test(s, 50000000); + + + return 0; +} diff --git a/resources/3rdparty/sparsepp/tests/spp_alloc_test.cc b/resources/3rdparty/sparsepp/tests/spp_alloc_test.cc new file mode 100755 index 000000000..06b23ac80 --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/spp_alloc_test.cc @@ -0,0 +1,189 @@ +#include +#include +#include +#include +#include +#include + +// enable debugging code in spp_bitset.h +#define SPP_TEST 1 + +#include +#include +#include + +using namespace std; + +static float _to_mb(uint64_t m) { return (float)((double)m / (1024 * 1024)); } + +// ----------------------------------------------------------- +// ----------------------------------------------------------- +template +class TestAlloc +{ +public: + TestAlloc(size_t num_alloc = 8000000) : + _num_alloc(num_alloc) + { + _allocated.resize(_num_alloc, nullptr); + _sizes.resize(_num_alloc, 0); + _start_mem_usage = spp::GetProcessMemoryUsed(); + } + + void run() + { + srand(43); // always same sequence of random numbers + + for (size_t i=0; i<_num_alloc; ++i) + _sizes[i] = std::max(2, (rand() % 5) * 2); + + spp::Timer timer; + + // allocate small buffers + // ---------------------- + for (size_t i=0; i<_num_alloc; ++i) + { + _allocated[i] = _allocator.allocate(_sizes[i]); + _set_buf(_allocated[i], _sizes[i]); + } + +#if 1 + // and grow the buffers to a max size of 24 each + // --------------------------------------------- + for (uint32_t j=4; j<26; j += 2) + { + for (size_t i=0; i<_num_alloc; ++i) + { + // if ( _sizes[i] < j) // windows allocator friendly! + if ((rand() % 4) != 3 && _sizes[i] < j) // really messes up windows allocator + { + _allocated[i] = _allocator.reallocate(_allocated[i], j); + _check_buf(_allocated[i], _sizes[i]); + _set_buf(_allocated[i], j); + _sizes[i] = j; + } + } + } +#endif + +#if 0 + // test erase (shrinking the buffers) + // --------------------------------------------- + for (uint32_t j=28; j>4; j -= 2) + { + for (size_t i=0; i<_num_alloc; ++i) + { + // if ( _sizes[i] < j) // windows allocator friendly! + if ((rand() % 4) != 3 && _sizes[i] > j) // really messes up windows allocator + { + _allocated[i] = _allocator.reallocate(_allocated[i], j); + _check_buf1(_allocated[i], _sizes[i]); + _set_buf(_allocated[i], j); + _sizes[i] = j; + } + } + } +#endif + +#if 0 + // and grow the buffers back to a max size of 24 each + // -------------------------------------------------- + for (uint32_t j=4; j<26; j += 2) + { + for (size_t i=0; i<_num_alloc; ++i) + { + // if ( _sizes[i] < j) // windows allocator friendly! 
+ if ((rand() % 4) != 3 && _sizes[i] < j) // really messes up windows allocator + { + _allocated[i] = _allocator.reallocate(_allocated[i], j); + _check_buf(_allocated[i], _sizes[i]); + _set_buf(_allocated[i], j); + _sizes[i] = j; + } + } + } +#endif + + size_t total_units = 0; + for (size_t i=0; i<_num_alloc; ++i) + total_units += _sizes[i]; + + uint64_t mem_usage = spp::GetProcessMemoryUsed(); + uint64_t alloc_mem_usage = mem_usage - _start_mem_usage; + uint64_t expected_mem_usage = total_units * sizeof(T); + + // finally free the memory + // ----------------------- + for (size_t i=0; i<_num_alloc; ++i) + { + _check_buf(_allocated[i], _sizes[i]); + _allocator.deallocate(_allocated[i], _sizes[i]); + } + + uint64_t mem_usage_end = spp::GetProcessMemoryUsed(); + + printf("allocated %zd entities of size %zd\n", total_units, sizeof(T)); + printf("done in %3.2f seconds, mem_usage %4.1f/%4.1f/%4.1f MB\n", + timer.get_total() / 1000, _to_mb(_start_mem_usage), _to_mb(mem_usage), _to_mb(mem_usage_end)); + printf("expected mem usage: %4.1f\n", _to_mb(expected_mem_usage)); + if (expected_mem_usage <= alloc_mem_usage) + printf("overhead: %4.1f%%\n", + (float)((double)(alloc_mem_usage - expected_mem_usage) / expected_mem_usage) * 100); + else + printf("bug: alloc_mem_usage <= expected_mem_usage\n"); + + std::vector().swap(_allocated); + std::vector().swap(_sizes); + + printf("\nmem usage after freeing vectors: %4.1f\n", _to_mb(spp::GetProcessMemoryUsed())); + } + +private: + + void _set_buf(T *buff, uint32_t sz) { *buff = (T)sz; buff[sz - 1] = (T)sz; } + void _check_buf1(T *buff, uint32_t sz) + { + assert(*buff == (T)sz); + (void)(buff + sz); // silence warning + } + void _check_buf(T *buff, uint32_t sz) + { + assert(*buff == (T)sz && buff[sz - 1] == (T)sz); + (void)(buff + sz); // silence warning + } + + size_t _num_alloc; + uint64_t _start_mem_usage; + std::vector _allocated; + std::vector _sizes; + A _allocator; +}; + +// ----------------------------------------------------------- +// ----------------------------------------------------------- +template +void run_test(const char *alloc_name) +{ + printf("\n---------------- testing %s\n\n", alloc_name); + + printf("\nmem usage before the alloc test: %4.1f\n", + _to_mb(spp::GetProcessMemoryUsed())); + { + TestAlloc< X, A > test_alloc; + test_alloc.run(); + } + printf("mem usage after the alloc test: %4.1f\n", + _to_mb(spp::GetProcessMemoryUsed())); + + printf("\n\n"); +} + +// ----------------------------------------------------------- +// ----------------------------------------------------------- +int main() +{ + typedef uint64_t X; + + run_test>("libc_allocator"); + run_test>("spp_allocator"); +} diff --git a/resources/3rdparty/sparsepp/tests/spp_bitset_test.cc b/resources/3rdparty/sparsepp/tests/spp_bitset_test.cc new file mode 100755 index 000000000..3c775f3f3 --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/spp_bitset_test.cc @@ -0,0 +1,284 @@ +#include +#include +#include +#include +#include +#include + +// enable debugging code in spp_bitset.h +#define SPP_TEST 1 + +#include +#include +#include + +using namespace std; + +// ----------------------------------------------------------- +// ----------------------------------------------------------- +template +class TestBitset +{ +public: + typedef spp::spp_bitset BS; + + TestBitset() + {} + + void test_set(size_t num_iter) + { + size_t num_errors = 0; + BS bs, bs2; + + printf("testing set on spp_bitset<%zu> , num_iter=%6zu -> ", N, num_iter); + + for (size_t i=0; i, num_iter=%6zu -> ", N, 
num_iter); + + for (size_t i=0; i, num_iter=%6zu -> ", N, num_iter); + + for (size_t i=0; i<4 * N; ++i) + { + bs.set(rand() % N); + if (i > 2 * N) + { + for (size_t j=0; j, num_iter=%6zu -> ", N, num_iter); + + for (size_t i=0; i 1000) + { + bs.set(1000); + size_t longest = bs.longest_zero_sequence(); + assert(longest == 1000-11 || longest == N-1001); + if (!(longest == 1000-11 || longest == N-1001)) + ++num_errors; + } + + spp::Timer timer_lz; + spp::Timer timer_lz_slow; + float lz_time(0), lz_time_slow(0); + + printf("testing longest_zero_sequence() , num_iter=%6zu -> ", num_iter); + srand(1); + for (size_t i=0; i 1000) + { + bs.set(1000); + size_t longest = bs.longest_zero_sequence(); + assert(longest == 1000-11 || longest == N-1001); + if (!(longest == 1000-11 || longest == N-1001)) + ++num_errors; + } + + spp::Timer timer_lz; + spp::Timer timer_lz_slow; + float lz_time(0), lz_time_slow(0); + + printf("testing longest_zero_sequence2() , num_iter=%6zu -> ", num_iter); + srand(1); + for (size_t i=0; i timer_ctz; + spp::Timer timer_ctz_slow; + float ctz_time(0), ctz_time_slow(0); + + printf("testing count_trailing_zeroes() , num_iter=%6zu -> ", num_iter); + for (size_t i=0; i test_bitset_1024; + test_bitset_1024.run(); + + TestBitset<4096> test_bitset_4096; + test_bitset_4096.run(); + + //TestBitset<8192> test_bitset_8192; + //test_bitset_8192.run(); +} diff --git a/resources/3rdparty/sparsepp/tests/spp_test.cc b/resources/3rdparty/sparsepp/tests/spp_test.cc new file mode 100755 index 000000000..279dd0163 --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/spp_test.cc @@ -0,0 +1,2988 @@ +// ---------------------------------------------------------------------- +// Copyright (c) 2016, Gregory Popovitch - greg7mdp@gmail.com +// All rights reserved. +// +// This work is derived from Google's sparsehash library +// +// Copyright (c) 2010, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// ---------------------------------------------------------------------- + +#ifdef _MSC_VER + #pragma warning( disable : 4820 ) // '6' bytes padding added after data member... + #pragma warning( disable : 4710 ) // function not inlined + #pragma warning( disable : 4514 ) // unreferenced inline function has been removed + #pragma warning( disable : 4996 ) // 'fopen': This function or variable may be unsafe +#endif + +#include + +#ifdef _MSC_VER + #pragma warning( disable : 4127 ) // conditional expression is constant + #pragma warning(push, 0) +#endif + + +#include +#include // for size_t +#include +#include +#include +#include +#include +#include +#include // for class typeinfo (returned by typeid) +#include +#include // for length_error + +namespace sparsehash_internal = SPP_NAMESPACE::sparsehash_internal; +using SPP_NAMESPACE::sparsetable; +using SPP_NAMESPACE::sparse_hashtable; +using SPP_NAMESPACE::sparse_hash_map; +using SPP_NAMESPACE::sparse_hash_set; + + + +// --------------------------------------------------------------------- +// --------------------------------------------------------------------- +#ifndef _MSC_VER // windows defines its own version + #define _strdup strdup + #ifdef __MINGW32__ // mingw has trouble writing to /tmp + static std::string TmpFile(const char* basename) + { + return std::string("./#") + basename; + } + #endif +#else + #pragma warning(disable : 4996) + #define snprintf sprintf_s + #define WIN32_LEAN_AND_MEAN /* We always want minimal includes */ + #include + std::string TmpFile(const char* basename) + { + char tmppath_buffer[1024]; + int tmppath_len = GetTempPathA(sizeof(tmppath_buffer), tmppath_buffer); + if (tmppath_len <= 0 || tmppath_len >= sizeof(tmppath_buffer)) + return basename; // an error, so just bail on tmppath + + sprintf_s(tmppath_buffer + tmppath_len, 1024 - tmppath_len, "\\%s", basename); + return tmppath_buffer; + } +#endif + +#ifdef _MSC_VER + #pragma warning(pop) +#endif + + +// --------------------------------------------------------------------- +// This is the "default" interface, which just passes everything +// through to the underlying hashtable. You'll need to subclass it to +// specialize behavior for an individual hashtable. 
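// (Editor's note, illustrative only: the wrappers below all follow the
// same pattern. A hypothetical subclass -- MyMapInterface / MyMap are
// made-up names -- forwards everything to the base and only fills in
// the pure-virtual hooks, roughly:
//
//     class MyMapInterface : public BaseHashtableInterface<MyMap>
//     {
//     public:
//         key_type get_key(const value_type& v) const { return v.first; }
//         // ... supports_*() queries ...
//     protected:
//         key_type it_to_key(const iterator& it) const { return it->first; }
//         // ... remaining it_to_key overloads ...
//     };
//
// The concrete adapters actually used by this test are
// HashtableInterface_SparseHashMap, HashtableInterface_SparseHashSet and
// HashtableInterface_SparseHashtable, defined further down.)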
+// --------------------------------------------------------------------- +template +class BaseHashtableInterface +{ +public: + virtual ~BaseHashtableInterface() {} + + typedef typename HT::key_type key_type; + typedef typename HT::value_type value_type; + typedef typename HT::hasher hasher; + typedef typename HT::key_equal key_equal; + typedef typename HT::allocator_type allocator_type; + + typedef typename HT::size_type size_type; + typedef typename HT::difference_type difference_type; + typedef typename HT::pointer pointer; + typedef typename HT::const_pointer const_pointer; + typedef typename HT::reference reference; + typedef typename HT::const_reference const_reference; + + class const_iterator; + + class iterator : public HT::iterator + { + public: + iterator() : parent_(NULL) { } // this allows code like "iterator it;" + iterator(typename HT::iterator it, const BaseHashtableInterface* parent) + : HT::iterator(it), parent_(parent) { } + key_type key() { return parent_->it_to_key(*this); } + + private: + friend class BaseHashtableInterface::const_iterator; // for its ctor + const BaseHashtableInterface* parent_; + }; + + class const_iterator : public HT::const_iterator + { + public: + const_iterator() : parent_(NULL) { } + const_iterator(typename HT::const_iterator it, + const BaseHashtableInterface* parent) + : HT::const_iterator(it), parent_(parent) { } + + const_iterator(typename HT::iterator it, + BaseHashtableInterface* parent) + : HT::const_iterator(it), parent_(parent) { } + + // The parameter type here *should* just be "iterator", but MSVC + // gets confused by that, so I'm overly specific. + const_iterator(typename BaseHashtableInterface::iterator it) + : HT::const_iterator(it), parent_(it.parent_) { } + + key_type key() { return parent_->it_to_key(*this); } + + private: + const BaseHashtableInterface* parent_; + }; + + class const_local_iterator; + + class local_iterator : public HT::local_iterator + { + public: + local_iterator() : parent_(NULL) { } + local_iterator(typename HT::local_iterator it, + const BaseHashtableInterface* parent) + : HT::local_iterator(it), parent_(parent) { } + key_type key() { return parent_->it_to_key(*this); } + + private: + friend class BaseHashtableInterface::const_local_iterator; // for its ctor + const BaseHashtableInterface* parent_; + }; + + class const_local_iterator : public HT::const_local_iterator + { + public: + const_local_iterator() : parent_(NULL) { } + const_local_iterator(typename HT::const_local_iterator it, + const BaseHashtableInterface* parent) + : HT::const_local_iterator(it), parent_(parent) { } + const_local_iterator(typename HT::local_iterator it, + BaseHashtableInterface* parent) + : HT::const_local_iterator(it), parent_(parent) { } + const_local_iterator(local_iterator it) + : HT::const_local_iterator(it), parent_(it.parent_) { } + key_type key() { return parent_->it_to_key(*this); } + + private: + const BaseHashtableInterface* parent_; + }; + + iterator begin() { return iterator(ht_.begin(), this); } + iterator end() { return iterator(ht_.end(), this); } + const_iterator begin() const { return const_iterator(ht_.begin(), this); } + const_iterator end() const { return const_iterator(ht_.end(), this); } + local_iterator begin(size_type i) { return local_iterator(ht_.begin(i), this); } + local_iterator end(size_type i) { return local_iterator(ht_.end(i), this); } + const_local_iterator begin(size_type i) const { return const_local_iterator(ht_.begin(i), this); } + const_local_iterator end(size_type i) const { return 
const_local_iterator(ht_.end(i), this); } + + hasher hash_funct() const { return ht_.hash_funct(); } + hasher hash_function() const { return ht_.hash_function(); } + key_equal key_eq() const { return ht_.key_eq(); } + allocator_type get_allocator() const { return ht_.get_allocator(); } + + BaseHashtableInterface(size_type expected_max_items_in_table, + const hasher& hf, + const key_equal& eql, + const allocator_type& alloc) + : ht_(expected_max_items_in_table, hf, eql, alloc) { } + + // Not all ht_'s support this constructor: you should only call it + // from a subclass if you know your ht supports it. Otherwise call + // the previous constructor, followed by 'insert(f, l);'. + template + BaseHashtableInterface(InputIterator f, InputIterator l, + size_type expected_max_items_in_table, + const hasher& hf, + const key_equal& eql, + const allocator_type& alloc) + : ht_(f, l, expected_max_items_in_table, hf, eql, alloc) { + } + + // This is the version of the constructor used by dense_*, which + // requires an empty key in the constructor. + template + BaseHashtableInterface(InputIterator f, InputIterator l, key_type empty_k, + size_type expected_max_items_in_table, + const hasher& hf, + const key_equal& eql, + const allocator_type& alloc) + : ht_(f, l, empty_k, expected_max_items_in_table, hf, eql, alloc) { + } + + // This is the constructor appropriate for {dense,sparse}hashtable. + template + BaseHashtableInterface(size_type expected_max_items_in_table, + const hasher& hf, + const key_equal& eql, + const ExtractKey& ek, + const SetKey& sk, + const allocator_type& alloc) + : ht_(expected_max_items_in_table, hf, eql, ek, sk, alloc) { } + + + void clear() { ht_.clear(); } + void swap(BaseHashtableInterface& other) { ht_.swap(other.ht_); } + + // Only part of the API for some hashtable implementations. + void clear_no_resize() { clear(); } + + size_type size() const { return ht_.size(); } + size_type max_size() const { return ht_.max_size(); } + bool empty() const { return ht_.empty(); } + size_type bucket_count() const { return ht_.bucket_count(); } + size_type max_bucket_count() const { return ht_.max_bucket_count(); } + + size_type bucket_size(size_type i) const { + return ht_.bucket_size(i); + } + size_type bucket(const key_type& key) const { + return ht_.bucket(key); + } + + float load_factor() const { return ht_.load_factor(); } + float max_load_factor() const { return ht_.max_load_factor(); } + void max_load_factor(float grow) { ht_.max_load_factor(grow); } + float min_load_factor() const { return ht_.min_load_factor(); } + void min_load_factor(float shrink) { ht_.min_load_factor(shrink); } + void set_resizing_parameters(float shrink, float grow) { + ht_.set_resizing_parameters(shrink, grow); + } + + void resize(size_type hint) { ht_.resize(hint); } + void rehash(size_type hint) { ht_.rehash(hint); } + + iterator find(const key_type& key) { + return iterator(ht_.find(key), this); + } + + const_iterator find(const key_type& key) const { + return const_iterator(ht_.find(key), this); + } + + // Rather than try to implement operator[], which doesn't make much + // sense for set types, we implement two methods: bracket_equal and + // bracket_assign. By default, bracket_equal(a, b) returns true if + // ht[a] == b, and false otherwise. (Note that this follows + // operator[] semantics exactly, including inserting a if it's not + // already in the hashtable, before doing the equality test.) 
For + // sets, which have no operator[], b is ignored, and bracket_equal + // returns true if key is in the set and false otherwise. + // bracket_assign(a, b) is equivalent to ht[a] = b. For sets, b is + // ignored, and bracket_assign is equivalent to ht.insert(a). + template + bool bracket_equal(const key_type& key, const AssignValue& expected) { + return ht_[key] == expected; + } + + template + void bracket_assign(const key_type& key, const AssignValue& value) { + ht_[key] = value; + } + + size_type count(const key_type& key) const { return ht_.count(key); } + + std::pair equal_range(const key_type& key) + { + std::pair r + = ht_.equal_range(key); + return std::pair(iterator(r.first, this), + iterator(r.second, this)); + } + std::pair equal_range(const key_type& key) const + { + std::pair r + = ht_.equal_range(key); + return std::pair( + const_iterator(r.first, this), const_iterator(r.second, this)); + } + + const_iterator random_element(class ACMRandom* r) const { + return const_iterator(ht_.random_element(r), this); + } + + iterator random_element(class ACMRandom* r) { + return iterator(ht_.random_element(r), this); + } + + std::pair insert(const value_type& obj) { + std::pair r = ht_.insert(obj); + return std::pair(iterator(r.first, this), r.second); + } + template + void insert(InputIterator f, InputIterator l) { + ht_.insert(f, l); + } + void insert(typename HT::const_iterator f, typename HT::const_iterator l) { + ht_.insert(f, l); + } + iterator insert(typename HT::iterator, const value_type& obj) { + return iterator(insert(obj).first, this); + } + + // These will commonly need to be overridden by the child. + void set_empty_key(const key_type& k) { ht_.set_empty_key(k); } + void clear_empty_key() { ht_.clear_empty_key(); } + key_type empty_key() const { return ht_.empty_key(); } + + void set_deleted_key(const key_type& k) { ht_.set_deleted_key(k); } + void clear_deleted_key() { ht_.clear_deleted_key(); } + + size_type erase(const key_type& key) { return ht_.erase(key); } + void erase(typename HT::iterator it) { ht_.erase(it); } + void erase(typename HT::iterator f, typename HT::iterator l) { + ht_.erase(f, l); + } + + bool operator==(const BaseHashtableInterface& other) const { + return ht_ == other.ht_; + } + bool operator!=(const BaseHashtableInterface& other) const { + return ht_ != other.ht_; + } + + template + bool serialize(ValueSerializer serializer, OUTPUT *fp) { + return ht_.serialize(serializer, fp); + } + template + bool unserialize(ValueSerializer serializer, INPUT *fp) { + return ht_.unserialize(serializer, fp); + } + + template + bool write_metadata(OUTPUT *fp) { + return ht_.write_metadata(fp); + } + template + bool read_metadata(INPUT *fp) { + return ht_.read_metadata(fp); + } + template + bool write_nopointer_data(OUTPUT *fp) { + return ht_.write_nopointer_data(fp); + } + template + bool read_nopointer_data(INPUT *fp) { + return ht_.read_nopointer_data(fp); + } + + // low-level stats + int num_table_copies() const { return (int)ht_.num_table_copies(); } + + // Not part of the hashtable API, but is provided to make testing easier. + virtual key_type get_key(const value_type& value) const = 0; + // All subclasses should define get_data(value_type) as well. I don't + // provide an abstract-virtual definition here, because the return type + // differs between subclasses (not all subclasses define data_type). 
+ //virtual data_type get_data(const value_type& value) const = 0; + //virtual data_type default_data() const = 0; + + // These allow introspection into the interface. "Supports" means + // that the implementation of this functionality isn't a noop. + virtual bool supports_clear_no_resize() const = 0; + virtual bool supports_empty_key() const = 0; + virtual bool supports_deleted_key() const = 0; + virtual bool supports_brackets() const = 0; // has a 'real' operator[] + virtual bool supports_readwrite() const = 0; + virtual bool supports_num_table_copies() const = 0; + virtual bool supports_serialization() const = 0; + +protected: + HT ht_; + + // These are what subclasses have to define to get class-specific behavior + virtual key_type it_to_key(const iterator& it) const = 0; + virtual key_type it_to_key(const const_iterator& it) const = 0; + virtual key_type it_to_key(const local_iterator& it) const = 0; + virtual key_type it_to_key(const const_local_iterator& it) const = 0; +}; + +// --------------------------------------------------------------------- +// --------------------------------------------------------------------- +template , + class EqualKey = std::equal_to, + class Alloc = SPP_DEFAULT_ALLOCATOR > > +class HashtableInterface_SparseHashMap + : public BaseHashtableInterface< sparse_hash_map > +{ +private: + typedef sparse_hash_map ht; + typedef BaseHashtableInterface p; // parent + +public: + explicit HashtableInterface_SparseHashMap( + typename p::size_type expected_max_items = 0, + const typename p::hasher& hf = typename p::hasher(), + const typename p::key_equal& eql = typename p::key_equal(), + const typename p::allocator_type& alloc = typename p::allocator_type()) + : BaseHashtableInterface(expected_max_items, hf, eql, alloc) { } + + template + HashtableInterface_SparseHashMap( + InputIterator f, InputIterator l, + typename p::size_type expected_max_items = 0, + const typename p::hasher& hf = typename p::hasher(), + const typename p::key_equal& eql = typename p::key_equal(), + const typename p::allocator_type& alloc = typename p::allocator_type()) + : BaseHashtableInterface(f, l, expected_max_items, hf, eql, alloc) { } + + typename p::key_type get_key(const typename p::value_type& value) const { + return value.first; + } + typename ht::data_type get_data(const typename p::value_type& value) const { + return value.second; + } + typename ht::data_type default_data() const { + return typename ht::data_type(); + } + + bool supports_clear_no_resize() const { return false; } + bool supports_empty_key() const { return false; } + bool supports_deleted_key() const { return false; } + bool supports_brackets() const { return true; } + bool supports_readwrite() const { return true; } + bool supports_num_table_copies() const { return false; } + bool supports_serialization() const { return true; } + + void set_empty_key(const typename p::key_type&) { } + void clear_empty_key() { } + typename p::key_type empty_key() const { return typename p::key_type(); } + + int num_table_copies() const { return 0; } + + typedef typename ht::NopointerSerializer NopointerSerializer; + +protected: + template + friend void swap(HashtableInterface_SparseHashMap& a, + HashtableInterface_SparseHashMap& b); + + typename p::key_type it_to_key(const typename p::iterator& it) const { + return it->first; + } + typename p::key_type it_to_key(const typename p::const_iterator& it) const { + return it->first; + } + typename p::key_type it_to_key(const typename p::local_iterator& it) const { + return it->first; + } + 
typename p::key_type it_to_key(const typename p::const_local_iterator& it) const { + return it->first; + } +}; + +// --------------------------------------------------------------------- +// --------------------------------------------------------------------- +template +void swap(HashtableInterface_SparseHashMap& a, + HashtableInterface_SparseHashMap& b) +{ + swap(a.ht_, b.ht_); +} + + +// --------------------------------------------------------------------- +// --------------------------------------------------------------------- +template , + class EqualKey = std::equal_to, + class Alloc = SPP_DEFAULT_ALLOCATOR > +class HashtableInterface_SparseHashSet + : public BaseHashtableInterface< sparse_hash_set > +{ +private: + typedef sparse_hash_set ht; + typedef BaseHashtableInterface p; // parent + +public: + explicit HashtableInterface_SparseHashSet( + typename p::size_type expected_max_items = 0, + const typename p::hasher& hf = typename p::hasher(), + const typename p::key_equal& eql = typename p::key_equal(), + const typename p::allocator_type& alloc = typename p::allocator_type()) + : BaseHashtableInterface(expected_max_items, hf, eql, alloc) { } + + template + HashtableInterface_SparseHashSet( + InputIterator f, InputIterator l, + typename p::size_type expected_max_items = 0, + const typename p::hasher& hf = typename p::hasher(), + const typename p::key_equal& eql = typename p::key_equal(), + const typename p::allocator_type& alloc = typename p::allocator_type()) + : BaseHashtableInterface(f, l, expected_max_items, hf, eql, alloc) { } + + template + bool bracket_equal(const typename p::key_type& key, const AssignValue&) { + return this->ht_.find(key) != this->ht_.end(); + } + + template + void bracket_assign(const typename p::key_type& key, const AssignValue&) { + this->ht_.insert(key); + } + + typename p::key_type get_key(const typename p::value_type& value) const { + return value; + } + // For sets, the only 'data' is that an item is actually inserted. 
+ bool get_data(const typename p::value_type&) const { + return true; + } + bool default_data() const { + return true; + } + + bool supports_clear_no_resize() const { return false; } + bool supports_empty_key() const { return false; } + bool supports_deleted_key() const { return false; } + bool supports_brackets() const { return false; } + bool supports_readwrite() const { return true; } + bool supports_num_table_copies() const { return false; } + bool supports_serialization() const { return true; } + + void set_empty_key(const typename p::key_type&) { } + void clear_empty_key() { } + typename p::key_type empty_key() const { return typename p::key_type(); } + + int num_table_copies() const { return 0; } + + typedef typename ht::NopointerSerializer NopointerSerializer; + +protected: + template + friend void swap(HashtableInterface_SparseHashSet& a, + HashtableInterface_SparseHashSet& b); + + typename p::key_type it_to_key(const typename p::iterator& it) const { + return *it; + } + typename p::key_type it_to_key(const typename p::const_iterator& it) const { + return *it; + } + typename p::key_type it_to_key(const typename p::local_iterator& it) const { + return *it; + } + typename p::key_type it_to_key(const typename p::const_local_iterator& it) + const { + return *it; + } +}; + +// --------------------------------------------------------------------- +// --------------------------------------------------------------------- +template +void swap(HashtableInterface_SparseHashSet& a, + HashtableInterface_SparseHashSet& b) +{ + swap(a.ht_, b.ht_); +} + +// --------------------------------------------------------------------- +// --------------------------------------------------------------------- +template +class HashtableInterface_SparseHashtable + : public BaseHashtableInterface< sparse_hashtable > +{ +private: + typedef sparse_hashtable ht; + typedef BaseHashtableInterface p; // parent + +public: + explicit HashtableInterface_SparseHashtable( + typename p::size_type expected_max_items = 0, + const typename p::hasher& hf = typename p::hasher(), + const typename p::key_equal& eql = typename p::key_equal(), + const typename p::allocator_type& alloc = typename p::allocator_type()) + : BaseHashtableInterface(expected_max_items, hf, eql, + ExtractKey(), SetKey(), alloc) { } + + template + HashtableInterface_SparseHashtable( + InputIterator f, InputIterator l, + typename p::size_type expected_max_items = 0, + const typename p::hasher& hf = typename p::hasher(), + const typename p::key_equal& eql = typename p::key_equal(), + const typename p::allocator_type& alloc = typename p::allocator_type()) + : BaseHashtableInterface(expected_max_items, hf, eql, + ExtractKey(), SetKey(), alloc) { + this->insert(f, l); + } + + float max_load_factor() const { + float shrink, grow; + this->ht_.get_resizing_parameters(&shrink, &grow); + return grow; + } + void max_load_factor(float new_grow) { + float shrink, grow; + this->ht_.get_resizing_parameters(&shrink, &grow); + this->ht_.set_resizing_parameters(shrink, new_grow); + } + float min_load_factor() const { + float shrink, grow; + this->ht_.get_resizing_parameters(&shrink, &grow); + return shrink; + } + void min_load_factor(float new_shrink) { + float shrink, grow; + this->ht_.get_resizing_parameters(&shrink, &grow); + this->ht_.set_resizing_parameters(new_shrink, grow); + } + + template + bool bracket_equal(const typename p::key_type&, const AssignValue&) { + return false; + } + + template + void bracket_assign(const typename p::key_type&, const AssignValue&) { + 
} + + typename p::key_type get_key(const typename p::value_type& value) const { + return extract_key(value); + } + typename p::value_type get_data(const typename p::value_type& value) const { + return value; + } + typename p::value_type default_data() const { + return typename p::value_type(); + } + + bool supports_clear_no_resize() const { return false; } + bool supports_empty_key() const { return false; } + bool supports_deleted_key() const { return false; } + bool supports_brackets() const { return false; } + bool supports_readwrite() const { return true; } + bool supports_num_table_copies() const { return true; } + bool supports_serialization() const { return true; } + + void set_empty_key(const typename p::key_type&) { } + void clear_empty_key() { } + typename p::key_type empty_key() const { return typename p::key_type(); } + + // These tr1 names aren't defined for sparse_hashtable. + typename p::hasher hash_function() { return this->hash_funct(); } + void rehash(typename p::size_type hint) { this->resize(hint); } + + // TODO(csilvers): also support/test destructive_begin()/destructive_end()? + + typedef typename ht::NopointerSerializer NopointerSerializer; + +protected: + template + friend void swap( + HashtableInterface_SparseHashtable& a, + HashtableInterface_SparseHashtable& b); + + typename p::key_type it_to_key(const typename p::iterator& it) const { + return extract_key(*it); + } + typename p::key_type it_to_key(const typename p::const_iterator& it) const { + return extract_key(*it); + } + typename p::key_type it_to_key(const typename p::local_iterator& it) const { + return extract_key(*it); + } + typename p::key_type it_to_key(const typename p::const_local_iterator& it) + const { + return extract_key(*it); + } + +private: + ExtractKey extract_key; +}; + +// --------------------------------------------------------------------- +// --------------------------------------------------------------------- +template +void swap(HashtableInterface_SparseHashtable& a, + HashtableInterface_SparseHashtable& b) { + swap(a.ht_, b.ht_); +} + +void EXPECT_TRUE(bool cond) +{ + if (!cond) + { + ::fputs("Test failed:\n", stderr); + ::exit(1); + } +} + +namespace spp_ +{ + +namespace testing +{ + +#define EXPECT_FALSE(a) EXPECT_TRUE(!(a)) +#define EXPECT_EQ(a, b) EXPECT_TRUE((a) == (b)) +#define EXPECT_NE(a, b) EXPECT_TRUE((a) != (b)) +#define EXPECT_LT(a, b) EXPECT_TRUE((a) < (b)) +#define EXPECT_GT(a, b) EXPECT_TRUE((a) > (b)) +#define EXPECT_LE(a, b) EXPECT_TRUE((a) <= (b)) +#define EXPECT_GE(a, b) EXPECT_TRUE((a) >= (b)) + +#define EXPECT_DEATH(cmd, expected_error_string) \ + try { \ + cmd; \ + EXPECT_FALSE("did not see expected error: " #expected_error_string); \ + } catch (const std::length_error&) { \ + /* Good, the cmd failed. */ \ + } + +#define TEST(suitename, testname) \ + class TEST_##suitename##_##testname { \ + public: \ + TEST_##suitename##_##testname() { \ + ::fputs("Running " #suitename "." #testname "\n", stderr); \ + Run(); \ + } \ + void Run(); \ + }; \ + static TEST_##suitename##_##testname \ + test_instance_##suitename##_##testname; \ + void TEST_##suitename##_##testname::Run() + + +template +struct TypeList3 +{ + typedef C1 type1; + typedef C2 type2; + typedef C3 type3; +}; + +// I need to list 9 types here, for code below to compile, though +// only the first 3 are ever used. 
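+// (TYPED_TEST_CASE_3 below fills the unused slots 4-9 with copies of type1;
+// TYPED_TEST only runs as many cases as the corresponding _numtypes constant.)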
+#define TYPED_TEST_CASE_3(classname, typelist) \ + typedef typelist::type1 classname##_type1; \ + typedef typelist::type2 classname##_type2; \ + typedef typelist::type3 classname##_type3; \ + SPP_ATTRIBUTE_UNUSED static const int classname##_numtypes = 3; \ + typedef typelist::type1 classname##_type4; \ + typedef typelist::type1 classname##_type5; \ + typedef typelist::type1 classname##_type6; \ + typedef typelist::type1 classname##_type7; \ + typedef typelist::type1 classname##_type8; \ + typedef typelist::type1 classname##_type9 + +template +struct TypeList9 +{ + typedef C1 type1; + typedef C2 type2; + typedef C3 type3; + typedef C4 type4; + typedef C5 type5; + typedef C6 type6; + typedef C7 type7; + typedef C8 type8; + typedef C9 type9; +}; + +#define TYPED_TEST_CASE_9(classname, typelist) \ + typedef typelist::type1 classname##_type1; \ + typedef typelist::type2 classname##_type2; \ + typedef typelist::type3 classname##_type3; \ + typedef typelist::type4 classname##_type4; \ + typedef typelist::type5 classname##_type5; \ + typedef typelist::type6 classname##_type6; \ + typedef typelist::type7 classname##_type7; \ + typedef typelist::type8 classname##_type8; \ + typedef typelist::type9 classname##_type9; \ + static const int classname##_numtypes = 9 + +#define TYPED_TEST(superclass, testname) \ + template \ + class TEST_onetype_##superclass##_##testname : \ + public superclass { \ + public: \ + TEST_onetype_##superclass##_##testname() { \ + Run(); \ + } \ + private: \ + void Run(); \ + }; \ + class TEST_typed_##superclass##_##testname { \ + public: \ + explicit TEST_typed_##superclass##_##testname() { \ + if (superclass##_numtypes >= 1) { \ + ::fputs("Running " #superclass "." #testname ".1\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + if (superclass##_numtypes >= 2) { \ + ::fputs("Running " #superclass "." #testname ".2\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + if (superclass##_numtypes >= 3) { \ + ::fputs("Running " #superclass "." #testname ".3\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + if (superclass##_numtypes >= 4) { \ + ::fputs("Running " #superclass "." #testname ".4\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + if (superclass##_numtypes >= 5) { \ + ::fputs("Running " #superclass "." #testname ".5\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + if (superclass##_numtypes >= 6) { \ + ::fputs("Running " #superclass "." #testname ".6\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + if (superclass##_numtypes >= 7) { \ + ::fputs("Running " #superclass "." #testname ".7\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + if (superclass##_numtypes >= 8) { \ + ::fputs("Running " #superclass "." #testname ".8\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + if (superclass##_numtypes >= 9) { \ + ::fputs("Running " #superclass "." #testname ".9\n", stderr); \ + TEST_onetype_##superclass##_##testname t; \ + } \ + } \ + }; \ + static TEST_typed_##superclass##_##testname \ + test_instance_typed_##superclass##_##testname; \ + template \ + void TEST_onetype_##superclass##_##testname::Run() + +// This is a dummy class just to make converting from internal-google +// to opensourcing easier. 
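+// (HashtableTest further down derives from ::testing::Test, i.e. from this stub.)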
+class Test { }; + +} // namespace testing + +} // namespace spp_ + +namespace testing = SPP_NAMESPACE::testing; + +using std::cout; +using std::pair; +using std::set; +using std::string; +using std::vector; + +typedef unsigned char uint8; + +#ifdef _MSC_VER +// Below, we purposefully test having a very small allocator size. +// This causes some "type conversion too small" errors when using this +// allocator with sparsetable buckets. We're testing to make sure we +// handle that situation ok, so we don't need the compiler warnings. +#pragma warning(disable:4244) +#define ATTRIBUTE_UNUSED +#else +#define ATTRIBUTE_UNUSED __attribute__((unused)) +#endif + +namespace { + +#ifndef _MSC_VER // windows defines its own version +# ifdef __MINGW32__ // mingw has trouble writing to /tmp +static string TmpFile(const char* basename) { + return string("./#") + basename; +} +# else +static string TmpFile(const char* basename) { + string kTmpdir = "/tmp"; + return kTmpdir + "/" + basename; +} +# endif +#endif + +// Used as a value in some of the hashtable tests. It's just some +// arbitrary user-defined type with non-trivial memory management. +// --------------------------------------------------------------- +struct ValueType +{ +public: + ValueType() : s_(kDefault) { } + ValueType(const char* init_s) : s_(kDefault) { set_s(init_s); } + ~ValueType() { set_s(NULL); } + ValueType(const ValueType& that) : s_(kDefault) { operator=(that); } + void operator=(const ValueType& that) { set_s(that.s_); } + bool operator==(const ValueType& that) const { + return strcmp(this->s(), that.s()) == 0; + } + void set_s(const char* new_s) { + if (s_ != kDefault) + free(const_cast(s_)); + s_ = (new_s == NULL ? kDefault : reinterpret_cast(_strdup(new_s))); + } + const char* s() const { return s_; } +private: + const char* s_; + static const char* const kDefault; +}; + +const char* const ValueType::kDefault = "hi"; + +// This is used by the low-level sparse/dense_hashtable classes, +// which support the most general relationship between keys and +// values: the key is derived from the value through some arbitrary +// function. (For classes like sparse_hash_map, the 'value' is a +// key/data pair, and the function to derive the key is +// FirstElementOfPair.) KeyToValue is the inverse of this function, +// so GetKey(KeyToValue(key)) == key. To keep the tests a bit +// simpler, we've chosen to make the key and value actually be the +// same type, which is why we need only one template argument for the +// types, rather than two (one for the key and one for the value). +template +struct SetKey +{ + void operator()(KeyAndValueT* value, const KeyAndValueT& new_key) const + { + *value = KeyToValue()(new_key); + } +}; + +// A hash function that keeps track of how often it's called. We use +// a simple djb-hash so we don't depend on how STL hashes. We use +// this same method to do the key-comparison, so we can keep track +// of comparison-counts too. 
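+// The djb-style recurrence used below is simply hash = 33 * hash + c for each
+// character c, starting from 0 (so, for example, "ab" hashes to 33 * 'a' + 'b').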
+struct Hasher +{ + explicit Hasher(int i=0) : id_(i), num_hashes_(0), num_compares_(0) { } + int id() const { return id_; } + int num_hashes() const { return num_hashes_; } + int num_compares() const { return num_compares_; } + + size_t operator()(int a) const { + num_hashes_++; + return static_cast(a); + } + size_t operator()(const char* a) const { + num_hashes_++; + size_t hash = 0; + for (size_t i = 0; a[i]; i++ ) + hash = 33 * hash + a[i]; + return hash; + } + size_t operator()(const string& a) const { + num_hashes_++; + size_t hash = 0; + for (size_t i = 0; i < a.length(); i++ ) + hash = 33 * hash + a[i]; + return hash; + } + size_t operator()(const int* a) const { + num_hashes_++; + return static_cast(reinterpret_cast(a)); + } + bool operator()(int a, int b) const { + num_compares_++; + return a == b; + } + bool operator()(const string& a, const string& b) const { + num_compares_++; + return a == b; + } + bool operator()(const char* a, const char* b) const { + num_compares_++; + // The 'a == b' test is necessary, in case a and b are both NULL. + return (a == b || (a && b && strcmp(a, b) == 0)); + } + +private: + mutable int id_; + mutable int num_hashes_; + mutable int num_compares_; +}; + +// Allocator that allows controlling its size in various ways, to test +// allocator overflow. Because we use this allocator in a vector, we +// need to define != and swap for gcc. +// ------------------------------------------------------------------ +template(~0)> +struct Alloc +{ + typedef T value_type; + typedef SizeT size_type; + typedef ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + + explicit Alloc(int i=0, int* count=NULL) : id_(i), count_(count) {} + ~Alloc() {} + pointer address(reference r) const { return &r; } + const_pointer address(const_reference r) const { return &r; } + pointer allocate(size_type n, const_pointer = 0) { + if (count_) ++(*count_); + return static_cast(malloc(n * sizeof(value_type))); + } + void deallocate(pointer p, size_type) { + free(p); + } + pointer reallocate(pointer p, size_type n) { + if (count_) ++(*count_); + return static_cast(realloc(p, n * sizeof(value_type))); + } + size_type max_size() const { + return static_cast(MAX_SIZE); + } + void construct(pointer p, const value_type& val) { + new(p) value_type(val); + } + void destroy(pointer p) { p->~value_type(); } + + bool is_custom_alloc() const { return true; } + + template + Alloc(const Alloc& that) + : id_(that.id_), count_(that.count_) { + } + + template + struct rebind { + typedef Alloc other; + }; + + bool operator==(const Alloc& that) const { + return this->id_ == that.id_ && this->count_ == that.count_; + } + bool operator!=(const Alloc& that) const { + return !this->operator==(that); + } + + int id() const { return id_; } + + // I have to make these public so the constructor used for rebinding + // can see them. Normally, I'd just make them private and say: + // template friend struct Alloc; + // but MSVC 7.1 barfs on that. So public it is. But no peeking! +public: + int id_; + int* count_; +}; + + +// Below are a few fun routines that convert a value into a key, used +// for dense_hashtable and sparse_hashtable. It's our responsibility +// to make sure, when we insert values into these objects, that the +// values match the keys we insert them under. To allow us to use +// these routines for SetKey as well, we require all these functions +// be their own inverse: f(f(x)) == x. 
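+// For example, Negation maps 10 to -10 and back again, and Capital toggles the
+// case of a string's first character, so Capital(Capital(s)) == s.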
+template +struct Negation { + typedef Value result_type; + Value operator()(Value& v) { return -v; } + const Value operator()(const Value& v) const { return -v; } +}; + +struct Capital +{ + typedef string result_type; + string operator()(string& s) { + return string(1, s[0] ^ 32) + s.substr(1); + } + const string operator()(const string& s) const { + return string(1, s[0] ^ 32) + s.substr(1); + } +}; + +struct Identity +{ // lame, I know, but an important case to test. + typedef const char* result_type; + const char* operator()(const char* s) const { + return s; + } +}; + +// This is just to avoid memory leaks -- it's a global pointer to +// all the memory allocated by UniqueObjectHelper. We'll use it +// to semi-test sparsetable as well. :-) +std::vector g_unique_charstar_objects(16, (char *)0); + +// This is an object-generator: pass in an index, and it will return a +// unique object of type ItemType. We provide specializations for the +// types we actually support. +template ItemType UniqueObjectHelper(int index); +template<> int UniqueObjectHelper(int index) +{ + return index; +} +template<> string UniqueObjectHelper(int index) +{ + char buffer[64]; + snprintf(buffer, sizeof(buffer), "%d", index); + return buffer; +} +template<> char* UniqueObjectHelper(int index) +{ + // First grow the table if need be. + size_t table_size = g_unique_charstar_objects.size(); + while (index >= static_cast(table_size)) { + assert(table_size * 2 > table_size); // avoid overflow problems + table_size *= 2; + } + if (table_size > g_unique_charstar_objects.size()) + g_unique_charstar_objects.resize(table_size, (char *)0); + + if (!g_unique_charstar_objects[static_cast(index)]) { + char buffer[64]; + snprintf(buffer, sizeof(buffer), "%d", index); + g_unique_charstar_objects[static_cast(index)] = _strdup(buffer); + } + return g_unique_charstar_objects[static_cast(index)]; +} +template<> const char* UniqueObjectHelper(int index) { + return UniqueObjectHelper(index); +} +template<> ValueType UniqueObjectHelper(int index) { + return ValueType(UniqueObjectHelper(index).c_str()); +} +template<> pair UniqueObjectHelper(int index) { + return pair(index, index + 1); +} +template<> pair UniqueObjectHelper(int index) +{ + return pair( + UniqueObjectHelper(index), UniqueObjectHelper(index + 1)); +} +template<> pair UniqueObjectHelper(int index) +{ + return pair( + UniqueObjectHelper(index), UniqueObjectHelper(index+1)); +} + +class ValueSerializer +{ +public: + bool operator()(FILE* fp, const int& value) { + return fwrite(&value, sizeof(value), 1, fp) == 1; + } + bool operator()(FILE* fp, int* value) { + return fread(value, sizeof(*value), 1, fp) == 1; + } + bool operator()(FILE* fp, const string& value) { + const size_t size = value.size(); + return (*this)(fp, (int)size) && fwrite(value.c_str(), size, 1, fp) == 1; + } + bool operator()(FILE* fp, string* value) { + int size; + if (!(*this)(fp, &size)) return false; + char* buf = new char[(size_t)size]; + if (fread(buf, (size_t)size, 1, fp) != 1) { + delete[] buf; + return false; + } + new (value) string(buf, (size_t)size); + delete[] buf; + return true; + } + template + bool operator()(OUTPUT* fp, const ValueType& v) { + return (*this)(fp, string(v.s())); + } + template + bool operator()(INPUT* fp, ValueType* v) { + string data; + if (!(*this)(fp, &data)) return false; + new(v) ValueType(data.c_str()); + return true; + } + template + bool operator()(OUTPUT* fp, const char* const& value) { + // Just store the index. 
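+ // (char* values produced by UniqueObjectHelper are just the decimal
+ // form of their index, so atoi() below recovers that index exactly.)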
+ return (*this)(fp, atoi(value)); + } + template + bool operator()(INPUT* fp, const char** value) { + // Look up via index. + int index; + if (!(*this)(fp, &index)) return false; + *value = UniqueObjectHelper(index); + return true; + } + template + bool operator()(OUTPUT* fp, std::pair* value) { + return (*this)(fp, const_cast(&value->first)) + && (*this)(fp, &value->second); + } + template + bool operator()(INPUT* fp, const std::pair& value) { + return (*this)(fp, value.first) && (*this)(fp, value.second); + } +}; + +template +class HashtableTest : public ::testing::Test +{ +public: + HashtableTest() : ht_() { } + // Give syntactically-prettier access to UniqueObjectHelper. + typename HashtableType::value_type UniqueObject(int index) { + return UniqueObjectHelper(index); + } + typename HashtableType::key_type UniqueKey(int index) { + return this->ht_.get_key(this->UniqueObject(index)); + } +protected: + HashtableType ht_; +}; + +} + +// These are used to specify the empty key and deleted key in some +// contexts. They can't be in the unnamed namespace, or static, +// because the template code requires external linkage. +extern const string kEmptyString("--empty string--"); +extern const string kDeletedString("--deleted string--"); +extern const int kEmptyInt = 0; +extern const int kDeletedInt = -1234676543; // an unlikely-to-pick int +extern const char* const kEmptyCharStar = "--empty char*--"; +extern const char* const kDeletedCharStar = "--deleted char*--"; + +namespace { + +#define INT_HASHTABLES \ + HashtableInterface_SparseHashMap > >, \ + HashtableInterface_SparseHashSet >, \ + /* This is a table where the key associated with a value is -value */ \ + HashtableInterface_SparseHashtable, \ + SetKey >, \ + Hasher, Alloc > + +#define STRING_HASHTABLES \ + HashtableInterface_SparseHashMap > >, \ + HashtableInterface_SparseHashSet >, \ + /* This is a table where the key associated with a value is Cap(value) */ \ + HashtableInterface_SparseHashtable, \ + Hasher, Alloc > + +// --------------------------------------------------------------------- +// I'd like to use ValueType keys for SparseHashtable<> and +// DenseHashtable<> but I can't due to memory-management woes (nobody +// really owns the char* involved). So instead I do something simpler. +// --------------------------------------------------------------------- +#define CHARSTAR_HASHTABLES \ + HashtableInterface_SparseHashMap > >, \ + HashtableInterface_SparseHashSet >, \ + HashtableInterface_SparseHashtable, \ + Hasher, Alloc > + +// --------------------------------------------------------------------- +// This is the list of types we run each test against. +// We need to define the same class 4 times due to limitations in the +// testing framework. Basically, we associate each class below with +// the set of types we want to run tests on it with. 
+// --------------------------------------------------------------------- +template class HashtableIntTest + : public HashtableTest { }; + +template class HashtableStringTest + : public HashtableTest { }; + +template class HashtableCharStarTest + : public HashtableTest { }; + +template class HashtableAllTest + : public HashtableTest { }; + +typedef testing::TypeList3 IntHashtables; +typedef testing::TypeList3 StringHashtables; +typedef testing::TypeList3 CharStarHashtables; +typedef testing::TypeList9 AllHashtables; + +TYPED_TEST_CASE_3(HashtableIntTest, IntHashtables); +TYPED_TEST_CASE_3(HashtableStringTest, StringHashtables); +TYPED_TEST_CASE_3(HashtableCharStarTest, CharStarHashtables); +TYPED_TEST_CASE_9(HashtableAllTest, AllHashtables); + +// ------------------------------------------------------------------------ +// First, some testing of the underlying infrastructure. + +#if 0 + +TEST(HashtableCommonTest, HashMunging) +{ + const Hasher hasher; + + // We don't munge the hash value on non-pointer template types. + { + const sparsehash_internal::sh_hashtable_settings + settings(hasher, 0.0, 0.0); + const int v = 1000; + EXPECT_EQ(hasher(v), settings.hash(v)); + } + + { + // We do munge the hash value on pointer template types. + const sparsehash_internal::sh_hashtable_settings + settings(hasher, 0.0, 0.0); + int* v = NULL; + v += 0x10000; // get a non-trivial pointer value + EXPECT_NE(hasher(v), settings.hash(v)); + } + { + const sparsehash_internal::sh_hashtable_settings + settings(hasher, 0.0, 0.0); + const int* v = NULL; + v += 0x10000; // get a non-trivial pointer value + EXPECT_NE(hasher(v), settings.hash(v)); + } +} + +#endif + +// ------------------------------------------------------------------------ +// If the first arg to TYPED_TEST is HashtableIntTest, it will run +// this test on all the hashtable types, with key=int and value=int. +// Likewise, HashtableStringTest will have string key/values, and +// HashtableCharStarTest will have char* keys and -- just to mix it up +// a little -- ValueType values. HashtableAllTest will run all three +// key/value types on all 6 hashtables types, for 9 test-runs total +// per test. +// +// In addition, TYPED_TEST makes available the magic keyword +// TypeParam, which is the type being used for the current test. + +// This first set of tests just tests the public API, going through +// the public typedefs and methods in turn. It goes approximately +// in the definition-order in sparse_hash_map.h. +// ------------------------------------------------------------------------ +TYPED_TEST(HashtableIntTest, Typedefs) +{ + // Make sure all the standard STL-y typedefs are defined. The exact + // key/value types don't matter here, so we only bother testing on + // the int tables. This is just a compile-time "test"; nothing here + // can fail at runtime. + this->ht_.set_deleted_key(-2); // just so deleted_key succeeds + typename TypeParam::key_type kt; + typename TypeParam::value_type vt; + typename TypeParam::hasher h; + typename TypeParam::key_equal ke; + typename TypeParam::allocator_type at; + + typename TypeParam::size_type st; + typename TypeParam::difference_type dt; + typename TypeParam::pointer p; + typename TypeParam::const_pointer cp; + // I can't declare variables of reference-type, since I have nothing + // to point them to, so I just make sure that these types exist. 
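+ // (Hence the ATTRIBUTE_UNUSED typedefs below rather than real variables.)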
+ ATTRIBUTE_UNUSED typedef typename TypeParam::reference r; + ATTRIBUTE_UNUSED typedef typename TypeParam::const_reference cf; + + typename TypeParam::iterator i; + typename TypeParam::const_iterator ci; + typename TypeParam::local_iterator li; + typename TypeParam::const_local_iterator cli; + + // Now make sure the variables are used, so the compiler doesn't + // complain. Where possible, I "use" the variable by calling the + // method that's supposed to return the unique instance of the + // relevant type (eg. get_allocator()). Otherwise, I try to call a + // different, arbitrary function that returns the type. Sometimes + // the type isn't used at all, and there's no good way to use the + // variable. + (void)vt; // value_type may not be copyable. Easiest not to try. + h = this->ht_.hash_funct(); + ke = this->ht_.key_eq(); + at = this->ht_.get_allocator(); + st = this->ht_.size(); + (void)dt; + (void)p; + (void)cp; + (void)kt; + (void)st; + i = this->ht_.begin(); + ci = this->ht_.begin(); + li = this->ht_.begin(0); + cli = this->ht_.begin(0); +} + +TYPED_TEST(HashtableAllTest, NormalIterators) +{ + EXPECT_TRUE(this->ht_.begin() == this->ht_.end()); + this->ht_.insert(this->UniqueObject(1)); + { + typename TypeParam::iterator it = this->ht_.begin(); + EXPECT_TRUE(it != this->ht_.end()); + ++it; + EXPECT_TRUE(it == this->ht_.end()); + } +} + + +#if !defined(SPP_NO_CXX11_VARIADIC_TEMPLATES) + +template struct MyHash; +typedef std::pair StringPair; + +template<> struct MyHash +{ + size_t operator()(StringPair const& p) const + { + return std::hash()(p.first); + } +}; + +class MovableOnlyType +{ + std::string _str; + std::uint64_t _int; + +public: + // Make object movable and non-copyable + MovableOnlyType(MovableOnlyType &&) = default; + MovableOnlyType(const MovableOnlyType &) = delete; + MovableOnlyType& operator=(MovableOnlyType &&) = default; + MovableOnlyType& operator=(const MovableOnlyType &) = delete; + MovableOnlyType() : _str("whatever"), _int(2) {} +}; + +void movable_emplace_test(std::size_t iterations, int container_size) +{ + for (std::size_t i=0;i m; + m.reserve(static_cast(container_size)); + char buff[20]; + for (int j=0; j mymap; + + mymap.emplace ("NCC-1701", "J.T. Kirk"); + mymap.emplace ("NCC-1701-D", "J.L. Picard"); + mymap.emplace ("NCC-74656", "K. Janeway"); + EXPECT_TRUE(mymap["NCC-74656"] == std::string("K. Janeway")); + + sparse_hash_set > myset; + myset.emplace ("NCC-1701", "J.T. 
Kirk"); + } + + movable_emplace_test(10, 50); +} +#endif + + +#if !defined(SPP_NO_CXX11_VARIADIC_TEMPLATES) +TEST(HashtableTest, IncompleteTypes) +{ + int i; + sparse_hash_map ht2; + ht2[&i] = 3; + + struct Bogus; + sparse_hash_map ht3; + ht3[(Bogus *)0] = 8; +} +#endif + + +#if !defined(SPP_NO_CXX11_VARIADIC_TEMPLATES) +TEST(HashtableTest, ReferenceWrapper) +{ + sparse_hash_map> x; + int a = 5; + x.insert(std::make_pair(3, std::ref(a))); + EXPECT_EQ(x.at(3), 5); +} +#endif + +#if !defined(SPP_NO_CXX11_RVALUE_REFERENCES) +class CNonCopyable +{ +public: + CNonCopyable(CNonCopyable const &) = delete; + const CNonCopyable& operator=(CNonCopyable const &) = delete; + CNonCopyable() = default; +}; + + +struct Probe : CNonCopyable +{ + Probe() {} + Probe(Probe &&) {} + void operator=(Probe &&) {} + +private: + Probe(const Probe &); + Probe& operator=(const Probe &); +}; + +TEST(HashtableTest, NonCopyable) +{ + typedef spp::sparse_hash_map THashMap; + THashMap probes; + + probes.insert(THashMap::value_type(27, Probe())); + EXPECT_EQ(probes.begin()->first, 27); +} + +#endif + + +TEST(HashtableTest, ModifyViaIterator) +{ + // This only works for hash-maps, since only they have non-const values. + { + sparse_hash_map ht; + ht[1] = 2; + sparse_hash_map::iterator it = ht.find(1); + EXPECT_TRUE(it != ht.end()); + EXPECT_EQ(1, it->first); + EXPECT_EQ(2, it->second); + it->second = 5; + it = ht.find(1); + EXPECT_TRUE(it != ht.end()); + EXPECT_EQ(5, it->second); + } +} + +TYPED_TEST(HashtableAllTest, ConstIterators) +{ + this->ht_.insert(this->UniqueObject(1)); + typename TypeParam::const_iterator it = this->ht_.begin(); + EXPECT_TRUE(it != (typename TypeParam::const_iterator)this->ht_.end()); + ++it; + EXPECT_TRUE(it == (typename TypeParam::const_iterator)this->ht_.end()); +} + +TYPED_TEST(HashtableAllTest, LocalIterators) +{ + // Now, tr1 begin/end (the local iterator that takes a bucket-number). + // ht::bucket() returns the bucket that this key would be inserted in. + this->ht_.insert(this->UniqueObject(1)); + const typename TypeParam::size_type bucknum = + this->ht_.bucket(this->UniqueKey(1)); + typename TypeParam::local_iterator b = this->ht_.begin(bucknum); + typename TypeParam::local_iterator e = this->ht_.end(bucknum); + EXPECT_TRUE(b != e); + b++; + EXPECT_TRUE(b == e); + + // Check an empty bucket. We can just xor the bottom bit and be sure + // of getting a legal bucket, since #buckets is always a power of 2. + EXPECT_TRUE(this->ht_.begin(bucknum ^ 1) == this->ht_.end(bucknum ^ 1)); + // Another test, this time making sure we're using the right types. + typename TypeParam::local_iterator b2 = this->ht_.begin(bucknum ^ 1); + typename TypeParam::local_iterator e2 = this->ht_.end(bucknum ^ 1); + EXPECT_TRUE(b2 == e2); +} + +TYPED_TEST(HashtableAllTest, ConstLocalIterators) +{ + this->ht_.insert(this->UniqueObject(1)); + const typename TypeParam::size_type bucknum = + this->ht_.bucket(this->UniqueKey(1)); + typename TypeParam::const_local_iterator b = this->ht_.begin(bucknum); + typename TypeParam::const_local_iterator e = this->ht_.end(bucknum); + EXPECT_TRUE(b != e); + b++; + EXPECT_TRUE(b == e); + typename TypeParam::const_local_iterator b2 = this->ht_.begin(bucknum ^ 1); + typename TypeParam::const_local_iterator e2 = this->ht_.end(bucknum ^ 1); + EXPECT_TRUE(b2 == e2); +} + +TYPED_TEST(HashtableAllTest, Iterating) +{ + // Test a bit more iterating than just one ++. 
+ this->ht_.insert(this->UniqueObject(1)); + this->ht_.insert(this->UniqueObject(11)); + this->ht_.insert(this->UniqueObject(111)); + this->ht_.insert(this->UniqueObject(1111)); + this->ht_.insert(this->UniqueObject(11111)); + this->ht_.insert(this->UniqueObject(111111)); + this->ht_.insert(this->UniqueObject(1111111)); + this->ht_.insert(this->UniqueObject(11111111)); + this->ht_.insert(this->UniqueObject(111111111)); + typename TypeParam::iterator it = this->ht_.begin(); + for (int i = 1; i <= 9; i++) { // start at 1 so i is never 0 + // && here makes it easier to tell what loop iteration the test failed on. + EXPECT_TRUE(i && (it++ != this->ht_.end())); + } + EXPECT_TRUE(it == this->ht_.end()); +} + +TYPED_TEST(HashtableIntTest, Constructors) +{ + // The key/value types don't matter here, so I just test on one set + // of tables, the ones with int keys, which can easily handle the + // placement-news we have to do below. + Hasher hasher(1); // 1 is a unique id + int alloc_count = 0; + Alloc alloc(2, &alloc_count); + + TypeParam ht_noarg; + TypeParam ht_onearg(100); + TypeParam ht_twoarg(100, hasher); + TypeParam ht_threearg(100, hasher, hasher); // hasher serves as key_equal too + TypeParam ht_fourarg(100, hasher, hasher, alloc); + + // The allocator should have been called at most once, for the last ht. + EXPECT_GE(1, alloc_count); + int old_alloc_count = alloc_count; + + const typename TypeParam::value_type input[] = { + this->UniqueObject(1), + this->UniqueObject(2), + this->UniqueObject(4), + this->UniqueObject(8) + }; + const int num_inputs = sizeof(input) / sizeof(input[0]); + const typename TypeParam::value_type *begin = &input[0]; + const typename TypeParam::value_type *end = begin + num_inputs; + TypeParam ht_iter_noarg(begin, end); + TypeParam ht_iter_onearg(begin, end, 100); + TypeParam ht_iter_twoarg(begin, end, 100, hasher); + TypeParam ht_iter_threearg(begin, end, 100, hasher, hasher); + TypeParam ht_iter_fourarg(begin, end, 100, hasher, hasher, alloc); + // Now the allocator should have been called more. + EXPECT_GT(alloc_count, old_alloc_count); + old_alloc_count = alloc_count; + + // Let's do a lot more inserting and make sure the alloc-count goes up + for (int i = 2; i < 2000; i++) + ht_fourarg.insert(this->UniqueObject(i)); + EXPECT_GT(alloc_count, old_alloc_count); + + EXPECT_LT(ht_noarg.bucket_count(), 100u); + EXPECT_GE(ht_onearg.bucket_count(), 100u); + EXPECT_GE(ht_twoarg.bucket_count(), 100u); + EXPECT_GE(ht_threearg.bucket_count(), 100u); + EXPECT_GE(ht_fourarg.bucket_count(), 100u); + EXPECT_GE(ht_iter_onearg.bucket_count(), 100u); + + // When we pass in a hasher -- it can serve both as the hash-function + // and the key-equal function -- its id should be 1. Where we don't + // pass it in and use the default Hasher object, the id should be 0. 
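+ // (Hasher's id defaults to 0; only the explicit hasher(1) constructed above
+ // carries id 1.)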
+ EXPECT_EQ(0, ht_noarg.hash_funct().id()); + EXPECT_EQ(0, ht_noarg.key_eq().id()); + EXPECT_EQ(0, ht_onearg.hash_funct().id()); + EXPECT_EQ(0, ht_onearg.key_eq().id()); + EXPECT_EQ(1, ht_twoarg.hash_funct().id()); + EXPECT_EQ(0, ht_twoarg.key_eq().id()); + EXPECT_EQ(1, ht_threearg.hash_funct().id()); + EXPECT_EQ(1, ht_threearg.key_eq().id()); + + EXPECT_EQ(0, ht_iter_noarg.hash_funct().id()); + EXPECT_EQ(0, ht_iter_noarg.key_eq().id()); + EXPECT_EQ(0, ht_iter_onearg.hash_funct().id()); + EXPECT_EQ(0, ht_iter_onearg.key_eq().id()); + EXPECT_EQ(1, ht_iter_twoarg.hash_funct().id()); + EXPECT_EQ(0, ht_iter_twoarg.key_eq().id()); + EXPECT_EQ(1, ht_iter_threearg.hash_funct().id()); + EXPECT_EQ(1, ht_iter_threearg.key_eq().id()); + + // Likewise for the allocator + EXPECT_EQ(0, ht_threearg.get_allocator().id()); + EXPECT_EQ(0, ht_iter_threearg.get_allocator().id()); + EXPECT_EQ(2, ht_fourarg.get_allocator().id()); + EXPECT_EQ(2, ht_iter_fourarg.get_allocator().id()); +} + +TYPED_TEST(HashtableAllTest, OperatorEquals) +{ + { + TypeParam ht1, ht2; + ht1.set_deleted_key(this->UniqueKey(1)); + ht2.set_deleted_key(this->UniqueKey(2)); + + ht1.insert(this->UniqueObject(10)); + ht2.insert(this->UniqueObject(20)); + EXPECT_FALSE(ht1 == ht2); + ht1 = ht2; + EXPECT_TRUE(ht1 == ht2); + } + { + TypeParam ht1, ht2; + ht1.insert(this->UniqueObject(30)); + ht1 = ht2; + EXPECT_EQ(0u, ht1.size()); + } + { + TypeParam ht1, ht2; + ht1.set_deleted_key(this->UniqueKey(1)); + ht2.insert(this->UniqueObject(1)); // has same key as ht1.delkey + ht1 = ht2; // should reset deleted-key to 'unset' + EXPECT_EQ(1u, ht1.size()); + EXPECT_EQ(1u, ht1.count(this->UniqueKey(1))); + } +} + +TYPED_TEST(HashtableAllTest, Clear) +{ + for (int i = 1; i < 200; i++) { + this->ht_.insert(this->UniqueObject(i)); + } + this->ht_.clear(); + EXPECT_EQ(0u, this->ht_.size()); + // TODO(csilvers): do we want to enforce that the hashtable has or + // has not shrunk? It does for dense_* but not sparse_*. +} + +TYPED_TEST(HashtableAllTest, ClearNoResize) +{ + if (!this->ht_.supports_clear_no_resize()) + return; + typename TypeParam::size_type empty_bucket_count = this->ht_.bucket_count(); + int last_element = 1; + while (this->ht_.bucket_count() == empty_bucket_count) { + this->ht_.insert(this->UniqueObject(last_element)); + ++last_element; + } + typename TypeParam::size_type last_bucket_count = this->ht_.bucket_count(); + this->ht_.clear_no_resize(); + EXPECT_EQ(last_bucket_count, this->ht_.bucket_count()); + EXPECT_TRUE(this->ht_.empty()); + + // When inserting the same number of elements again, no resize + // should be necessary. + for (int i = 1; i < last_element; ++i) { + this->ht_.insert(this->UniqueObject(last_element + i)); + EXPECT_EQ(last_bucket_count, this->ht_.bucket_count()); + } +} + +TYPED_TEST(HashtableAllTest, Swap) +{ + // Let's make a second hashtable with its own hasher, key_equal, etc. 
+ Hasher hasher(1); // 1 is a unique id + TypeParam other_ht(200, hasher, hasher); + + this->ht_.set_deleted_key(this->UniqueKey(1)); + other_ht.set_deleted_key(this->UniqueKey(2)); + + for (int i = 3; i < 2000; i++) { + this->ht_.insert(this->UniqueObject(i)); + } + this->ht_.erase(this->UniqueKey(1000)); + other_ht.insert(this->UniqueObject(2001)); + typename TypeParam::size_type expected_buckets = other_ht.bucket_count(); + + this->ht_.swap(other_ht); + + EXPECT_EQ(1, this->ht_.hash_funct().id()); + EXPECT_EQ(0, other_ht.hash_funct().id()); + + EXPECT_EQ(1, this->ht_.key_eq().id()); + EXPECT_EQ(0, other_ht.key_eq().id()); + + EXPECT_EQ(expected_buckets, this->ht_.bucket_count()); + EXPECT_GT(other_ht.bucket_count(), 200u); + + EXPECT_EQ(1u, this->ht_.size()); + EXPECT_EQ(1996u, other_ht.size()); // because we erased 1000 + + EXPECT_EQ(0u, this->ht_.count(this->UniqueKey(111))); + EXPECT_EQ(1u, other_ht.count(this->UniqueKey(111))); + EXPECT_EQ(1u, this->ht_.count(this->UniqueKey(2001))); + EXPECT_EQ(0u, other_ht.count(this->UniqueKey(2001))); + EXPECT_EQ(0u, this->ht_.count(this->UniqueKey(1000))); + EXPECT_EQ(0u, other_ht.count(this->UniqueKey(1000))); + + // We purposefully don't swap allocs -- they're not necessarily swappable. + + // Now swap back, using the free-function swap + // NOTE: MSVC seems to have trouble with this free swap, not quite + // sure why. I've given up trying to fix it though. +#ifdef _MSC_VER + other_ht.swap(this->ht_); +#else + std::swap(this->ht_, other_ht); +#endif + + EXPECT_EQ(0, this->ht_.hash_funct().id()); + EXPECT_EQ(1, other_ht.hash_funct().id()); + EXPECT_EQ(1996u, this->ht_.size()); + EXPECT_EQ(1u, other_ht.size()); + EXPECT_EQ(1u, this->ht_.count(this->UniqueKey(111))); + EXPECT_EQ(0u, other_ht.count(this->UniqueKey(111))); + + // A user reported a crash with this code using swap to clear. + // We've since fixed the bug; this prevents a regression. + TypeParam swap_to_clear_ht; + swap_to_clear_ht.set_deleted_key(this->UniqueKey(1)); + for (int i = 2; i < 10000; ++i) { + swap_to_clear_ht.insert(this->UniqueObject(i)); + } + TypeParam empty_ht; + empty_ht.swap(swap_to_clear_ht); + swap_to_clear_ht.set_deleted_key(this->UniqueKey(1)); + for (int i = 2; i < 10000; ++i) { + swap_to_clear_ht.insert(this->UniqueObject(i)); + } +} + +TYPED_TEST(HashtableAllTest, Size) +{ + EXPECT_EQ(0u, this->ht_.size()); + for (int i = 1; i < 1000; i++) { // go through some resizes + this->ht_.insert(this->UniqueObject(i)); + EXPECT_EQ(static_cast(i), this->ht_.size()); + } + this->ht_.clear(); + EXPECT_EQ(0u, this->ht_.size()); + + this->ht_.set_deleted_key(this->UniqueKey(1)); + EXPECT_EQ(0u, this->ht_.size()); // deleted key doesn't count + for (int i = 2; i < 1000; i++) { // go through some resizes + this->ht_.insert(this->UniqueObject(i)); + this->ht_.erase(this->UniqueKey(i)); + EXPECT_EQ(0u, this->ht_.size()); + } +} + +TEST(HashtableTest, MaxSizeAndMaxBucketCount) +{ + // The max size depends on the allocator. So we can't use the + // built-in allocator type; instead, we make our own types. + sparse_hash_set > ht_default; + sparse_hash_set > ht_char; + sparse_hash_set > ht_104; + + EXPECT_GE(ht_default.max_size(), 256u); + EXPECT_EQ(255u, ht_char.max_size()); + EXPECT_EQ(104u, ht_104.max_size()); + + // In our implementations, MaxBucketCount == MaxSize. 
+ EXPECT_EQ(ht_default.max_size(), ht_default.max_bucket_count()); + EXPECT_EQ(ht_char.max_size(), ht_char.max_bucket_count()); + EXPECT_EQ(ht_104.max_size(), ht_104.max_bucket_count()); +} + +TYPED_TEST(HashtableAllTest, Empty) +{ + EXPECT_TRUE(this->ht_.empty()); + + this->ht_.insert(this->UniqueObject(1)); + EXPECT_FALSE(this->ht_.empty()); + + this->ht_.clear(); + EXPECT_TRUE(this->ht_.empty()); + + TypeParam empty_ht; + this->ht_.insert(this->UniqueObject(1)); + this->ht_.swap(empty_ht); + EXPECT_TRUE(this->ht_.empty()); +} + +TYPED_TEST(HashtableAllTest, BucketCount) +{ + TypeParam ht(100); + // constructor arg is number of *items* to be inserted, not the + // number of buckets, so we expect more buckets. + EXPECT_GT(ht.bucket_count(), 100u); + for (int i = 1; i < 200; i++) { + ht.insert(this->UniqueObject(i)); + } + EXPECT_GT(ht.bucket_count(), 200u); +} + +TYPED_TEST(HashtableAllTest, BucketAndBucketSize) +{ + const typename TypeParam::size_type expected_bucknum = this->ht_.bucket( + this->UniqueKey(1)); + EXPECT_EQ(0u, this->ht_.bucket_size(expected_bucknum)); + + this->ht_.insert(this->UniqueObject(1)); + EXPECT_EQ(expected_bucknum, this->ht_.bucket(this->UniqueKey(1))); + EXPECT_EQ(1u, this->ht_.bucket_size(expected_bucknum)); + + // Check that a bucket we didn't insert into, has a 0 size. Since + // we have an even number of buckets, bucknum^1 is guaranteed in range. + EXPECT_EQ(0u, this->ht_.bucket_size(expected_bucknum ^ 1)); +} + +TYPED_TEST(HashtableAllTest, LoadFactor) +{ + const typename TypeParam::size_type kSize = 16536; + // Check growing past various thresholds and then shrinking below + // them. + for (float grow_threshold = 0.2f; + grow_threshold <= 0.8f; + grow_threshold += 0.2f) + { + TypeParam ht; + ht.set_deleted_key(this->UniqueKey(1)); + ht.max_load_factor(grow_threshold); + ht.min_load_factor(0.0); + EXPECT_EQ(grow_threshold, ht.max_load_factor()); + EXPECT_EQ(0.0, ht.min_load_factor()); + + ht.resize(kSize); + size_t bucket_count = ht.bucket_count(); + // Erase and insert an element to set consider_shrink = true, + // which should not cause a shrink because the threshold is 0.0. + ht.insert(this->UniqueObject(2)); + ht.erase(this->UniqueKey(2)); + for (int i = 2;; ++i) + { + ht.insert(this->UniqueObject(i)); + if (static_cast(ht.size())/bucket_count < grow_threshold) { + EXPECT_EQ(bucket_count, ht.bucket_count()); + } else { + EXPECT_GT(ht.bucket_count(), bucket_count); + break; + } + } + // Now set a shrink threshold 1% below the current size and remove + // items until the size falls below that. + const float shrink_threshold = static_cast(ht.size()) / + ht.bucket_count() - 0.01f; + + // This time around, check the old set_resizing_parameters interface. + ht.set_resizing_parameters(shrink_threshold, 1.0); + EXPECT_EQ(1.0, ht.max_load_factor()); + EXPECT_EQ(shrink_threshold, ht.min_load_factor()); + + bucket_count = ht.bucket_count(); + for (int i = 2;; ++i) + { + ht.erase(this->UniqueKey(i)); + // A resize is only triggered by an insert, so add and remove a + // value every iteration to trigger the shrink as soon as the + // threshold is passed. + ht.erase(this->UniqueKey(i+1)); + ht.insert(this->UniqueObject(i+1)); + if (static_cast(ht.size())/bucket_count > shrink_threshold) { + EXPECT_EQ(bucket_count, ht.bucket_count()); + } else { + EXPECT_LT(ht.bucket_count(), bucket_count); + break; + } + } + } +} + +TYPED_TEST(HashtableAllTest, ResizeAndRehash) +{ + // resize() and rehash() are synonyms. rehash() is the tr1 name. 
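+ // (At least for the sparse_hashtable wrapper defined earlier, rehash(n)
+ // simply forwards to resize(n), so the two spellings are interchangeable here.)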
+ TypeParam ht(10000); + ht.max_load_factor(0.8f); // for consistency's sake + + for (int i = 1; i < 100; ++i) + ht.insert(this->UniqueObject(i)); + ht.resize(0); + // Now ht should be as small as possible. + EXPECT_LT(ht.bucket_count(), 300u); + + ht.rehash(9000); // use the 'rehash' version of the name. + // Bucket count should be next power of 2, after considering max_load_factor. + EXPECT_EQ(16384u, ht.bucket_count()); + for (int i = 101; i < 200; ++i) + ht.insert(this->UniqueObject(i)); + // Adding a few hundred buckets shouldn't have caused a resize yet. + EXPECT_EQ(ht.bucket_count(), 16384u); +} + +TYPED_TEST(HashtableAllTest, FindAndCountAndEqualRange) +{ + pair eq_pair; + pair const_eq_pair; + + EXPECT_TRUE(this->ht_.empty()); + EXPECT_TRUE(this->ht_.find(this->UniqueKey(1)) == this->ht_.end()); + EXPECT_EQ(0u, this->ht_.count(this->UniqueKey(1))); + eq_pair = this->ht_.equal_range(this->UniqueKey(1)); + EXPECT_TRUE(eq_pair.first == eq_pair.second); + + this->ht_.insert(this->UniqueObject(1)); + EXPECT_FALSE(this->ht_.empty()); + this->ht_.insert(this->UniqueObject(11)); + this->ht_.insert(this->UniqueObject(111)); + this->ht_.insert(this->UniqueObject(1111)); + this->ht_.insert(this->UniqueObject(11111)); + this->ht_.insert(this->UniqueObject(111111)); + this->ht_.insert(this->UniqueObject(1111111)); + this->ht_.insert(this->UniqueObject(11111111)); + this->ht_.insert(this->UniqueObject(111111111)); + EXPECT_EQ(9u, this->ht_.size()); + typename TypeParam::const_iterator it = this->ht_.find(this->UniqueKey(1)); + EXPECT_EQ(it.key(), this->UniqueKey(1)); + + // Allow testing the const version of the methods as well. + const TypeParam ht = this->ht_; + + // Some successful lookups (via find, count, and equal_range). + EXPECT_TRUE(this->ht_.find(this->UniqueKey(1)) != this->ht_.end()); + EXPECT_EQ(1u, this->ht_.count(this->UniqueKey(1))); + eq_pair = this->ht_.equal_range(this->UniqueKey(1)); + EXPECT_TRUE(eq_pair.first != eq_pair.second); + EXPECT_EQ(eq_pair.first.key(), this->UniqueKey(1)); + ++eq_pair.first; + EXPECT_TRUE(eq_pair.first == eq_pair.second); + + EXPECT_TRUE(ht.find(this->UniqueKey(1)) != ht.end()); + EXPECT_EQ(1u, ht.count(this->UniqueKey(1))); + const_eq_pair = ht.equal_range(this->UniqueKey(1)); + EXPECT_TRUE(const_eq_pair.first != const_eq_pair.second); + EXPECT_EQ(const_eq_pair.first.key(), this->UniqueKey(1)); + ++const_eq_pair.first; + EXPECT_TRUE(const_eq_pair.first == const_eq_pair.second); + + EXPECT_TRUE(this->ht_.find(this->UniqueKey(11111)) != this->ht_.end()); + EXPECT_EQ(1u, this->ht_.count(this->UniqueKey(11111))); + eq_pair = this->ht_.equal_range(this->UniqueKey(11111)); + EXPECT_TRUE(eq_pair.first != eq_pair.second); + EXPECT_EQ(eq_pair.first.key(), this->UniqueKey(11111)); + ++eq_pair.first; + EXPECT_TRUE(eq_pair.first == eq_pair.second); + + EXPECT_TRUE(ht.find(this->UniqueKey(11111)) != ht.end()); + EXPECT_EQ(1u, ht.count(this->UniqueKey(11111))); + const_eq_pair = ht.equal_range(this->UniqueKey(11111)); + EXPECT_TRUE(const_eq_pair.first != const_eq_pair.second); + EXPECT_EQ(const_eq_pair.first.key(), this->UniqueKey(11111)); + ++const_eq_pair.first; + EXPECT_TRUE(const_eq_pair.first == const_eq_pair.second); + + // Some unsuccessful lookups (via find, count, and equal_range). 
+ EXPECT_TRUE(this->ht_.find(this->UniqueKey(11112)) == this->ht_.end()); + EXPECT_EQ(0u, this->ht_.count(this->UniqueKey(11112))); + eq_pair = this->ht_.equal_range(this->UniqueKey(11112)); + EXPECT_TRUE(eq_pair.first == eq_pair.second); + + EXPECT_TRUE(ht.find(this->UniqueKey(11112)) == ht.end()); + EXPECT_EQ(0u, ht.count(this->UniqueKey(11112))); + const_eq_pair = ht.equal_range(this->UniqueKey(11112)); + EXPECT_TRUE(const_eq_pair.first == const_eq_pair.second); + + EXPECT_TRUE(this->ht_.find(this->UniqueKey(11110)) == this->ht_.end()); + EXPECT_EQ(0u, this->ht_.count(this->UniqueKey(11110))); + eq_pair = this->ht_.equal_range(this->UniqueKey(11110)); + EXPECT_TRUE(eq_pair.first == eq_pair.second); + + EXPECT_TRUE(ht.find(this->UniqueKey(11110)) == ht.end()); + EXPECT_EQ(0u, ht.count(this->UniqueKey(11110))); + const_eq_pair = ht.equal_range(this->UniqueKey(11110)); + EXPECT_TRUE(const_eq_pair.first == const_eq_pair.second); +} + +TYPED_TEST(HashtableAllTest, BracketInsert) +{ + // tests operator[], for those types that support it. + if (!this->ht_.supports_brackets()) + return; + + // bracket_equal is equivalent to ht_[a] == b. It should insert a if + // it doesn't already exist. + EXPECT_TRUE(this->ht_.bracket_equal(this->UniqueKey(1), + this->ht_.default_data())); + EXPECT_TRUE(this->ht_.find(this->UniqueKey(1)) != this->ht_.end()); + + // bracket_assign is equivalent to ht_[a] = b. + this->ht_.bracket_assign(this->UniqueKey(2), + this->ht_.get_data(this->UniqueObject(4))); + EXPECT_TRUE(this->ht_.find(this->UniqueKey(2)) != this->ht_.end()); + EXPECT_TRUE(this->ht_.bracket_equal( + this->UniqueKey(2), this->ht_.get_data(this->UniqueObject(4)))); + + this->ht_.bracket_assign( + this->UniqueKey(2), this->ht_.get_data(this->UniqueObject(6))); + EXPECT_TRUE(this->ht_.bracket_equal( + this->UniqueKey(2), this->ht_.get_data(this->UniqueObject(6)))); + // bracket_equal shouldn't have modified the value. + EXPECT_TRUE(this->ht_.bracket_equal( + this->UniqueKey(2), this->ht_.get_data(this->UniqueObject(6)))); + + // Verify that an operator[] that doesn't cause a resize, also + // doesn't require an extra rehash. + TypeParam ht(100); + EXPECT_EQ(0, ht.hash_funct().num_hashes()); + ht.bracket_assign(this->UniqueKey(2), ht.get_data(this->UniqueObject(2))); + EXPECT_EQ(1, ht.hash_funct().num_hashes()); + + // And overwriting, likewise, should only cause one extra hash. + ht.bracket_assign(this->UniqueKey(2), ht.get_data(this->UniqueObject(2))); + EXPECT_EQ(2, ht.hash_funct().num_hashes()); +} + + +TYPED_TEST(HashtableAllTest, InsertValue) +{ + // First, try some straightforward insertions. + EXPECT_TRUE(this->ht_.empty()); + this->ht_.insert(this->UniqueObject(1)); + EXPECT_FALSE(this->ht_.empty()); + this->ht_.insert(this->UniqueObject(11)); + this->ht_.insert(this->UniqueObject(111)); + this->ht_.insert(this->UniqueObject(1111)); + this->ht_.insert(this->UniqueObject(11111)); + this->ht_.insert(this->UniqueObject(111111)); + this->ht_.insert(this->UniqueObject(1111111)); + this->ht_.insert(this->UniqueObject(11111111)); + this->ht_.insert(this->UniqueObject(111111111)); + EXPECT_EQ(9u, this->ht_.size()); + EXPECT_EQ(1u, this->ht_.count(this->UniqueKey(1))); + EXPECT_EQ(1u, this->ht_.count(this->UniqueKey(1111))); + + // Check the return type. 
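+ // insert() returns a pair of (iterator, bool); .second is false when an
+ // equal element was already present, true when the insert actually happened.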
+ pair insert_it; + insert_it = this->ht_.insert(this->UniqueObject(1)); + EXPECT_EQ(false, insert_it.second); // false: already present + EXPECT_TRUE(*insert_it.first == this->UniqueObject(1)); + + insert_it = this->ht_.insert(this->UniqueObject(2)); + EXPECT_EQ(true, insert_it.second); // true: not already present + EXPECT_TRUE(*insert_it.first == this->UniqueObject(2)); +} + +TYPED_TEST(HashtableIntTest, InsertRange) +{ + // We just test the ints here, to make the placement-new easier. + TypeParam ht_source; + ht_source.insert(this->UniqueObject(10)); + ht_source.insert(this->UniqueObject(100)); + ht_source.insert(this->UniqueObject(1000)); + ht_source.insert(this->UniqueObject(10000)); + ht_source.insert(this->UniqueObject(100000)); + ht_source.insert(this->UniqueObject(1000000)); + + const typename TypeParam::value_type input[] = { + // This is a copy of the first element in ht_source. + *ht_source.begin(), + this->UniqueObject(2), + this->UniqueObject(4), + this->UniqueObject(8) + }; + + set set_input; + set_input.insert(this->UniqueObject(1111111)); + set_input.insert(this->UniqueObject(111111)); + set_input.insert(this->UniqueObject(11111)); + set_input.insert(this->UniqueObject(1111)); + set_input.insert(this->UniqueObject(111)); + set_input.insert(this->UniqueObject(11)); + + // Insert from ht_source, an iterator of the same type as us. + typename TypeParam::const_iterator begin = ht_source.begin(); + typename TypeParam::const_iterator end = begin; + std::advance(end, 3); + this->ht_.insert(begin, end); // insert 3 elements from ht_source + EXPECT_EQ(3u, this->ht_.size()); + EXPECT_TRUE(*this->ht_.begin() == this->UniqueObject(10) || + *this->ht_.begin() == this->UniqueObject(100) || + *this->ht_.begin() == this->UniqueObject(1000) || + *this->ht_.begin() == this->UniqueObject(10000) || + *this->ht_.begin() == this->UniqueObject(100000) || + *this->ht_.begin() == this->UniqueObject(1000000)); + + // And insert from set_input, a separate, non-random-access iterator. + typename set::const_iterator set_begin; + typename set::const_iterator set_end; + set_begin = set_input.begin(); + set_end = set_begin; + std::advance(set_end, 3); + this->ht_.insert(set_begin, set_end); + EXPECT_EQ(6u, this->ht_.size()); + + // Insert from input as well, a separate, random-access iterator. + // The first element of input overlaps with an existing element + // of ht_, so this should only up the size by 2. + this->ht_.insert(&input[0], &input[3]); + EXPECT_EQ(8u, this->ht_.size()); +} + +TEST(HashtableTest, InsertValueToMap) +{ + // For the maps in particular, ensure that inserting doesn't change + // the value. + sparse_hash_map shm; + pair::iterator, bool> shm_it; + shm[1] = 2; // test a different method of inserting + shm_it = shm.insert(pair(1, 3)); + EXPECT_EQ(false, shm_it.second); + EXPECT_EQ(1, shm_it.first->first); + EXPECT_EQ(2, shm_it.first->second); + shm_it.first->second = 20; + EXPECT_EQ(20, shm[1]); + + shm_it = shm.insert(pair(2, 4)); + EXPECT_EQ(true, shm_it.second); + EXPECT_EQ(2, shm_it.first->first); + EXPECT_EQ(4, shm_it.first->second); + EXPECT_EQ(4, shm[2]); +} + +TYPED_TEST(HashtableStringTest, EmptyKey) +{ + // Only run the string tests, to make it easier to know what the + // empty key should be. 
+ if (!this->ht_.supports_empty_key()) + return; + EXPECT_EQ(kEmptyString, this->ht_.empty_key()); +} + +TYPED_TEST(HashtableAllTest, Erase) +{ + this->ht_.set_deleted_key(this->UniqueKey(1)); + EXPECT_EQ(0u, this->ht_.erase(this->UniqueKey(20))); + this->ht_.insert(this->UniqueObject(10)); + this->ht_.insert(this->UniqueObject(20)); + EXPECT_EQ(1u, this->ht_.erase(this->UniqueKey(20))); + EXPECT_EQ(1u, this->ht_.size()); + EXPECT_EQ(0u, this->ht_.erase(this->UniqueKey(20))); + EXPECT_EQ(1u, this->ht_.size()); + EXPECT_EQ(0u, this->ht_.erase(this->UniqueKey(19))); + EXPECT_EQ(1u, this->ht_.size()); + + typename TypeParam::iterator it = this->ht_.find(this->UniqueKey(10)); + EXPECT_TRUE(it != this->ht_.end()); + this->ht_.erase(it); + EXPECT_EQ(0u, this->ht_.size()); + + for (int i = 10; i < 100; i++) + this->ht_.insert(this->UniqueObject(i)); + EXPECT_EQ(90u, this->ht_.size()); + this->ht_.erase(this->ht_.begin(), this->ht_.end()); + EXPECT_EQ(0u, this->ht_.size()); +} + +TYPED_TEST(HashtableAllTest, EraseDoesNotResize) +{ + this->ht_.set_deleted_key(this->UniqueKey(1)); + for (int i = 10; i < 2000; i++) { + this->ht_.insert(this->UniqueObject(i)); + } + const typename TypeParam::size_type old_count = this->ht_.bucket_count(); + for (int i = 10; i < 1000; i++) { // erase half one at a time + EXPECT_EQ(1u, this->ht_.erase(this->UniqueKey(i))); + } + this->ht_.erase(this->ht_.begin(), this->ht_.end()); // and the rest at once + EXPECT_EQ(0u, this->ht_.size()); + EXPECT_EQ(old_count, this->ht_.bucket_count()); +} + +TYPED_TEST(HashtableAllTest, Equals) +{ + // The real test here is whether two hashtables are equal if they + // have the same items but in a different order. + TypeParam ht1; + TypeParam ht2; + + EXPECT_TRUE(ht1 == ht1); + EXPECT_FALSE(ht1 != ht1); + EXPECT_TRUE(ht1 == ht2); + EXPECT_FALSE(ht1 != ht2); + ht1.set_deleted_key(this->UniqueKey(1)); + // Only the contents affect equality, not things like deleted-key. + EXPECT_TRUE(ht1 == ht2); + EXPECT_FALSE(ht1 != ht2); + ht1.resize(2000); + EXPECT_TRUE(ht1 == ht2); + + // The choice of allocator/etc doesn't matter either. + Hasher hasher(1); + Alloc alloc(2, NULL); + TypeParam ht3(5, hasher, hasher, alloc); + EXPECT_TRUE(ht1 == ht3); + EXPECT_FALSE(ht1 != ht3); + + ht1.insert(this->UniqueObject(2)); + EXPECT_TRUE(ht1 != ht2); + EXPECT_FALSE(ht1 == ht2); // this should hold as well! + + ht2.insert(this->UniqueObject(2)); + EXPECT_TRUE(ht1 == ht2); + + for (int i = 3; i <= 2000; i++) { + ht1.insert(this->UniqueObject(i)); + } + for (int i = 2000; i >= 3; i--) { + ht2.insert(this->UniqueObject(i)); + } + EXPECT_TRUE(ht1 == ht2); +} + +TEST(HashtableTest, IntIO) +{ + // Since the set case is just a special (easier) case than the map case, I + // just test on sparse_hash_map. This handles the easy case where we can + // use the standard reader and writer. + sparse_hash_map ht_out; + ht_out.set_deleted_key(0); + for (int i = 1; i < 1000; i++) { + ht_out[i] = i * i; + } + ht_out.erase(563); // just to test having some erased keys when we write. 
+ ht_out.erase(22); + + string file(TmpFile("intio")); + FILE* fp = fopen(file.c_str(), "wb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + EXPECT_TRUE(ht_out.write_metadata(fp)); + EXPECT_TRUE(ht_out.write_nopointer_data(fp)); + fclose(fp); + } + + sparse_hash_map ht_in; + fp = fopen(file.c_str(), "rb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + EXPECT_TRUE(ht_in.read_metadata(fp)); + EXPECT_TRUE(ht_in.read_nopointer_data(fp)); + fclose(fp); + } + + EXPECT_EQ(1, ht_in[1]); + EXPECT_EQ(998001, ht_in[999]); + EXPECT_EQ(100, ht_in[10]); + EXPECT_EQ(441, ht_in[21]); + EXPECT_EQ(0, ht_in[22]); // should not have been saved + EXPECT_EQ(0, ht_in[563]); +} + +TEST(HashtableTest, StringIO) +{ + // Since the set case is just a special (easier) case than the map case, + // I just test on sparse_hash_map. This handles the difficult case where + // we have to write our own custom reader/writer for the data. + typedef sparse_hash_map SP; + SP ht_out; + ht_out.set_deleted_key(string("")); + + for (int i = 32; i < 128; i++) { + // This maps 'a' to 32 a's, 'b' to 33 b's, etc. + ht_out[string(1, (char)i)] = string((size_t)i, (char)i); + } + ht_out.erase("c"); // just to test having some erased keys when we write. + ht_out.erase("y"); + + string file(TmpFile("stringio")); + FILE* fp = fopen(file.c_str(), "wb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + EXPECT_TRUE(ht_out.write_metadata(fp)); + + for (SP::const_iterator it = ht_out.cbegin(); it != ht_out.cend(); ++it) + { + const string::size_type first_size = it->first.length(); + fwrite(&first_size, sizeof(first_size), 1, fp); // ignore endianness issues + fwrite(it->first.c_str(), first_size, 1, fp); + + const string::size_type second_size = it->second.length(); + fwrite(&second_size, sizeof(second_size), 1, fp); + fwrite(it->second.c_str(), second_size, 1, fp); + } + fclose(fp); + } + + sparse_hash_map ht_in; + fp = fopen(file.c_str(), "rb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + EXPECT_TRUE(ht_in.read_metadata(fp)); + for (sparse_hash_map::iterator + it = ht_in.begin(); it != ht_in.end(); ++it) { + string::size_type first_size; + EXPECT_EQ(1u, fread(&first_size, sizeof(first_size), 1, fp)); + char* first = new char[first_size]; + EXPECT_EQ(1u, fread(first, first_size, 1, fp)); + + string::size_type second_size; + EXPECT_EQ(1u, fread(&second_size, sizeof(second_size), 1, fp)); + char* second = new char[second_size]; + EXPECT_EQ(1u, fread(second, second_size, 1, fp)); + + // it points to garbage, so we have to use placement-new to initialize. + // We also have to use const-cast since it->first is const. + new(const_cast(&it->first)) string(first, first_size); + new(&it->second) string(second, second_size); + delete[] first; + delete[] second; + } + fclose(fp); + } + EXPECT_EQ(string(" "), ht_in[" "]); + EXPECT_EQ(string("+++++++++++++++++++++++++++++++++++++++++++"), ht_in["+"]); + EXPECT_EQ(string(""), ht_in["c"]); // should not have been saved + EXPECT_EQ(string(""), ht_in["y"]); +} + +TYPED_TEST(HashtableAllTest, Serialization) +{ + if (!this->ht_.supports_serialization()) return; + TypeParam ht_out; + ht_out.set_deleted_key(this->UniqueKey(2000)); + for (int i = 1; i < 100; i++) { + ht_out.insert(this->UniqueObject(i)); + } + // just to test having some erased keys when we write. 
+ ht_out.erase(this->UniqueKey(56)); + ht_out.erase(this->UniqueKey(22)); + + string file(TmpFile("serialization")); + FILE* fp = fopen(file.c_str(), "wb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + EXPECT_TRUE(ht_out.serialize(ValueSerializer(), fp)); + fclose(fp); + } + + TypeParam ht_in; + fp = fopen(file.c_str(), "rb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + EXPECT_TRUE(ht_in.unserialize(ValueSerializer(), fp)); + fclose(fp); + } + + EXPECT_EQ(this->UniqueObject(1), *ht_in.find(this->UniqueKey(1))); + EXPECT_EQ(this->UniqueObject(99), *ht_in.find(this->UniqueKey(99))); + EXPECT_FALSE(ht_in.count(this->UniqueKey(100))); + EXPECT_EQ(this->UniqueObject(21), *ht_in.find(this->UniqueKey(21))); + // should not have been saved + EXPECT_FALSE(ht_in.count(this->UniqueKey(22))); + EXPECT_FALSE(ht_in.count(this->UniqueKey(56))); +} + +TYPED_TEST(HashtableIntTest, NopointerSerialization) +{ + if (!this->ht_.supports_serialization()) return; + TypeParam ht_out; + ht_out.set_deleted_key(this->UniqueKey(2000)); + for (int i = 1; i < 100; i++) { + ht_out.insert(this->UniqueObject(i)); + } + // just to test having some erased keys when we write. + ht_out.erase(this->UniqueKey(56)); + ht_out.erase(this->UniqueKey(22)); + + string file(TmpFile("nopointer_serialization")); + FILE* fp = fopen(file.c_str(), "wb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + EXPECT_TRUE(ht_out.serialize(typename TypeParam::NopointerSerializer(), fp)); + fclose(fp); + } + + TypeParam ht_in; + fp = fopen(file.c_str(), "rb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + EXPECT_TRUE(ht_in.unserialize(typename TypeParam::NopointerSerializer(), fp)); + fclose(fp); + } + + EXPECT_EQ(this->UniqueObject(1), *ht_in.find(this->UniqueKey(1))); + EXPECT_EQ(this->UniqueObject(99), *ht_in.find(this->UniqueKey(99))); + EXPECT_FALSE(ht_in.count(this->UniqueKey(100))); + EXPECT_EQ(this->UniqueObject(21), *ht_in.find(this->UniqueKey(21))); + // should not have been saved + EXPECT_FALSE(ht_in.count(this->UniqueKey(22))); + EXPECT_FALSE(ht_in.count(this->UniqueKey(56))); +} + +// We don't support serializing to a string by default, but you can do +// it by writing your own custom input/output class. +class StringIO { + public: + explicit StringIO(string* s) : s_(s) {} + size_t Write(const void* buf, size_t len) { + s_->append(reinterpret_cast(buf), len); + return len; + } + size_t Read(void* buf, size_t len) { + if (s_->length() < len) + len = s_->length(); + memcpy(reinterpret_cast(buf), s_->data(), len); + s_->erase(0, len); + return len; + } + private: + StringIO& operator=(const StringIO&); + string* const s_; +}; + +TYPED_TEST(HashtableIntTest, SerializingToString) +{ + if (!this->ht_.supports_serialization()) return; + TypeParam ht_out; + ht_out.set_deleted_key(this->UniqueKey(2000)); + for (int i = 1; i < 100; i++) { + ht_out.insert(this->UniqueObject(i)); + } + // just to test having some erased keys when we write. 
+ ht_out.erase(this->UniqueKey(56)); + ht_out.erase(this->UniqueKey(22)); + + string stringbuf; + StringIO stringio(&stringbuf); + EXPECT_TRUE(ht_out.serialize(typename TypeParam::NopointerSerializer(), + &stringio)); + + TypeParam ht_in; + EXPECT_TRUE(ht_in.unserialize(typename TypeParam::NopointerSerializer(), + &stringio)); + + EXPECT_EQ(this->UniqueObject(1), *ht_in.find(this->UniqueKey(1))); + EXPECT_EQ(this->UniqueObject(99), *ht_in.find(this->UniqueKey(99))); + EXPECT_FALSE(ht_in.count(this->UniqueKey(100))); + EXPECT_EQ(this->UniqueObject(21), *ht_in.find(this->UniqueKey(21))); + // should not have been saved + EXPECT_FALSE(ht_in.count(this->UniqueKey(22))); + EXPECT_FALSE(ht_in.count(this->UniqueKey(56))); +} + +// An easier way to do the above would be to use the existing stream methods. +TYPED_TEST(HashtableIntTest, SerializingToStringStream) +{ + if (!this->ht_.supports_serialization()) return; + TypeParam ht_out; + ht_out.set_deleted_key(this->UniqueKey(2000)); + for (int i = 1; i < 100; i++) { + ht_out.insert(this->UniqueObject(i)); + } + // just to test having some erased keys when we write. + ht_out.erase(this->UniqueKey(56)); + ht_out.erase(this->UniqueKey(22)); + + std::stringstream string_buffer; + EXPECT_TRUE(ht_out.serialize(typename TypeParam::NopointerSerializer(), + &string_buffer)); + + TypeParam ht_in; + EXPECT_TRUE(ht_in.unserialize(typename TypeParam::NopointerSerializer(), + &string_buffer)); + + EXPECT_EQ(this->UniqueObject(1), *ht_in.find(this->UniqueKey(1))); + EXPECT_EQ(this->UniqueObject(99), *ht_in.find(this->UniqueKey(99))); + EXPECT_FALSE(ht_in.count(this->UniqueKey(100))); + EXPECT_EQ(this->UniqueObject(21), *ht_in.find(this->UniqueKey(21))); + // should not have been saved + EXPECT_FALSE(ht_in.count(this->UniqueKey(22))); + EXPECT_FALSE(ht_in.count(this->UniqueKey(56))); +} + +// Verify that the metadata serialization is endianness and word size +// agnostic. +TYPED_TEST(HashtableAllTest, MetadataSerializationAndEndianness) +{ + TypeParam ht_out; + string kExpectedDense("\x13W\x86""B\0\0\0\0\0\0\0 \0\0\0\0\0\0\0\0\0\0\0\0", + 24); + + // GP change - switched size from 20 to formula, because the sparsegroup bitmap is 4 or 8 bytes and not 6 + string kExpectedSparse("$hu1\0\0\0 \0\0\0\0\0\0\0\0\0\0\0", 12 + sizeof(group_bm_type)); + + if (ht_out.supports_readwrite()) { + size_t num_bytes = 0; + string file(TmpFile("metadata_serialization")); + FILE* fp = fopen(file.c_str(), "wb"); + if (fp) + { + EXPECT_TRUE(fp != NULL); + + EXPECT_TRUE(ht_out.write_metadata(fp)); + EXPECT_TRUE(ht_out.write_nopointer_data(fp)); + + num_bytes = (const size_t)ftell(fp); + fclose(fp); + } + + char contents[24] = {0}; + fp = fopen(file.c_str(), "rb"); + if (fp) + { + EXPECT_LE(num_bytes, static_cast(24)); + EXPECT_EQ(num_bytes, fread(contents, 1, num_bytes <= 24 ? num_bytes : 24, fp)); + EXPECT_EQ(EOF, fgetc(fp)); // check we're *exactly* the right size + fclose(fp); + } + // TODO(csilvers): check type of ht_out instead of looking at the 1st byte. + if (contents[0] == kExpectedDense[0]) { + EXPECT_EQ(kExpectedDense, string(contents, num_bytes)); + } else { + EXPECT_EQ(kExpectedSparse, string(contents, num_bytes)); + } + } + + // Do it again with new-style serialization. Here we can use StringIO. 
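+    // serialize()/unserialize() are duck-typed on the stream argument: anything
+    // exposing Write(const void*, size_t) / Read(void*, size_t) -- like the
+    // StringIO helper above -- can stand in for a FILE*.  A minimal sketch of
+    // such a sink (CountingSink is a hypothetical name, not part of this test):
+    //
+    //     struct CountingSink {
+    //         size_t bytes;
+    //         CountingSink() : bytes(0) {}
+    //         // accumulate the number of bytes the table would serialize to
+    //         size_t Write(const void*, size_t len) { bytes += len; return len; }
+    //     };
+    //
+    //     // usage, mirroring the call below:
+    //     //   CountingSink sink;
+    //     //   ht_out.serialize(typename TypeParam::NopointerSerializer(), &sink);
+    //     //   // sink.bytes now holds the serialized size.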
+ if (ht_out.supports_serialization()) { + string stringbuf; + StringIO stringio(&stringbuf); + EXPECT_TRUE(ht_out.serialize(typename TypeParam::NopointerSerializer(), + &stringio)); + if (stringbuf[0] == kExpectedDense[0]) { + EXPECT_EQ(kExpectedDense, stringbuf); + } else { + EXPECT_EQ(kExpectedSparse, stringbuf); + } + } +} + + +// ------------------------------------------------------------------------ +// The above tests test the general API for correctness. These tests +// test a few corner cases that have tripped us up in the past, and +// more general, cross-API issues like memory management. + +TYPED_TEST(HashtableAllTest, BracketOperatorCrashing) +{ + this->ht_.set_deleted_key(this->UniqueKey(1)); + for (int iters = 0; iters < 10; iters++) { + // We start at 33 because after shrinking, we'll be at 32 buckets. + for (int i = 33; i < 133; i++) { + this->ht_.bracket_assign(this->UniqueKey(i), + this->ht_.get_data(this->UniqueObject(i))); + } + this->ht_.clear_no_resize(); + // This will force a shrink on the next insert, which we want to test. + this->ht_.bracket_assign(this->UniqueKey(2), + this->ht_.get_data(this->UniqueObject(2))); + this->ht_.erase(this->UniqueKey(2)); + } +} + +// For data types with trivial copy-constructors and destructors, we +// should use an optimized routine for data-copying, that involves +// memmove. We test this by keeping count of how many times the +// copy-constructor is called; it should be much less with the +// optimized code. +struct Memmove +{ +public: + Memmove(): i(0) {} + explicit Memmove(int ival): i(ival) {} + Memmove(const Memmove& that) { this->i = that.i; num_copies++; } + int i; + static int num_copies; +}; +int Memmove::num_copies = 0; + +struct NoMemmove +{ +public: + NoMemmove(): i(0) {} + explicit NoMemmove(int ival): i(ival) {} + NoMemmove(const NoMemmove& that) { this->i = that.i; num_copies++; } + int i; + static int num_copies; +}; +int NoMemmove::num_copies = 0; + +} // unnamed namespace + +#if 0 +// This is what tells the hashtable code it can use memmove for this class: +namespace google { + +template<> struct has_trivial_copy : true_type { }; +template<> struct has_trivial_destructor : true_type { }; + +}; +#endif + +namespace +{ + +TEST(HashtableTest, SimpleDataTypeOptimizations) +{ + // Only sparsehashtable optimizes moves in this way. + sparse_hash_map memmove; + sparse_hash_map nomemmove; + sparse_hash_map > > + memmove_nonstandard_alloc; + + Memmove::num_copies = 0; + for (int i = 10000; i > 0; i--) { + memmove[i] = Memmove(i); + } + // GP change - const int memmove_copies = Memmove::num_copies; + + NoMemmove::num_copies = 0; + for (int i = 10000; i > 0; i--) { + nomemmove[i] = NoMemmove(i); + } + // GP change - const int nomemmove_copies = NoMemmove::num_copies; + + Memmove::num_copies = 0; + for (int i = 10000; i > 0; i--) { + memmove_nonstandard_alloc[i] = Memmove(i); + } + // GP change - const int memmove_nonstandard_alloc_copies = Memmove::num_copies; + + // GP change - commented out following two lines + //EXPECT_GT(nomemmove_copies, memmove_copies); + //EXPECT_EQ(nomemmove_copies, memmove_nonstandard_alloc_copies); +} + +TYPED_TEST(HashtableAllTest, ResizeHysteresis) +{ + // We want to make sure that when we create a hashtable, and then + // add and delete one element, the size of the hashtable doesn't + // change. 
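+    // (In other words: a single insert followed by an erase never pushes
+    //  occupancy across the grow threshold, and shrinking is deliberately
+    //  hysteretic, so bucket_count() should come back exactly where it started.)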
+    this->ht_.set_deleted_key(this->UniqueKey(1));
+    typename TypeParam::size_type old_bucket_count = this->ht_.bucket_count();
+    this->ht_.insert(this->UniqueObject(4));
+    this->ht_.erase(this->UniqueKey(4));
+    this->ht_.insert(this->UniqueObject(4));
+    this->ht_.erase(this->UniqueKey(4));
+    EXPECT_EQ(old_bucket_count, this->ht_.bucket_count());
+
+    // Try it again, but with a hashtable that starts very small
+    TypeParam ht(2);
+    EXPECT_LT(ht.bucket_count(), 32u);   // verify we really do start small
+    ht.set_deleted_key(this->UniqueKey(1));
+    old_bucket_count = ht.bucket_count();
+    ht.insert(this->UniqueObject(4));
+    ht.erase(this->UniqueKey(4));
+    ht.insert(this->UniqueObject(4));
+    ht.erase(this->UniqueKey(4));
+    EXPECT_EQ(old_bucket_count, ht.bucket_count());
+}
+
+TEST(HashtableTest, ConstKey)
+{
+    // Sometimes people write hash_map<const int, int>, even though the
+    // const isn't necessary. Make sure we handle this cleanly.
+    sparse_hash_map<const int, int> shm;
+    shm.set_deleted_key(1);
+    shm[10] = 20;
+}
+
+TYPED_TEST(HashtableAllTest, ResizeActuallyResizes)
+{
+    // This tests for a problem we had where we could repeatedly "resize"
+    // a hashtable to the same size it was before, on every insert.
+    // -----------------------------------------------------------------
+    const typename TypeParam::size_type kSize = 1<<10;   // Pick any power of 2
+    const float kResize = 0.8f;    // anything between 0.5 and 1 is fine.
+    const int kThreshold = static_cast<int>(kSize * kResize - 1);
+    this->ht_.set_resizing_parameters(0, kResize);
+    this->ht_.set_deleted_key(this->UniqueKey(kThreshold + 100));
+
+    // Get right up to the resizing threshold.
+    for (int i = 0; i <= kThreshold; i++) {
+        this->ht_.insert(this->UniqueObject(i+1));
+    }
+    // The bucket count should equal kSize.
+    EXPECT_EQ(kSize, this->ht_.bucket_count());
+
+    // Now start doing erase+insert pairs.  This should cause us to
+    // copy the hashtable at most once.
+    const int pre_copies = this->ht_.num_table_copies();
+    for (int i = 0; i < static_cast<int>(kSize); i++) {
+        this->ht_.erase(this->UniqueKey(kThreshold));
+        this->ht_.insert(this->UniqueObject(kThreshold));
+    }
+    EXPECT_LT(this->ht_.num_table_copies(), pre_copies + 2);
+
+    // Now create a hashtable where we go right to the threshold, then
+    // delete everything and do one insert.  Even though our hashtable
+    // is now tiny, we should still have at least kSize buckets, because
+    // our shrink threshold is 0.
+    // -----------------------------------------------------------------
+    TypeParam ht2;
+    ht2.set_deleted_key(this->UniqueKey(kThreshold + 100));
+    ht2.set_resizing_parameters(0, kResize);
+    EXPECT_LT(ht2.bucket_count(), kSize);
+    for (int i = 0; i <= kThreshold; i++) {
+        ht2.insert(this->UniqueObject(i+1));
+    }
+    EXPECT_EQ(ht2.bucket_count(), kSize);
+    for (int i = 0; i <= kThreshold; i++) {
+        ht2.erase(this->UniqueKey(i+1));
+        EXPECT_EQ(ht2.bucket_count(), kSize);
+    }
+    ht2.insert(this->UniqueObject(kThreshold+2));
+    EXPECT_GE(ht2.bucket_count(), kSize);
+}
+
+TEST(HashtableTest, CXX11)
+{
+#if !defined(SPP_NO_CXX11_HDR_INITIALIZER_LIST)
+    {
+        // Initializer lists
+        // -----------------
+        typedef sparse_hash_map<int, int> Smap;
+
+        Smap smap({ {1, 1}, {2, 2} });
+        EXPECT_EQ(smap.size(), 2);
+
+        smap = { {1, 1}, {2, 2}, {3, 4} };
+        EXPECT_EQ(smap.size(), 3);
+
+        smap.insert({{5, 1}, {6, 1}});
+        EXPECT_EQ(smap.size(), 5);
+        EXPECT_EQ(smap[6], 1);
+        EXPECT_EQ(smap.at(6), 1);
+        try
+        {
+            EXPECT_EQ(smap.at(999), 1);
+        }
+        catch (...)
+        {};
+
+        sparse_hash_set<int> sset({ 1, 3, 4, 5 });
+        EXPECT_EQ(sset.size(), 4);
+    }
+#endif
+}
+
+
+
+TEST(HashtableTest, NestedHashtables)
+{
+    // People can do better than to have a hash_map of hash_maps, but we
+    // should still support it.  I try a few different mappings.
+    sparse_hash_map<string, sparse_hash_map<int, string>, Hasher, Hasher> ht1;
+
+    ht1["hi"];   // create a sub-ht with the default values
+    ht1["lo"][1] = "there";
+    sparse_hash_map<string, sparse_hash_map<int, string>, Hasher, Hasher>
+        ht1copy = ht1;
+}
+
+TEST(HashtableDeathTest, ResizeOverflow)
+{
+    sparse_hash_map<int, int> ht2;
+    EXPECT_DEATH(ht2.resize(static_cast<size_t>(-1)), "overflows size_type");
+}
+
+TEST(HashtableDeathTest, InsertSizeTypeOverflow)
+{
+    static const int kMax = 256;
+    vector<int> test_data(kMax);
+    for (int i = 0; i < kMax; ++i) {
+        test_data[(size_t)i] = i+1000;
+    }
+
+    sparse_hash_set<int, Hasher, Hasher, Alloc<int, uint8_t, 10> > shs;
+
+    // Test we are using the correct allocator
+    EXPECT_TRUE(shs.get_allocator().is_custom_alloc());
+
+    // Test size_type overflow in insert(it, it)
+    EXPECT_DEATH(shs.insert(test_data.begin(), test_data.end()), "overflows size_type");
+}
+
+TEST(HashtableDeathTest, InsertMaxSizeOverflow)
+{
+    static const int kMax = 256;
+    vector<int> test_data(kMax);
+    for (int i = 0; i < kMax; ++i) {
+        test_data[(size_t)i] = i+1000;
+    }
+
+    sparse_hash_set<int, Hasher, Hasher, Alloc<int, uint8_t, 10> > shs;
+
+    // Test max_size overflow
+    EXPECT_DEATH(shs.insert(test_data.begin(), test_data.begin() + 11), "exceed max_size");
+}
+
+TEST(HashtableDeathTest, ResizeSizeTypeOverflow)
+{
+    // Test min-buckets overflow, when we want to resize too close to size_type
+    sparse_hash_set<int, Hasher, Hasher, Alloc<int, uint8_t, 10> > shs;
+
+    EXPECT_DEATH(shs.resize(250), "overflows size_type");
+}
+
+TEST(HashtableDeathTest, ResizeDeltaOverflow)
+{
+    static const int kMax = 256;
+    vector<int> test_data(kMax);
+    for (int i = 0; i < kMax; ++i) {
+        test_data[(size_t)i] = i+1000;
+    }
+
+    sparse_hash_set<int, Hasher, Hasher, Alloc<int, uint8_t, 10> > shs;
+
+    for (int i = 0; i < 9; i++) {
+        shs.insert(i);
+    }
+    EXPECT_DEATH(shs.insert(test_data.begin(), test_data.begin() + 250),
+                 "overflows size_type");
+}
+
+// ------------------------------------------------------------------------
+// This informational "test" comes last so it's easy to see.
+// Also, benchmarks.
+
+TYPED_TEST(HashtableAllTest, ClassSizes)
+{
+    std::cout << "sizeof(" << typeid(TypeParam).name() << "): "
+              << sizeof(this->ht_) << "\n";
+}
+
+}   // unnamed namespace
+
+int main(int, char **)
+{
+    // All the work is done in the static constructors.  If they don't
+    // die, the tests have all passed.
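+    // (The TEST/TYPED_TEST cases above appear to be driven by the file's own
+    //  minimal harness and run from static initializers, which is why main()
+    //  only has to report success.)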
+ cout << "PASS\n"; + return 0; +} diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp.sln b/resources/3rdparty/sparsepp/tests/vsprojects/spp.sln new file mode 100755 index 000000000..06da8666e --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp.sln @@ -0,0 +1,38 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio 14 +VisualStudioVersion = 14.0.25420.1 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "spp_test", "spp_test.vcxproj", "{9863A521-E9DB-4775-A276-CADEF726CF11}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "spp_alloc_test", "spp_alloc_test.vcxproj", "{19BC4240-15ED-4C76-BC57-34BB70FE163B}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {9863A521-E9DB-4775-A276-CADEF726CF11}.Debug|x64.ActiveCfg = Debug|x64 + {9863A521-E9DB-4775-A276-CADEF726CF11}.Debug|x64.Build.0 = Debug|x64 + {9863A521-E9DB-4775-A276-CADEF726CF11}.Debug|x86.ActiveCfg = Debug|Win32 + {9863A521-E9DB-4775-A276-CADEF726CF11}.Debug|x86.Build.0 = Debug|Win32 + {9863A521-E9DB-4775-A276-CADEF726CF11}.Release|x64.ActiveCfg = Release|x64 + {9863A521-E9DB-4775-A276-CADEF726CF11}.Release|x64.Build.0 = Release|x64 + {9863A521-E9DB-4775-A276-CADEF726CF11}.Release|x86.ActiveCfg = Release|Win32 + {9863A521-E9DB-4775-A276-CADEF726CF11}.Release|x86.Build.0 = Release|Win32 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x64.ActiveCfg = Debug|x64 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x64.Build.0 = Debug|x64 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x86.ActiveCfg = Debug|Win32 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Debug|x86.Build.0 = Debug|Win32 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x64.ActiveCfg = Release|x64 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x64.Build.0 = Release|x64 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x86.ActiveCfg = Release|Win32 + {19BC4240-15ED-4C76-BC57-34BB70FE163B}.Release|x86.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj b/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj new file mode 100755 index 000000000..609710ffe --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj @@ -0,0 +1,176 @@ + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + + + + + + + + + + + + {19BC4240-15ED-4C76-BC57-34BB70FE163B} + Win32Proj + 8.1 + + + + Application + v140 + MultiByte + + + Application + MultiByte + v140 + + + Application + v140 + MultiByte + + + Application + v140 + MultiByte + + + + + + + + + + + + + + + + + + + <_ProjectFileVersion>14.0.23107.0 + + + None + + + $(SolutionDir)$(Configuration)\ + $(Configuration)\ + true + + + true + + + $(SolutionDir)$(Configuration)\ + $(Configuration)\ + false + + + false + + + + Disabled + WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + true + EnableFastChecks + MultiThreadedDebug + + Level3 + ProgramDatabase + ../.. 
+ + + $(OutDir)spp_alloc_test.exe + true + $(OutDir)spp_alloc_test.pdb + Console + MachineX86 + + + + + Disabled + WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + EnableFastChecks + MultiThreadedDebug + + + Level3 + ProgramDatabase + ../.. + + + $(OutDir)spp_alloc_test.exe + true + $(OutDir)spp_alloc_test.pdb + Console + + + + + WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreaded + + Level3 + ProgramDatabase + ../.. + + + $(OutDir)spp_alloc_test.exe + true + Console + true + true + MachineX86 + true + + + + + WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreaded + + + Level3 + ProgramDatabase + ../.. + + + $(OutDir)spp_alloc_test.exe + true + Console + true + true + true + + + + + + \ No newline at end of file diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj.filters b/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj.filters new file mode 100755 index 000000000..8c773fa94 --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp_alloc_test.vcxproj.filters @@ -0,0 +1,28 @@ + + + + + {c644622a-f598-4fcf-861c-199b4b988881} + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + \ No newline at end of file diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj b/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj new file mode 100755 index 000000000..c510a10cf --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj @@ -0,0 +1,175 @@ + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + + + + + + + + + + + + {9863A521-E9DB-4775-A276-CADEF726CF11} + Win32Proj + 8.1 + + + + Application + v140 + MultiByte + + + Application + MultiByte + v140 + + + Application + v140 + MultiByte + + + Application + v140 + MultiByte + + + + + + + + + + + + + + + + + + + <_ProjectFileVersion>14.0.23107.0 + + + None + + + $(SolutionDir)$(Configuration)\ + $(Configuration)\ + true + + + true + AllRules.ruleset + + + $(SolutionDir)$(Configuration)\ + $(Configuration)\ + false + + + false + + + + Disabled + WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + true + EnableFastChecks + MultiThreadedDebug + + Level3 + ProgramDatabase + ../.. + + + $(OutDir)spp_test.exe + true + $(OutDir)spp_test.pdb + Console + MachineX86 + + + + + Disabled + WIN32;_DEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + EnableFastChecks + MultiThreadedDebug + + + EnableAllWarnings + ProgramDatabase + ../.. + + + $(OutDir)spp_test.exe + true + $(OutDir)spp_test.pdb + Console + + + + + WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreaded + + Level3 + ProgramDatabase + ../.. + + + $(OutDir)spp_test.exe + true + Console + true + true + MachineX86 + + + + + WIN32;NDEBUG;_CONSOLE;_SCL_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreaded + + + Level3 + ProgramDatabase + ../.. 
+ + + $(OutDir)spp_test.exe + true + Console + true + true + + + + + + \ No newline at end of file diff --git a/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj.filters b/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj.filters new file mode 100755 index 000000000..70934ad0c --- /dev/null +++ b/resources/3rdparty/sparsepp/tests/vsprojects/spp_test.vcxproj.filters @@ -0,0 +1,32 @@ + + + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hpp;hxx;hm;inl;inc;xsd + + + + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + \ No newline at end of file