author     Abseil Team <absl-team@google.com>  2022-01-19 11:04:50 -0800
committer  dinord <dino.radakovich@gmail.com>  2022-01-19 14:36:25 -0500
commit     fbbb5865a562c9a9167d71c1cf56b82025a8f065 (patch)
tree       212c8694bb6cf1d76af2b4aabac02c4910f6beab /absl/hash/hash_benchmark.cc
parent     9bb42857112ad13b23de4333fbb75eb47db9de95 (diff)
Export of internal Abseil changes
-- 487c7a754a3b93bc0f9de14bdced48007a96ae55 by Greg Falcon <gfalcon@google.com>:

Add support for absl::Hash to hash unordered containers. These can now be hashed directly, as well as combined in AbslHashValue implementations.

This also adds a new method, `H::combine_unordered()`, to the public AbslHashValue hash state API. This allows users to implement hash specializations for their own unordered collection types.

A traits class, `H::is_hashable<T>`, is also added to the hash state API. `H::is_hashable<T>::value` reflects whether type T is considered hashable by the AbslHashValue framework. This allows users to properly SFINAE templated versions of AbslHashValue. (The AbslHashValue implementation added to raw_hash_set shows an example of its use.)

PiperOrigin-RevId: 422856706
GitOrigin-RevId: 487c7a754a3b93bc0f9de14bdced48007a96ae55
Change-Id: Id31fd4ccba282f8c9ae6fcee6ae0ad0f7879f456
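As a rough illustration of the two new APIs working together, the sketch below shows how a user-defined unordered container might adopt them. The `MyBag` type and its members are hypothetical; only `H::combine_unordered()` and `H::is_hashable<T>` come from this commit, and the combine pattern mirrors the FastUnorderedSet helper added in the diff below.

#include <type_traits>
#include <unordered_set>
#include <utility>
#include "absl/hash/hash.h"

// Hypothetical user-defined unordered collection; not part of Abseil.
template <typename T>
struct MyBag {
  std::unordered_multiset<T> items;

  // H::is_hashable<T> gates this overload via SFINAE: it participates in
  // overload resolution only when T is itself hashable by absl::Hash.
  template <typename H>
  friend typename std::enable_if<H::template is_hashable<T>::value, H>::type
  AbslHashValue(H h, const MyBag& bag) {
    // combine_unordered() mixes the elements independent of iteration order,
    // so two bags with equal contents hash equally regardless of bucket
    // layout; the element count is then combined on top.
    return H::combine(
        H::combine_unordered(std::move(h), bag.items.begin(),
                             bag.items.end()),
        bag.items.size());
  }
};

With this in place, absl::Hash<MyBag<int>>{} can be applied to a bag directly, and MyBag values can also be combined into other AbslHashValue implementations.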
Diffstat (limited to 'absl/hash/hash_benchmark.cc')
-rw-r--r--  absl/hash/hash_benchmark.cc  77
1 file changed, 73 insertions, 4 deletions
diff --git a/absl/hash/hash_benchmark.cc b/absl/hash/hash_benchmark.cc
index d498ac29..8712a01c 100644
--- a/absl/hash/hash_benchmark.cc
+++ b/absl/hash/hash_benchmark.cc
@@ -19,6 +19,7 @@
#include <vector>
#include "absl/base/attributes.h"
+#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/random/random.h"
#include "absl/strings/cord.h"
@@ -107,6 +108,44 @@ absl::Cord FragmentedCord(size_t size) {
return result;
}
+template <typename T>
+std::vector<T> Vector(size_t count) {
+ std::vector<T> result;
+ for (size_t v = 0; v < count; ++v) {
+ result.push_back(v);
+ }
+ return result;
+}
+
+// Bogus type that replicates an unordered_set's bit mixing, but with
+// vector-speed iteration. This is intended to measure the overhead of unordered
+// hashing without counting the speed of unordered_set iteration.
+template <typename T>
+struct FastUnorderedSet {
+ explicit FastUnorderedSet(size_t count) {
+ for (size_t v = 0; v < count; ++v) {
+ values.push_back(v);
+ }
+ }
+ std::vector<T> values;
+
+ template <typename H>
+ friend H AbslHashValue(H h, const FastUnorderedSet& fus) {
+ return H::combine(H::combine_unordered(std::move(h), fus.values.begin(),
+ fus.values.end()),
+ fus.values.size());
+ }
+};
+
+template <typename T>
+absl::flat_hash_set<T> FlatHashSet(size_t count) {
+ absl::flat_hash_set<T> result;
+ for (size_t v = 0; v < count; ++v) {
+ result.insert(v);
+ }
+ return result;
+}
+
// Generates a benchmark and a codegen method for the provided types. The
// codegen method provides a well known entrypoint for dumping assembly.
#define MAKE_BENCHMARK(hash, name, ...) \
@@ -145,10 +184,22 @@ MAKE_BENCHMARK(AbslHash, Cord_Flat_200, FlatCord(200));
MAKE_BENCHMARK(AbslHash, Cord_Flat_5000, FlatCord(5000));
MAKE_BENCHMARK(AbslHash, Cord_Fragmented_200, FragmentedCord(200));
MAKE_BENCHMARK(AbslHash, Cord_Fragmented_5000, FragmentedCord(5000));
-MAKE_BENCHMARK(AbslHash, VectorInt64_10, std::vector<int64_t>(10));
-MAKE_BENCHMARK(AbslHash, VectorInt64_100, std::vector<int64_t>(100));
-MAKE_BENCHMARK(AbslHash, VectorDouble_10, std::vector<double>(10, 1.1));
-MAKE_BENCHMARK(AbslHash, VectorDouble_100, std::vector<double>(100, 1.1));
+MAKE_BENCHMARK(AbslHash, VectorInt64_10, Vector<int64_t>(10));
+MAKE_BENCHMARK(AbslHash, VectorInt64_100, Vector<int64_t>(100));
+MAKE_BENCHMARK(AbslHash, VectorInt64_1000, Vector<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, VectorDouble_10, Vector<double>(10));
+MAKE_BENCHMARK(AbslHash, VectorDouble_100, Vector<double>(100));
+MAKE_BENCHMARK(AbslHash, VectorDouble_1000, Vector<double>(1000));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_10, FlatHashSet<int64_t>(10));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_100, FlatHashSet<int64_t>(100));
+MAKE_BENCHMARK(AbslHash, FlatHashSetInt64_1000, FlatHashSet<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_10, FlatHashSet<double>(10));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_100, FlatHashSet<double>(100));
+MAKE_BENCHMARK(AbslHash, FlatHashSetDouble_1000, FlatHashSet<double>(1000));
+MAKE_BENCHMARK(AbslHash, FastUnorderedSetInt64_1000,
+ FastUnorderedSet<int64_t>(1000));
+MAKE_BENCHMARK(AbslHash, FastUnorderedSetDouble_1000,
+ FastUnorderedSet<double>(1000));
MAKE_BENCHMARK(AbslHash, PairStringString_0,
std::make_pair(std::string(), std::string()));
MAKE_BENCHMARK(AbslHash, PairStringString_10,
@@ -180,6 +231,24 @@ MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_10,
std::vector<double>(10, 1.1));
MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_100,
std::vector<double>(100, 1.1));
+MAKE_BENCHMARK(TypeErasedAbslHash, VectorDouble_1000,
+ std::vector<double>(1000, 1.1));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_10,
+ FlatHashSet<int64_t>(10));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_100,
+ FlatHashSet<int64_t>(100));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetInt64_1000,
+ FlatHashSet<int64_t>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_10,
+ FlatHashSet<double>(10));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_100,
+ FlatHashSet<double>(100));
+MAKE_BENCHMARK(TypeErasedAbslHash, FlatHashSetDouble_1000,
+ FlatHashSet<double>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FastUnorderedSetInt64_1000,
+ FastUnorderedSet<int64_t>(1000));
+MAKE_BENCHMARK(TypeErasedAbslHash, FastUnorderedSetDouble_1000,
+ FastUnorderedSet<double>(1000));
// The latency benchmark attempts to model the speed of the hash function in
// production. When a hash function is used for hashtable lookups it is rarely