author     Shahriar Rouf <nafi@google.com>  2024-02-07 13:58:56 -0800
committer  Copybara-Service <copybara-worker@google.com>  2024-02-07 13:59:46 -0800
commit     3e59efa2ad1d1777257bd3b1845d5acc4a931687 (patch)
tree       641c393b5a52f29e8cdf712804e0102c2395ae25 /absl/hash/internal/low_level_hash.cc
parent     f4c713f55e0e7d12ae03204c027364dd87719e26 (diff)
Optimize `absl::Hash` by making `LowLevelHash` faster.
Throughput of the 64-byte chunk loop inside `LowLevelHash` (now in `LowLevelHashLenGt16`) is limited by the loop-carried dependency on `current_state`. By using 4 states instead of 2, we shorten that dependency chain by 1 cycle per iteration. On Skylake, it drops from 9 cycles to 8 cycles per iteration (12.5% faster asymptotically).
To see the reduction in a simplified version of the `LowLevelHash` implementation on Skylake (a rough standalone sketch of the loop-structure change also follows this list):
* Before: https://godbolt.org/z/Tcj9vsGax, llvm-mca (https://godbolt.org/z/3o78Msr63) shows 9 cycles / iteration.
* After: https://godbolt.org/z/q4GM4EjPr, llvm-mca (https://godbolt.org/z/W5d1KEMzq) shows 8 cycles / iteration.
* This CL removes 1 xor (1 cycle) per iteration from the critical path.
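To make the structural difference concrete without reading the assembly, here is a rough, self-contained C++ sketch of the two loop shapes (not the exact snippets behind the links above); `MixSketch`, the hex constants, and the word-array interface are illustrative stand-ins for the real `Mix`, salts, and unaligned loads:

#include <cstddef>
#include <cstdint>

// Simplified stand-in for Mix(): 64x64 -> 128 multiply, fold the halves with
// xor. Assumes a compiler with __int128 (GCC/Clang); the real code uses
// absl::uint128.
static uint64_t MixSketch(uint64_t v0, uint64_t v1) {
  unsigned __int128 p = static_cast<unsigned __int128>(v0) * v1;
  return static_cast<uint64_t>(p) ^ static_cast<uint64_t>(p >> 64);
}

// Before: two states. Folding cs0 ^ cs1 (and ds0 ^ ds1) back into each state
// puts an extra xor on the loop-carried dependency chain every iteration.
uint64_t ChunksTwoStates(const uint64_t* w, size_t n_words, uint64_t state) {
  uint64_t dup = state;
  for (size_t i = 0; i + 8 <= n_words; i += 8) {
    uint64_t cs0 = MixSketch(w[i + 0] ^ 0x9E3779B97F4A7C15u, w[i + 1] ^ state);
    uint64_t cs1 = MixSketch(w[i + 2] ^ 0xBF58476D1CE4E5B9u, w[i + 3] ^ state);
    state = cs0 ^ cs1;  // xor on the critical path
    uint64_t ds0 = MixSketch(w[i + 4] ^ 0x94D049BB133111EBu, w[i + 5] ^ dup);
    uint64_t ds1 = MixSketch(w[i + 6] ^ 0xD6E8FEB86659FD93u, w[i + 7] ^ dup);
    dup = ds0 ^ ds1;  // likewise
  }
  return state ^ dup;
}

// After: four independent states. Each carried chain is just xor + MixSketch;
// the combining xor/add happens once, after the loop, off the critical path.
uint64_t ChunksFourStates(const uint64_t* w, size_t n_words, uint64_t state) {
  uint64_t d0 = state, d1 = state, d2 = state;
  for (size_t i = 0; i + 8 <= n_words; i += 8) {
    state = MixSketch(w[i + 0] ^ 0x9E3779B97F4A7C15u, w[i + 1] ^ state);
    d0 = MixSketch(w[i + 2] ^ 0xBF58476D1CE4E5B9u, w[i + 3] ^ d0);
    d1 = MixSketch(w[i + 4] ^ 0x94D049BB133111EBu, w[i + 5] ^ d1);
    d2 = MixSketch(w[i + 6] ^ 0xD6E8FEB86659FD93u, w[i + 7] ^ d2);
  }
  return (state ^ d0) ^ (d1 + d2);
}

With two states the per-iteration carried chain is xor -> multiply-mix -> xor; with four it is just xor -> multiply-mix, which accounts for the 1-cycle (9 -> 8) difference measured above.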
A block for 32-byte chunks is also added.
Finally, just before returning, `Mix` is called once instead of twice.
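For reference, the tail combination before and after (quoted from the diff below): instead of feeding the length into a second, dependent `Mix`, the length is xor-ed into the first operand of a single `Mix`:

  Before:
    uint64_t w = Mix(a ^ salt[1], b ^ current_state);
    uint64_t z = salt[1] ^ starting_length;
    return Mix(w, z);

  After:
    return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);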
PiperOrigin-RevId: 605090653
Change-Id: Ib7517ebb8bef7484066cd14cf41a943953e93377
Diffstat (limited to 'absl/hash/internal/low_level_hash.cc')
-rw-r--r--  absl/hash/internal/low_level_hash.cc  84
1 file changed, 59 insertions, 25 deletions
diff --git a/absl/hash/internal/low_level_hash.cc b/absl/hash/internal/low_level_hash.cc
index b5db0b89..43de6729 100644
--- a/absl/hash/internal/low_level_hash.cc
+++ b/absl/hash/internal/low_level_hash.cc
@@ -14,6 +14,9 @@
 #include "absl/hash/internal/low_level_hash.h"
 
+#include <cstddef>
+#include <cstdint>
+
 #include "absl/base/internal/unaligned_access.h"
 #include "absl/base/prefetch.h"
 #include "absl/numeric/int128.h"
@@ -28,8 +31,8 @@ static uint64_t Mix(uint64_t v0, uint64_t v1) {
   return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
 }
 
-uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
-                      const uint64_t salt[5]) {
+uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
+                             const uint64_t salt[5]) {
   // Prefetch the cacheline that data resides in.
   PrefetchToLocalCache(data);
   const uint8_t* ptr = static_cast<const uint8_t*>(data);
@@ -40,7 +43,9 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
     // If we have more than 64 bytes, we're going to handle chunks of 64
     // bytes at a time. We're going to build up two separate hash states
     // which we will then hash together.
-    uint64_t duplicated_state = current_state;
+    uint64_t duplicated_state0 = current_state;
+    uint64_t duplicated_state1 = current_state;
+    uint64_t duplicated_state2 = current_state;
 
     do {
       // Always prefetch the next cacheline.
@@ -55,24 +60,39 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
       uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
       uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
 
-      uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
-      uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
-      current_state = (cs0 ^ cs1);
+      current_state = Mix(a ^ salt[1], b ^ current_state);
+      duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0);
 
-      uint64_t ds0 = Mix(e ^ salt[3], f ^ duplicated_state);
-      uint64_t ds1 = Mix(g ^ salt[4], h ^ duplicated_state);
-      duplicated_state = (ds0 ^ ds1);
+      duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1);
+      duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2);
 
       ptr += 64;
       len -= 64;
     } while (len > 64);
 
-    current_state = current_state ^ duplicated_state;
+    current_state = (current_state ^ duplicated_state0) ^
+                    (duplicated_state1 + duplicated_state2);
   }
 
   // We now have a data `ptr` with at most 64 bytes and the current state
   // of the hashing state machine stored in current_state.
-  while (len > 16) {
+  if (len > 32) {
+    uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
+    uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
+    uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
+    uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
+
+    uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
+    uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
+    current_state = cs0 ^ cs1;
+
+    ptr += 32;
+    len -= 32;
+  }
+
+  // We now have a data `ptr` with at most 32 bytes and the current state
+  // of the hashing state machine stored in current_state.
+  if (len > 16) {
     uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
     uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
 
@@ -82,13 +102,33 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
     len -= 16;
   }
 
-  // We now have a data `ptr` with at most 16 bytes.
+  // We now have a data `ptr` with at least 1 and at most 16 bytes. But we can
+  // safely read from `ptr + len - 16`.
+  uint64_t a = absl::base_internal::UnalignedLoad64(ptr + len - 16);
+  uint64_t b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
+
+  return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
+}
+
+uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
+                      const uint64_t salt[5]) {
+  if (len > 16) return LowLevelHashLenGt16(data, len, seed, salt);
+
+  // Prefetch the cacheline that data resides in.
+  PrefetchToLocalCache(data);
+  const uint8_t* ptr = static_cast<const uint8_t*>(data);
+  uint64_t starting_length = static_cast<uint64_t>(len);
+  uint64_t current_state = seed ^ salt[0];
+  if (len == 0) return current_state;
+
   uint64_t a = 0;
   uint64_t b = 0;
+
+  // We now have a data `ptr` with at least 1 and at most 16 bytes.
   if (len > 8) {
     // When we have at least 9 and at most 16 bytes, set A to the first 64
-    // bits of the input and B to the last 64 bits of the input. Yes, they will
-    // overlap in the middle if we are working with less than the full 16
+    // bits of the input and B to the last 64 bits of the input. Yes, they
+    // will overlap in the middle if we are working with less than the full 16
     // bytes.
     a = absl::base_internal::UnalignedLoad64(ptr);
     b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
@@ -97,20 +137,14 @@ uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
     // bits and B to the last 32 bits.
     a = absl::base_internal::UnalignedLoad32(ptr);
     b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
-  } else if (len > 0) {
-    // If we have at least 1 and at most 3 bytes, read all of the provided
-    // bits into A, with some adjustments.
-    a = static_cast<uint64_t>((ptr[0] << 16) | (ptr[len >> 1] << 8) |
-                              ptr[len - 1]);
-    b = 0;
   } else {
-    a = 0;
-    b = 0;
+    // If we have at least 1 and at most 3 bytes, read 2 bytes into A and the
+    // other byte into B, with some adjustments.
+    a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);
+    b = static_cast<uint64_t>(ptr[len >> 1]);
   }
 
-  uint64_t w = Mix(a ^ salt[1], b ^ current_state);
-  uint64_t z = salt[1] ^ starting_length;
-  return Mix(w, z);
+  return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
 }
 
 }  // namespace hash_internal
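For context, a minimal usage sketch of the (internal) entry point this change touches. The salt values below are placeholders made up for illustration, not what Abseil ships, and normal code should go through `absl::Hash` rather than calling this internal helper directly:

#include <cstdint>

#include "absl/hash/internal/low_level_hash.h"

int main() {
  // Placeholder salt values for illustration only; the library supplies its
  // own salt array internally.
  constexpr uint64_t kSalt[5] = {0x243F6A8885A308D3u, 0x13198A2E03707344u,
                                 0xA4093822299F31D0u, 0x082EFA98EC4E6C89u,
                                 0x452821E638D01377u};
  const char data[] = "hello, low_level_hash";
  // len > 16 here, so this dispatches to LowLevelHashLenGt16 internally.
  uint64_t h = absl::hash_internal::LowLevelHash(data, sizeof(data) - 1,
                                                 /*seed=*/0, kSalt);
  (void)h;  // Use the value in real code.
  return 0;
}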