|  |  |  |
|---|---|---|
| author | Evan Brown <ezb@google.com> | 2024-03-19 12:07:34 -0700 |
| committer | Copybara-Service <copybara-worker@google.com> | 2024-03-19 12:08:27 -0700 |
| commit | 1980d7b98a0cae64432ccf9c4afebda5e21b2c5e (patch) | |
| tree | 89fa98715f94d4f0221080158e1c798dc5cf52c0 /absl/container/internal/hashtablez_sampler.cc | |
| parent | 43c36ffae6256b4c93c1b9d3c73efaf8c2c891ab (diff) | |
Do hashtablez sampling on the first insertion into an empty SOO hashtable.
When sampling triggers, we skip SOO and allocate a backing array. We must do this because the HashtablezInfoHandle is part of the heap allocation (along with the control bytes and slots). By default, we sample 1 in ~1024 hashtables when sampling is enabled. This will impact performance because (a) we lose the SOO benefit, so we have worse data locality (more cache/TLB misses), and (b) the backing array capacity will be 3 instead of 1, so (b.1) we skip the rehash after the second insertion but (b.2) we potentially waste up to two slots' worth of memory.
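To make that trade-off concrete, here is a minimal, self-contained C++ sketch of the first-insertion decision described above. It is an illustrative model only: `TableState`, `ShouldSampleFirstInsert`, and `FirstInsert` are hypothetical stand-ins rather than the raw_hash_set internals, and the 1-in-1024 check stands in for the real stride-based sampler.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Hypothetical model of the first-insertion path; the real logic lives in
// absl::container_internal::raw_hash_set and the hashtablez sampler.
struct TableState {
  bool uses_soo = true;      // elements stored inline in the table object
  std::size_t capacity = 1;  // SOO capacity
  bool sampled = false;      // whether a HashtablezInfo was registered
};

// Stand-in for the stride-based sampler: roughly 1 in 1024 tables sampled.
bool ShouldSampleFirstInsert(std::uint64_t table_counter) {
  return table_counter % 1024 == 0;
}

TableState FirstInsert(std::uint64_t table_counter) {
  TableState t;
  if (ShouldSampleFirstInsert(table_counter)) {
    // Sampling needs a HashtablezInfoHandle, which lives in the heap-allocated
    // backing array next to the control bytes and slots, so SOO is skipped and
    // the table starts at the next non-SOO capacity.
    t.uses_soo = false;
    t.capacity = 3;
    t.sampled = true;
  }
  return t;
}

int main() {
  TableState sampled = FirstInsert(0);
  TableState unsampled = FirstInsert(1);
  std::cout << "sampled: uses_soo=" << sampled.uses_soo
            << " capacity=" << sampled.capacity << "\n"
            << "unsampled: uses_soo=" << unsampled.uses_soo
            << " capacity=" << unsampled.capacity << "\n";
}
```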
We also add an soo_capacity field to HashtablezInfo so we can distinguish which sampled tables would otherwise have been SOO; this lets us estimate what fraction of tables are in SOO mode.
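As a rough illustration of how such a field could be consumed, the hedged sketch below aggregates per-table samples and estimates the fraction that would still fit in SOO. `SampleView` and `SooEligibleFraction` are hypothetical; they only mirror the `size` and `soo_capacity` values a sampled table records, not the sampler's real iteration API.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical, minimal stand-in for the fields a sampled table reports.
struct SampleView {
  std::size_t size;            // current number of elements
  std::uint16_t soo_capacity;  // 0 means the table type has no SOO mode
};

// Fraction of SOO-capable sampled tables whose contents would fit inline.
double SooEligibleFraction(const std::vector<SampleView>& samples) {
  std::size_t soo_capable = 0, soo_sized = 0;
  for (const SampleView& s : samples) {
    if (s.soo_capacity == 0) continue;  // this table type can never be SOO
    ++soo_capable;
    if (s.size <= s.soo_capacity) ++soo_sized;
  }
  return soo_capable == 0 ? 0.0
                          : static_cast<double>(soo_sized) / soo_capable;
}

int main() {
  std::vector<SampleView> samples = {{0, 1}, {1, 1}, {5, 1}, {2, 0}};
  std::cout << "approx. SOO-eligible fraction: "
            << SooEligibleFraction(samples) << "\n";  // prints 0.666667
}
```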
PiperOrigin-RevId: 617252334
Change-Id: Ib48b7a4870bd74ea3ba923ed8f350a3b75dbb7d3
Diffstat (limited to 'absl/container/internal/hashtablez_sampler.cc')
|  |  |  |
|---|---|---|
| -rw-r--r-- | absl/container/internal/hashtablez_sampler.cc | 20 |

1 file changed, 14 insertions, 6 deletions
```diff
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index b21beef0..e17ba14d 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -18,13 +18,18 @@
 #include <atomic>
 #include <cassert>
 #include <cmath>
+#include <cstddef>
+#include <cstdint>
 #include <functional>
 #include <limits>
 
 #include "absl/base/attributes.h"
 #include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
 #include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
 #include "absl/base/no_destructor.h"
+#include "absl/base/optimization.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/memory/memory.h"
 #include "absl/profiling/internal/exponential_biased.h"
@@ -73,7 +78,8 @@ HashtablezInfo::HashtablezInfo() = default;
 HashtablezInfo::~HashtablezInfo() = default;
 
 void HashtablezInfo::PrepareForSampling(int64_t stride,
-                                        size_t inline_element_size_value) {
+                                        size_t inline_element_size_value,
+                                        uint16_t soo_capacity_value) {
   capacity.store(0, std::memory_order_relaxed);
   size.store(0, std::memory_order_relaxed);
   num_erases.store(0, std::memory_order_relaxed);
@@ -93,6 +99,7 @@ void HashtablezInfo::PrepareForSampling(int64_t stride,
   depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
                               /* skip_count= */ 0);
   inline_element_size = inline_element_size_value;
+  soo_capacity = soo_capacity_value;
 }
 
 static bool ShouldForceSampling() {
@@ -116,12 +123,12 @@ static bool ShouldForceSampling() {
 }
 
 HashtablezInfo* SampleSlow(SamplingState& next_sample,
-                           size_t inline_element_size) {
+                           size_t inline_element_size, uint16_t soo_capacity) {
   if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
     next_sample.next_sample = 1;
     const int64_t old_stride = exchange(next_sample.sample_stride, 1);
-    HashtablezInfo* result =
-        GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+    HashtablezInfo* result = GlobalHashtablezSampler().Register(
+        old_stride, inline_element_size, soo_capacity);
     return result;
   }
 
@@ -151,10 +158,11 @@ HashtablezInfo* SampleSlow(SamplingState& next_sample,
   // that case.
   if (first) {
     if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
-    return SampleSlow(next_sample, inline_element_size);
+    return SampleSlow(next_sample, inline_element_size, soo_capacity);
   }
 
-  return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
+  return GlobalHashtablezSampler().Register(old_stride, inline_element_size,
+                                            soo_capacity);
 #endif
 }
```