Diffstat (limited to 'absl/container/internal/raw_hash_set.h')
-rw-r--r-- | absl/container/internal/raw_hash_set.h | 33
1 file changed, 9 insertions, 24 deletions
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 65dc66c2..09b55f66 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -749,15 +749,6 @@ using Group = GroupAArch64Impl;
 using Group = GroupPortableImpl;
 #endif
 
-// When there is an insertion with no reserved growth, we rehash with
-// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
-// constant divided by capacity ensures that inserting N elements is still O(N)
-// in the average case. Using the constant 16 means that we expect to rehash ~8
-// times more often than when generations are disabled. We are adding expected
-// rehash_probability * #insertions/capacity_growth = 16/capacity * ((7/8 -
-// 7/16) * capacity)/capacity_growth = ~7 extra rehashes per capacity growth.
-inline size_t RehashProbabilityConstant() { return 16; }
-
 class CommonFieldsGenerationInfoEnabled {
   // A sentinel value for reserved_growth_ indicating that we just ran out of
   // reserved growth on the last insertion. When reserve is called and then
@@ -778,10 +769,12 @@ class CommonFieldsGenerationInfoEnabled {
 
   // Whether we should rehash on insert in order to detect bugs of using invalid
   // references. We rehash on the first insertion after reserved_growth_ reaches
-  // 0 after a call to reserve. We also do a rehash with low probability
+  // 0 after a call to reserve.
+  // TODO(b/254649633): we could potentially do a rehash with low probability
   // whenever reserved_growth_ is zero.
-  bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
-                                                 size_t capacity) const;
+  bool should_rehash_for_bug_detection_on_insert() const {
+    return reserved_growth_ == kReservedGrowthJustRanOut;
+  }
   void maybe_increment_generation_on_insert() {
     if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
 
@@ -827,9 +820,7 @@ class CommonFieldsGenerationInfoDisabled {
   CommonFieldsGenerationInfoDisabled& operator=(
       CommonFieldsGenerationInfoDisabled&&) = default;
 
-  bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
-    return false;
-  }
+  bool should_rehash_for_bug_detection_on_insert() const { return false; }
   void maybe_increment_generation_on_insert() {}
   void reset_reserved_growth(size_t, size_t) {}
   size_t reserved_growth() const { return 0; }
@@ -914,10 +905,6 @@ class CommonFields : public CommonFieldsGenerationInfo {
     return compressed_tuple_.template get<1>();
   }
 
-  bool should_rehash_for_bug_detection_on_insert() const {
-    return CommonFieldsGenerationInfo::
-        should_rehash_for_bug_detection_on_insert(control_, capacity_);
-  }
   void reset_reserved_growth(size_t reservation) {
     CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size_);
   }
@@ -1119,12 +1106,10 @@ struct FindInfo {
 inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
 
 // Begins a probing operation on `common.control`, using `hash`.
-inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
-                                      size_t hash) {
-  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
-}
 inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
-  return probe(common.control_, common.capacity_, hash);
+  const ctrl_t* ctrl = common.control_;
+  const size_t capacity = common.capacity_;
+  return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
 }
 
 // Probes an array of control bits using a probe sequence derived from `hash`,
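The change removes RehashProbabilityConstant() and the probabilistic rehash-on-insert, so the bug-detection rehash now fires only on the first insertion after the growth reserved by reserve() has been exhausted; it also folds the two probe() overloads into one that reads control_/capacity_ from CommonFields directly. Below is a minimal, self-contained sketch of the simplified policy, not the actual Abseil internals: the class name GenerationInfoSketch, its reserve()/generation() helpers, the sentinel value, and the body of maybe_increment_generation_on_insert() beyond the first line shown in the diff are illustrative assumptions modeled loosely on the surrounding code.

#include <cstddef>
#include <cstdio>
#include <limits>

class GenerationInfoSketch {
  // Sentinel: the previous insertion consumed the last slot of reserved growth.
  static constexpr size_t kReservedGrowthJustRanOut =
      (std::numeric_limits<size_t>::max)();

 public:
  // Mirrors the simplified predicate from the diff: no ctrl/capacity inputs
  // and no probabilistic rehash, just the "reserve just ran out" check.
  bool should_rehash_for_bug_detection_on_insert() const {
    return reserved_growth_ == kReservedGrowthJustRanOut;
  }

  // Rough stand-in for maybe_increment_generation_on_insert(): consume the
  // reserved growth first; once it runs out, bump the generation so stale
  // iterators/references can be detected.
  void maybe_increment_generation_on_insert() {
    if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
    if (reserved_growth_ > 0) {
      if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
    } else {
      ++generation_;
    }
  }

  void reserve(size_t reserved_growth) { reserved_growth_ = reserved_growth; }
  size_t generation() const { return generation_; }

 private:
  size_t reserved_growth_ = 0;
  size_t generation_ = 0;
};

int main() {
  GenerationInfoSketch info;
  info.reserve(2);  // pretend reserve() left room for exactly 2 insertions
  for (int i = 0; i < 4; ++i) {
    std::printf("insert %d: rehash for bug detection = %d\n", i,
                static_cast<int>(
                    info.should_rehash_for_bug_detection_on_insert()));
    info.maybe_increment_generation_on_insert();
  }
  // Prints 0, 0, 1, 0: the extra rehash is requested exactly once, on the
  // first insertion after the reserved growth has been used up.
}

Under this simplified model the deterministic check is the only trigger left; the TODO(b/254649633) in the diff notes that a low-probability rehash whenever reserved_growth_ is zero could be reintroduced later.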