path: root/absl/container/internal/raw_hash_set.h
author: Vitaly Goldshteyn <goldvitaly@google.com> 2024-06-27 02:09:33 -0700
committer: Copybara-Service <copybara-worker@google.com> 2024-06-27 02:10:24 -0700
commit: 0ccc51f9ddbb407d579f8158d5421fbf3eea0524 (patch)
tree: ac234b48655a0d53ce43c827f23dd17865a58b28 /absl/container/internal/raw_hash_set.h
parent: 16452e1418c1c2a8bcf4a99238e190ba901a20a6 (diff)
Add assertions to detect reentrance in `IterateOverFullSlots` and `absl::erase_if`.
Since we have potential plans to use this function more widely, including in `absl::c_for_each`, we need good error detection.

PiperOrigin-RevId: 647236725
Change-Id: I5035bfb8cef24f80f1bbed83a42380e57d84e428
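To illustrate the reentrance these new assertions are meant to catch, here is a minimal sketch (not part of the commit; the container, values, and lambdas are hypothetical). Erasing only the element handed to the callback remains supported, while any other mutation of the table during iteration is expected to trip the new "hash table was modified unexpectedly" assertion in debug builds:

    #include <cstddef>
    #include "absl/container/flat_hash_set.h"

    void Example() {
      absl::flat_hash_set<int> s = {1, 2, 3, 4};

      // Supported: absl::erase_if erases exactly the elements for which the
      // predicate returns true; nothing else mutates the table mid-iteration.
      std::size_t removed = absl::erase_if(s, [](int v) { return v % 2 == 0; });
      (void)removed;

      // Reentrant misuse (do not do this): inserting into the same table from
      // inside the predicate modifies it while IterateOverFullSlots is walking
      // the slots and should now hit an assert in debug builds.
      // absl::erase_if(s, [&s](int v) { s.insert(v + 100); return false; });
    }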
Diffstat (limited to 'absl/container/internal/raw_hash_set.h')
-rw-r--r--  absl/container/internal/raw_hash_set.h | 18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 724df193..02e389b9 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -1860,8 +1860,8 @@ inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
}
// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
-// If kAllowRemoveReentrance is false, no erasure from this table allowed during
-// Callback call. This mode is slightly faster.
+// No insertion into the table is allowed during the Callback call.
+// Erasure is allowed only for the element passed to the callback.
template <class SlotType, class Callback>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
const CommonFields& c, SlotType* slot, Callback cb) {
@@ -1890,16 +1890,22 @@ ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
return;
}
size_t remaining = c.size();
+ ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = remaining;
while (remaining != 0) {
for (uint32_t i : GroupFullEmptyOrDeleted(ctrl).MaskFull()) {
+ assert(IsFull(ctrl[i]) && "hash table was modified unexpectedly");
cb(ctrl + i, slot + i);
--remaining;
}
ctrl += Group::kWidth;
slot += Group::kWidth;
assert((remaining == 0 || *(ctrl - 1) != ctrl_t::kSentinel) &&
- "element was erased from hash table unexpectedly");
+ "hash table was modified unexpectedly");
}
+ // NOTE: erasure of the current element is allowed in the callback for the
+ // absl::erase_if specialization, so we use `>=`.
+ assert(original_size_for_assert >= c.size() &&
+ "hash table was modified unexpectedly");
}
template <typename CharAlloc>
@@ -4049,12 +4055,14 @@ struct HashtableFreeFunctionsAccess {
if (c->is_soo()) {
auto it = c->soo_iterator();
if (!pred(*it)) {
+ assert(c->size() == 1 && "hash table was modified unexpectedly");
return 0;
}
c->destroy(it.slot());
c->common().set_empty_soo();
return 1;
}
+ ABSL_ATTRIBUTE_UNUSED const size_t original_size_for_assert = c->size();
size_t num_deleted = 0;
IterateOverFullSlots(
c->common(), c->slot_array(), [&](const ctrl_t* ctrl, auto* slot) {
@@ -4065,6 +4073,10 @@ struct HashtableFreeFunctionsAccess {
++num_deleted;
}
});
+ // NOTE: IterateOverFullSlots allows removal of the current element, so we
+ // additionally verify the size here.
+ assert(original_size_for_assert - num_deleted == c->size() &&
+ "hash table was modified unexpectedly");
return num_deleted;
}
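For reference, the bookkeeping behind these asserts can be mimicked in user code. A minimal sketch under stated assumptions (the `EraseIfChecked` helper below is hypothetical, not an Abseil API): count the deletions performed by the loop itself and then check that nothing else changed the size, which mirrors the `original_size_for_assert - num_deleted == c->size()` invariant above.

    #include <cassert>
    #include <cstddef>
    #include "absl/container/flat_hash_set.h"

    // Hypothetical helper mirroring the debug check added to erase_if above:
    // every size change must be explained by a deletion this loop performed
    // itself; anything else indicates reentrant modification of the table.
    template <typename Set, typename Pred>
    std::size_t EraseIfChecked(Set& set, Pred pred) {
      const std::size_t original_size = set.size();
      std::size_t num_deleted = 0;
      for (auto it = set.begin(); it != set.end();) {
        if (pred(*it)) {
          set.erase(it++);  // Erasing the current element is the allowed case.
          ++num_deleted;
        } else {
          ++it;
        }
      }
      assert(original_size - num_deleted == set.size() &&
             "hash table was modified unexpectedly");
      return num_deleted;
    }

    void Demo() {
      absl::flat_hash_set<int> s = {1, 2, 3, 4};
      // original_size = 4, num_deleted = 2, final size = 2: the equality here
      // (and the looser `>=` check inside IterateOverFullSlots) holds.
      std::size_t n = EraseIfChecked(s, [](int v) { return v > 2; });
      (void)n;
    }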