Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/btree_map.h                         |  41
-rw-r--r--  absl/container/btree_set.h                         |  37
-rw-r--r--  absl/container/btree_test.cc                       | 178
-rw-r--r--  absl/container/flat_hash_map_test.cc               |   8
-rw-r--r--  absl/container/inlined_vector.h                    |  25
-rw-r--r--  absl/container/inlined_vector_test.cc              | 294
-rw-r--r--  absl/container/internal/btree.h                    | 151
-rw-r--r--  absl/container/internal/btree_container.h          |  14
-rw-r--r--  absl/container/internal/common_policy_traits.h     |  45
-rw-r--r--  absl/container/internal/hashtablez_sampler.cc      |  46
-rw-r--r--  absl/container/internal/hashtablez_sampler.h       |  54
-rw-r--r--  absl/container/internal/inlined_vector.h           | 123
-rw-r--r--  absl/container/internal/raw_hash_set.cc            | 152
-rw-r--r--  absl/container/internal/raw_hash_set.h             | 741
-rw-r--r--  absl/container/internal/raw_hash_set_benchmark.cc  |  18
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc       | 100
-rw-r--r--  absl/container/node_hash_map_test.cc               |   8
17 files changed, 1422 insertions, 613 deletions
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index 479db8b7..cd3ee2b4 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -42,10 +42,10 @@
// Importantly, insertions and deletions may invalidate outstanding iterators,
// pointers, and references to elements. Such invalidations are typically only
// an issue if insertion and deletion operations are interleaved with the use of
-// more than one iterator, pointer, or reference simultaneously. For this
-// reason, `insert()` and `erase()` return a valid iterator at the current
-// position. Another important difference is that key-types must be
-// copy-constructible.
+// more than one iterator, pointer, or reference simultaneously. For this
+// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid
+// iterator at the current position. Another important difference is that
+// key-types must be copy-constructible.
//
// Another API difference is that btree iterators can be subtracted, and this
// is faster than using std::distance.
@@ -325,7 +325,8 @@ class btree_map
// btree_map::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
- // as a C++17-compatible node handle. Overloads are listed below.
+ // as a C++17-compatible node handle. Any references, pointers, or iterators
+ // are invalidated. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
@@ -350,6 +351,21 @@ class btree_map
// It does NOT refer to the data layout of the underlying btree.
using Base::extract;
+ // btree_map::extract_and_get_next()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle along with an iterator to the next
+ // element.
+ //
+ // extract_and_get_next_return_type extract_and_get_next(
+ // const_iterator position):
+ //
+ //   Extracts the element at the indicated position and returns a struct
+ // containing a member named `node`: a node handle owning that extracted
+ // data and a member named `next`: an iterator pointing to the next element
+ // in the btree.
+ using Base::extract_and_get_next;
+
// btree_map::merge()
//
// Extracts elements from a given `source` btree_map into this
@@ -701,6 +717,21 @@ class btree_multimap
// It does NOT refer to the data layout of the underlying btree.
using Base::extract;
+ // btree_multimap::extract_and_get_next()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle along with an iterator to the next
+ // element.
+ //
+ // extract_and_get_next_return_type extract_and_get_next(
+ // const_iterator position):
+ //
+ //   Extracts the element at the indicated position and returns a struct
+ // containing a member named `node`: a node handle owning that extracted
+ // data and a member named `next`: an iterator pointing to the next element
+ // in the btree.
+ using Base::extract_and_get_next;
+
// btree_multimap::merge()
//
// Extracts all elements from a given `source` btree_multimap into this
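
The `extract_and_get_next()` API documented above returns a struct whose `node` member is a C++17-compatible node handle and whose `next` member is a valid iterator to the element after the erased one. A minimal usage sketch (the map contents are illustrative; the member names follow the documentation in this diff):

#include <cstdio>
#include <string>

#include "absl/container/btree_map.h"

void ExtractAndInspect() {
  absl::btree_map<int, std::string> m = {{1, "a"}, {2, "b"}, {3, "c"}};
  auto it = m.find(2);
  // Returns {node, next}: `node` owns the extracted entry, `next` points at
  // the element that followed it (here, the entry for key 3).
  auto res = m.extract_and_get_next(it);
  std::printf("extracted %d -> %s\n", res.node.key(),
              res.node.mapped().c_str());
  if (res.next != m.end()) std::printf("next key: %d\n", res.next->first);
}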
diff --git a/absl/container/btree_set.h b/absl/container/btree_set.h
index bbff65d1..51dc42b7 100644
--- a/absl/container/btree_set.h
+++ b/absl/container/btree_set.h
@@ -43,8 +43,8 @@
// pointers, and references to elements. Such invalidations are typically only
// an issue if insertion and deletion operations are interleaved with the use of
// more than one iterator, pointer, or reference simultaneously. For this
-// reason, `insert()` and `erase()` return a valid iterator at the current
-// position.
+// reason, `insert()`, `erase()`, and `extract_and_get_next()` return a valid
+// iterator at the current position.
//
// Another API difference is that btree iterators can be subtracted, and this
// is faster than using std::distance.
@@ -272,7 +272,8 @@ class btree_set
// btree_set::extract()
//
// Extracts the indicated element, erasing it in the process, and returns it
- // as a C++17-compatible node handle. Overloads are listed below.
+ // as a C++17-compatible node handle. Any references, pointers, or iterators
+ // are invalidated. Overloads are listed below.
//
// node_type extract(const_iterator position):
//
@@ -292,6 +293,21 @@ class btree_set
// It does NOT refer to the data layout of the underlying btree.
using Base::extract;
+ // btree_set::extract_and_get_next()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle along with an iterator to the next
+ // element.
+ //
+ // extract_and_get_next_return_type extract_and_get_next(
+ // const_iterator position):
+ //
+ //   Extracts the element at the indicated position and returns a struct
+ // containing a member named `node`: a node handle owning that extracted
+ // data and a member named `next`: an iterator pointing to the next element
+ // in the btree.
+ using Base::extract_and_get_next;
+
// btree_set::merge()
//
// Extracts elements from a given `source` btree_set into this
@@ -614,6 +630,21 @@ class btree_multiset
// It does NOT refer to the data layout of the underlying btree.
using Base::extract;
+ // btree_multiset::extract_and_get_next()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle along with an iterator to the next
+ // element.
+ //
+ // extract_and_get_next_return_type extract_and_get_next(
+ // const_iterator position):
+ //
+ //   Extracts the element at the indicated position and returns a struct
+ // containing a member named `node`: a node handle owning that extracted
+ // data and a member named `next`: an iterator pointing to the next element
+ // in the btree.
+ using Base::extract_and_get_next;
+
// btree_multiset::merge()
//
// Extracts all elements from a given `source` btree_multiset into this
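
Because the returned `next` iterator is valid even though extraction invalidates other iterators, `extract_and_get_next()` supports erasing while iterating without recomputing the position. A hedged sketch of that pattern (the predicate and element values are illustrative only):

#include "absl/container/btree_set.h"

// Extract every even element, using `next` to continue the traversal.
void ExtractEvens(absl::btree_set<int>& s) {
  for (auto it = s.begin(); it != s.end();) {
    if (*it % 2 == 0) {
      auto res = s.extract_and_get_next(it);
      it = res.next;  // Valid iterator to the element after the erased one.
      // `res.node` owns the extracted value and releases it at scope exit.
    } else {
      ++it;
    }
  }
}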
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 6a5351fe..cc763b29 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -74,6 +74,16 @@ void CheckPairEquals(const std::pair<T, U> &x, const std::pair<V, W> &y) {
CheckPairEquals(x.first, y.first);
CheckPairEquals(x.second, y.second);
}
+
+bool IsAssertEnabled() {
+ // Use an assert with side-effects to figure out if they are actually enabled.
+ bool assert_enabled = false;
+ assert([&]() { // NOLINT
+ assert_enabled = true;
+ return true;
+ }());
+ return assert_enabled;
+}
} // namespace
// The base class for a sorted associative container checker. TreeType is the
@@ -1651,10 +1661,9 @@ TEST(Btree, BtreeMultisetEmplace) {
auto iter = s.emplace(value_to_insert);
ASSERT_NE(iter, s.end());
EXPECT_EQ(*iter, value_to_insert);
- auto iter2 = s.emplace(value_to_insert);
- EXPECT_NE(iter2, iter);
- ASSERT_NE(iter2, s.end());
- EXPECT_EQ(*iter2, value_to_insert);
+ iter = s.emplace(value_to_insert);
+ ASSERT_NE(iter, s.end());
+ EXPECT_EQ(*iter, value_to_insert);
auto result = s.equal_range(value_to_insert);
EXPECT_EQ(std::distance(result.first, result.second), 2);
}
@@ -1665,44 +1674,45 @@ TEST(Btree, BtreeMultisetEmplaceHint) {
auto iter = s.emplace(value_to_insert);
ASSERT_NE(iter, s.end());
EXPECT_EQ(*iter, value_to_insert);
- auto emplace_iter = s.emplace_hint(iter, value_to_insert);
- EXPECT_NE(emplace_iter, iter);
- ASSERT_NE(emplace_iter, s.end());
- EXPECT_EQ(*emplace_iter, value_to_insert);
+ iter = s.emplace_hint(iter, value_to_insert);
+ // The new element should be before the previously inserted one.
+ EXPECT_EQ(iter, s.lower_bound(value_to_insert));
+ ASSERT_NE(iter, s.end());
+ EXPECT_EQ(*iter, value_to_insert);
}
TEST(Btree, BtreeMultimapEmplace) {
const int key_to_insert = 123456;
const char value0[] = "a";
- absl::btree_multimap<int, std::string> s;
- auto iter = s.emplace(key_to_insert, value0);
- ASSERT_NE(iter, s.end());
+ absl::btree_multimap<int, std::string> m;
+ auto iter = m.emplace(key_to_insert, value0);
+ ASSERT_NE(iter, m.end());
EXPECT_EQ(iter->first, key_to_insert);
EXPECT_EQ(iter->second, value0);
const char value1[] = "b";
- auto iter2 = s.emplace(key_to_insert, value1);
- EXPECT_NE(iter2, iter);
- ASSERT_NE(iter2, s.end());
- EXPECT_EQ(iter2->first, key_to_insert);
- EXPECT_EQ(iter2->second, value1);
- auto result = s.equal_range(key_to_insert);
+ iter = m.emplace(key_to_insert, value1);
+ ASSERT_NE(iter, m.end());
+ EXPECT_EQ(iter->first, key_to_insert);
+ EXPECT_EQ(iter->second, value1);
+ auto result = m.equal_range(key_to_insert);
EXPECT_EQ(std::distance(result.first, result.second), 2);
}
TEST(Btree, BtreeMultimapEmplaceHint) {
const int key_to_insert = 123456;
const char value0[] = "a";
- absl::btree_multimap<int, std::string> s;
- auto iter = s.emplace(key_to_insert, value0);
- ASSERT_NE(iter, s.end());
+ absl::btree_multimap<int, std::string> m;
+ auto iter = m.emplace(key_to_insert, value0);
+ ASSERT_NE(iter, m.end());
EXPECT_EQ(iter->first, key_to_insert);
EXPECT_EQ(iter->second, value0);
const char value1[] = "b";
- auto emplace_iter = s.emplace_hint(iter, key_to_insert, value1);
- EXPECT_NE(emplace_iter, iter);
- ASSERT_NE(emplace_iter, s.end());
- EXPECT_EQ(emplace_iter->first, key_to_insert);
- EXPECT_EQ(emplace_iter->second, value1);
+ iter = m.emplace_hint(iter, key_to_insert, value1);
+ // The new element should be before the previously inserted one.
+ EXPECT_EQ(iter, m.lower_bound(key_to_insert));
+ ASSERT_NE(iter, m.end());
+ EXPECT_EQ(iter->first, key_to_insert);
+ EXPECT_EQ(iter->second, value1);
}
TEST(Btree, ConstIteratorAccessors) {
@@ -2113,6 +2123,79 @@ TEST(Btree, ExtractMultiMapEquivalentKeys) {
}
}
+TEST(Btree, ExtractAndGetNextSet) {
+ absl::btree_set<int> src = {1, 2, 3, 4, 5};
+ auto it = src.find(3);
+ auto extracted_and_next = src.extract_and_get_next(it);
+ EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
+ EXPECT_EQ(extracted_and_next.node.value(), 3);
+ EXPECT_EQ(*extracted_and_next.next, 4);
+}
+
+TEST(Btree, ExtractAndGetNextMultiSet) {
+ absl::btree_multiset<int> src = {1, 2, 3, 4, 5};
+ auto it = src.find(3);
+ auto extracted_and_next = src.extract_and_get_next(it);
+ EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
+ EXPECT_EQ(extracted_and_next.node.value(), 3);
+ EXPECT_EQ(*extracted_and_next.next, 4);
+}
+
+TEST(Btree, ExtractAndGetNextMap) {
+ absl::btree_map<int, int> src = {{1, 2}, {3, 4}, {5, 6}};
+ auto it = src.find(3);
+ auto extracted_and_next = src.extract_and_get_next(it);
+ EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6)));
+ EXPECT_EQ(extracted_and_next.node.key(), 3);
+ EXPECT_EQ(extracted_and_next.node.mapped(), 4);
+ EXPECT_THAT(*extracted_and_next.next, Pair(5, 6));
+}
+
+TEST(Btree, ExtractAndGetNextMultiMap) {
+ absl::btree_multimap<int, int> src = {{1, 2}, {3, 4}, {5, 6}};
+ auto it = src.find(3);
+ auto extracted_and_next = src.extract_and_get_next(it);
+ EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6)));
+ EXPECT_EQ(extracted_and_next.node.key(), 3);
+ EXPECT_EQ(extracted_and_next.node.mapped(), 4);
+ EXPECT_THAT(*extracted_and_next.next, Pair(5, 6));
+}
+
+TEST(Btree, ExtractAndGetNextEndIter) {
+ absl::btree_set<int> src = {1, 2, 3, 4, 5};
+ auto it = src.find(5);
+ auto extracted_and_next = src.extract_and_get_next(it);
+ EXPECT_THAT(src, ElementsAre(1, 2, 3, 4));
+ EXPECT_EQ(extracted_and_next.node.value(), 5);
+ EXPECT_EQ(extracted_and_next.next, src.end());
+}
+
+TEST(Btree, ExtractDoesntCauseExtraMoves) {
+#ifdef _MSC_VER
+ GTEST_SKIP() << "This test fails on MSVC.";
+#endif
+
+ using Set = absl::btree_set<MovableOnlyInstance>;
+ std::array<std::function<void(Set &)>, 3> extracters = {
+ [](Set &s) { auto node = s.extract(s.begin()); },
+ [](Set &s) { auto ret = s.extract_and_get_next(s.begin()); },
+ [](Set &s) { auto node = s.extract(MovableOnlyInstance(0)); }};
+
+ InstanceTracker tracker;
+ for (int i = 0; i < 3; ++i) {
+ Set s;
+ s.insert(MovableOnlyInstance(0));
+ tracker.ResetCopiesMovesSwaps();
+
+ extracters[i](s);
+ // We expect to see exactly 1 move: from the original slot into the
+ // extracted node.
+ EXPECT_EQ(tracker.copies(), 0) << i;
+ EXPECT_EQ(tracker.moves(), 1) << i;
+ EXPECT_EQ(tracker.swaps(), 0) << i;
+ }
+}
+
// For multisets, insert with hint also affects correctness because we need to
// insert immediately before the hint if possible.
struct InsertMultiHintData {
@@ -3005,8 +3088,9 @@ TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) {
absl::btree_set<MultiKey, MultiKeyComp> set = {{}, MultiKeyComp{}};
}
-#ifndef NDEBUG
TEST(Btree, InvalidComparatorsCaught) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
{
struct ZeroAlwaysLessCmp {
bool operator()(int lhs, int rhs) const {
@@ -3054,7 +3138,6 @@ TEST(Btree, InvalidComparatorsCaught) {
EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
}
}
-#endif
#ifndef _MSC_VER
// This test crashes on MSVC.
@@ -3084,6 +3167,14 @@ TEST(Btree, InvalidIteratorUse) {
set.erase(1);
EXPECT_DEATH(*it, "invalidated iterator");
}
+ {
+ absl::btree_set<int> set;
+ for (int i = 0; i < 10; ++i) set.insert(i);
+ auto it = set.insert(20).first;
+ set.insert(30);
+ EXPECT_DEATH(void(it == set.begin()), "invalidated iterator");
+ EXPECT_DEATH(void(set.begin() == it), "invalidated iterator");
+ }
}
#endif
@@ -3340,6 +3431,39 @@ TEST(Btree, IteratorSubtraction) {
}
}
+TEST(Btree, DereferencingEndIterator) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ absl::btree_set<int> set;
+ for (int i = 0; i < 1000; ++i) set.insert(i);
+ EXPECT_DEATH(*set.end(), R"regex(Dereferencing end\(\) iterator)regex");
+}
+
+TEST(Btree, InvalidIteratorComparison) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ absl::btree_set<int> set1, set2;
+ for (int i = 0; i < 1000; ++i) {
+ set1.insert(i);
+ set2.insert(i);
+ }
+
+ constexpr const char *kValueInitDeathMessage =
+ "Comparing default-constructed iterator with .*non-default-constructed "
+ "iterator";
+ typename absl::btree_set<int>::iterator iter1, iter2;
+ EXPECT_EQ(iter1, iter2);
+ EXPECT_DEATH(void(set1.begin() == iter1), kValueInitDeathMessage);
+ EXPECT_DEATH(void(iter1 == set1.begin()), kValueInitDeathMessage);
+
+ constexpr const char *kDifferentContainerDeathMessage =
+ "Comparing iterators from different containers";
+ iter1 = set1.begin();
+ iter2 = set2.begin();
+ EXPECT_DEATH(void(iter1 == iter2), kDifferentContainerDeathMessage);
+ EXPECT_DEATH(void(iter2 == iter1), kDifferentContainerDeathMessage);
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
index 263951f1..03171f6d 100644
--- a/absl/container/flat_hash_map_test.cc
+++ b/absl/container/flat_hash_map_test.cc
@@ -311,6 +311,14 @@ TEST(FlatHashMap, Reserve) {
}
}
+TEST(FlatHashMap, RecursiveTypeCompiles) {
+ struct RecursiveType {
+ flat_hash_map<int, RecursiveType> m;
+ };
+ RecursiveType t;
+ t.m[0] = RecursiveType{};
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/inlined_vector.h b/absl/container/inlined_vector.h
index 10b1896b..7058f375 100644
--- a/absl/container/inlined_vector.h
+++ b/absl/container/inlined_vector.h
@@ -97,14 +97,11 @@ class InlinedVector {
using DisableIfAtLeastForwardIterator = absl::enable_if_t<
!inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
- struct MemcpyPolicy {};
- struct ElementwiseAssignPolicy {};
- struct ElementwiseConstructPolicy {};
-
- using MoveAssignmentPolicy = absl::conditional_t<
- IsMemcpyOk<A>::value, MemcpyPolicy,
- absl::conditional_t<IsMoveAssignOk<A>::value, ElementwiseAssignPolicy,
- ElementwiseConstructPolicy>>;
+ using MemcpyPolicy = typename Storage::MemcpyPolicy;
+ using ElementwiseAssignPolicy = typename Storage::ElementwiseAssignPolicy;
+ using ElementwiseConstructPolicy =
+ typename Storage::ElementwiseConstructPolicy;
+ using MoveAssignmentPolicy = typename Storage::MoveAssignmentPolicy;
public:
using allocator_type = A;
@@ -664,10 +661,22 @@ class InlinedVector {
ABSL_HARDENING_ASSERT(pos <= end());
value_type dealias(std::forward<Args>(args)...);
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
+ // It appears that GCC thinks that since `pos` is a const pointer and may
+ // point to uninitialized memory at this point, a warning should be
+ // issued. But `pos` is actually only used to compute an array index to
+ // write to.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
return storage_.Insert(pos,
IteratorValueAdapter<A, MoveIterator<A>>(
MoveIterator<A>(std::addressof(dealias))),
1);
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
}
// `InlinedVector::emplace_back(...)`
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index 65ddbab6..898b40db 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -1208,6 +1208,8 @@ TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) {
}
TEST(CountElemAssign, SimpleTypeWithInlineBacking) {
+ const size_t inlined_capacity = absl::InlinedVector<int, 2>().capacity();
+
for (size_t original_size = 0; original_size <= 5; ++original_size) {
SCOPED_TRACE(original_size);
// Original contents are [12345, 12345, ...]
@@ -1217,9 +1219,9 @@ TEST(CountElemAssign, SimpleTypeWithInlineBacking) {
original_contents.end());
v.assign(2, 123);
EXPECT_THAT(v, AllOf(SizeIs(2u), ElementsAre(123, 123)));
- if (original_size <= 2) {
+ if (original_size <= inlined_capacity) {
// If the original had inline backing, it should stay inline.
- EXPECT_EQ(2u, v.capacity());
+ EXPECT_EQ(v.capacity(), inlined_capacity);
}
}
}
@@ -1360,6 +1362,8 @@ TEST(RangedConstructor, ElementsAreConstructed) {
}
TEST(RangedAssign, SimpleType) {
+ const size_t inlined_capacity = absl::InlinedVector<int, 3>().capacity();
+
// Test for all combinations of original sizes (empty and non-empty inline,
// and out of line) and target sizes.
for (size_t original_size = 0; original_size <= 5; ++original_size) {
@@ -1367,13 +1371,13 @@ TEST(RangedAssign, SimpleType) {
// Original contents are [12345, 12345, ...]
std::vector<int> original_contents(original_size, 12345);
- for (int target_size = 0; target_size <= 5; ++target_size) {
+ for (size_t target_size = 0; target_size <= 5; ++target_size) {
SCOPED_TRACE(target_size);
// New contents are [3, 4, ...]
std::vector<int> new_contents;
- for (int i = 0; i < target_size; ++i) {
- new_contents.push_back(i + 3);
+ for (size_t i = 0; i < target_size; ++i) {
+ new_contents.push_back(static_cast<int>(i + 3));
}
absl::InlinedVector<int, 3> v(original_contents.begin(),
@@ -1382,9 +1386,10 @@ TEST(RangedAssign, SimpleType) {
EXPECT_EQ(new_contents.size(), v.size());
EXPECT_LE(new_contents.size(), v.capacity());
- if (target_size <= 3 && original_size <= 3) {
+ if (target_size <= inlined_capacity &&
+ original_size <= inlined_capacity) {
// Storage should stay inline when target size is small.
- EXPECT_EQ(3u, v.capacity());
+ EXPECT_EQ(v.capacity(), inlined_capacity);
}
EXPECT_THAT(v, ElementsAreArray(new_contents));
}
@@ -1470,9 +1475,12 @@ TEST(InitializerListConstructor, DisparateTypesInList) {
}
TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) {
- EXPECT_THAT((absl::InlinedVector<CopyableMovableInstance, 1>{
- CopyableMovableInstance(0)}),
- AllOf(SizeIs(1u), CapacityIs(1u), ElementsAre(ValueIs(0))));
+ const size_t inlined_capacity =
+ absl::InlinedVector<CopyableMovableInstance, 1>().capacity();
+ EXPECT_THAT(
+ (absl::InlinedVector<CopyableMovableInstance, 1>{
+ CopyableMovableInstance(0)}),
+ AllOf(SizeIs(1u), CapacityIs(inlined_capacity), ElementsAre(ValueIs(0))));
}
TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) {
@@ -1841,98 +1849,212 @@ MATCHER(HasValue, "") {
return ::testing::get<0>(arg).value() == ::testing::get<1>(arg);
}
-TEST(MoveAssignment, NonAssignable) {
+TEST(NonAssignableMoveAssignmentTest, AllocatedToInline) {
using X = MoveConstructibleOnlyInstance;
- {
- InstanceTracker tracker;
- absl::InlinedVector<X, 2> inlined;
- inlined.emplace_back(1);
- absl::InlinedVector<X, 2> allocated;
- allocated.emplace_back(1);
- allocated.emplace_back(2);
- allocated.emplace_back(3);
- tracker.ResetCopiesMovesSwaps();
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> inlined;
+ inlined.emplace_back(1);
+ absl::InlinedVector<X, 2> allocated;
+ allocated.emplace_back(1);
+ allocated.emplace_back(2);
+ allocated.emplace_back(3);
+ tracker.ResetCopiesMovesSwaps();
- inlined = std::move(allocated);
- // passed ownership of the allocated storage
- EXPECT_EQ(tracker.moves(), 0);
- EXPECT_EQ(tracker.live_instances(), 3);
+ inlined = std::move(allocated);
+ // passed ownership of the allocated storage
+ EXPECT_EQ(tracker.moves(), 0);
+ EXPECT_EQ(tracker.live_instances(), 3);
- EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3}));
- }
+ EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3}));
+}
- {
- InstanceTracker tracker;
- absl::InlinedVector<X, 2> inlined;
- inlined.emplace_back(1);
- absl::InlinedVector<X, 2> allocated;
- allocated.emplace_back(1);
- allocated.emplace_back(2);
- allocated.emplace_back(3);
- tracker.ResetCopiesMovesSwaps();
+TEST(NonAssignableMoveAssignmentTest, InlineToAllocated) {
+ using X = MoveConstructibleOnlyInstance;
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> inlined;
+ inlined.emplace_back(1);
+ absl::InlinedVector<X, 2> allocated;
+ allocated.emplace_back(1);
+ allocated.emplace_back(2);
+ allocated.emplace_back(3);
+ tracker.ResetCopiesMovesSwaps();
- allocated = std::move(inlined);
- // Moved elements
- EXPECT_EQ(tracker.moves(), 1);
- EXPECT_EQ(tracker.live_instances(), 1);
+ allocated = std::move(inlined);
+ // Moved elements
+ EXPECT_EQ(tracker.moves(), 1);
+ EXPECT_EQ(tracker.live_instances(), 1);
- EXPECT_THAT(allocated, Pointwise(HasValue(), {1}));
- }
+ EXPECT_THAT(allocated, Pointwise(HasValue(), {1}));
+}
- {
- InstanceTracker tracker;
- absl::InlinedVector<X, 2> inlined_a;
- inlined_a.emplace_back(1);
- absl::InlinedVector<X, 2> inlined_b;
- inlined_b.emplace_back(1);
- tracker.ResetCopiesMovesSwaps();
+TEST(NonAssignableMoveAssignmentTest, InlineToInline) {
+ using X = MoveConstructibleOnlyInstance;
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> inlined_a;
+ inlined_a.emplace_back(1);
+ absl::InlinedVector<X, 2> inlined_b;
+ inlined_b.emplace_back(1);
+ tracker.ResetCopiesMovesSwaps();
- inlined_a = std::move(inlined_b);
- // Moved elements
- EXPECT_EQ(tracker.moves(), 1);
- EXPECT_EQ(tracker.live_instances(), 1);
+ inlined_a = std::move(inlined_b);
+ // Moved elements
+ EXPECT_EQ(tracker.moves(), 1);
+ EXPECT_EQ(tracker.live_instances(), 1);
- EXPECT_THAT(inlined_a, Pointwise(HasValue(), {1}));
- }
+ EXPECT_THAT(inlined_a, Pointwise(HasValue(), {1}));
+}
- {
- InstanceTracker tracker;
- absl::InlinedVector<X, 2> allocated_a;
- allocated_a.emplace_back(1);
- allocated_a.emplace_back(2);
- allocated_a.emplace_back(3);
- absl::InlinedVector<X, 2> allocated_b;
- allocated_b.emplace_back(4);
- allocated_b.emplace_back(5);
- allocated_b.emplace_back(6);
- allocated_b.emplace_back(7);
- tracker.ResetCopiesMovesSwaps();
+TEST(NonAssignableMoveAssignmentTest, AllocatedToAllocated) {
+ using X = MoveConstructibleOnlyInstance;
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> allocated_a;
+ allocated_a.emplace_back(1);
+ allocated_a.emplace_back(2);
+ allocated_a.emplace_back(3);
+ absl::InlinedVector<X, 2> allocated_b;
+ allocated_b.emplace_back(4);
+ allocated_b.emplace_back(5);
+ allocated_b.emplace_back(6);
+ allocated_b.emplace_back(7);
+ tracker.ResetCopiesMovesSwaps();
- allocated_a = std::move(allocated_b);
- // passed ownership of the allocated storage
- EXPECT_EQ(tracker.moves(), 0);
- EXPECT_EQ(tracker.live_instances(), 4);
+ allocated_a = std::move(allocated_b);
+ // passed ownership of the allocated storage
+ EXPECT_EQ(tracker.moves(), 0);
+ EXPECT_EQ(tracker.live_instances(), 4);
- EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7}));
- }
+ EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7}));
+}
- {
- InstanceTracker tracker;
- absl::InlinedVector<X, 2> v;
- v.emplace_back(1);
- v.emplace_back(2);
- v.emplace_back(3);
+TEST(NonAssignableMoveAssignmentTest, AssignThis) {
+ using X = MoveConstructibleOnlyInstance;
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> v;
+ v.emplace_back(1);
+ v.emplace_back(2);
+ v.emplace_back(3);
- tracker.ResetCopiesMovesSwaps();
+ tracker.ResetCopiesMovesSwaps();
- // Obfuscated in order to pass -Wself-move.
- v = std::move(*std::addressof(v));
- // nothing happens
- EXPECT_EQ(tracker.moves(), 0);
- EXPECT_EQ(tracker.live_instances(), 3);
+ // Obfuscated in order to pass -Wself-move.
+ v = std::move(*std::addressof(v));
+ // nothing happens
+ EXPECT_EQ(tracker.moves(), 0);
+ EXPECT_EQ(tracker.live_instances(), 3);
- EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3}));
- }
+ EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+class NonSwappableInstance : public absl::test_internal::BaseCountedInstance {
+ public:
+ explicit NonSwappableInstance(int x) : BaseCountedInstance(x) {}
+ NonSwappableInstance(const NonSwappableInstance& other) = default;
+ NonSwappableInstance& operator=(const NonSwappableInstance& other) = default;
+ NonSwappableInstance(NonSwappableInstance&& other) = default;
+ NonSwappableInstance& operator=(NonSwappableInstance&& other) = default;
+};
+
+void swap(NonSwappableInstance&, NonSwappableInstance&) = delete;
+
+TEST(NonSwappableSwapTest, InlineAndAllocatedTransferStorageAndMove) {
+ using X = NonSwappableInstance;
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> inlined;
+ inlined.emplace_back(1);
+ absl::InlinedVector<X, 2> allocated;
+ allocated.emplace_back(1);
+ allocated.emplace_back(2);
+ allocated.emplace_back(3);
+ tracker.ResetCopiesMovesSwaps();
+
+ inlined.swap(allocated);
+ EXPECT_EQ(tracker.moves(), 1);
+ EXPECT_EQ(tracker.live_instances(), 4);
+
+ EXPECT_THAT(inlined, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+TEST(NonSwappableSwapTest, InlineAndInlineMoveIndividualElements) {
+ using X = NonSwappableInstance;
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> inlined_a;
+ inlined_a.emplace_back(1);
+ absl::InlinedVector<X, 2> inlined_b;
+ inlined_b.emplace_back(2);
+ tracker.ResetCopiesMovesSwaps();
+
+ inlined_a.swap(inlined_b);
+ EXPECT_EQ(tracker.moves(), 3);
+ EXPECT_EQ(tracker.live_instances(), 2);
+
+ EXPECT_THAT(inlined_a, Pointwise(HasValue(), {2}));
+ EXPECT_THAT(inlined_b, Pointwise(HasValue(), {1}));
+}
+
+TEST(NonSwappableSwapTest, AllocatedAndAllocatedOnlyTransferStorage) {
+ using X = NonSwappableInstance;
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> allocated_a;
+ allocated_a.emplace_back(1);
+ allocated_a.emplace_back(2);
+ allocated_a.emplace_back(3);
+ absl::InlinedVector<X, 2> allocated_b;
+ allocated_b.emplace_back(4);
+ allocated_b.emplace_back(5);
+ allocated_b.emplace_back(6);
+ allocated_b.emplace_back(7);
+ tracker.ResetCopiesMovesSwaps();
+
+ allocated_a.swap(allocated_b);
+ EXPECT_EQ(tracker.moves(), 0);
+ EXPECT_EQ(tracker.live_instances(), 7);
+
+ EXPECT_THAT(allocated_a, Pointwise(HasValue(), {4, 5, 6, 7}));
+ EXPECT_THAT(allocated_b, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+TEST(NonSwappableSwapTest, SwapThis) {
+ using X = NonSwappableInstance;
+ InstanceTracker tracker;
+ absl::InlinedVector<X, 2> v;
+ v.emplace_back(1);
+ v.emplace_back(2);
+ v.emplace_back(3);
+
+ tracker.ResetCopiesMovesSwaps();
+
+ v.swap(v);
+ EXPECT_EQ(tracker.moves(), 0);
+ EXPECT_EQ(tracker.live_instances(), 3);
+
+ EXPECT_THAT(v, Pointwise(HasValue(), {1, 2, 3}));
+}
+
+template <size_t N>
+using CharVec = absl::InlinedVector<char, N>;
+
+// Warning: This struct "simulates" the type `InlinedVector::Storage::Allocated`
+// to make reasonable expectations for inlined storage capacity optimization. If
+// the implementation of `Allocated` changes, then `MySpan` and the tests that
+// use it need to be updated accordingly.
+template <typename T>
+struct MySpan {
+ T* data;
+ size_t size;
+};
+
+TEST(StorageTest, InlinedCapacityAutoIncrease) {
+ // The requested capacity is auto increased to `sizeof(MySpan<char>)`.
+ EXPECT_GT(CharVec<1>().capacity(), 1);
+ EXPECT_EQ(CharVec<1>().capacity(), sizeof(MySpan<char>));
+ EXPECT_EQ(CharVec<1>().capacity(), CharVec<2>().capacity());
+ EXPECT_EQ(sizeof(CharVec<1>), sizeof(CharVec<2>));
+
+ // The requested capacity is auto increased to
+ // `sizeof(MySpan<int>) / sizeof(int)`.
+ EXPECT_GT((absl::InlinedVector<int, 1>().capacity()), 1);
+ EXPECT_EQ((absl::InlinedVector<int, 1>().capacity()),
+ sizeof(MySpan<int>) / sizeof(int));
}
} // anonymous namespace
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 5000d1c3..d734676a 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -1017,8 +1017,61 @@ class btree_node {
friend struct btree_access;
};
+template <typename Node>
+bool AreNodesFromSameContainer(const Node *node_a, const Node *node_b) {
+ // If either node is null, then give up on checking whether they're from the
+ // same container. (If exactly one is null, then we'll trigger the
+ // default-constructed assert in Equals.)
+ if (node_a == nullptr || node_b == nullptr) return true;
+ while (!node_a->is_root()) node_a = node_a->parent();
+ while (!node_b->is_root()) node_b = node_b->parent();
+ return node_a == node_b;
+}
+
+class btree_iterator_generation_info_enabled {
+ public:
+ explicit btree_iterator_generation_info_enabled(uint32_t g)
+ : generation_(g) {}
+
+ // Updates the generation. For use internally right before we return an
+ // iterator to the user.
+ template <typename Node>
+ void update_generation(const Node *node) {
+ if (node != nullptr) generation_ = node->generation();
+ }
+ uint32_t generation() const { return generation_; }
+
+ template <typename Node>
+ void assert_valid_generation(const Node *node) const {
+ if (node != nullptr && node->generation() != generation_) {
+ ABSL_INTERNAL_LOG(
+ FATAL,
+ "Attempting to use an invalidated iterator. The corresponding b-tree "
+ "container has been mutated since this iterator was constructed.");
+ }
+ }
+
+ private:
+ // Used to check that the iterator hasn't been invalidated.
+ uint32_t generation_;
+};
+
+class btree_iterator_generation_info_disabled {
+ public:
+ explicit btree_iterator_generation_info_disabled(uint32_t) {}
+ void update_generation(const void *) {}
+ uint32_t generation() const { return 0; }
+ void assert_valid_generation(const void *) const {}
+};
+
+#ifdef ABSL_BTREE_ENABLE_GENERATIONS
+using btree_iterator_generation_info = btree_iterator_generation_info_enabled;
+#else
+using btree_iterator_generation_info = btree_iterator_generation_info_disabled;
+#endif
+
template <typename Node, typename Reference, typename Pointer>
-class btree_iterator {
+class btree_iterator : private btree_iterator_generation_info {
using field_type = typename Node::field_type;
using key_type = typename Node::key_type;
using size_type = typename Node::size_type;
@@ -1049,13 +1102,11 @@ class btree_iterator {
btree_iterator() : btree_iterator(nullptr, -1) {}
explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {}
- btree_iterator(Node *n, int p) : node_(n), position_(p) {
-#ifdef ABSL_BTREE_ENABLE_GENERATIONS
- // Use `~uint32_t{}` as a sentinel value for iterator generations so it
- // doesn't match the initial value for the actual generation.
- generation_ = n != nullptr ? n->generation() : ~uint32_t{};
-#endif
- }
+ btree_iterator(Node *n, int p)
+ : btree_iterator_generation_info(n != nullptr ? n->generation()
+ : ~uint32_t{}),
+ node_(n),
+ position_(p) {}
// NOTE: this SFINAE allows for implicit conversions from iterator to
// const_iterator, but it specifically avoids hiding the copy constructor so
@@ -1066,23 +1117,21 @@ class btree_iterator {
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
- : node_(other.node_), position_(other.position_) {
-#ifdef ABSL_BTREE_ENABLE_GENERATIONS
- generation_ = other.generation_;
-#endif
- }
+ : btree_iterator_generation_info(other),
+ node_(other.node_),
+ position_(other.position_) {}
bool operator==(const iterator &other) const {
- return node_ == other.node_ && position_ == other.position_;
+ return Equals(other);
}
bool operator==(const const_iterator &other) const {
- return node_ == other.node_ && position_ == other.position_;
+ return Equals(other);
}
bool operator!=(const iterator &other) const {
- return node_ != other.node_ || position_ != other.position_;
+ return !Equals(other);
}
bool operator!=(const const_iterator &other) const {
- return node_ != other.node_ || position_ != other.position_;
+ return !Equals(other);
}
// Returns n such that n calls to ++other yields *this.
@@ -1098,9 +1147,12 @@ class btree_iterator {
// Accessors for the key/value the iterator is pointing at.
reference operator*() const {
ABSL_HARDENING_ASSERT(node_ != nullptr);
- ABSL_HARDENING_ASSERT(node_->start() <= position_);
- ABSL_HARDENING_ASSERT(node_->finish() > position_);
- assert_valid_generation();
+ assert_valid_generation(node_);
+ ABSL_HARDENING_ASSERT(position_ >= node_->start());
+ if (position_ >= node_->finish()) {
+ ABSL_HARDENING_ASSERT(!IsEndIterator() && "Dereferencing end() iterator");
+ ABSL_HARDENING_ASSERT(position_ < node_->finish());
+ }
return node_->value(static_cast<field_type>(position_));
}
pointer operator->() const { return &operator*(); }
@@ -1151,11 +1203,33 @@ class btree_iterator {
std::is_same<btree_iterator, iterator>::value,
int> = 0>
explicit btree_iterator(const btree_iterator<N, R, P> other)
- : node_(const_cast<node_type *>(other.node_)),
- position_(other.position_) {
-#ifdef ABSL_BTREE_ENABLE_GENERATIONS
- generation_ = other.generation_;
-#endif
+ : btree_iterator_generation_info(other.generation()),
+ node_(const_cast<node_type *>(other.node_)),
+ position_(other.position_) {}
+
+ bool Equals(const const_iterator other) const {
+ ABSL_HARDENING_ASSERT(((node_ == nullptr && other.node_ == nullptr) ||
+ (node_ != nullptr && other.node_ != nullptr)) &&
+ "Comparing default-constructed iterator with "
+ "non-default-constructed iterator.");
+ // Note: we use assert instead of ABSL_HARDENING_ASSERT here because this
+ // changes the complexity of Equals from O(1) to O(log(N) + log(M)) where
+ // N/M are sizes of the containers containing node_/other.node_.
+ assert(AreNodesFromSameContainer(node_, other.node_) &&
+ "Comparing iterators from different containers.");
+ assert_valid_generation(node_);
+ other.assert_valid_generation(other.node_);
+ return node_ == other.node_ && position_ == other.position_;
+ }
+
+ bool IsEndIterator() const {
+ if (position_ != node_->finish()) return false;
+ node_type *node = node_;
+ while (!node->is_root()) {
+ if (node->position() != node->parent()->finish()) return false;
+ node = node->parent();
+ }
+ return true;
}
// Returns n such that n calls to ++other yields *this.
@@ -1165,7 +1239,7 @@ class btree_iterator {
// Increment/decrement the iterator.
void increment() {
- assert_valid_generation();
+ assert_valid_generation(node_);
if (node_->is_leaf() && ++position_ < node_->finish()) {
return;
}
@@ -1174,7 +1248,7 @@ class btree_iterator {
void increment_slow();
void decrement() {
- assert_valid_generation();
+ assert_valid_generation(node_);
if (node_->is_leaf() && --position_ >= node_->start()) {
return;
}
@@ -1182,14 +1256,6 @@ class btree_iterator {
}
void decrement_slow();
- // Updates the generation. For use internally right before we return an
- // iterator to the user.
- void update_generation() {
-#ifdef ABSL_BTREE_ENABLE_GENERATIONS
- if (node_ != nullptr) generation_ = node_->generation();
-#endif
- }
-
const key_type &key() const {
return node_->key(static_cast<size_type>(position_));
}
@@ -1197,15 +1263,8 @@ class btree_iterator {
return node_->slot(static_cast<size_type>(position_));
}
- void assert_valid_generation() const {
-#ifdef ABSL_BTREE_ENABLE_GENERATIONS
- if (node_ != nullptr && node_->generation() != generation_) {
- ABSL_INTERNAL_LOG(
- FATAL,
- "Attempting to use an invalidated iterator. The corresponding b-tree "
- "container has been mutated since this iterator was constructed.");
- }
-#endif
+ void update_generation() {
+ btree_iterator_generation_info::update_generation(node_);
}
// The node in the tree the iterator is pointing at.
@@ -1214,10 +1273,6 @@ class btree_iterator {
// NOTE: this is an int rather than a field_type because iterators can point
// to invalid positions (such as -1) in certain circumstances.
int position_;
-#ifdef ABSL_BTREE_ENABLE_GENERATIONS
- // Used to check that the iterator hasn't been invalidated.
- uint32_t generation_;
-#endif
};
template <typename Params>
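
The refactoring above moves the `ABSL_BTREE_ENABLE_GENERATIONS`-guarded members of `btree_iterator` into a private base class that is either `btree_iterator_generation_info_enabled` or the empty `..._disabled` variant. With private inheritance the empty-base optimization applies, so iterators carry no extra state when generation checking is off. A standalone sketch of that pattern (the class names and layout here are illustrative, not the Abseil types):

#include <cstdint>
#include <cstdio>

struct GenerationEnabled {
  explicit GenerationEnabled(uint32_t g) : generation_(g) {}
  uint32_t generation_;  // Extra state only in the checking build.
};

struct GenerationDisabled {
  explicit GenerationDisabled(uint32_t) {}  // Empty: contributes no storage.
};

template <typename GenInfo>
class Iter : private GenInfo {  // Empty-base optimization if GenInfo is empty.
 public:
  Iter() : GenInfo(0) {}

 private:
  void* node_ = nullptr;
  int position_ = 0;
};

int main() {
  // On a typical 64-bit ABI the disabled variant stays at 16 bytes, while the
  // enabled variant grows to hold (and align around) the generation counter.
  std::printf("enabled: %zu, disabled: %zu\n", sizeof(Iter<GenerationEnabled>),
              sizeof(Iter<GenerationDisabled>));
}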
diff --git a/absl/container/internal/btree_container.h b/absl/container/internal/btree_container.h
index 3e259861..2bff11db 100644
--- a/absl/container/internal/btree_container.h
+++ b/absl/container/internal/btree_container.h
@@ -65,6 +65,11 @@ class btree_container {
using const_reverse_iterator = typename Tree::const_reverse_iterator;
using node_type = typename Tree::node_handle_type;
+ struct extract_and_get_next_return_type {
+ node_type node;
+ iterator next;
+ };
+
// Constructors/assignments.
btree_container() : tree_(key_compare(), allocator_type()) {}
explicit btree_container(const key_compare &comp,
@@ -165,6 +170,15 @@ class btree_container {
}
// Extract routines.
+ extract_and_get_next_return_type extract_and_get_next(
+ const_iterator position) {
+ // Use Construct instead of Transfer because the rebalancing code will
+ // destroy the slot later.
+ // Note: we rely on erase() taking place after Construct().
+ return {CommonAccess::Construct<node_type>(get_allocator(),
+ iterator(position).slot()),
+ erase(position)};
+ }
node_type extract(iterator position) {
// Use Construct instead of Transfer because the rebalancing code will
// destroy the slot later.
diff --git a/absl/container/internal/common_policy_traits.h b/absl/container/internal/common_policy_traits.h
index 0fd4866e..b42c9a48 100644
--- a/absl/container/internal/common_policy_traits.h
+++ b/absl/container/internal/common_policy_traits.h
@@ -63,7 +63,7 @@ struct common_policy_traits {
// UNINITIALIZED
template <class Alloc>
static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
- transfer_impl(alloc, new_slot, old_slot, 0);
+ transfer_impl(alloc, new_slot, old_slot, Rank0{});
}
// PRECONDITION: `slot` is INITIALIZED
@@ -80,29 +80,46 @@ struct common_policy_traits {
return P::element(slot);
}
+ static constexpr bool transfer_uses_memcpy() {
+ return std::is_same<decltype(transfer_impl<std::allocator<char>>(
+ nullptr, nullptr, nullptr, Rank0{})),
+ std::true_type>::value;
+ }
+
private:
+ // To rank the overloads below for overload resolution. Rank0 is preferred.
+ struct Rank2 {};
+ struct Rank1 : Rank2 {};
+ struct Rank0 : Rank1 {};
+
// Use auto -> decltype as an enabler.
template <class Alloc, class P = Policy>
static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
- slot_type* old_slot, int)
+ slot_type* old_slot, Rank0)
-> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
P::transfer(alloc, new_slot, old_slot);
}
- template <class Alloc>
- static void transfer_impl(Alloc* alloc, slot_type* new_slot,
- slot_type* old_slot, char) {
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
- if (absl::is_trivially_relocatable<value_type>()) {
- // TODO(b/247130232,b/251814870): remove casts after fixing warnings.
- std::memcpy(static_cast<void*>(
- std::launder(const_cast<std::remove_const_t<value_type>*>(
- &element(new_slot)))),
- static_cast<const void*>(&element(old_slot)),
- sizeof(value_type));
- return;
- }
+ // This overload returns true_type for the trait below.
+ // The conditional_t is to make the enabler type dependent.
+ template <class Alloc,
+ typename = std::enable_if_t<absl::is_trivially_relocatable<
+ std::conditional_t<false, Alloc, value_type>>::value>>
+ static std::true_type transfer_impl(Alloc*, slot_type* new_slot,
+ slot_type* old_slot, Rank1) {
+ // TODO(b/247130232): remove casts after fixing warnings.
+ // TODO(b/251814870): remove casts after fixing warnings.
+ std::memcpy(
+ static_cast<void*>(std::launder(
+ const_cast<std::remove_const_t<value_type>*>(&element(new_slot)))),
+ static_cast<const void*>(&element(old_slot)), sizeof(value_type));
+ return {};
+ }
#endif
+ template <class Alloc>
+ static void transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, Rank2) {
construct(alloc, new_slot, std::move(element(old_slot)));
destroy(alloc, old_slot);
}
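
The `Rank0`/`Rank1`/`Rank2` tags above replace the old `int`/`char` argument trick for steering overload resolution: `Rank0` derives from `Rank1`, which derives from `Rank2`, so passing `Rank0{}` selects the most-derived overload that survives SFINAE and falls back down the chain otherwise (an exact match beats a derived-to-base conversion, and a conversion to a nearer base beats one to a farther base). A standalone illustration of the ranking with toy functions (not the Abseil traits):

#include <cstdio>
#include <type_traits>

struct Rank2 {};
struct Rank1 : Rank2 {};
struct Rank0 : Rank1 {};

// Preferred overload: viable only for integral T (SFINAE on the return type).
template <typename T>
auto describe(T, Rank0) -> std::enable_if_t<std::is_integral<T>::value> {
  std::puts("integral overload (Rank0)");
}

// Middle overload: viable only for floating-point T.
template <typename T>
auto describe(T, Rank1) -> std::enable_if_t<std::is_floating_point<T>::value> {
  std::puts("floating-point overload (Rank1)");
}

// Fallback for everything else.
template <typename T>
void describe(T, Rank2) {
  std::puts("generic fallback (Rank2)");
}

int main() {
  describe(1, Rank0{});      // integral overload (Rank0)
  describe(1.5, Rank0{});    // floating-point overload (Rank1)
  describe("str", Rank0{});  // generic fallback (Rank2)
}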
diff --git a/absl/container/internal/hashtablez_sampler.cc b/absl/container/internal/hashtablez_sampler.cc
index 5b8cf341..6b6d3491 100644
--- a/absl/container/internal/hashtablez_sampler.cc
+++ b/absl/container/internal/hashtablez_sampler.cc
@@ -14,6 +14,7 @@
#include "absl/container/internal/hashtablez_sampler.h"
+#include <algorithm>
#include <atomic>
#include <cassert>
#include <cmath>
@@ -158,6 +159,43 @@ void UnsampleSlow(HashtablezInfo* info) {
GlobalHashtablezSampler().Unregister(info);
}
+void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+ total_probe_length /= 16;
+#else
+ total_probe_length /= 8;
+#endif
+ info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_rehashes.store(
+ 1 + info->num_rehashes.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+}
+
+void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) {
+ info->max_reserve.store(
+ (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+ target_capacity),
+ std::memory_order_relaxed);
+}
+
+void RecordClearedReservationSlow(HashtablezInfo* info) {
+ info->max_reserve.store(0, std::memory_order_relaxed);
+}
+
+void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+ size_t capacity) {
+ info->size.store(size, std::memory_order_relaxed);
+ info->capacity.store(capacity, std::memory_order_relaxed);
+ if (size == 0) {
+ // This is a clear, reset the total/num_erases too.
+ info->total_probe_length.store(0, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+ }
+}
+
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
size_t distance_from_desired) {
// SwissTables probe in groups of 16, so scale this to count items probes and
@@ -180,6 +218,14 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->size.fetch_add(1, std::memory_order_relaxed);
}
+void RecordEraseSlow(HashtablezInfo* info) {
+ info->size.fetch_sub(1, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_erases.store(1 + info->num_erases.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+}
+
void SetHashtablezConfigListener(HashtablezConfigListener l) {
g_hashtablez_config_listener.store(l, std::memory_order_release);
}
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index a89518bb..d8fd8f34 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -95,55 +95,19 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
size_t inline_element_size; // How big is the slot?
};
-inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#ifdef ABSL_INTERNAL_HAVE_SSE2
- total_probe_length /= 16;
-#else
- total_probe_length /= 8;
-#endif
- info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
- info->num_erases.store(0, std::memory_order_relaxed);
- // There is only one concurrent writer, so `load` then `store` is sufficient
- // instead of using `fetch_add`.
- info->num_rehashes.store(
- 1 + info->num_rehashes.load(std::memory_order_relaxed),
- std::memory_order_relaxed);
-}
+void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length);
-inline void RecordReservationSlow(HashtablezInfo* info,
- size_t target_capacity) {
- info->max_reserve.store(
- (std::max)(info->max_reserve.load(std::memory_order_relaxed),
- target_capacity),
- std::memory_order_relaxed);
-}
+void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity);
-inline void RecordClearedReservationSlow(HashtablezInfo* info) {
- info->max_reserve.store(0, std::memory_order_relaxed);
-}
+void RecordClearedReservationSlow(HashtablezInfo* info);
-inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
- size_t capacity) {
- info->size.store(size, std::memory_order_relaxed);
- info->capacity.store(capacity, std::memory_order_relaxed);
- if (size == 0) {
- // This is a clear, reset the total/num_erases too.
- info->total_probe_length.store(0, std::memory_order_relaxed);
- info->num_erases.store(0, std::memory_order_relaxed);
- }
-}
+void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+ size_t capacity);
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
size_t distance_from_desired);
-inline void RecordEraseSlow(HashtablezInfo* info) {
- info->size.fetch_sub(1, std::memory_order_relaxed);
- // There is only one concurrent writer, so `load` then `store` is sufficient
- // instead of using `fetch_add`.
- info->num_erases.store(
- 1 + info->num_erases.load(std::memory_order_relaxed),
- std::memory_order_relaxed);
-}
+void RecordEraseSlow(HashtablezInfo* info);
struct SamplingState {
int64_t next_sample;
@@ -165,7 +129,10 @@ class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() : info_(nullptr) {}
explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
- ~HashtablezInfoHandle() {
+
+ // We do not have a destructor. Caller is responsible for calling Unregister
+ // before destroying the handle.
+ void Unregister() {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
UnsampleSlow(info_);
}
@@ -230,6 +197,7 @@ class HashtablezInfoHandle {
explicit HashtablezInfoHandle() = default;
explicit HashtablezInfoHandle(std::nullptr_t) {}
+ inline void Unregister() {}
inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
inline void RecordRehash(size_t /*total_probe_length*/) {}
inline void RecordReservation(size_t /*target_capacity*/) {}
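
Several of the recorders moved out of line here increment counters with a relaxed `load` followed by a `store` rather than `fetch_add`, relying on the invariant called out in the comments that each `HashtablezInfo` has only one concurrent writer. A small sketch of that pattern in isolation (the struct is illustrative, not the Abseil type):

#include <atomic>
#include <cstddef>

struct Counters {
  std::atomic<size_t> num_rehashes{0};
};

// Correct only under a single-writer assumption: no increment can be lost
// because no other thread writes the counter, and it avoids the cost of an
// atomic read-modify-write.
void RecordRehash(Counters* c) {
  c->num_rehashes.store(1 + c->num_rehashes.load(std::memory_order_relaxed),
                        std::memory_order_relaxed);
}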
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index f623494c..0398f530 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -85,6 +85,8 @@ using IsMemcpyOk =
template <typename A>
using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
+template <typename A>
+using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;
template <typename T>
struct TypeIdentity {
@@ -123,8 +125,8 @@ struct DestroyAdapter<A, /* IsTriviallyDestructible */ true> {
template <typename A>
struct Allocation {
- Pointer<A> data;
- SizeType<A> capacity;
+ Pointer<A> data = nullptr;
+ SizeType<A> capacity = 0;
};
template <typename A,
@@ -300,6 +302,20 @@ class ConstructionTransaction {
template <typename T, size_t N, typename A>
class Storage {
public:
+ struct MemcpyPolicy {};
+ struct ElementwiseAssignPolicy {};
+ struct ElementwiseSwapPolicy {};
+ struct ElementwiseConstructPolicy {};
+
+ using MoveAssignmentPolicy = absl::conditional_t<
+ IsMemcpyOk<A>::value, MemcpyPolicy,
+ absl::conditional_t<IsMoveAssignOk<A>::value, ElementwiseAssignPolicy,
+ ElementwiseConstructPolicy>>;
+ using SwapPolicy = absl::conditional_t<
+ IsMemcpyOk<A>::value, MemcpyPolicy,
+ absl::conditional_t<IsSwapOk<A>::value, ElementwiseSwapPolicy,
+ ElementwiseConstructPolicy>>;
+
static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
return current_capacity * 2;
}
@@ -363,7 +379,9 @@ class Storage {
return data_.allocated.allocated_capacity;
}
- SizeType<A> GetInlinedCapacity() const { return static_cast<SizeType<A>>(N); }
+ SizeType<A> GetInlinedCapacity() const {
+ return static_cast<SizeType<A>>(kOptimalInlinedSize);
+ }
StorageView<A> MakeStorageView() {
return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
@@ -467,8 +485,15 @@ class Storage {
SizeType<A> allocated_capacity;
};
+ // `kOptimalInlinedSize` is an automatically adjusted inlined capacity of the
+ // `InlinedVector`. Sometimes, it is possible to increase the capacity (from
+ // the user requested `N`) without increasing the size of the `InlinedVector`.
+ static constexpr size_t kOptimalInlinedSize =
+ (std::max)(N, sizeof(Allocated) / sizeof(ValueType<A>));
+
struct Inlined {
- alignas(ValueType<A>) char inlined_data[sizeof(ValueType<A>[N])];
+ alignas(ValueType<A>) char inlined_data[sizeof(
+ ValueType<A>[kOptimalInlinedSize])];
};
union Data {
@@ -476,6 +501,13 @@ class Storage {
Inlined inlined;
};
+ void SwapN(ElementwiseSwapPolicy, Storage* other, SizeType<A> n);
+ void SwapN(ElementwiseConstructPolicy, Storage* other, SizeType<A> n);
+
+ void SwapInlinedElements(MemcpyPolicy, Storage* other);
+ template <typename NotMemcpyPolicy>
+ void SwapInlinedElements(NotMemcpyPolicy, Storage* other);
+
template <typename... Args>
ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
@@ -889,26 +921,7 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
swap(data_.allocated, other_storage_ptr->data_.allocated);
} else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
- Storage* small_ptr = this;
- Storage* large_ptr = other_storage_ptr;
- if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
-
- for (SizeType<A> i = 0; i < small_ptr->GetSize(); ++i) {
- swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
- }
-
- IteratorValueAdapter<A, MoveIterator<A>> move_values(
- MoveIterator<A>(large_ptr->GetInlinedData() + small_ptr->GetSize()));
-
- ConstructElements<A>(large_ptr->GetAllocator(),
- small_ptr->GetInlinedData() + small_ptr->GetSize(),
- move_values,
- large_ptr->GetSize() - small_ptr->GetSize());
-
- DestroyAdapter<A>::DestroyElements(
- large_ptr->GetAllocator(),
- large_ptr->GetInlinedData() + small_ptr->GetSize(),
- large_ptr->GetSize() - small_ptr->GetSize());
+ SwapInlinedElements(SwapPolicy{}, other_storage_ptr);
} else {
Storage* allocated_ptr = this;
Storage* inlined_ptr = other_storage_ptr;
@@ -944,6 +957,68 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
swap(GetAllocator(), other_storage_ptr->GetAllocator());
}
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::SwapN(ElementwiseSwapPolicy, Storage* other,
+ SizeType<A> n) {
+ std::swap_ranges(GetInlinedData(), GetInlinedData() + n,
+ other->GetInlinedData());
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::SwapN(ElementwiseConstructPolicy, Storage* other,
+ SizeType<A> n) {
+ Pointer<A> a = GetInlinedData();
+ Pointer<A> b = other->GetInlinedData();
+ // see note on allocators in `SwapInlinedElements`.
+ A& allocator_a = GetAllocator();
+ A& allocator_b = other->GetAllocator();
+ for (SizeType<A> i = 0; i < n; ++i, ++a, ++b) {
+ ValueType<A> tmp(std::move(*a));
+
+ AllocatorTraits<A>::destroy(allocator_a, a);
+ AllocatorTraits<A>::construct(allocator_b, a, std::move(*b));
+
+ AllocatorTraits<A>::destroy(allocator_b, b);
+ AllocatorTraits<A>::construct(allocator_a, b, std::move(tmp));
+ }
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::SwapInlinedElements(MemcpyPolicy, Storage* other) {
+ Data tmp = data_;
+ data_ = other->data_;
+ other->data_ = tmp;
+}
+
+template <typename T, size_t N, typename A>
+template <typename NotMemcpyPolicy>
+void Storage<T, N, A>::SwapInlinedElements(NotMemcpyPolicy policy,
+ Storage* other) {
+ // Note: `destroy` needs to use pre-swap allocator while `construct` -
+ // post-swap allocator. Allocators will be swapped later on outside of
+ // `SwapInlinedElements`.
+ Storage* small_ptr = this;
+ Storage* large_ptr = other;
+ if (small_ptr->GetSize() > large_ptr->GetSize()) {
+ std::swap(small_ptr, large_ptr);
+ }
+
+ auto small_size = small_ptr->GetSize();
+ auto diff = large_ptr->GetSize() - small_size;
+ SwapN(policy, other, small_size);
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(large_ptr->GetInlinedData() + small_size));
+
+ ConstructElements<A>(large_ptr->GetAllocator(),
+ small_ptr->GetInlinedData() + small_size, move_values,
+ diff);
+
+ DestroyAdapter<A>::DestroyElements(large_ptr->GetAllocator(),
+ large_ptr->GetInlinedData() + small_size,
+ diff);
+}
+
// End ignore "array-bounds"
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
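
`kOptimalInlinedSize` (introduced above) rounds the requested inline capacity `N` up to however many elements fit in the space the allocated representation already needs, which is what the `InlinedCapacityAutoIncrease` test earlier in this diff checks. A hedged sketch of the arithmetic, using a stand-in for `Storage::Allocated` analogous to the test's `MySpan`:

#include <algorithm>
#include <cstddef>

template <typename T>
struct AllocatedLike {  // Stand-in for Storage::Allocated: pointer + capacity.
  T* data;
  size_t capacity;
};

template <typename T, size_t N>
constexpr size_t OptimalInlinedSize() {
  // Never smaller than requested, but grows for free up to the size of the
  // allocated representation that shares the union.
  return (std::max)(N, sizeof(AllocatedLike<T>) / sizeof(T));
}

static_assert(OptimalInlinedSize<char, 1>() == sizeof(AllocatedLike<char>),
              "a 1-char inline buffer rounds up to pointer+size bytes");
static_assert(OptimalInlinedSize<int, 1>() ==
                  sizeof(AllocatedLike<int>) / sizeof(int),
              "ints round up to however many fit in pointer+size");
static_assert(OptimalInlinedSize<int, 64>() == 64,
              "a large request is left unchanged");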
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index c63a2e02..79220836 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -16,6 +16,7 @@
#include <atomic>
#include <cstddef>
+#include <cstring>
#include "absl/base/config.h"
@@ -63,8 +64,155 @@ void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
ctrl[capacity] = ctrl_t::kSentinel;
}
-// Extern template instantiotion for inline function.
-template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+// Extern template instantiation for inline function.
+template FindInfo find_first_non_full(const CommonFields&, size_t);
+
+FindInfo find_first_non_full_outofline(const CommonFields& common,
+ size_t hash) {
+ return find_first_non_full(common, hash);
+}
+
+// Return address of the ith slot in slots where each slot occupies slot_size.
+static inline void* SlotAddress(void* slot_array, size_t slot,
+ size_t slot_size) {
+ return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot_array) +
+ (slot * slot_size));
+}
+
+// Return the address of the slot just after slot assuming each slot
+// has the specified size.
+static inline void* NextSlot(void* slot, size_t slot_size) {
+ return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) + slot_size);
+}
+
+// Return the address of the slot just before slot assuming each slot
+// has the specified size.
+static inline void* PrevSlot(void* slot, size_t slot_size) {
+ return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
+}
+
+void DropDeletesWithoutResize(CommonFields& common,
+ const PolicyFunctions& policy, void* tmp_space) {
+ void* set = &common;
+ void* slot_array = common.slots_;
+ const size_t capacity = common.capacity_;
+ assert(IsValidCapacity(capacity));
+ assert(!is_small(capacity));
+ // Algorithm:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ // - for each slot marked as DELETED
+ // hash = Hash(element)
+ // target = find_first_non_full(hash)
+ // if target is in the same group
+ // mark slot as FULL
+ // else if target is EMPTY
+ // transfer element to target
+ // mark slot as EMPTY
+ // mark target as FULL
+ // else if target is DELETED
+ // swap current element with target element
+ // mark target as FULL
+ // repeat procedure for current slot with moved from element (target)
+ ctrl_t* ctrl = common.control_;
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
+ auto hasher = policy.hash_slot;
+ auto transfer = policy.transfer;
+ const size_t slot_size = policy.slot_size;
+
+ size_t total_probe_length = 0;
+ void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
+ for (size_t i = 0; i != capacity;
+ ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
+ assert(slot_ptr == SlotAddress(slot_array, i, slot_size));
+ if (!IsDeleted(ctrl[i])) continue;
+ const size_t hash = (*hasher)(set, slot_ptr);
+ const FindInfo target = find_first_non_full(common, hash);
+ const size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
+
+    // Check whether the old and new i fall within the same group wrt the
+    // hash. If they do, we don't need to move the object, as it is already in
+    // the best probe position we can achieve.
+ const size_t probe_offset = probe(common, hash).offset();
+ const auto probe_index = [probe_offset, capacity](size_t pos) {
+ return ((pos - probe_offset) & capacity) / Group::kWidth;
+ };
+
+ // Element doesn't move.
+ if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+ SetCtrl(common, i, H2(hash), slot_size);
+ continue;
+ }
+
+ void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size);
+ if (IsEmpty(ctrl[new_i])) {
+ // Transfer element to the empty spot.
+ // SetCtrl poisons/unpoisons the slots so we have to call it at the
+ // right time.
+ SetCtrl(common, new_i, H2(hash), slot_size);
+ (*transfer)(set, new_slot_ptr, slot_ptr);
+ SetCtrl(common, i, ctrl_t::kEmpty, slot_size);
+ } else {
+ assert(IsDeleted(ctrl[new_i]));
+ SetCtrl(common, new_i, H2(hash), slot_size);
+ // Until we are done rehashing, DELETED marks previously FULL slots.
+
+ // Swap i and new_i elements.
+ (*transfer)(set, tmp_space, new_slot_ptr);
+ (*transfer)(set, new_slot_ptr, slot_ptr);
+ (*transfer)(set, slot_ptr, tmp_space);
+
+ // repeat the processing of the ith slot
+ --i;
+ slot_ptr = PrevSlot(slot_ptr, slot_size);
+ }
+ }
+ ResetGrowthLeft(common);
+ common.infoz().RecordRehash(total_probe_length);
+}
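The probe_index lambda above is what lets an element stay put when it already sits in the best group reachable for its hash. A minimal standalone sketch of that arithmetic, with the group width and capacity assumed for illustration:

#include <cstddef>
#include <iostream>

int main() {
  constexpr std::size_t kWidth = 16;    // assumed group width
  constexpr std::size_t capacity = 63;  // capacities have the form 2^k - 1
  std::size_t probe_offset = 17;        // where probing for this hash starts

  // Same arithmetic as the probe_index lambda in DropDeletesWithoutResize.
  auto probe_index = [&](std::size_t pos) {
    return ((pos - probe_offset) & capacity) / kWidth;
  };

  // 20 and 30 land in the same group relative to the probe start; 50 does
  // not, so an element at 50 would be moved while one at 30 could stay put.
  std::cout << probe_index(20) << " " << probe_index(30) << " "
            << probe_index(50) << "\n";  // prints: 0 0 2
}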
+
+void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
+ assert(IsFull(*it) && "erasing a dangling iterator");
+ --c.size_;
+ const auto index = static_cast<size_t>(it - c.control_);
+ const size_t index_before = (index - Group::kWidth) & c.capacity_;
+ const auto empty_after = Group(it).MaskEmpty();
+ const auto empty_before = Group(c.control_ + index_before).MaskEmpty();
+
+  // We count how many consecutive non-empty slots we have to the right and to
+  // the left of `it`. If the sum is >= kWidth then there is at least one probe
+  // window that might have seen a full group.
+ bool was_never_full = empty_before && empty_after &&
+ static_cast<size_t>(empty_after.TrailingZeros()) +
+ empty_before.LeadingZeros() <
+ Group::kWidth;
+
+ SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
+ slot_size);
+ c.growth_left_ += (was_never_full ? 1 : 0);
+ c.infoz().RecordErase();
+}
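The was_never_full computation above decides whether an erased slot can go straight back to kEmpty instead of leaving a kDeleted tombstone. A simplified sketch of the same idea, assuming a linear (non-wrapping) toy control array:

#include <cstddef>
#include <iostream>
#include <string>

// 'F' marks a full slot, 'E' an empty one; `index` is the slot being erased.
// If the run of consecutive full slots through `index` is shorter than one
// group, no probe window can ever have seen a full group covering it.
bool WasNeverFull(const std::string& ctrl, std::size_t index,
                  std::size_t group_width) {
  std::size_t before = 0, after = 0;
  for (std::size_t i = index; i > 0 && ctrl[i - 1] == 'F'; --i) ++before;
  for (std::size_t i = index + 1; i < ctrl.size() && ctrl[i] == 'F'; ++i) ++after;
  return before + after < group_width;
}

int main() {
  std::cout << WasNeverFull("EEFFXFFEE", 4, 16) << "\n";  // 1: short full run

  std::string crowded(20, 'F');
  crowded[10] = 'X';                                   // erase mid-run
  std::cout << WasNeverFull(crowded, 10, 16) << "\n";  // 0: tombstone needed
}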
+
+void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
+ bool reuse) {
+ if (reuse) {
+ c.size_ = 0;
+ ResetCtrl(c, policy.slot_size);
+ c.infoz().RecordStorageChanged(0, c.capacity_);
+ } else {
+ void* set = &c;
+ (*policy.dealloc)(set, policy, c.control_, c.slots_, c.capacity_);
+ c.control_ = EmptyGroup();
+ c.slots_ = nullptr;
+ c.size_ = 0;
+ c.capacity_ = 0;
+ c.growth_left_ = 0;
+ c.infoz().RecordClearedReservation();
+ assert(c.size_ == 0);
+ c.infoz().RecordStorageChanged(0, 0);
+ }
+}
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 93de2221..8a33106f 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -717,6 +717,66 @@ using Group = GroupAArch64Impl;
using Group = GroupPortableImpl;
#endif
+// CommonFields holds the fields in raw_hash_set that do not depend
+// on template parameters. This allows us to conveniently pass all
+// of this state to helper functions as a single argument.
+//
+// We make HashtablezInfoHandle a base class to take advantage of
+// the empty base-class optimization when sampling is turned off.
+class CommonFields : public HashtablezInfoHandle {
+ public:
+ CommonFields() = default;
+
+ // Not copyable
+ CommonFields(const CommonFields&) = delete;
+ CommonFields& operator=(const CommonFields&) = delete;
+
+ // Movable
+ CommonFields(CommonFields&& that)
+ : HashtablezInfoHandle(
+ std::move(static_cast<HashtablezInfoHandle&&>(that))),
+ // Explicitly copying fields into "this" and then resetting "that"
+        // fields generates less code than calling absl::exchange per field.
+ control_(that.control_),
+ slots_(that.slots_),
+ size_(that.size_),
+ capacity_(that.capacity_),
+ growth_left_(that.growth_left_) {
+ that.control_ = EmptyGroup();
+ that.slots_ = nullptr;
+ that.size_ = 0;
+ that.capacity_ = 0;
+ that.growth_left_ = 0;
+ }
+ CommonFields& operator=(CommonFields&&) = default;
+
+ HashtablezInfoHandle& infoz() { return *this; }
+ const HashtablezInfoHandle& infoz() const { return *this; }
+
+ // TODO(b/259599413): Investigate removing some of these fields:
+ // - control/slots can be derived from each other
+ // - size can be moved into the slot array
+
+ // The control bytes (and, also, a pointer to the base of the backing array).
+ //
+ // This contains `capacity + 1 + NumClonedBytes()` entries, even
+ // when the table is empty (hence EmptyGroup).
+ ctrl_t* control_ = EmptyGroup();
+
+ // The beginning of the slots, located at `SlotOffset()` bytes after
+ // `control`. May be null for empty tables.
+ void* slots_ = nullptr;
+
+ // The number of filled slots.
+ size_t size_ = 0;
+
+ // The total number of available slots.
+ size_t capacity_ = 0;
+
+ // The number of slots we can still fill without needing to rehash.
+ size_t growth_left_ = 0;
+};
+
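The empty-base arrangement described above is easy to see in isolation. A small sketch with a stand-in type for a disabled HashtablezInfoHandle (the names here are assumptions for illustration):

#include <iostream>

struct DisabledHandle {};  // plays the role of an empty HashtablezInfoHandle

struct AsMember {
  DisabledHandle info;  // an empty member still occupies a byte plus padding
  void* control;
};

struct AsBase : DisabledHandle {  // an empty base adds no storage (EBO)
  void* control;
};

int main() {
  std::cout << sizeof(AsMember) << " vs " << sizeof(AsBase) << "\n";
  // Typically prints "16 vs 8" on 64-bit targets.
}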
// Returns the number of "cloned control bytes".
//
// This is the number of control bytes that are present both at the beginning
@@ -797,15 +857,54 @@ size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
return 0;
}
-#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \
- ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg)
+#define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, operation) \
+ do { \
+ ABSL_HARDENING_ASSERT( \
+ (ctrl != nullptr) && operation \
+ " called on invalid iterator. The iterator might be an end() " \
+ "iterator or may have been default constructed."); \
+ ABSL_HARDENING_ASSERT( \
+ (IsFull(*ctrl)) && operation \
+ " called on invalid iterator. The element might have been erased or " \
+ "the table might have rehashed."); \
+ } while (0)
+
+// Note that for comparisons, null/end iterators are valid.
+inline void AssertIsValidForComparison(const ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid iterator comparison. The element might have "
+ "been erased or the table might have rehashed.");
+}
+
+// If the two iterators come from the same container, then their pointers will
+// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
+// Note: we take slots by reference so that it's not UB if they're uninitialized
+// as long as we don't read them (when ctrl is null).
+inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
+ const ctrl_t* ctrl_b,
+ const void* const& slot_a,
+ const void* const& slot_b) {
+ // If either control byte is null, then we can't tell.
+ if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
+ const void* low_slot = slot_a;
+ const void* hi_slot = slot_b;
+ if (ctrl_a > ctrl_b) {
+ std::swap(ctrl_a, ctrl_b);
+ std::swap(low_slot, hi_slot);
+ }
+ return ctrl_b < low_slot && low_slot <= hi_slot;
+}
-inline void AssertIsValid(ctrl_t* ctrl) {
+// Asserts that two iterators come from the same container.
+// Note: we take slots by reference so that it's not UB if they're uninitialized
+// as long as we don't read them (when ctrl is null).
+inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
+ const void* const& slot_a,
+ const void* const& slot_b) {
ABSL_HARDENING_ASSERT(
- (ctrl == nullptr || IsFull(*ctrl)) &&
- "Invalid operation on iterator. The element might have "
- "been erased, the table might have rehashed, or this may "
- "be an end() iterator.");
+ AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
+ "Invalid iterator comparison. The iterators may be from different "
+ "containers or the container might have rehashed.");
}
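A small sketch of the interleaving property these checks rely on: within one backing allocation the control bytes precede the slots, so the larger of the two control pointers still compares below both slot pointers. Sizes and offsets are made up for illustration:

#include <cstddef>
#include <iostream>

int main() {
  constexpr std::size_t kCtrlBytes = 32;      // control region (assumed)
  constexpr std::size_t kSlotBytes = 15 * 8;  // slot region (assumed)
  unsigned char backing[kCtrlBytes + kSlotBytes] = {};

  unsigned char* ctrl_a = backing + 3;  // iterator a's control byte
  unsigned char* ctrl_b = backing + 9;  // iterator b's control byte
  unsigned char* slot_a = backing + kCtrlBytes + 3 * 8;
  unsigned char* slot_b = backing + kCtrlBytes + 9 * 8;

  // Mirrors the AreItersFromSameContainer condition after its swap step.
  bool same = ctrl_a <= ctrl_b && ctrl_b < slot_a && slot_a <= slot_b;
  std::cout << same << "\n";  // prints: 1
}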
struct FindInfo {
@@ -827,9 +926,10 @@ struct FindInfo {
// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
-// Begins a probing operation on `ctrl`, using `hash`.
-inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
- size_t capacity) {
+// Begins a probing operation on `common.control`, using `hash`.
+inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
+ const ctrl_t* ctrl = common.control_;
+ const size_t capacity = common.capacity_;
return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
}
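For reference, the sequence returned by probe() advances through the table in triangular steps of whole groups, which visits every group once when the capacity has the form 2^k - 1. A standalone sketch, with the group width and capacity assumed:

#include <cstddef>
#include <iostream>

int main() {
  constexpr std::size_t kWidth = 16;
  constexpr std::size_t capacity = 63;               // 2^6 - 1
  std::size_t offset = std::size_t{100} & capacity;  // pretend H1(hash)
  std::size_t index = 0;

  for (int probes = 0; probes < 4; ++probes) {
    std::cout << "probe " << probes << " starts at slot " << offset << "\n";
    index += kWidth;                       // triangular (quadratic) step
    offset = (offset + index) & capacity;  // wrap within the table
  }
}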
@@ -841,9 +941,9 @@ inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
// NOTE: this function must work with tables having both empty and deleted
// slots in the same group. Such tables appear during `erase()`.
template <typename = void>
-inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
- size_t capacity) {
- auto seq = probe(ctrl, hash, capacity);
+inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
+ auto seq = probe(common, hash);
+ const ctrl_t* ctrl = common.control_;
while (true) {
Group g{ctrl + seq.offset()};
auto mask = g.MaskEmptyOrDeleted();
@@ -853,55 +953,67 @@ inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
// In debug build we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+ if (!is_small(common.capacity_) && ShouldInsertBackwards(hash, ctrl)) {
return {seq.offset(mask.HighestBitSet()), seq.index()};
}
#endif
return {seq.offset(mask.LowestBitSet()), seq.index()};
}
seq.next();
- assert(seq.index() <= capacity && "full table!");
+ assert(seq.index() <= common.capacity_ && "full table!");
}
}
// Extern template for inline function keep possibility of inlining.
// When compiler decided to not inline, no symbols will be added to the
// corresponding translation unit.
-extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+extern template FindInfo find_first_non_full(const CommonFields&, size_t);
+
+// Non-inlined version of find_first_non_full for use in less
+// performance critical routines.
+FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
+
+inline void ResetGrowthLeft(CommonFields& common) {
+ common.growth_left_ = CapacityToGrowth(common.capacity_) - common.size_;
+}
// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
// array as empty.
-inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
- size_t slot_size) {
+inline void ResetCtrl(CommonFields& common, size_t slot_size) {
+ const size_t capacity = common.capacity_;
+ ctrl_t* ctrl = common.control_;
std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
capacity + 1 + NumClonedBytes());
ctrl[capacity] = ctrl_t::kSentinel;
- SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
+ SanitizerPoisonMemoryRegion(common.slots_, slot_size * capacity);
+ ResetGrowthLeft(common);
}
// Sets `ctrl[i]` to `h`.
//
// Unlike setting it directly, this function will perform bounds checks and
// mirror the value to the cloned tail if necessary.
-inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
- const void* slot, size_t slot_size) {
+inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
+ size_t slot_size) {
+ const size_t capacity = common.capacity_;
assert(i < capacity);
- auto* slot_i = static_cast<const char*>(slot) + i * slot_size;
+ auto* slot_i = static_cast<const char*>(common.slots_) + i * slot_size;
if (IsFull(h)) {
SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
} else {
SanitizerPoisonMemoryRegion(slot_i, slot_size);
}
+ ctrl_t* ctrl = common.control_;
ctrl[i] = h;
ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
}
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
-inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
- const void* slot, size_t slot_size) {
- SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
+inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
+ size_t slot_size) {
+ SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
}
// Given the capacity of a table, computes the offset (from the start of the
@@ -918,6 +1030,87 @@ inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
return SlotOffset(capacity, slot_align) + capacity * slot_size;
}
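A worked example of this layout arithmetic, with the cloned-byte count assumed for illustration:

#include <cstddef>
#include <iostream>

int main() {
  constexpr std::size_t kClonedBytes = 15;  // Group::kWidth - 1, assumed here
  constexpr std::size_t capacity = 15;      // 2^4 - 1
  constexpr std::size_t slot_size = 8, slot_align = 8;

  // Control bytes: one per slot, plus the sentinel, plus the cloned bytes.
  std::size_t num_ctrl = capacity + 1 + kClonedBytes;
  // Slots start at the control size rounded up to the slot alignment.
  std::size_t slot_offset = (num_ctrl + slot_align - 1) & ~(slot_align - 1);
  std::size_t alloc_size = slot_offset + capacity * slot_size;
  std::cout << num_ctrl << " control bytes, slots at offset " << slot_offset
            << ", " << alloc_size << " bytes total\n";  // 31, 32, 152
}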
+template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
+ assert(c.capacity_);
+ // Folks with custom allocators often make unwarranted assumptions about the
+ // behavior of their classes vis-a-vis trivial destructability and what
+ // calls they will or won't make. Avoid sampling for people with custom
+ // allocators to get us out of this mess. This is not a hard guarantee but
+ // a workaround while we plan the exact guarantee we want to provide.
+ const size_t sample_size =
+ (std::is_same<Alloc, std::allocator<char>>::value && c.slots_ == nullptr)
+ ? SizeOfSlot
+ : 0;
+
+ const size_t cap = c.capacity_;
+ char* mem = static_cast<char*>(
+ Allocate<AlignOfSlot>(&alloc, AllocSize(cap, SizeOfSlot, AlignOfSlot)));
+ c.control_ = reinterpret_cast<ctrl_t*>(mem);
+ c.slots_ = mem + SlotOffset(cap, AlignOfSlot);
+ ResetCtrl(c, SizeOfSlot);
+ if (sample_size) {
+ c.infoz() = Sample(sample_size);
+ }
+ c.infoz().RecordStorageChanged(c.size_, cap);
+}
+
+// PolicyFunctions bundles together some information for a particular
+// raw_hash_set<T, ...> instantiation. This information is passed to
+// type-erased functions that want to do small amounts of type-specific
+// work.
+struct PolicyFunctions {
+ size_t slot_size;
+
+ // Return the hash of the pointed-to slot.
+ size_t (*hash_slot)(void* set, void* slot);
+
+ // Transfer the contents of src_slot to dst_slot.
+ void (*transfer)(void* set, void* dst_slot, void* src_slot);
+
+ // Deallocate the specified backing store which is sized for n slots.
+ void (*dealloc)(void* set, const PolicyFunctions& policy, ctrl_t* ctrl,
+ void* slot_array, size_t n);
+};
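A minimal sketch of the same type-erasure pattern, reduced to a size and one function pointer so that a single out-of-line routine can serve many element types. Every name here is illustrative, not library API:

#include <cstddef>
#include <iostream>
#include <new>
#include <string>
#include <utility>

struct ToyPolicy {
  std::size_t slot_size;
  void (*transfer)(void* dst, void* src);  // move src into dst, destroy src
};

template <class T>
void TransferImpl(void* dst, void* src) {
  ::new (dst) T(std::move(*static_cast<T*>(src)));
  static_cast<T*>(src)->~T();
}

// The single type-erased helper shared by every slot type.
void MoveSlot(const ToyPolicy& policy, void* dst, void* src) {
  policy.transfer(dst, src);
}

int main() {
  using Str = std::string;
  alignas(Str) unsigned char a[sizeof(Str)];
  alignas(Str) unsigned char b[sizeof(Str)];
  ::new (a) Str("hello");

  ToyPolicy policy{sizeof(Str), &TransferImpl<Str>};
  MoveSlot(policy, b, a);

  Str* moved = std::launder(reinterpret_cast<Str*>(b));
  std::cout << *moved << "\n";  // prints: hello
  moved->~Str();
}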
+
+// ClearBackingArray clears the backing array, either modifying it in place,
+// or creating a new one based on the value of "reuse".
+// REQUIRES: c.capacity > 0
+void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
+ bool reuse);
+
+// Type-erased version of raw_hash_set::erase_meta_only.
+void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
+
+// Function to place in PolicyFunctions::dealloc for raw_hash_sets
+// that are using std::allocator. This allows us to share the same
+// function body for raw_hash_set instantiations that have the
+// same slot alignment.
+template <size_t AlignOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(void*,
+ const PolicyFunctions& policy,
+ ctrl_t* ctrl, void* slot_array,
+ size_t n) {
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slot_array, policy.slot_size * n);
+
+ std::allocator<char> alloc;
+ Deallocate<AlignOfSlot>(&alloc, ctrl,
+ AllocSize(n, policy.slot_size, AlignOfSlot));
+}
+
+// For trivially relocatable types we use memcpy directly. This allows us to
+// share the same function body for raw_hash_set instantiations that have the
+// same slot size as long as they are relocatable.
+template <size_t SizeOfSlot>
+ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
+ memcpy(dst, src, SizeOfSlot);
+}
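A quick sketch of why a single memcpy-based body can be shared across slot types: for trivially relocatable slots only the byte count matters, so one instantiation per slot size covers every such type. The types here are assumptions for illustration:

#include <cstddef>
#include <cstring>
#include <iostream>

struct SlotA { int key; float value; };
struct SlotB { unsigned key; unsigned value; };
static_assert(sizeof(SlotA) == sizeof(SlotB), "same slot size");

template <std::size_t SizeOfSlot>
void TransferBytes(void* dst, void* src) {
  std::memcpy(dst, src, SizeOfSlot);
}

int main() {
  SlotA a_src{1, 2.5f}, a_dst{};
  SlotB b_src{3, 4}, b_dst{};

  // The very same instantiation relocates both slot types.
  auto* transfer = &TransferBytes<sizeof(SlotA)>;
  transfer(&a_dst, &a_src);
  transfer(&b_dst, &b_src);
  std::cout << a_dst.key << " " << b_dst.value << "\n";  // prints: 1 4
}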
+
+// Type-erased version of raw_hash_set::drop_deletes_without_resize.
+void DropDeletesWithoutResize(CommonFields& common,
+ const PolicyFunctions& policy, void* tmp_space);
+
// A SwissTable.
//
// Policy: a policy defines how to perform different operations on
@@ -1034,22 +1227,19 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
- "operator*() called on invalid iterator.");
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator*()");
return PolicyTraits::element(slot_);
}
// PRECONDITION: not an end() iterator.
pointer operator->() const {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
- "operator-> called on invalid iterator.");
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator->");
return &operator*();
}
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_,
- "operator++ called on invalid iterator.");
+ ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator++");
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -1063,8 +1253,9 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
- AssertIsValid(a.ctrl_);
- AssertIsValid(b.ctrl_);
+ AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_);
+ AssertIsValidForComparison(a.ctrl_);
+ AssertIsValidForComparison(b.ctrl_);
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -1081,7 +1272,7 @@ class raw_hash_set {
// Fixes up `ctrl_` to point to a full by advancing it and `slot_` until
// they reach one.
//
- // If a sentinel is reached, we null both of them out instead.
+ // If a sentinel is reached, we null `ctrl_` out instead.
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
@@ -1109,9 +1300,9 @@ class raw_hash_set {
using pointer = typename raw_hash_set::const_pointer;
using difference_type = typename raw_hash_set::difference_type;
- const_iterator() {}
+ const_iterator() = default;
// Implicit construction from iterator.
- const_iterator(iterator i) : inner_(std::move(i)) {}
+ const_iterator(iterator i) : inner_(std::move(i)) {} // NOLINT
reference operator*() const { return *inner_; }
pointer operator->() const { return inner_.operator->(); }
@@ -1139,19 +1330,20 @@ class raw_hash_set {
using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
using insert_return_type = InsertReturnType<iterator, node_type>;
+ // Note: can't use `= default` due to non-default noexcept (causes
+ // problems for some compilers). NOLINTNEXTLINE
raw_hash_set() noexcept(
std::is_nothrow_default_constructible<hasher>::value&&
std::is_nothrow_default_constructible<key_equal>::value&&
std::is_nothrow_default_constructible<allocator_type>::value) {}
- explicit raw_hash_set(size_t bucket_count,
- const hasher& hash = hasher(),
- const key_equal& eq = key_equal(),
- const allocator_type& alloc = allocator_type())
- : ctrl_(EmptyGroup()),
- settings_(0u, HashtablezInfoHandle(), hash, eq, alloc) {
+ ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
+ size_t bucket_count, const hasher& hash = hasher(),
+ const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : settings_(CommonFields{}, hash, eq, alloc) {
if (bucket_count) {
- capacity_ = NormalizeCapacity(bucket_count);
+ common().capacity_ = NormalizeCapacity(bucket_count);
initialize_slots();
}
}
@@ -1258,47 +1450,29 @@ class raw_hash_set {
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- auto target = find_first_non_full(ctrl_, hash, capacity_);
- SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
- sizeof(slot_type));
+ auto target = find_first_non_full_outofline(common(), hash);
+ SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
emplace_at(target.offset, v);
infoz().RecordInsert(hash, target.probe_length);
}
- size_ = that.size();
+ common().size_ = that.size();
growth_left() -= that.size();
}
- raw_hash_set(raw_hash_set&& that) noexcept(
+ ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
std::is_nothrow_copy_constructible<hasher>::value&&
std::is_nothrow_copy_constructible<key_equal>::value&&
std::is_nothrow_copy_constructible<allocator_type>::value)
- : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
- slots_(absl::exchange(that.slots_, nullptr)),
- size_(absl::exchange(that.size_, size_t{0})),
- capacity_(absl::exchange(that.capacity_, size_t{0})),
- // Hash, equality and allocator are copied instead of moved because
- // `that` must be left valid. If Hash is std::function<Key>, moving it
- // would create a nullptr functor that cannot be called.
- settings_(absl::exchange(that.growth_left(), size_t{0}),
- absl::exchange(that.infoz(), HashtablezInfoHandle()),
- that.hash_ref(),
- that.eq_ref(),
- that.alloc_ref()) {}
+ : // Hash, equality and allocator are copied instead of moved because
+ // `that` must be left valid. If Hash is std::function<Key>, moving it
+ // would create a nullptr functor that cannot be called.
+ settings_(absl::exchange(that.common(), CommonFields{}),
+ that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
- : ctrl_(EmptyGroup()),
- slots_(nullptr),
- size_(0),
- capacity_(0),
- settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
- a) {
+ : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
if (a == that.alloc_ref()) {
- std::swap(ctrl_, that.ctrl_);
- std::swap(slots_, that.slots_);
- std::swap(size_, that.size_);
- std::swap(capacity_, that.capacity_);
- std::swap(growth_left(), that.growth_left());
- std::swap(infoz(), that.infoz());
+ std::swap(common(), that.common());
} else {
reserve(that.size());
// Note: this will copy elements of dense_set and unordered_set instead of
@@ -1322,12 +1496,25 @@ class raw_hash_set {
std::is_nothrow_move_assignable<key_equal>::value) {
// TODO(sbenza): We should only use the operations from the noexcept clause
// to make sure we actually adhere to that contract.
+ // NOLINTNEXTLINE: not returning *this for performance.
return move_assign(
std::move(that),
typename AllocTraits::propagate_on_container_move_assignment());
}
- ~raw_hash_set() { destroy_slots(); }
+ ~raw_hash_set() {
+ const size_t cap = capacity();
+ if (!cap) return;
+ destroy_slots();
+
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), control(),
+ AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
+
+ infoz().Unregister();
+ }
iterator begin() {
auto it = iterator_at(0);
@@ -1344,8 +1531,8 @@ class raw_hash_set {
const_iterator cend() const { return end(); }
bool empty() const { return !size(); }
- size_t size() const { return size_; }
- size_t capacity() const { return capacity_; }
+ size_t size() const { return common().size_; }
+ size_t capacity() const { return common().capacity_; }
size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
ABSL_ATTRIBUTE_REINITIALIZES void clear() {
@@ -1356,22 +1543,25 @@ class raw_hash_set {
// compared to destruction of the elements of the container. So we pick the
// largest bucket_count() threshold for which iteration is still fast and
// past that we simply deallocate the array.
- if (capacity_ > 127) {
+ const size_t cap = capacity();
+ if (cap == 0) {
+      // Already guaranteed to be empty, so nothing to do.
+ } else {
destroy_slots();
+ ClearBackingArray(common(), GetPolicyFunctions(),
+ /*reuse=*/cap < 128);
+ }
+ }
- infoz().RecordClearedReservation();
- } else if (capacity_) {
- for (size_t i = 0; i != capacity_; ++i) {
- if (IsFull(ctrl_[i])) {
- PolicyTraits::destroy(&alloc_ref(), slots_ + i);
- }
+ inline void destroy_slots() {
+ const size_t cap = capacity();
+ const ctrl_t* ctrl = control();
+ slot_type* slot = slot_array();
+ for (size_t i = 0; i != cap; ++i) {
+ if (IsFull(ctrl[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slot + i);
}
- size_ = 0;
- ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
- reset_growth_left();
}
- assert(empty());
- infoz().RecordStorageChanged(0, capacity_);
}
// This overload kicks in when the argument is an rvalue of insertable and
@@ -1559,7 +1749,7 @@ class raw_hash_set {
iterator lazy_emplace(const key_arg<K>& key, F&& f) {
auto res = find_or_prepare_insert(key);
if (res.second) {
- slot_type* slot = slots_ + res.first;
+ slot_type* slot = slot_array() + res.first;
std::forward<F>(f)(constructor(&alloc_ref(), &slot));
assert(!slot);
}
@@ -1601,8 +1791,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_,
- "erase() called on invalid iterator.");
+ ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, "erase()");
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1636,8 +1825,7 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_,
- "extract() called on invalid iterator.");
+ ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, "extract()");
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1657,24 +1845,18 @@ class raw_hash_set {
IsNoThrowSwappable<allocator_type>(
typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
- swap(ctrl_, that.ctrl_);
- swap(slots_, that.slots_);
- swap(size_, that.size_);
- swap(capacity_, that.capacity_);
- swap(growth_left(), that.growth_left());
+ swap(common(), that.common());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
- swap(infoz(), that.infoz());
SwapAlloc(alloc_ref(), that.alloc_ref(),
typename AllocTraits::propagate_on_container_swap{});
}
void rehash(size_t n) {
- if (n == 0 && capacity_ == 0) return;
- if (n == 0 && size_ == 0) {
- destroy_slots();
- infoz().RecordStorageChanged(0, 0);
- infoz().RecordClearedReservation();
+ if (n == 0 && capacity() == 0) return;
+ if (n == 0 && size() == 0) {
+ ClearBackingArray(common(), GetPolicyFunctions(),
+ /*reuse=*/false);
return;
}
@@ -1682,7 +1864,7 @@ class raw_hash_set {
// power-of-2-minus-1, so bitor is good enough.
auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
- if (n == 0 || m > capacity_) {
+ if (n == 0 || m > capacity()) {
resize(m);
// This is after resize, to ensure that we have completed the allocation
@@ -1727,9 +1909,9 @@ class raw_hash_set {
// Avoid probing if we won't be able to prefetch the addresses received.
#ifdef ABSL_INTERNAL_HAVE_PREFETCH
prefetch_heap_block();
- auto seq = probe(ctrl_, hash_ref()(key), capacity_);
- base_internal::PrefetchT0(ctrl_ + seq.offset());
- base_internal::PrefetchT0(slots_ + seq.offset());
+ auto seq = probe(common(), hash_ref()(key));
+ base_internal::PrefetchT0(control() + seq.offset());
+ base_internal::PrefetchT0(slot_array() + seq.offset());
#endif // ABSL_INTERNAL_HAVE_PREFETCH
}
@@ -1742,18 +1924,20 @@ class raw_hash_set {
// called heterogeneous key support.
template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hash) {
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(common(), hash);
+ slot_type* slot_ptr = slot_array();
+ const ctrl_t* ctrl = control();
while (true) {
- Group g{ctrl_ + seq.offset()};
+ Group g{ctrl + seq.offset()};
for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slots_ + seq.offset(i)))))
+ PolicyTraits::element(slot_ptr + seq.offset(i)))))
return iterator_at(seq.offset(i));
}
if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
seq.next();
- assert(seq.index() <= capacity_ && "full table!");
+ assert(seq.index() <= capacity() && "full table!");
}
}
template <class K = key_type>
@@ -1791,9 +1975,9 @@ class raw_hash_set {
return {it, it};
}
- size_t bucket_count() const { return capacity_; }
+ size_t bucket_count() const { return capacity(); }
float load_factor() const {
- return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
+ return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
}
float max_load_factor() const { return 1.0f; }
void max_load_factor(float) {
@@ -1880,7 +2064,8 @@ class raw_hash_set {
std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
auto res = s.find_or_prepare_insert(key);
if (res.second) {
- PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+ PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
+ &slot);
} else if (do_destroy) {
PolicyTraits::destroy(&s.alloc_ref(), &slot);
}
@@ -1896,102 +2081,43 @@ class raw_hash_set {
// This merely updates the pertinent control byte. This can be used in
// conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
- assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
- --size_;
- const size_t index = static_cast<size_t>(it.inner_.ctrl_ - ctrl_);
- const size_t index_before = (index - Group::kWidth) & capacity_;
- const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty();
- const auto empty_before = Group(ctrl_ + index_before).MaskEmpty();
-
- // We count how many consecutive non empties we have to the right and to the
- // left of `it`. If the sum is >= kWidth then there is at least one probe
- // window that might have seen a full group.
- bool was_never_full =
- empty_before && empty_after &&
- static_cast<size_t>(empty_after.TrailingZeros() +
- empty_before.LeadingZeros()) < Group::kWidth;
-
- SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
- capacity_, ctrl_, slots_, sizeof(slot_type));
- growth_left() += was_never_full;
- infoz().RecordErase();
+ EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
}
// Allocates a backing array for `self` and initializes its control bytes.
- // This reads `capacity_` and updates all other fields based on the result of
+ // This reads `capacity` and updates all other fields based on the result of
// the allocation.
//
- // This does not free the currently held array; `capacity_` must be nonzero.
- void initialize_slots() {
- assert(capacity_);
- // Folks with custom allocators often make unwarranted assumptions about the
- // behavior of their classes vis-a-vis trivial destructability and what
- // calls they will or wont make. Avoid sampling for people with custom
- // allocators to get us out of this mess. This is not a hard guarantee but
- // a workaround while we plan the exact guarantee we want to provide.
- //
+ // This does not free the currently held array; `capacity` must be nonzero.
+ inline void initialize_slots() {
// People are often sloppy with the exact type of their allocator (sometimes
// it has an extra const or is missing the pair, but rebinds made it work
- // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
- // bound more carefully.
- if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
- slots_ == nullptr) {
- infoz() = Sample(sizeof(slot_type));
- }
-
- char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
- &alloc_ref(),
- AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
- ctrl_ = reinterpret_cast<ctrl_t*>(mem);
- slots_ = reinterpret_cast<slot_type*>(
- mem + SlotOffset(capacity_, alignof(slot_type)));
- ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
- reset_growth_left();
- infoz().RecordStorageChanged(size_, capacity_);
- }
-
- // Destroys all slots in the backing array, frees the backing array, and
- // clears all top-level book-keeping data.
- //
- // This essentially implements `map = raw_hash_set();`.
- void destroy_slots() {
- if (!capacity_) return;
- for (size_t i = 0; i != capacity_; ++i) {
- if (IsFull(ctrl_[i])) {
- PolicyTraits::destroy(&alloc_ref(), slots_ + i);
- }
- }
-
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
- Deallocate<alignof(slot_type)>(
- &alloc_ref(), ctrl_,
- AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
- ctrl_ = EmptyGroup();
- slots_ = nullptr;
- size_ = 0;
- capacity_ = 0;
- growth_left() = 0;
+ // anyway).
+ using CharAlloc =
+ typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
+ InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
+ common(), CharAlloc(alloc_ref()));
}
- void resize(size_t new_capacity) {
+ ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
assert(IsValidCapacity(new_capacity));
- auto* old_ctrl = ctrl_;
- auto* old_slots = slots_;
- const size_t old_capacity = capacity_;
- capacity_ = new_capacity;
+ auto* old_ctrl = control();
+ auto* old_slots = slot_array();
+ const size_t old_capacity = common().capacity_;
+ common().capacity_ = new_capacity;
initialize_slots();
+ auto* new_slots = slot_array();
size_t total_probe_length = 0;
for (size_t i = 0; i != old_capacity; ++i) {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
- auto target = find_first_non_full(ctrl_, hash, capacity_);
+ auto target = find_first_non_full(common(), hash);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
- SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+ SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
+ PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
}
}
if (old_capacity) {
@@ -2007,70 +2133,10 @@ class raw_hash_set {
// Prunes control bytes to remove as many tombstones as possible.
//
// See the comment on `rehash_and_grow_if_necessary()`.
- void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
- assert(IsValidCapacity(capacity_));
- assert(!is_small(capacity_));
- // Algorithm:
- // - mark all DELETED slots as EMPTY
- // - mark all FULL slots as DELETED
- // - for each slot marked as DELETED
- // hash = Hash(element)
- // target = find_first_non_full(hash)
- // if target is in the same group
- // mark slot as FULL
- // else if target is EMPTY
- // transfer element to target
- // mark slot as EMPTY
- // mark target as FULL
- // else if target is DELETED
- // swap current element with target element
- // mark target as FULL
- // repeat procedure for current slot with moved from element (target)
- ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
- alignas(slot_type) unsigned char raw[sizeof(slot_type)];
- size_t total_probe_length = 0;
- slot_type* slot = reinterpret_cast<slot_type*>(&raw);
- for (size_t i = 0; i != capacity_; ++i) {
- if (!IsDeleted(ctrl_[i])) continue;
- const size_t hash = PolicyTraits::apply(
- HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
- const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
- const size_t new_i = target.offset;
- total_probe_length += target.probe_length;
-
- // Verify if the old and new i fall within the same group wrt the hash.
- // If they do, we don't need to move the object as it falls already in the
- // best probe we can.
- const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
- const auto probe_index = [probe_offset, this](size_t pos) {
- return ((pos - probe_offset) & capacity_) / Group::kWidth;
- };
-
- // Element doesn't move.
- if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
- SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
- continue;
- }
- if (IsEmpty(ctrl_[new_i])) {
- // Transfer element to the empty spot.
- // SetCtrl poisons/unpoisons the slots so we have to call it at the
- // right time.
- SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
- SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
- } else {
- assert(IsDeleted(ctrl_[new_i]));
- SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
- // Until we are done rehashing, DELETED marks previously FULL slots.
- // Swap i and new_i elements.
- PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
- PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
- PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
- --i; // repeat
- }
- }
- reset_growth_left();
- infoz().RecordRehash(total_probe_length);
+ inline void drop_deletes_without_resize() {
+ // Stack-allocate space for swapping elements.
+ alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
+ DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
}
// Called whenever the table *might* need to conditionally grow.
@@ -2079,14 +2145,13 @@ class raw_hash_set {
// growth is unnecessary, because vacating tombstones is beneficial for
// performance in the long-run.
void rehash_and_grow_if_necessary() {
- if (capacity_ == 0) {
- resize(1);
- } else if (capacity_ > Group::kWidth &&
- // Do these calcuations in 64-bit to avoid overflow.
- size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
+ const size_t cap = capacity();
+ if (cap > Group::kWidth &&
+        // Do these calculations in 64-bit to avoid overflow.
+        size() * uint64_t{32} <= cap * uint64_t{25}) {
// Squash DELETED without growing if there is enough capacity.
//
- // Rehash in place if the current size is <= 25/32 of capacity_.
+ // Rehash in place if the current size is <= 25/32 of capacity.
// Rationale for such a high factor: 1) drop_deletes_without_resize() is
// faster than resize, and 2) it takes quite a bit of work to add
// tombstones. In the worst case, seems to take approximately 4
@@ -2104,8 +2169,8 @@ class raw_hash_set {
//
// Here is output of an experiment using the BM_CacheInSteadyState
// benchmark running the old case (where we rehash-in-place only if we can
- // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place
- // if we can recover 3/32*capacity_).
+ // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
+ // if we can recover 3/32*capacity).
//
    // Note that although the worst-case number of rehashes jumped up from
    // 15 to 190, the number of operations per second is almost the same.
@@ -2128,23 +2193,24 @@ class raw_hash_set {
drop_deletes_without_resize();
} else {
// Otherwise grow the container.
- resize(capacity_ * 2 + 1);
+ resize(cap * 2 + 1);
}
}
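The 25/32 threshold can be sanity-checked numerically. A small sketch, with the group width and capacities assumed:

#include <cstddef>
#include <cstdint>
#include <iostream>

// Squash tombstones in place while size() is at most 25/32 of capacity,
// otherwise grow the table.
bool ShouldDropDeletesInPlace(std::size_t size, std::size_t capacity,
                              std::size_t group_width) {
  return capacity > group_width &&
         static_cast<std::uint64_t>(size) * 32 <=
             static_cast<std::uint64_t>(capacity) * 25;
}

int main() {
  for (int size : {64, 99, 100}) {
    std::cout << size << " of 127 -> "
              << (ShouldDropDeletesInPlace(static_cast<std::size_t>(size), 127,
                                           16)
                      ? "drop deletes"
                      : "grow")
              << "\n";  // 64 and 99 rehash in place, 100 grows
  }
}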
bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(common(), hash);
+ const ctrl_t* ctrl = control();
while (true) {
- Group g{ctrl_ + seq.offset()};
+ Group g{ctrl + seq.offset()};
for (uint32_t i : g.Match(H2(hash))) {
- if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
- elem))
+ if (ABSL_PREDICT_TRUE(
+ PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
return true;
}
if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
seq.next();
- assert(seq.index() <= capacity_ && "full table!");
+ assert(seq.index() <= capacity() && "full table!");
}
return false;
}
@@ -2169,18 +2235,19 @@ class raw_hash_set {
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
prefetch_heap_block();
auto hash = hash_ref()(key);
- auto seq = probe(ctrl_, hash, capacity_);
+ auto seq = probe(common(), hash);
+ const ctrl_t* ctrl = control();
while (true) {
- Group g{ctrl_ + seq.offset()};
+ Group g{ctrl + seq.offset()};
for (uint32_t i : g.Match(H2(hash))) {
if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
EqualElement<K>{key, eq_ref()},
- PolicyTraits::element(slots_ + seq.offset(i)))))
+ PolicyTraits::element(slot_array() + seq.offset(i)))))
return {seq.offset(i), false};
}
if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
seq.next();
- assert(seq.index() <= capacity_ && "full table!");
+ assert(seq.index() <= capacity() && "full table!");
}
return {prepare_insert(hash), true};
}
@@ -2190,16 +2257,15 @@ class raw_hash_set {
//
// REQUIRES: At least one non-full slot available.
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- auto target = find_first_non_full(ctrl_, hash, capacity_);
+ auto target = find_first_non_full(common(), hash);
if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
- !IsDeleted(ctrl_[target.offset]))) {
+ !IsDeleted(control()[target.offset]))) {
rehash_and_grow_if_necessary();
- target = find_first_non_full(ctrl_, hash, capacity_);
+ target = find_first_non_full(common(), hash);
}
- ++size_;
- growth_left() -= IsEmpty(ctrl_[target.offset]);
- SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
- sizeof(slot_type));
+ ++common().size_;
+ growth_left() -= IsEmpty(control()[target.offset]);
+ SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
@@ -2214,7 +2280,7 @@ class raw_hash_set {
// POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
template <class... Args>
void emplace_at(size_t i, Args&&... args) {
- PolicyTraits::construct(&alloc_ref(), slots_ + i,
+ PolicyTraits::construct(&alloc_ref(), slot_array() + i,
std::forward<Args>(args)...);
assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
@@ -2222,16 +2288,14 @@ class raw_hash_set {
"constructed value does not match the lookup key");
}
- iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
- const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+ iterator iterator_at(size_t i) { return {control() + i, slot_array() + i}; }
+ const_iterator iterator_at(size_t i) const {
+ return {control() + i, slot_array() + i};
+ }
private:
friend struct RawHashSetTestOnlyAccess;
- void reset_growth_left() {
- growth_left() = CapacityToGrowth(capacity()) - size_;
- }
-
// The number of slots we can still fill without needing to rehash.
//
// This is stored separately due to tombstones: we do not include tombstones
@@ -2242,49 +2306,76 @@ class raw_hash_set {
// side-effect.
//
// See `CapacityToGrowth()`.
- size_t& growth_left() { return settings_.template get<0>(); }
+ size_t& growth_left() { return common().growth_left_; }
// Prefetch the heap-allocated memory region to resolve potential TLB misses.
// This is intended to overlap with execution of calculating the hash for a
// key.
- void prefetch_heap_block() const {
- base_internal::PrefetchT2(ctrl_);
- }
+ void prefetch_heap_block() const { base_internal::PrefetchT2(control()); }
+
+ CommonFields& common() { return settings_.template get<0>(); }
+ const CommonFields& common() const { return settings_.template get<0>(); }
- HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
+ ctrl_t* control() const { return common().control_; }
+ slot_type* slot_array() const {
+ return static_cast<slot_type*>(common().slots_);
+ }
+ HashtablezInfoHandle& infoz() { return common().infoz(); }
- hasher& hash_ref() { return settings_.template get<2>(); }
- const hasher& hash_ref() const { return settings_.template get<2>(); }
- key_equal& eq_ref() { return settings_.template get<3>(); }
- const key_equal& eq_ref() const { return settings_.template get<3>(); }
- allocator_type& alloc_ref() { return settings_.template get<4>(); }
+ hasher& hash_ref() { return settings_.template get<1>(); }
+ const hasher& hash_ref() const { return settings_.template get<1>(); }
+ key_equal& eq_ref() { return settings_.template get<2>(); }
+ const key_equal& eq_ref() const { return settings_.template get<2>(); }
+ allocator_type& alloc_ref() { return settings_.template get<3>(); }
const allocator_type& alloc_ref() const {
- return settings_.template get<4>();
+ return settings_.template get<3>();
}
- // TODO(alkis): Investigate removing some of these fields:
- // - ctrl/slots can be derived from each other
- // - size can be moved into the slot array
+ // Make type-specific functions for this type's PolicyFunctions struct.
+ static size_t hash_slot_fn(void* set, void* slot) {
+ auto* h = static_cast<raw_hash_set*>(set);
+ return PolicyTraits::apply(
+ HashElement{h->hash_ref()},
+ PolicyTraits::element(static_cast<slot_type*>(slot)));
+ }
+ static void transfer_slot_fn(void* set, void* dst, void* src) {
+ auto* h = static_cast<raw_hash_set*>(set);
+ PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
+ static_cast<slot_type*>(src));
+ }
+ // Note: dealloc_fn will only be used if we have a non-standard allocator.
+ static void dealloc_fn(void* set, const PolicyFunctions&, ctrl_t* ctrl,
+ void* slot_mem, size_t n) {
+ auto* h = static_cast<raw_hash_set*>(set);
- // The control bytes (and, also, a pointer to the base of the backing array).
- //
- // This contains `capacity_ + 1 + NumClonedBytes()` entries, even
- // when the table is empty (hence EmptyGroup).
- ctrl_t* ctrl_ = EmptyGroup();
- // The beginning of the slots, located at `SlotOffset()` bytes after
- // `ctrl_`. May be null for empty tables.
- slot_type* slots_ = nullptr;
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slot_mem, sizeof(slot_type) * n);
- // The number of filled slots.
- size_t size_ = 0;
+ Deallocate<alignof(slot_type)>(
+ &h->alloc_ref(), ctrl,
+ AllocSize(n, sizeof(slot_type), alignof(slot_type)));
+ }
+
+ static const PolicyFunctions& GetPolicyFunctions() {
+ static constexpr PolicyFunctions value = {
+ sizeof(slot_type),
+ &raw_hash_set::hash_slot_fn,
+ PolicyTraits::transfer_uses_memcpy()
+ ? TransferRelocatable<sizeof(slot_type)>
+ : &raw_hash_set::transfer_slot_fn,
+ (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
+ ? &DeallocateStandard<alignof(slot_type)>
+ : &raw_hash_set::dealloc_fn),
+ };
+ return value;
+ }
- // The total number of available slots.
- size_t capacity_ = 0;
- absl::container_internal::CompressedTuple<size_t /* growth_left */,
- HashtablezInfoHandle, hasher,
- key_equal, allocator_type>
- settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{},
- allocator_type{}};
+ // Bundle together CommonFields plus other objects which might be empty.
+ // CompressedTuple will ensure that sizeof is not affected by any of the empty
+ // fields that occur after CommonFields.
+ absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
+ allocator_type>
+ settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
};
// Erases all elements that satisfy the predicate `pred` from the container `c`.
@@ -2312,14 +2403,15 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) {
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
- auto seq = probe(set.ctrl_, hash, set.capacity_);
+ auto seq = probe(set.common(), hash);
+ const ctrl_t* ctrl = set.control();
while (true) {
- container_internal::Group g{set.ctrl_ + seq.offset()};
+ container_internal::Group g{ctrl + seq.offset()};
for (uint32_t i : g.Match(container_internal::H2(hash))) {
if (Traits::apply(
typename Set::template EqualElement<typename Set::key_type>{
key, set.eq_ref()},
- Traits::element(set.slots_ + seq.offset(i))))
+ Traits::element(set.slot_array() + seq.offset(i))))
return num_probes;
++num_probes;
}
@@ -2330,7 +2422,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
}
static size_t AllocatedByteSize(const Set& c) {
- size_t capacity = c.capacity_;
+ size_t capacity = c.capacity();
if (capacity == 0) return 0;
size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
@@ -2338,9 +2430,10 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
if (per_slot != ~size_t{}) {
m += per_slot * c.size();
} else {
+ const ctrl_t* ctrl = c.control();
for (size_t i = 0; i != capacity; ++i) {
- if (container_internal::IsFull(c.ctrl_[i])) {
- m += Traits::space_used(c.slots_ + i);
+ if (container_internal::IsFull(ctrl[i])) {
+ m += Traits::space_used(c.slot_array() + i);
}
}
}
diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc
index e17ba9b4..15deddcf 100644
--- a/absl/container/internal/raw_hash_set_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_benchmark.cc
@@ -477,6 +477,24 @@ void BM_DropDeletes(benchmark::State& state) {
}
BENCHMARK(BM_DropDeletes);
+void BM_Resize(benchmark::State& state) {
+ // For now just measure a small cheap hash table since we
+ // are mostly interested in the overhead of type-erasure
+ // in resize().
+ constexpr int kElements = 64;
+ const int kCapacity = kElements * 2;
+
+ IntTable table;
+ for (int i = 0; i < kElements; i++) {
+ table.insert(i);
+ }
+ for (auto unused : state) {
+ table.rehash(0);
+ table.rehash(kCapacity);
+ }
+}
+BENCHMARK(BM_Resize);
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index eec9da43..eb0757b2 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -56,8 +56,8 @@ namespace container_internal {
struct RawHashSetTestOnlyAccess {
template <typename C>
- static auto GetSlots(const C& c) -> decltype(c.slots_) {
- return c.slots_;
+ static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
+ return c.slot_array();
}
};
@@ -399,7 +399,7 @@ struct StringEq : std::equal_to<absl::string_view> {
struct StringTable
: raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
using Base = typename StringTable::raw_hash_set;
- StringTable() {}
+ StringTable() = default;
using Base::Base;
};
@@ -419,7 +419,7 @@ struct Uint8Table
template <typename T>
struct CustomAlloc : std::allocator<T> {
- CustomAlloc() {}
+ CustomAlloc() = default;
template <typename U>
explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}
@@ -446,7 +446,7 @@ struct BadFastHash {
struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
std::allocator<int>> {
using Base = typename BadTable::raw_hash_set;
- BadTable() {}
+ BadTable() = default;
using Base::Base;
};
@@ -455,12 +455,12 @@ TEST(Table, EmptyFunctorOptimization) {
static_assert(std::is_empty<std::allocator<int>>::value, "");
struct MockTable {
+ void* infoz;
void* ctrl;
void* slots;
size_t size;
size_t capacity;
size_t growth_left;
- void* infoz;
};
struct MockTableInfozDisabled {
void* ctrl;
@@ -1003,7 +1003,7 @@ TEST(Table, ClearBug) {
// We are checking that original and second are close enough to each other
// that they are probably still in the same group. This is not strictly
// guaranteed.
- EXPECT_LT(std::abs(original - second),
+ EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
capacity * sizeof(IntTable::value_type));
}
@@ -1080,19 +1080,6 @@ struct ProbeStats {
// Ratios total_probe_length/size for every tested table.
std::vector<double> single_table_ratios;
- friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
- ProbeStats res = a;
- res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
- b.all_probes_histogram.size()));
- std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
- res.all_probes_histogram.begin(),
- res.all_probes_histogram.begin(), std::plus<size_t>());
- res.single_table_ratios.insert(res.single_table_ratios.end(),
- b.single_table_ratios.begin(),
- b.single_table_ratios.end());
- return res;
- }
-
// Average ratio total_probe_length/size over tables.
double AvgRatio() const {
return std::accumulate(single_table_ratios.begin(),
@@ -1555,7 +1542,7 @@ TEST(Table, CopyConstructWithAlloc) {
struct ExplicitAllocIntTable
: raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
std::equal_to<int64_t>, Alloc<int64_t>> {
- ExplicitAllocIntTable() {}
+ ExplicitAllocIntTable() = default;
};
TEST(Table, AllocWithExplicitCtor) {
@@ -1943,7 +1930,7 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(res.inserted);
EXPECT_THAT(*res.position, Pair(k0, ""));
EXPECT_TRUE(res.node);
- EXPECT_FALSE(node);
+ EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move)
}
TEST(Nodes, HintInsert) {
@@ -1953,7 +1940,7 @@ TEST(Nodes, HintInsert) {
auto it = t.insert(t.begin(), std::move(node));
EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
EXPECT_EQ(*it, 1);
- EXPECT_FALSE(node);
+ EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move)
node = t.extract(2);
EXPECT_THAT(t, UnorderedElementsAre(1, 3));
@@ -1963,7 +1950,7 @@ TEST(Nodes, HintInsert) {
it = t.insert(t.begin(), std::move(node));
EXPECT_EQ(*it, 2);
// The node was not emptied by the insert call.
- EXPECT_TRUE(node);
+ EXPECT_TRUE(node); // NOLINT(bugprone-use-after-move)
}
IntTable MakeSimpleTable(size_t size) {
@@ -2036,20 +2023,75 @@ TEST(Table, UnstablePointers) {
EXPECT_NE(old_ptr, addr(0));
}
-// Confirm that we assert if we try to erase() end().
-TEST(TableDeathTest, EraseOfEndAsserts) {
+bool IsAssertEnabled() {
// Use an assert with side-effects to figure out if they are actually enabled.
bool assert_enabled = false;
assert([&]() { // NOLINT
assert_enabled = true;
return true;
}());
- if (!assert_enabled) return;
+ return assert_enabled;
+}
+
+TEST(TableDeathTest, InvalidIteratorAsserts) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- constexpr char kDeathMsg[] = "erase.. called on invalid iterator";
- EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
+ EXPECT_DEATH_IF_SUPPORTED(
+ t.erase(t.end()),
+ "erase.* called on invalid iterator. The iterator might be an "
+ "end.*iterator or may have been default constructed.");
+ typename IntTable::iterator iter;
+ EXPECT_DEATH_IF_SUPPORTED(
+ ++iter,
+ "operator.* called on invalid iterator. The iterator might be an "
+ "end.*iterator or may have been default constructed.");
+ t.insert(0);
+ iter = t.begin();
+ t.erase(iter);
+ EXPECT_DEATH_IF_SUPPORTED(
+ ++iter,
+ "operator.* called on invalid iterator. The element might have been "
+ "erased or .*the table might have rehashed.");
+}
+
+TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
+ if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+
+ IntTable t;
+ t.insert(1);
+ t.insert(2);
+ t.insert(3);
+ auto iter1 = t.begin();
+ auto iter2 = std::next(iter1);
+ ASSERT_NE(iter1, t.end());
+ ASSERT_NE(iter2, t.end());
+ t.erase(iter1);
+ // Extra simple "regexp" as regexp support is highly varied across platforms.
+ const char* const kErasedDeathMessage =
+ "Invalid iterator comparison. The element might have .*been erased or "
+ "the table might have rehashed.";
+ EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
+ EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
+ t.erase(iter2);
+ EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
+
+ IntTable t1, t2;
+ t1.insert(0);
+ t2.insert(0);
+ iter1 = t1.begin();
+ iter2 = t2.begin();
+ const char* const kContainerDiffDeathMessage =
+ "Invalid iterator comparison. The iterators may be from different "
+ ".*containers or the container might have rehashed.";
+ EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
+ EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
+
+ for (int i = 0; i < 10; ++i) t1.insert(i);
+ // There should have been a rehash in t1.
+ EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()),
+ kContainerDiffDeathMessage);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
diff --git a/absl/container/node_hash_map_test.cc b/absl/container/node_hash_map_test.cc
index e941a836..9bcf470c 100644
--- a/absl/container/node_hash_map_test.cc
+++ b/absl/container/node_hash_map_test.cc
@@ -272,6 +272,14 @@ TEST(NodeHashMap, NodeHandleMutableKeyAccess) {
}
#endif
+TEST(NodeHashMap, RecursiveTypeCompiles) {
+ struct RecursiveType {
+ node_hash_map<int, RecursiveType> m;
+ };
+ RecursiveType t;
+ t.m[0] = RecursiveType{};
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END