author    Evan Brown <ezb@google.com>  2024-03-06 10:00:52 -0800
committer Copybara-Service <copybara-worker@google.com>  2024-03-06 10:01:43 -0800
commit    1449c9a106b090f61441ba245c781d7d2f89000c (patch)
tree      94d6ec1a8980dfa6605f9b0e50e549e3e5761f0b /absl/container/internal/raw_hash_set_test.cc
parent    6bf3c73fdfeb62733d2a0f81b9846ff77f3a3b9f (diff)
Implement small object optimization in swisstable - disabled for now.
Details:
- We use the space for control/slots pointers as the inline buffer.
- We use a max inline capacity of 1 to make the implementation much simpler
  and to avoid having to randomize the iteration order for inline tables.
- For iteration of inline tables, we introduce the kSooControl buffer, which
  has just 1 full control byte followed by 1 sentinel control byte so that
  incrementing yields an end() iterator. We don't access kSooControl during
  lookups - only iteration.

PiperOrigin-RevId: 613253492
Change-Id: Id98ff11842f8bef27ac7ed88138dc03b46ce4fa6
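For intuition, here is a minimal stand-alone sketch of the kSooControl trick
described above. It is simplified from the real ctrl_t machinery in
raw_hash_set.h (only the kEmpty/kSentinel values are kept), and the raw loop
stands in for iterator increment:

#include <cstdint>
#include <iostream>

// Control bytes, simplified: negative values are special; values in
// [0, 127] mean "full" and normally store the 7-bit H2 hash.
enum class ctrl_t : int8_t {
  kEmpty = -128,
  kSentinel = -1,
};
inline bool IsFull(ctrl_t c) { return static_cast<int8_t>(c) >= 0; }

// One full byte followed by one sentinel: an iterator pointing at the
// single inline slot advances past the full byte, hits the sentinel,
// and thereby becomes end().
constexpr ctrl_t kSooControl[2] = {static_cast<ctrl_t>(0), ctrl_t::kSentinel};

int main() {
  int64_t inline_slot = 42;  // the one SOO slot, stored where the
                             // control/slots pointers would otherwise live
  for (const ctrl_t* ctrl = kSooControl; *ctrl != ctrl_t::kSentinel; ++ctrl) {
    if (IsFull(*ctrl)) std::cout << inline_slot << "\n";  // prints 42 once
  }
}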
Diffstat (limited to 'absl/container/internal/raw_hash_set_test.cc')
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc | 450
1 file changed, 324 insertions(+), 126 deletions(-)
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 109340c7..9180db59 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -394,7 +394,7 @@ TEST(Group, CountLeadingEmptyOrDeleted) {
}
}
-template <class T, bool kTransferable = false>
+template <class T, bool kTransferable = false, bool kSoo = false>
struct ValuePolicy {
using slot_type = T;
using key_type = T;
@@ -433,6 +433,8 @@ struct ValuePolicy {
static constexpr HashSlotFn get_hash_slot_fn() {
return nullptr;
}
+
+ static constexpr bool soo_enabled() { return kSoo; }
};
using IntPolicy = ValuePolicy<int64_t>;
@@ -440,6 +442,44 @@ using Uint8Policy = ValuePolicy<uint8_t>;
using TranferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>;
+// For testing SOO.
+template <int N>
+class SizedValue {
+ public:
+ SizedValue(int64_t v) { // NOLINT
+ vals_[0] = v;
+ }
+ SizedValue() : SizedValue(0) {}
+ SizedValue(const SizedValue&) = default;
+ SizedValue& operator=(const SizedValue&) = default;
+
+ int64_t operator*() const {
+ // Suppress erroneous uninitialized memory errors on GCC.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+ return vals_[0];
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+ }
+ explicit operator int() const { return **this; }
+ explicit operator int64_t() const { return **this; }
+
+ template <typename H>
+ friend H AbslHashValue(H h, SizedValue sv) {
+ return H::combine(std::move(h), *sv);
+ }
+ bool operator==(const SizedValue& rhs) const { return **this == *rhs; }
+
+ private:
+ int64_t vals_[N / sizeof(int64_t)];
+};
+template <int N, bool kSoo>
+using SizedValuePolicy =
+ ValuePolicy<SizedValue<N>, /*kTransferable=*/true, kSoo>;
+
class StringPolicy {
template <class F, class K, class V,
class = typename std::enable_if<
@@ -517,9 +557,9 @@ struct StringTable
using Base::Base;
};
-template <typename T, bool kTransferable = false>
+template <typename T, bool kTransferable = false, bool kSoo = false>
struct ValueTable
- : raw_hash_set<ValuePolicy<T, kTransferable>, hash_default_hash<T>,
+ : raw_hash_set<ValuePolicy<T, kTransferable, kSoo>, hash_default_hash<T>,
std::equal_to<T>, std::allocator<T>> {
using Base = typename ValueTable::raw_hash_set;
using Base::Base;
@@ -530,6 +570,11 @@ using Uint8Table = ValueTable<uint8_t>;
using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
+constexpr size_t kNonSooSize = sizeof(HeapOrSoo) + 8;
+static_assert(sizeof(SizedValue<kNonSooSize>) >= kNonSooSize, "too small");
+using NonSooIntTable = ValueTable<SizedValue<kNonSooSize>>;
+using SooIntTable = ValueTable<int64_t, /*kTransferable=*/true, /*kSoo=*/true>;
+
template <typename T>
struct CustomAlloc : std::allocator<T> {
CustomAlloc() = default;
@@ -579,6 +624,16 @@ struct FreezableAlloc : std::allocator<T> {
bool* frozen;
};
+template <int N>
+struct FreezableSizedValueSooTable
+ : raw_hash_set<SizedValuePolicy<N, /*kSoo=*/true>,
+ container_internal::hash_default_hash<SizedValue<N>>,
+ std::equal_to<SizedValue<N>>,
+ FreezableAlloc<SizedValue<N>>> {
+ using Base = typename FreezableSizedValueSooTable::raw_hash_set;
+ using Base::Base;
+};
+
struct BadFastHash {
template <class T>
size_t operator()(const T&) const {
@@ -649,20 +704,25 @@ TEST(Table, EmptyFunctorOptimization) {
std::equal_to<absl::string_view>, std::allocator<int>>));
}
-TEST(Table, Empty) {
- IntTable t;
+template <class TableType>
+class SooTest : public testing::Test {};
+
+TYPED_TEST_SUITE_P(SooTest);
+
+TYPED_TEST_P(SooTest, Empty) {
+ TypeParam t;
EXPECT_EQ(0, t.size());
EXPECT_TRUE(t.empty());
}
-TEST(Table, LookupEmpty) {
- IntTable t;
+TYPED_TEST_P(SooTest, LookupEmpty) {
+ TypeParam t;
auto it = t.find(0);
EXPECT_TRUE(it == t.end());
}
-TEST(Table, Insert1) {
- IntTable t;
+TYPED_TEST_P(SooTest, Insert1) {
+ TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
@@ -671,8 +731,8 @@ TEST(Table, Insert1) {
EXPECT_THAT(*t.find(0), 0);
}
-TEST(Table, Insert2) {
- IntTable t;
+TYPED_TEST_P(SooTest, Insert2) {
+ TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
@@ -734,9 +794,9 @@ TEST(Table, InsertCollisionAndFindAfterDelete) {
EXPECT_TRUE(t.empty());
}
-TEST(Table, EraseInSmallTables) {
+TYPED_TEST_P(SooTest, EraseInSmallTables) {
for (int64_t size = 0; size < 64; ++size) {
- IntTable t;
+ TypeParam t;
for (int64_t i = 0; i < size; ++i) {
t.insert(i);
}
@@ -751,8 +811,8 @@ TEST(Table, EraseInSmallTables) {
}
}
-TEST(Table, InsertWithinCapacity) {
- IntTable t;
+TYPED_TEST_P(SooTest, InsertWithinCapacity) {
+ TypeParam t;
t.reserve(10);
const size_t original_capacity = t.capacity();
const auto addr = [&](int i) {
@@ -841,7 +901,8 @@ TYPED_TEST_P(SmallTableResizeTest, ResizeReduceSmallTables) {
REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable,
ResizeGrowSmallTables, ResizeReduceSmallTables);
-using SmallTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
+using SmallTableTypes =
+ ::testing::Types<IntTable, TransferableIntTable, SooIntTable>;
INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest,
SmallTableResizeTest, SmallTableTypes);
@@ -863,14 +924,14 @@ TEST(Table, LazyEmplace) {
EXPECT_THAT(*it, Pair("abc", "ABC"));
}
-TEST(Table, ContainsEmpty) {
- IntTable t;
+TYPED_TEST_P(SooTest, ContainsEmpty) {
+ TypeParam t;
EXPECT_FALSE(t.contains(0));
}
-TEST(Table, Contains1) {
- IntTable t;
+TYPED_TEST_P(SooTest, Contains1) {
+ TypeParam t;
EXPECT_TRUE(t.insert(0).second);
EXPECT_TRUE(t.contains(0));
@@ -880,8 +941,8 @@ TEST(Table, Contains1) {
EXPECT_FALSE(t.contains(0));
}
-TEST(Table, Contains2) {
- IntTable t;
+TYPED_TEST_P(SooTest, Contains2) {
+ TypeParam t;
EXPECT_TRUE(t.insert(0).second);
EXPECT_TRUE(t.contains(0));
@@ -1178,8 +1239,8 @@ TEST(Table, RehashWithNoResize) {
}
}
-TEST(Table, InsertEraseStressTest) {
- IntTable t;
+TYPED_TEST_P(SooTest, InsertEraseStressTest) {
+ TypeParam t;
const size_t kMinElementCount = 250;
std::deque<int> keys;
size_t i = 0;
@@ -1207,32 +1268,33 @@ TEST(Table, InsertOverloads) {
Pair("DEF", "!!!")));
}
-TEST(Table, LargeTable) {
- IntTable t;
+TYPED_TEST_P(SooTest, LargeTable) {
+ TypeParam t;
for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
- for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
+ for (int64_t i = 0; i != 100000; ++i)
+ ASSERT_EQ(i << 40, static_cast<int64_t>(*t.find(i << 40)));
}
// Timeout if copy is quadratic as it was in Rust.
-TEST(Table, EnsureNonQuadraticAsInRust) {
+TYPED_TEST_P(SooTest, EnsureNonQuadraticAsInRust) {
static const size_t kLargeSize = 1 << 15;
- IntTable t;
+ TypeParam t;
for (size_t i = 0; i != kLargeSize; ++i) {
t.insert(i);
}
// If this is quadratic, the test will timeout.
- IntTable t2;
+ TypeParam t2;
for (const auto& entry : t) t2.insert(entry);
}
-TEST(Table, ClearBug) {
+TYPED_TEST_P(SooTest, ClearBug) {
if (SwisstableGenerationsEnabled()) {
GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
}
- IntTable t;
+ TypeParam t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
constexpr size_t max_size = capacity / 2 + 1;
for (size_t i = 0; i < max_size; ++i) {
@@ -1251,11 +1313,11 @@ TEST(Table, ClearBug) {
// that they are probably still in the same group. This is not strictly
// guaranteed.
EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
- capacity * sizeof(IntTable::value_type));
+ capacity * sizeof(typename TypeParam::value_type));
}
-TEST(Table, Erase) {
- IntTable t;
+TYPED_TEST_P(SooTest, Erase) {
+ TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
@@ -1265,8 +1327,8 @@ TEST(Table, Erase) {
EXPECT_TRUE(t.find(0) == t.end());
}
-TEST(Table, EraseMaintainsValidIterator) {
- IntTable t;
+TYPED_TEST_P(SooTest, EraseMaintainsValidIterator) {
+ TypeParam t;
const int kNumElements = 100;
for (int i = 0; i < kNumElements; i++) {
EXPECT_TRUE(t.emplace(i).second);
@@ -1284,8 +1346,8 @@ TEST(Table, EraseMaintainsValidIterator) {
EXPECT_EQ(num_erase_calls, kNumElements);
}
-TEST(Table, EraseBeginEnd) {
- IntTable t;
+TYPED_TEST_P(SooTest, EraseBeginEnd) {
+ TypeParam t;
for (int i = 0; i < 10; ++i) t.insert(i);
EXPECT_EQ(t.size(), 10);
t.erase(t.begin(), t.end());
@@ -1684,8 +1746,8 @@ TEST(Table, EraseInsertProbing) {
EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
}
-TEST(Table, Clear) {
- IntTable t;
+TYPED_TEST_P(SooTest, Clear) {
+ TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
t.clear();
EXPECT_TRUE(t.find(0) == t.end());
@@ -1697,13 +1759,13 @@ TEST(Table, Clear) {
EXPECT_TRUE(t.find(0) == t.end());
}
-TEST(Table, Swap) {
- IntTable t;
+TYPED_TEST_P(SooTest, Swap) {
+ TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
auto res = t.emplace(0);
EXPECT_TRUE(res.second);
EXPECT_EQ(1, t.size());
- IntTable u;
+ TypeParam u;
t.swap(u);
EXPECT_EQ(0, t.size());
EXPECT_EQ(1, u.size());
@@ -1711,8 +1773,8 @@ TEST(Table, Swap) {
EXPECT_THAT(*u.find(0), 0);
}
-TEST(Table, Rehash) {
- IntTable t;
+TYPED_TEST_P(SooTest, Rehash) {
+ TypeParam t;
EXPECT_TRUE(t.find(0) == t.end());
t.emplace(0);
t.emplace(1);
@@ -1723,8 +1785,8 @@ TEST(Table, Rehash) {
EXPECT_THAT(*t.find(1), 1);
}
-TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
- IntTable t;
+TYPED_TEST_P(SooTest, RehashDoesNotRehashWhenNotNecessary) {
+ TypeParam t;
t.emplace(0);
t.emplace(1);
auto* p = &*t.find(0);
@@ -1732,14 +1794,15 @@ TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
EXPECT_EQ(p, &*t.find(0));
}
+// Following two tests use non-SOO table because they test for 0 capacity.
TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
- IntTable t;
+ NonSooIntTable t;
t.rehash(0);
EXPECT_EQ(0, t.bucket_count());
}
TEST(Table, RehashZeroDeallocatesEmptyTable) {
- IntTable t;
+ NonSooIntTable t;
t.emplace(0);
t.clear();
EXPECT_NE(0, t.bucket_count());
@@ -1747,8 +1810,8 @@ TEST(Table, RehashZeroDeallocatesEmptyTable) {
EXPECT_EQ(0, t.bucket_count());
}
-TEST(Table, RehashZeroForcesRehash) {
- IntTable t;
+TYPED_TEST_P(SooTest, RehashZeroForcesRehash) {
+ TypeParam t;
t.emplace(0);
t.emplace(1);
auto* p = &*t.find(0);
@@ -1764,33 +1827,33 @@ TEST(Table, ConstructFromInitList) {
StringTable t = {P(), Q(), {}, {{}, {}}};
}
-TEST(Table, CopyConstruct) {
- IntTable t;
+TYPED_TEST_P(SooTest, CopyConstruct) {
+ TypeParam t;
t.emplace(0);
EXPECT_EQ(1, t.size());
{
- IntTable u(t);
+ TypeParam u(t);
EXPECT_EQ(1, u.size());
EXPECT_THAT(*u.find(0), 0);
}
{
- IntTable u{t};
+ TypeParam u{t};
EXPECT_EQ(1, u.size());
EXPECT_THAT(*u.find(0), 0);
}
{
- IntTable u = t;
+ TypeParam u = t;
EXPECT_EQ(1, u.size());
EXPECT_THAT(*u.find(0), 0);
}
}
-TEST(Table, CopyDifferentSizes) {
- IntTable t;
+TYPED_TEST_P(SooTest, CopyDifferentSizes) {
+ TypeParam t;
for (int i = 0; i < 100; ++i) {
t.emplace(i);
- IntTable c = t;
+ TypeParam c = t;
for (int j = 0; j <= i; ++j) {
ASSERT_TRUE(c.find(j) != c.end()) << "i=" << i << " j=" << j;
}
@@ -1799,16 +1862,16 @@ TEST(Table, CopyDifferentSizes) {
}
}
-TEST(Table, CopyDifferentCapacities) {
+TYPED_TEST_P(SooTest, CopyDifferentCapacities) {
for (int cap = 1; cap < 100; cap = cap * 2 + 1) {
- IntTable t;
+ TypeParam t;
t.reserve(static_cast<size_t>(cap));
for (int i = 0; i <= cap; ++i) {
t.emplace(i);
if (i != cap && i % 5 != 0) {
continue;
}
- IntTable c = t;
+ TypeParam c = t;
for (int j = 0; j <= i; ++j) {
ASSERT_TRUE(c.find(j) != c.end())
<< "cap=" << cap << " i=" << i << " j=" << j;
@@ -1948,8 +2011,8 @@ TEST(Table, Equality3) {
EXPECT_NE(u, t);
}
-TEST(Table, NumDeletedRegression) {
- IntTable t;
+TYPED_TEST_P(SooTest, NumDeletedRegression) {
+ TypeParam t;
t.emplace(0);
t.erase(t.find(0));
// construct over a deleted slot.
@@ -1957,8 +2020,8 @@ TEST(Table, NumDeletedRegression) {
t.clear();
}
-TEST(Table, FindFullDeletedRegression) {
- IntTable t;
+TYPED_TEST_P(SooTest, FindFullDeletedRegression) {
+ TypeParam t;
for (int i = 0; i < 1000; ++i) {
t.emplace(i);
t.erase(t.find(i));
@@ -1966,17 +2029,17 @@ TEST(Table, FindFullDeletedRegression) {
EXPECT_EQ(0, t.size());
}
-TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+TYPED_TEST_P(SooTest, ReplacingDeletedSlotDoesNotRehash) {
size_t n;
{
// Compute n such that n is the maximum number of elements before rehash.
- IntTable t;
+ TypeParam t;
t.emplace(0);
size_t c = t.bucket_count();
for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
--n;
}
- IntTable t;
+ TypeParam t;
t.rehash(n);
const size_t c = t.bucket_count();
for (size_t i = 0; i != n; ++i) t.emplace(i);
@@ -2227,8 +2290,8 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node); // NOLINT(bugprone-use-after-move)
}
-TEST(Nodes, HintInsert) {
- IntTable t = {1, 2, 3};
+TYPED_TEST_P(SooTest, HintInsert) {
+ TypeParam t = {1, 2, 3};
auto node = t.extract(1);
EXPECT_THAT(t, UnorderedElementsAre(2, 3));
auto it = t.insert(t.begin(), std::move(node));
@@ -2247,14 +2310,18 @@ TEST(Nodes, HintInsert) {
EXPECT_TRUE(node); // NOLINT(bugprone-use-after-move)
}
-IntTable MakeSimpleTable(size_t size) {
- IntTable t;
+template <typename T>
+T MakeSimpleTable(size_t size) {
+ T t;
while (t.size() < size) t.insert(t.size());
return t;
}
-std::vector<int> OrderOfIteration(const IntTable& t) {
- return {t.begin(), t.end()};
+template <typename T>
+std::vector<int> OrderOfIteration(const T& t) {
+ std::vector<int> res;
+ for (auto i : t) res.push_back(static_cast<int>(i));
+ return res;
}
// These IterationOrderChanges tests depend on non-deterministic behavior.
@@ -2263,15 +2330,15 @@ std::vector<int> OrderOfIteration(const IntTable& t) {
// we are touching different memory pages to cause the ordering to change.
// We also need to keep the old tables around to avoid getting the same memory
// blocks over and over.
-TEST(Table, IterationOrderChangesByInstance) {
+TYPED_TEST_P(SooTest, IterationOrderChangesByInstance) {
for (size_t size : {2, 6, 12, 20}) {
- const auto reference_table = MakeSimpleTable(size);
+ const auto reference_table = MakeSimpleTable<TypeParam>(size);
const auto reference = OrderOfIteration(reference_table);
- std::vector<IntTable> tables;
+ std::vector<TypeParam> tables;
bool found_difference = false;
for (int i = 0; !found_difference && i < 5000; ++i) {
- tables.push_back(MakeSimpleTable(size));
+ tables.push_back(MakeSimpleTable<TypeParam>(size));
found_difference = OrderOfIteration(tables.back()) != reference;
}
if (!found_difference) {
@@ -2282,7 +2349,7 @@ TEST(Table, IterationOrderChangesByInstance) {
}
}
-TEST(Table, IterationOrderChangesOnRehash) {
+TYPED_TEST_P(SooTest, IterationOrderChangesOnRehash) {
// We test different sizes with many small numbers, because small table
// resize has a different codepath.
// Note: iteration order for size() <= 1 is always the same.
@@ -2291,10 +2358,10 @@ TEST(Table, IterationOrderChangesOnRehash) {
size_t{0}, // Force rehash is guaranteed.
size * 10 // Rehash to the larger capacity is guaranteed.
}) {
- std::vector<IntTable> garbage;
+ std::vector<TypeParam> garbage;
bool ok = false;
for (int i = 0; i < 5000; ++i) {
- auto t = MakeSimpleTable(size);
+ auto t = MakeSimpleTable<TypeParam>(size);
const auto reference = OrderOfIteration(t);
// Force rehash.
t.rehash(rehash_size);
@@ -2315,8 +2382,8 @@ TEST(Table, IterationOrderChangesOnRehash) {
// Verify that pointers are invalidated as soon as a second element is inserted.
// This prevents dependency on pointer stability on small tables.
-TEST(Table, UnstablePointers) {
- IntTable table;
+TYPED_TEST_P(SooTest, UnstablePointers) {
+ TypeParam table;
const auto addr = [&](int i) {
return reinterpret_cast<uintptr_t>(&*table.find(i));
@@ -2335,11 +2402,11 @@ TEST(TableDeathTest, InvalidIteratorAsserts) {
if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
GTEST_SKIP() << "Assertions not enabled.";
- IntTable t;
+ NonSooIntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
"erase.* called on end.. iterator.");
- typename IntTable::iterator iter;
+ typename NonSooIntTable::iterator iter;
EXPECT_DEATH_IF_SUPPORTED(
++iter, "operator.* called on default-constructed iterator.");
t.insert(0);
@@ -2353,6 +2420,22 @@ TEST(TableDeathTest, InvalidIteratorAsserts) {
EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
}
+TEST(TableDeathTest, InvalidIteratorAssertsSoo) {
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
+
+ SooIntTable t;
+ // Extra simple "regexp" as regexp support is highly varied across platforms.
+ EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
+ "erase.* called on end.. iterator.");
+ typename SooIntTable::iterator iter;
+ EXPECT_DEATH_IF_SUPPORTED(
+ ++iter, "operator.* called on default-constructed iterator.");
+
+ // We can't detect the erased iterator case as invalid in SOO mode because
+ // the control is static constant.
+}
+
// Invalid iterator use can trigger use-after-free in asan/hwasan,
// use-of-uninitialized-value in msan, or invalidated iterator assertions.
constexpr const char* kInvalidIteratorDeathMessage =
@@ -2366,11 +2449,11 @@ constexpr bool kMsvc = true;
constexpr bool kMsvc = false;
#endif
-TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
+TYPED_TEST_P(SooTest, IteratorInvalidAssertsEqualityOperator) {
if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
GTEST_SKIP() << "Assertions not enabled.";
- IntTable t;
+ TypeParam t;
t.insert(1);
t.insert(2);
t.insert(3);
@@ -2389,35 +2472,50 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
t.erase(iter2);
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
- IntTable t1, t2;
+ TypeParam t1, t2;
t1.insert(0);
t2.insert(0);
iter1 = t1.begin();
iter2 = t2.begin();
const char* const kContainerDiffDeathMessage =
SwisstableGenerationsEnabled()
- ? "Invalid iterator comparison.*iterators from different hashtables"
+ ? "Invalid iterator comparison.*iterators from different.* hashtables"
: "Invalid iterator comparison.*may be from different "
".*containers.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
+}
+
+TYPED_TEST_P(SooTest, IteratorInvalidAssertsEqualityOperatorRehash) {
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regex.";
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+ GTEST_SKIP() << "ThreadSanitizer test runs fail on use-after-free even in "
+ "EXPECT_DEATH.";
+#endif
- for (int i = 0; i < 10; ++i) t1.insert(i);
- // There should have been a rehash in t1.
- if (kMsvc) return; // MSVC doesn't support | in regex.
+ TypeParam t;
+ t.insert(0);
+ auto iter = t.begin();
+
+ // Trigger a rehash in t.
+ for (int i = 0; i < 10; ++i) t.insert(i);
- // NOTE(b/293887834): After rehashing, iterators will contain pointers to
- // freed memory, which may be detected by ThreadSanitizer.
const char* const kRehashedDeathMessage =
SwisstableGenerationsEnabled()
? kInvalidIteratorDeathMessage
- : "Invalid iterator comparison.*might have rehashed.*config=asan"
- "|ThreadSanitizer: heap-use-after-free";
- EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
+ : "Invalid iterator comparison.*might have rehashed.*config=asan";
+ EXPECT_DEATH_IF_SUPPORTED(void(iter == t.begin()), kRehashedDeathMessage);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-TEST(RawHashSamplerTest, Sample) {
+template <typename T>
+class RawHashSamplerTest : public testing::Test {};
+TYPED_TEST_SUITE_P(RawHashSamplerTest);
+
+TYPED_TEST_P(RawHashSamplerTest, Sample) {
+ constexpr bool soo_enabled = std::is_same<SooIntTable, TypeParam>::value;
// Enable the feature even if the prod default is off.
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
@@ -2430,7 +2528,7 @@ TEST(RawHashSamplerTest, Sample) {
++start_size;
});
- std::vector<IntTable> tables;
+ std::vector<TypeParam> tables;
for (int i = 0; i < 1000000; ++i) {
tables.emplace_back();
@@ -2459,15 +2557,20 @@ TEST(RawHashSamplerTest, Sample) {
std::memory_order_relaxed)]++;
reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
}
- EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
+ EXPECT_EQ(info.inline_element_size, sizeof(typename TypeParam::value_type));
++end_size;
});
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005);
- EXPECT_EQ(observed_checksums.size(), 5);
- for (const auto& [_, count] : observed_checksums) {
- EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
+ if (soo_enabled) {
+ EXPECT_EQ(observed_checksums.size(), 9);
+ } else {
+ EXPECT_EQ(observed_checksums.size(), 5);
+ for (const auto& [_, count] : observed_checksums) {
+ EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2,
+ 0.05);
+ }
}
EXPECT_EQ(reservations.size(), 10);
@@ -2479,6 +2582,10 @@ TEST(RawHashSamplerTest, Sample) {
<< reservation;
}
}
+
+REGISTER_TYPED_TEST_SUITE_P(RawHashSamplerTest, Sample);
+using RawHashSamplerTestTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, RawHashSamplerTest, RawHashSamplerTestTypes);
#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
@@ -2531,9 +2638,10 @@ using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSanitizerTest, SanitizerTest,
SanitizerTableTypes);
+// TODO(b/289225379): poison inline space when empty SOO.
TEST(Sanitizer, PoisoningOnErase) {
- IntTable t;
- int64_t& v = *t.insert(0).first;
+ NonSooIntTable t;
+ auto& v = *t.insert(0).first;
EXPECT_FALSE(__asan_address_is_poisoned(&v));
t.erase(0);
@@ -2581,7 +2689,7 @@ TEST(Iterator, InvalidUseCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
- IntTable t;
+ NonSooIntTable t;
// Start with 1 element so that `it` is never an end iterator.
t.insert(-1);
for (int i = 0; i < 10; ++i) {
@@ -2628,11 +2736,11 @@ TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
- IntTable t1, t2;
+ NonSooIntTable t1, t2;
t1.insert(1);
auto it = t1.begin();
// ptr will become invalidated on rehash.
- const int64_t* ptr = &*it;
+ const auto* ptr = &*it;
(void)ptr;
t2 = std::move(t1);
@@ -2640,12 +2748,12 @@ TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
kInvalidIteratorDeathMessage);
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
- EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "heap-use-after-free");
#endif
}
-TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
- IntTable t;
+TYPED_TEST_P(SooTest, ReservedGrowthUpdatesWhenTableDoesntGrow) {
+ TypeParam t;
for (int i = 0; i < 8; ++i) t.insert(i);
// Want to insert twice without invalidating iterators so reserve.
const size_t cap = t.capacity();
@@ -2687,7 +2795,7 @@ TEST(Table, GenerationInfoResetsOnClear) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
- IntTable t;
+ NonSooIntTable t;
for (int i = 0; i < 1000; ++i) t.insert(i);
t.reserve(t.size() + 100);
@@ -2705,24 +2813,24 @@ TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
#endif
- IntTable t;
+ NonSooIntTable t;
t.insert(0);
// Rehashing is guaranteed on every insertion while capacity is less than
// RehashProbabilityConstant().
- int64_t i = 0;
+ int i = 0;
while (t.capacity() <= RehashProbabilityConstant()) {
// ptr will become invalidated on rehash.
- const int64_t* ptr = &*t.begin();
+ const auto* ptr = &*t.begin();
t.insert(++i);
- EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "use-after-free") << i;
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << **ptr, "use-after-free") << i;
}
}
TEST(Iterator, InvalidComparisonDifferentTables) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
- IntTable t1, t2;
- IntTable::iterator default_constructed_iter;
+ NonSooIntTable t1, t2;
+ NonSooIntTable::iterator default_constructed_iter;
// We randomly use one of N empty generations for generations from empty
// hashtables. In general, we won't always detect when iterators from
// different empty hashtables are compared, but in this test case, we
@@ -2813,10 +2921,11 @@ TEST(Table, CountedHash) {
}
}
+// IterateOverFullSlots doesn't support SOO.
TEST(Table, IterateOverFullSlotsEmpty) {
- IntTable t;
- auto fail_if_any = [](const ctrl_t*, int64_t* i) {
- FAIL() << "expected no slots " << i;
+ NonSooIntTable t;
+ auto fail_if_any = [](const ctrl_t*, auto* i) {
+ FAIL() << "expected no slots " << **i;
};
container_internal::IterateOverFullSlots(
RawHashSetTestOnlyAccess::GetCommon(t),
@@ -2830,7 +2939,7 @@ TEST(Table, IterateOverFullSlotsEmpty) {
}
TEST(Table, IterateOverFullSlotsFull) {
- IntTable t;
+ NonSooIntTable t;
std::vector<int64_t> expected_slots;
for (int64_t i = 0; i < 128; ++i) {
@@ -2841,17 +2950,106 @@ TEST(Table, IterateOverFullSlotsFull) {
container_internal::IterateOverFullSlots(
RawHashSetTestOnlyAccess::GetCommon(t),
RawHashSetTestOnlyAccess::GetSlots(t),
- [&t, &slots](const ctrl_t* ctrl, int64_t* i) {
+ [&t, &slots](const ctrl_t* ctrl, auto* i) {
ptrdiff_t ctrl_offset =
ctrl - RawHashSetTestOnlyAccess::GetCommon(t).control();
ptrdiff_t slot_offset = i - RawHashSetTestOnlyAccess::GetSlots(t);
ASSERT_EQ(ctrl_offset, slot_offset);
- slots.push_back(*i);
+ slots.push_back(**i);
});
EXPECT_THAT(slots, testing::UnorderedElementsAreArray(expected_slots));
}
}
+template <typename T>
+class SooTable : public testing::Test {};
+TYPED_TEST_SUITE_P(SooTable);
+
+TYPED_TEST_P(SooTable, Basic) {
+ bool frozen = true;
+ TypeParam t{FreezableAlloc<typename TypeParam::value_type>(&frozen)};
+ if (t.capacity() != SooCapacity()) {
+ EXPECT_LT(sizeof(void*), 8);
+ GTEST_SKIP() << "not SOO on this platform";
+ }
+
+ t.insert(0);
+ EXPECT_EQ(t.capacity(), 1);
+ auto it = t.find(0);
+ EXPECT_EQ(it, t.begin());
+ ASSERT_NE(it, t.end());
+ EXPECT_EQ(*it, 0);
+ EXPECT_EQ(++it, t.end());
+ EXPECT_EQ(t.find(1), t.end());
+ EXPECT_EQ(t.size(), 1);
+
+ t.erase(0);
+ EXPECT_EQ(t.size(), 0);
+ t.insert(1);
+ it = t.find(1);
+ EXPECT_EQ(it, t.begin());
+ ASSERT_NE(it, t.end());
+ EXPECT_EQ(*it, 1);
+
+ t.clear();
+ EXPECT_EQ(t.size(), 0);
+}
+
+REGISTER_TYPED_TEST_SUITE_P(SooTable, Basic);
+using FreezableSooTableTypes =
+ ::testing::Types<FreezableSizedValueSooTable<8>,
+ FreezableSizedValueSooTable<16>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, SooTable, FreezableSooTableTypes);
+
+TEST(Table, RehashToSoo) {
+ SooIntTable t;
+ if (t.capacity() != SooCapacity()) {
+ EXPECT_LT(sizeof(void*), 8);
+ GTEST_SKIP() << "not SOO on this platform";
+ }
+
+ t.reserve(10);
+ t.insert(0);
+ EXPECT_EQ(*t.begin(), 0);
+
+ t.rehash(0); // Rehash back down to SOO table.
+
+ EXPECT_EQ(t.capacity(), SooCapacity());
+ EXPECT_EQ(t.size(), 1);
+ EXPECT_EQ(*t.begin(), 0);
+ EXPECT_EQ(t.find(0), t.begin());
+ EXPECT_EQ(t.find(1), t.end());
+}
+
+TEST(Table, ResizeToNonSoo) {
+ for (int reserve_capacity : {8, 100000}) {
+ SooIntTable t;
+ t.insert(0);
+
+ t.reserve(reserve_capacity);
+
+ EXPECT_EQ(t.find(0), t.begin());
+ EXPECT_EQ(t.size(), 1);
+ EXPECT_EQ(*t.begin(), 0);
+ EXPECT_EQ(t.find(1), t.end());
+ }
+}
+
+REGISTER_TYPED_TEST_SUITE_P(
+ SooTest, Empty, Clear, ClearBug, Contains1, Contains2, ContainsEmpty,
+ CopyConstruct, CopyDifferentCapacities, CopyDifferentSizes,
+ EnsureNonQuadraticAsInRust, Erase, EraseBeginEnd, EraseInSmallTables,
+ EraseMaintainsValidIterator, FindFullDeletedRegression, HintInsert, Insert1,
+ Insert2, InsertEraseStressTest, InsertWithinCapacity,
+ IterationOrderChangesByInstance, IterationOrderChangesOnRehash,
+ IteratorInvalidAssertsEqualityOperator,
+ IteratorInvalidAssertsEqualityOperatorRehash, LargeTable, LookupEmpty,
+ NumDeletedRegression, Rehash, RehashDoesNotRehashWhenNotNecessary,
+ RehashZeroForcesRehash, ReplacingDeletedSlotDoesNotRehash,
+ ReservedGrowthUpdatesWhenTableDoesntGrow, Swap, UnstablePointers);
+using SooTableTypes = ::testing::Types<SooIntTable, NonSooIntTable>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, SooTest, SooTableTypes);
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END