Diffstat (limited to 'absl/container/internal/raw_hash_set_test.cc')
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc | 278 +++++++++++++++++++------
 1 file changed, 235 insertions(+), 43 deletions(-)
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 3d3b089c..242a97cb 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -17,6 +17,7 @@
#include <algorithm>
#include <atomic>
#include <cmath>
+#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
@@ -40,8 +41,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/prefetch.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/base/prefetch.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/container_memory.h"
@@ -60,6 +60,10 @@ struct RawHashSetTestOnlyAccess {
static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
return c.slot_array();
}
+ template <typename C>
+ static size_t CountTombstones(const C& c) {
+ return c.common().TombstonesCount();
+ }
};
namespace {
@@ -437,6 +441,63 @@ struct CustomAllocIntTable
using Base::Base;
};
+// Tries to allocate memory at the minimum alignment even when the default
+// allocator uses a higher alignment.
+template <typename T>
+struct MinimumAlignmentAlloc : std::allocator<T> {
+ MinimumAlignmentAlloc() = default;
+
+ template <typename U>
+ explicit MinimumAlignmentAlloc(const MinimumAlignmentAlloc<U>& /*other*/) {}
+
+ template <class U>
+ struct rebind {
+ using other = MinimumAlignmentAlloc<U>;
+ };
+
+ T* allocate(size_t n) {
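+    // Over-allocate by one element and offset the result by alignof(T) so
+    // the returned pointer is only minimally aligned, even if the underlying
+    // allocator handed back a more strongly aligned block.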
+ T* ptr = std::allocator<T>::allocate(n + 1);
+ char* cptr = reinterpret_cast<char*>(ptr);
+ cptr += alignof(T);
+ return reinterpret_cast<T*>(cptr);
+ }
+
+ void deallocate(T* ptr, size_t n) {
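+    // Undo the alignof(T) offset applied in allocate() before freeing.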
+ char* cptr = reinterpret_cast<char*>(ptr);
+ cptr -= alignof(T);
+ std::allocator<T>::deallocate(reinterpret_cast<T*>(cptr), n + 1);
+ }
+};
+
+struct MinimumAlignmentUint8Table
+ : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
+ using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
+ using Base::Base;
+};
+
+// Allows for freezing the allocator to expect no further allocations.
+template <typename T>
+struct FreezableAlloc : std::allocator<T> {
+ explicit FreezableAlloc(bool* f) : frozen(f) {}
+
+ template <typename U>
+ explicit FreezableAlloc(const FreezableAlloc<U>& other)
+ : frozen(other.frozen) {}
+
+ template <class U>
+ struct rebind {
+ using other = FreezableAlloc<U>;
+ };
+
+ T* allocate(size_t n) {
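+    // Any allocation after the allocator has been frozen fails the test.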
+ EXPECT_FALSE(*frozen);
+ return std::allocator<T>::allocate(n);
+ }
+
+ bool* frozen;
+};
+
struct BadFastHash {
template <class T>
size_t operator()(const T&) const {
@@ -444,6 +505,13 @@ struct BadFastHash {
}
};
+struct BadHashFreezableIntTable
+ : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
+ FreezableAlloc<int64_t>> {
+ using Base = typename BadHashFreezableIntTable::raw_hash_set;
+ using Base::Base;
+};
+
struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
std::allocator<int>> {
using Base = typename BadTable::raw_hash_set;
@@ -461,14 +529,12 @@ TEST(Table, EmptyFunctorOptimization) {
void* slots;
size_t size;
size_t capacity;
- size_t growth_left;
};
struct MockTableInfozDisabled {
void* ctrl;
void* slots;
size_t size;
size_t capacity;
- size_t growth_left;
};
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
@@ -479,6 +545,7 @@ TEST(Table, EmptyFunctorOptimization) {
struct GenerationData {
size_t reserved_growth;
+ size_t reservation_size;
GenerationType* generation;
};
@@ -865,6 +932,10 @@ void TestDecompose(bool construct_three) {
}
TEST(Table, Decompose) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
TestDecompose<DecomposeHash, DecomposeEq>(false);
struct TransparentHashIntOverload {
@@ -903,6 +974,10 @@ struct Modulo1000HashTable
// Test that a rehash with no resize happens when there are many deleted slots.
TEST(Table, RehashWithNoResize) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
Modulo1000HashTable t;
// Add strings of the same length (and the same hash)
// so that there are at least kMinFullGroups full groups.
@@ -996,6 +1071,10 @@ TEST(Table, EnsureNonQuadraticAsInRust) {
}
TEST(Table, ClearBug) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
IntTable t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
constexpr size_t max_size = capacity / 2 + 1;
@@ -1048,6 +1127,14 @@ TEST(Table, EraseMaintainsValidIterator) {
EXPECT_EQ(num_erase_calls, kNumElements);
}
+TEST(Table, EraseBeginEnd) {
+ IntTable t;
+ for (int i = 0; i < 10; ++i) t.insert(i);
+ EXPECT_EQ(t.size(), 10);
+ t.erase(t.begin(), t.end());
+ EXPECT_EQ(t.size(), 0);
+}
+
// Collect N bad keys by the following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N random elements.
@@ -1268,7 +1355,7 @@ ExpectedStats XorSeedExpectedStats() {
{{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
}
}
- ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ LOG(FATAL) << "Unknown Group width";
return {};
}
@@ -1364,7 +1451,7 @@ ExpectedStats LinearTransformExpectedStats() {
{{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
}
}
- ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ LOG(FATAL) << "Unknown Group width";
return {};
}
@@ -2044,30 +2131,43 @@ bool IsAssertEnabled() {
}
TEST(TableDeathTest, InvalidIteratorAsserts) {
- if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- EXPECT_DEATH_IF_SUPPORTED(
- t.erase(t.end()),
- "erase.* called on invalid iterator. The iterator might be an "
- "end.*iterator or may have been default constructed.");
+ EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
+ "erase.* called on end.. iterator.");
typename IntTable::iterator iter;
EXPECT_DEATH_IF_SUPPORTED(
- ++iter,
- "operator.* called on invalid iterator. The iterator might be an "
- "end.*iterator or may have been default constructed.");
+ ++iter, "operator.* called on default-constructed iterator.");
t.insert(0);
iter = t.begin();
t.erase(iter);
- EXPECT_DEATH_IF_SUPPORTED(
- ++iter,
- "operator.* called on invalid iterator. The element might have been "
- "erased or .*the table might have rehashed.");
+ const char* const kErasedDeathMessage =
+ SwisstableGenerationsEnabled()
+ ? "operator.* called on invalid iterator.*was likely erased"
+ : "operator.* called on invalid iterator.*might have been "
+ "erased.*config=asan";
+ EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
}
+// Invalid iterator use can trigger heap-use-after-free in asan,
+// use-of-uninitialized-value in msan, or invalidated iterator assertions.
+constexpr const char* kInvalidIteratorDeathMessage =
+ "heap-use-after-free|use-of-uninitialized-value|invalidated "
+ "iterator|Invalid iterator|invalid iterator";
+
+// MSVC doesn't support | in regex.
+#if defined(_MSC_VER)
+constexpr bool kMsvc = true;
+#else
+constexpr bool kMsvc = false;
+#endif
+
TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
- if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
t.insert(1);
@@ -2080,8 +2180,9 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
t.erase(iter1);
// Extra simple "regexp" as regexp support is highly varied across platforms.
const char* const kErasedDeathMessage =
- "Invalid iterator comparison. The element might have .*been erased or "
- "the table might have rehashed.";
+ SwisstableGenerationsEnabled()
+ ? "Invalid iterator comparison.*was likely erased"
+ : "Invalid iterator comparison.*might have been erased.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
t.erase(iter2);
@@ -2093,15 +2194,25 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
iter1 = t1.begin();
iter2 = t2.begin();
const char* const kContainerDiffDeathMessage =
- "Invalid iterator comparison. The iterators may be from different "
- ".*containers or the container might have rehashed.";
+ SwisstableGenerationsEnabled()
+ ? "Invalid iterator comparison.*iterators from different hashtables"
+ : "Invalid iterator comparison.*may be from different "
+ ".*containers.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
for (int i = 0; i < 10; ++i) t1.insert(i);
// There should have been a rehash in t1.
- EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()),
- kContainerDiffDeathMessage);
+ if (kMsvc) return; // MSVC doesn't support | in regex.
+
+ // NOTE(b/293887834): After rehashing, iterators will contain pointers to
+ // freed memory, which may be detected by ThreadSanitizer.
+ const char* const kRehashedDeathMessage =
+ SwisstableGenerationsEnabled()
+ ? kInvalidIteratorDeathMessage
+ : "Invalid iterator comparison.*might have rehashed.*config=asan"
+ "|ThreadSanitizer: heap-use-after-free";
+ EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -2216,11 +2327,17 @@ TEST(Sanitizer, PoisoningOnErase) {
}
#endif // ABSL_HAVE_ADDRESS_SANITIZER
-TEST(Table, AlignOne) {
+template <typename T>
+class AlignOneTest : public ::testing::Test {};
+using AlignOneTestTypes =
+ ::testing::Types<Uint8Table, MinimumAlignmentUint8Table>;
+TYPED_TEST_SUITE(AlignOneTest, AlignOneTestTypes);
+
+TYPED_TEST(AlignOneTest, AlignOne) {
// We previously had a bug in which we were copying a control byte over the
// first slot when alignof(value_type) is 1. We test repeated
// insertions/erases and verify that the behavior is correct.
- Uint8Table t;
+ TypeParam t;
std::unordered_set<uint8_t> verifier; // NOLINT
// Do repeated insertions/erases from the table.
@@ -2246,21 +2363,9 @@ TEST(Table, AlignOne) {
}
}
-// Invalid iterator use can trigger heap-use-after-free in asan,
-// use-of-uninitialized-value in msan, or invalidated iterator assertions.
-constexpr const char* kInvalidIteratorDeathMessage =
- "heap-use-after-free|use-of-uninitialized-value|invalidated "
- "iterator|Invalid iterator";
-
-#if defined(__clang__) && defined(_MSC_VER)
-constexpr bool kLexan = true;
-#else
-constexpr bool kLexan = false;
-#endif
-
TEST(Iterator, InvalidUseCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
- if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t;
// Start with 1 element so that `it` is never an end iterator.
@@ -2276,7 +2381,7 @@ TEST(Iterator, InvalidUseCrashesWithSanitizers) {
TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
- if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t;
t.reserve(10);
@@ -2289,6 +2394,7 @@ TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
}
// ptr will become invalidated on rehash.
const int64_t* ptr = &*it;
+  (void)ptr;  // Unused when ASan is unavailable (see the #ifdef below).
// erase decreases size but does not decrease reserved growth so the next
// insertion still invalidates iterators.
@@ -2299,8 +2405,9 @@ TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
kInvalidIteratorDeathMessage);
- EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr,
- "heap-use-after-free|use-of-uninitialized-value");
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
+#endif
}
TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
@@ -2318,6 +2425,91 @@ TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
EXPECT_EQ(*it, 0);
}
+TEST(Table, EraseBeginEndResetsReservedGrowth) {
+ bool frozen = false;
+ BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
+ t.reserve(100);
+ const size_t cap = t.capacity();
+ frozen = true; // no further allocs allowed
+
+ for (int i = 0; i < 10; ++i) {
+ // Create a long run (hash function returns constant).
+ for (int j = 0; j < 100; ++j) t.insert(j);
+ // Erase elements from the middle of the long run, which creates tombstones.
+ for (int j = 30; j < 60; ++j) t.erase(j);
+ EXPECT_EQ(t.size(), 70);
+ EXPECT_EQ(t.capacity(), cap);
+ ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);
+
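+    // Erasing the whole table should drop the tombstones and reset the
+    // reserved growth in place; the frozen allocator forbids reallocation.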
+ t.erase(t.begin(), t.end());
+
+ EXPECT_EQ(t.size(), 0);
+ EXPECT_EQ(t.capacity(), cap);
+ ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
+ }
+}
+
+TEST(Table, GenerationInfoResetsOnClear) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+ IntTable t;
+ for (int i = 0; i < 1000; ++i) t.insert(i);
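+  // Reserve extra capacity so the table is left with nonzero reserved growth.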
+ t.reserve(t.size() + 100);
+
+ t.clear();
+
+ t.insert(0);
+ auto it = t.begin();
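+  // clear() should have reset the reserved growth, so this insertion may
+  // rehash and must invalidate `it`.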
+ t.insert(1);
+ EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+}
+
+TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+ GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
+#endif
+
+ IntTable t;
+ t.insert(0);
+ // Rehashing is guaranteed on every insertion while capacity is less than
+ // RehashProbabilityConstant().
+ int64_t i = 0;
+ while (t.capacity() <= RehashProbabilityConstant()) {
+ // ptr will become invalidated on rehash.
+ const int64_t* ptr = &*t.begin();
+ t.insert(++i);
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free") << i;
+ }
+}
+
+TEST(Iterator, InvalidComparisonDifferentTables) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+
+ IntTable t1, t2;
+ IntTable::iterator default_constructed_iter;
+ // We randomly use one of N empty generations for generations from empty
+ // hashtables. In general, we won't always detect when iterators from
+ // different empty hashtables are compared, but in this test case, we
+ // should deterministically detect the error due to our randomness yielding
+ // consecutive random generations.
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()),
+ "Invalid iterator comparison.*empty hashtables");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter),
+ "Invalid iterator comparison.*default-constructed");
+ t1.insert(0);
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
+ "Invalid iterator comparison.*empty hashtable");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter),
+ "Invalid iterator comparison.*default-constructed");
+ t2.insert(0);
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
+ "Invalid iterator comparison.*end.. iterator");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()),
+ "Invalid iterator comparison.*non-end");
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END