path: root/absl/synchronization/internal
Diffstat (limited to 'absl/synchronization/internal')
-rw-r--r--  absl/synchronization/internal/graphcycles.cc          13
-rw-r--r--  absl/synchronization/internal/graphcycles.h             5
-rw-r--r--  absl/synchronization/internal/graphcycles_test.cc      19
-rw-r--r--  absl/synchronization/internal/kernel_timeout_test.cc   11
-rw-r--r--  absl/synchronization/internal/per_thread_sem_test.cc    6
-rw-r--r--  absl/synchronization/internal/waiter_test.cc            2
6 files changed, 48 insertions, 8 deletions
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index 39b18482..129067c1 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -211,7 +211,7 @@ class NodeSet {
Vec<int32_t> table_;
uint32_t occupied_; // Count of non-empty slots (includes deleted slots)
- static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a * 41); }
+ static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a) * 41; }
// Return index for storing v. May return an empty index or deleted index
uint32_t FindIndex(int32_t v) const {
@@ -333,7 +333,7 @@ class PointerMap {
private:
// Number of buckets in hash table for pointer lookups.
- static constexpr uint32_t kHashTableSize = 8171; // should be prime
+ static constexpr uint32_t kHashTableSize = 262139; // should be prime
const Vec<Node*>* nodes_;
std::array<int32_t, kHashTableSize> table_;
@@ -365,6 +365,14 @@ static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
return (n->version == NodeVersion(id)) ? n : nullptr;
}
+void GraphCycles::TestOnlyAddNodes(uint32_t n) {
+ uint32_t old_size = rep_->nodes_.size();
+ rep_->nodes_.resize(n);
+ for (auto i = old_size; i < n; ++i) {
+ rep_->nodes_[i] = nullptr;
+ }
+}
+
GraphCycles::GraphCycles() {
InitArenaIfNecessary();
rep_ = new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Rep), arena))
@@ -373,6 +381,7 @@ GraphCycles::GraphCycles() {
GraphCycles::~GraphCycles() {
for (auto* node : rep_->nodes_) {
+ if (node == nullptr) { continue; }
node->Node::~Node();
base_internal::LowLevelAlloc::Free(node);
}
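
The Hash() change above is the substantive fix in this file: in the old form the product a * 41 is computed in signed int arithmetic, which is undefined behavior once a exceeds INT_MAX / 41, whereas the new form converts a to uint32_t first, so the multiplication wraps modulo 2^32 with well-defined unsigned semantics. A minimal standalone sketch of the difference (the function names are illustrative, not part of the library):

    #include <cstdint>

    // Old form: the multiply happens in signed int and overflows for
    // a > INT_MAX / 41 (undefined behavior, flagged by UBSan).
    uint32_t HashOld(int32_t a) { return static_cast<uint32_t>(a * 41); }

    // New form: the operand is converted to unsigned first, so the
    // multiply wraps modulo 2^32, which is well defined.
    uint32_t HashNew(int32_t a) { return static_cast<uint32_t>(a) * 41; }

The larger kHashTableSize (kept prime, as the comment notes, so that bucket indices of the form hash % kHashTableSize distribute evenly) keeps PointerMap lookups reasonable at the node counts the new test creates, and the destructor's nullptr check is needed because TestOnlyAddNodes fills the padded slots with null pointers.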
diff --git a/absl/synchronization/internal/graphcycles.h b/absl/synchronization/internal/graphcycles.h
index ceba33e4..08f304bc 100644
--- a/absl/synchronization/internal/graphcycles.h
+++ b/absl/synchronization/internal/graphcycles.h
@@ -126,6 +126,11 @@ class GraphCycles {
// Expensive: should only be called from graphcycles_test.cc.
bool CheckInvariants() const;
+ // Test-only method to add more nodes. The nodes will not be valid, and this
+ // method should only be used to test the behavior of the graph when it is
+ // very full.
+ void TestOnlyAddNodes(uint32_t n);
+
// ----------------------------------------------------
struct Rep;
private:
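
A hedged sketch of the intended call pattern, with an illustrative node count (the IntegerOverflow test below is the real usage):

    #include "absl/synchronization/internal/graphcycles.h"

    using absl::synchronization_internal::GraphCycles;
    using absl::synchronization_internal::GraphId;

    void StressNodeTable() {
      GraphCycles g;
      // Pad the node table with null placeholder entries so that ids
      // handed out afterwards are large enough to stress the hash paths.
      g.TestOnlyAddNodes(1u << 20);
      int dummy;
      GraphId id = g.GetId(&dummy);  // this id lies beyond the padding
      g.RemoveNode(&dummy);          // clean up the one real node
      (void)id;
    }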
diff --git a/absl/synchronization/internal/graphcycles_test.cc b/absl/synchronization/internal/graphcycles_test.cc
index 3c6ef798..47410aad 100644
--- a/absl/synchronization/internal/graphcycles_test.cc
+++ b/absl/synchronization/internal/graphcycles_test.cc
@@ -14,6 +14,7 @@
#include "absl/synchronization/internal/graphcycles.h"
+#include <climits>
#include <map>
#include <random>
#include <unordered_set>
@@ -458,6 +459,24 @@ TEST_F(GraphCyclesTest, ManyEdges) {
CheckInvariants(g_);
}
+TEST(GraphCycles, IntegerOverflow) {
+ GraphCycles graph_cycles;
+ char *buf = (char *)nullptr;
+ GraphId prev_id = graph_cycles.GetId(buf);
+ buf += 1;
+ GraphId id = graph_cycles.GetId(buf);
+ ASSERT_TRUE(graph_cycles.InsertEdge(prev_id, id));
+
+ // INT_MAX / 40 is enough to cause an overflow when multiplied by 41.
+ graph_cycles.TestOnlyAddNodes(INT_MAX / 40);
+
+ buf += 1;
+ GraphId newid = graph_cycles.GetId(buf);
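+ // The return value is intentionally ignored; the call only needs to hash
+ // the large new node id, which overflowed in the old Hash().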
+ graph_cycles.HasEdge(prev_id, newid);
+
+ graph_cycles.RemoveNode(buf);
+}
+
} // namespace synchronization_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/synchronization/internal/kernel_timeout_test.cc b/absl/synchronization/internal/kernel_timeout_test.cc
index bc546719..33962f86 100644
--- a/absl/synchronization/internal/kernel_timeout_test.cc
+++ b/absl/synchronization/internal/kernel_timeout_test.cc
@@ -36,7 +36,7 @@ extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
if (c == CLOCK_MONOTONIC &&
!absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
- absl::SharedBitGen gen;
+ thread_local absl::BitGen gen; // NOLINT
ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
return 0;
@@ -58,7 +58,8 @@ constexpr absl::Duration kTimingBound = absl::Microseconds(250);
using absl::synchronization_internal::KernelTimeout;
-TEST(KernelTimeout, FiniteTimes) {
+// TODO(b/348224897): re-enable when the flakiness is fixed.
+TEST(KernelTimeout, DISABLED_FiniteTimes) {
constexpr absl::Duration kDurationsToTest[] = {
absl::ZeroDuration(),
absl::Nanoseconds(1),
@@ -228,7 +229,8 @@ TEST(KernelTimeout, InfinitePast) {
EXPECT_EQ(t.ToChronoDuration(), std::chrono::nanoseconds(0));
}
-TEST(KernelTimeout, FiniteDurations) {
+// TODO(b/348224897): re-enable when the flakiness is fixed.
+TEST(KernelTimeout, DISABLED_FiniteDurations) {
constexpr absl::Duration kDurationsToTest[] = {
absl::ZeroDuration(),
absl::Nanoseconds(1),
@@ -274,7 +276,8 @@ TEST(KernelTimeout, FiniteDurations) {
}
}
-TEST(KernelTimeout, NegativeDurations) {
+// TODO(b/348224897): re-enable when the flakiness is fixed.
+TEST(KernelTimeout, DISABLED_NegativeDurations) {
constexpr absl::Duration kDurationsToTest[] = {
-absl::ZeroDuration(),
-absl::Nanoseconds(1),
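
The DISABLED_ prefix is GoogleTest's standard mechanism for compiling a test but skipping it by default. While b/348224897 remains open, these timing tests can still be exercised locally by passing --gtest_also_run_disabled_tests (optionally combined with --gtest_filter) to the test binary.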
diff --git a/absl/synchronization/internal/per_thread_sem_test.cc b/absl/synchronization/internal/per_thread_sem_test.cc
index 24a6b548..e3cf41d7 100644
--- a/absl/synchronization/internal/per_thread_sem_test.cc
+++ b/absl/synchronization/internal/per_thread_sem_test.cc
@@ -159,7 +159,11 @@ TEST_F(PerThreadSemTest, Timeouts) {
const absl::Duration elapsed = absl::Now() - start;
// Allow for a slight early return, to account for quality of implementation
// issues on various platforms.
- const absl::Duration slop = absl::Milliseconds(1);
+ absl::Duration slop = absl::Milliseconds(1);
+#ifdef _MSC_VER
+ // Use higher slop on MSVC due to flaky test failures.
+ slop = absl::Milliseconds(16);
+#endif
EXPECT_LE(delay - slop, elapsed)
<< "Wait returned " << delay - elapsed
<< " early (with " << slop << " slop), start time was " << start;
diff --git a/absl/synchronization/internal/waiter_test.cc b/absl/synchronization/internal/waiter_test.cc
index 992db29b..da138961 100644
--- a/absl/synchronization/internal/waiter_test.cc
+++ b/absl/synchronization/internal/waiter_test.cc
@@ -44,7 +44,7 @@ extern "C" int __clock_gettime(clockid_t c, struct timespec* ts);
extern "C" int clock_gettime(clockid_t c, struct timespec* ts) {
if (c == CLOCK_MONOTONIC &&
!absl::synchronization_internal::KernelTimeout::SupportsSteadyClock()) {
- absl::SharedBitGen gen;
+ thread_local absl::BitGen gen; // NOLINT
ts->tv_sec = absl::Uniform(gen, 0, 1'000'000'000);
ts->tv_nsec = absl::Uniform(gen, 0, 1'000'000'000);
return 0;
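
Both this override and the identical one in kernel_timeout_test.cc swap the since-removed absl::SharedBitGen for a thread_local absl::BitGen: BitGen itself is not thread-safe, so giving each thread its own lazily constructed generator avoids both locking and data races when clock_gettime is called concurrently. A minimal sketch of the pattern (the function name is illustrative):

    #include <cstdint>
    #include "absl/random/distributions.h"
    #include "absl/random/random.h"

    // One generator per thread: no synchronization needed, and each
    // thread's stream of values is independent.
    int64_t RandomNanos() {
      thread_local absl::BitGen gen;  // constructed on first use per thread
      return absl::Uniform(gen, 0, 1'000'000'000);
    }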