Diffstat (limited to 'absl/base/spinlock_test_common.cc')
-rw-r--r--  absl/base/spinlock_test_common.cc  40
1 file changed, 20 insertions, 20 deletions
diff --git a/absl/base/spinlock_test_common.cc b/absl/base/spinlock_test_common.cc
index 2b572c5b..52ecf580 100644
--- a/absl/base/spinlock_test_common.cc
+++ b/absl/base/spinlock_test_common.cc
@@ -34,7 +34,7 @@
 #include "absl/synchronization/blocking_counter.h"
 #include "absl/synchronization/notification.h"
 
-constexpr int32_t kNumThreads = 10;
+constexpr uint32_t kNumThreads = 10;
 constexpr int32_t kIters = 1000;
 
 namespace absl {
@@ -48,14 +48,14 @@ struct SpinLockTest {
                                    int64_t wait_end_time) {
     return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time);
   }
-  static uint64_t DecodeWaitCycles(uint32_t lock_value) {
+  static int64_t DecodeWaitCycles(uint32_t lock_value) {
     return SpinLock::DecodeWaitCycles(lock_value);
   }
 };
 
 namespace {
 
-static constexpr int kArrayLength = 10;
+static constexpr size_t kArrayLength = 10;
 static uint32_t values[kArrayLength];
 
 ABSL_CONST_INIT static SpinLock static_cooperative_spinlock(
@@ -79,11 +79,11 @@ static uint32_t Hash32(uint32_t a, uint32_t c) {
   return c;
 }
 
-static void TestFunction(int thread_salt, SpinLock* spinlock) {
+static void TestFunction(uint32_t thread_salt, SpinLock* spinlock) {
   for (int i = 0; i < kIters; i++) {
     SpinLockHolder h(spinlock);
-    for (int j = 0; j < kArrayLength; j++) {
-      const int index = (j + thread_salt) % kArrayLength;
+    for (size_t j = 0; j < kArrayLength; j++) {
+      const size_t index = (j + thread_salt) % kArrayLength;
       values[index] = Hash32(values[index], thread_salt);
       std::this_thread::yield();
     }
@@ -93,7 +93,7 @@ static void TestFunction(int thread_salt, SpinLock* spinlock) {
 static void ThreadedTest(SpinLock* spinlock) {
   std::vector<std::thread> threads;
   threads.reserve(kNumThreads);
-  for (int i = 0; i < kNumThreads; ++i) {
+  for (uint32_t i = 0; i < kNumThreads; ++i) {
     threads.push_back(std::thread(TestFunction, i, spinlock));
   }
   for (auto& thread : threads) {
@@ -101,7 +101,7 @@ static void ThreadedTest(SpinLock* spinlock) {
   }
 
   SpinLockHolder h(spinlock);
-  for (int i = 1; i < kArrayLength; i++) {
+  for (size_t i = 1; i < kArrayLength; i++) {
     EXPECT_EQ(values[0], values[i]);
   }
 }
@@ -133,28 +133,28 @@ TEST(SpinLock, WaitCyclesEncoding) {
   // but the lower kProfileTimestampShift will be dropped.
   const int kMaxCyclesShift =
       32 - kLockwordReservedShift + kProfileTimestampShift;
-  const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
+  const int64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1;
 
   // These bits should be zero after encoding.
   const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1;
 
   // These bits are dropped when wait cycles are encoded.
-  const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
+  const int64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1;
 
   // Test a bunch of random values
   std::default_random_engine generator;
   // Shift to avoid overflow below.
-  std::uniform_int_distribution<uint64_t> time_distribution(
-      0, std::numeric_limits<uint64_t>::max() >> 4);
-  std::uniform_int_distribution<uint64_t> cycle_distribution(0, kMaxCycles);
+  std::uniform_int_distribution<int64_t> time_distribution(
+      0, std::numeric_limits<int64_t>::max() >> 3);
+  std::uniform_int_distribution<int64_t> cycle_distribution(0, kMaxCycles);
 
   for (int i = 0; i < 100; i++) {
     int64_t start_time = time_distribution(generator);
     int64_t cycles = cycle_distribution(generator);
     int64_t end_time = start_time + cycles;
     uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time);
-    EXPECT_EQ(0, lock_value & kLockwordReservedMask);
-    uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
+    EXPECT_EQ(0u, lock_value & kLockwordReservedMask);
+    int64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value);
     EXPECT_EQ(0, decoded & kProfileTimestampMask);
     EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded);
   }
@@ -178,21 +178,21 @@ TEST(SpinLock, WaitCyclesEncoding) {
   // Test clamping
   uint32_t max_value =
       SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles);
-  uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
-  uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
+  int64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value);
+  int64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask;
   EXPECT_EQ(expected_max_value_decoded, max_value_decoded);
 
   const int64_t step = (1 << kProfileTimestampShift);
   uint32_t after_max_value =
       SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step);
-  uint64_t after_max_value_decoded =
+  int64_t after_max_value_decoded =
       SpinLockTest::DecodeWaitCycles(after_max_value);
   EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded);
 
   uint32_t before_max_value = SpinLockTest::EncodeWaitCycles(
       start_time, start_time + kMaxCycles - step);
-  uint64_t before_max_value_decoded =
-      SpinLockTest::DecodeWaitCycles(before_max_value);
+  int64_t before_max_value_decoded =
+      SpinLockTest::DecodeWaitCycles(before_max_value);
   EXPECT_GT(expected_max_value_decoded, before_max_value_decoded);
 }
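
The WaitCyclesEncoding test above round-trips a wait duration through a 32-bit lock word, expecting the low kProfileTimestampShift bits to be dropped and oversized values to be clamped. The following is a minimal, self-contained sketch of that kind of encoding; the shift constants and function bodies here are illustrative assumptions, not Abseil's actual SpinLock internals, and serve only to show what the assertions check.

#include <cstdint>
#include <iostream>

// Illustrative constants, not Abseil's real values.
constexpr int kLockwordReservedShift = 3;   // low lockword bits reserved for flags
constexpr int kProfileTimestampShift = 7;   // low cycle bits dropped when encoding

// Encode a wait duration (end - start, in cycles) into the upper bits of a
// 32-bit word: drop the low kProfileTimestampShift bits, clamp to what fits.
uint32_t EncodeWaitCycles(int64_t start, int64_t end) {
  int64_t scaled = (end - start) >> kProfileTimestampShift;
  const int64_t kMaxScaled = (int64_t{1} << (32 - kLockwordReservedShift)) - 1;
  if (scaled > kMaxScaled) scaled = kMaxScaled;
  return static_cast<uint32_t>(scaled) << kLockwordReservedShift;
}

// Recover the (truncated) cycle count from the encoded lock word.
int64_t DecodeWaitCycles(uint32_t lock_value) {
  return static_cast<int64_t>(lock_value >> kLockwordReservedShift)
         << kProfileTimestampShift;
}

int main() {
  int64_t start = 1000, end = 1000 + 12345;
  uint32_t encoded = EncodeWaitCycles(start, end);
  // Prints 12288, i.e. 12345 with its low 7 bits dropped.
  std::cout << DecodeWaitCycles(encoded) << "\n";
}

Under this sketch, DecodeWaitCycles(EncodeWaitCycles(start, start + cycles)) equals cycles with the low kProfileTimestampShift bits masked off, and values beyond the maximum encodable range decode to the same clamped result, which is the invariant the EXPECT_EQ lines in the test assert.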