23 files changed, 750 insertions, 214 deletions
diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake index 52a563cd..3c0f6901 100644 --- a/CMake/AbseilDll.cmake +++ b/CMake/AbseilDll.cmake @@ -365,6 +365,7 @@ set(ABSL_INTERNAL_DLL_FILES "synchronization/internal/graphcycles.cc" "synchronization/internal/graphcycles.h" "synchronization/internal/kernel_timeout.h" + "synchronization/internal/kernel_timeout.cc" "synchronization/internal/per_thread_sem.cc" "synchronization/internal/per_thread_sem.h" "synchronization/internal/thread_pool.h" diff --git a/absl/base/config.h b/absl/base/config.h index 73d6213d..a17547be 100644 --- a/absl/base/config.h +++ b/absl/base/config.h @@ -889,7 +889,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #error ABSL_INTERNAL_HAVE_SSE cannot be directly set #elif defined(__SSE__) #define ABSL_INTERNAL_HAVE_SSE 1 -#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && !defined(_M_ARM64EC) +#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && \ + !defined(_M_ARM64EC) // MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1 // indicates that at least SSE was targeted with the /arch:SSE option. // All x86-64 processors support SSE, so support can be assumed. @@ -904,7 +905,8 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || #error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set #elif defined(__SSE2__) #define ABSL_INTERNAL_HAVE_SSE2 1 -#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && !defined(_M_ARM64EC) +#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && \ + !defined(_M_ARM64EC) // MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2 // indicates that at least SSE2 was targeted with the /arch:SSE2 option. // All x86-64 processors support SSE2, so support can be assumed. diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel index 96963c4b..038e5234 100644 --- a/absl/container/BUILD.bazel +++ b/absl/container/BUILD.bazel @@ -622,6 +622,7 @@ cc_library( ":hashtablez_sampler", "//absl/base:config", "//absl/base:core_headers", + "//absl/base:dynamic_annotations", "//absl/base:endian", "//absl/base:prefetch", "//absl/base:raw_logging_internal", diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt index 9ba95b32..43d60b9d 100644 --- a/absl/container/CMakeLists.txt +++ b/absl/container/CMakeLists.txt @@ -706,6 +706,7 @@ absl_cc_library( absl::container_common absl::container_memory absl::core_headers + absl::dynamic_annotations absl::endian absl::hash absl::hash_policy_traits diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc index 5dc8b2fa..a6d9b7c0 100644 --- a/absl/container/internal/raw_hash_set.cc +++ b/absl/container/internal/raw_hash_set.cc @@ -19,6 +19,7 @@ #include <cstring> #include "absl/base/config.h" +#include "absl/base/dynamic_annotations.h" #include "absl/hash/hash.h" namespace absl { @@ -44,6 +45,11 @@ constexpr size_t Group::kWidth; inline size_t RandomSeed() { #ifdef ABSL_HAVE_THREAD_LOCAL static thread_local size_t counter = 0; + // On Linux kernels >= 5.4 the MSAN runtime has a false-positive when + // accessing thread local storage data from loaded libraries + // (https://github.com/google/sanitizers/issues/1265), for this reason counter + // needs to be annotated as initialized. 
+ ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(&counter, sizeof(size_t)); size_t value = ++counter; #else // ABSL_HAVE_THREAD_LOCAL static std::atomic<size_t> counter(0); diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h index 65dc66c2..a2ada0cb 100644 --- a/absl/container/internal/raw_hash_set.h +++ b/absl/container/internal/raw_hash_set.h @@ -853,6 +853,9 @@ class HashSetIteratorGenerationInfoEnabled { void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; } private: + // TODO(b/254649633): use a different static generation for default + // constructed iterators so that we can detect when default constructed + // iterators are compared to iterators from empty tables. const GenerationType* generation_ptr_ = EmptyGeneration(); GenerationType generation_ = *generation_ptr_; }; @@ -1087,12 +1090,33 @@ inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a, // Asserts that two iterators come from the same container. // Note: we take slots by reference so that it's not UB if they're uninitialized // as long as we don't read them (when ctrl is null). -// TODO(b/254649633): when generations are enabled, we can detect more cases of -// different containers by comparing the pointers to the generations - this -// can cover cases of end iterators that we would otherwise miss. inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b, const void* const& slot_a, - const void* const& slot_b) { + const void* const& slot_b, + const GenerationType* generation_ptr_a, + const GenerationType* generation_ptr_b) { + if (SwisstableGenerationsEnabled() && generation_ptr_a != generation_ptr_b) { + // TODO(b/254649633): use a different static generation for default + // constructed iterators so that we can split the empty/default cases. + const bool a_is_empty_or_default = generation_ptr_a == EmptyGeneration(); + const bool b_is_empty_or_default = generation_ptr_b == EmptyGeneration(); + if (a_is_empty_or_default != b_is_empty_or_default) { + ABSL_INTERNAL_LOG(FATAL, + "Invalid iterator comparison. Comparing iterator from " + "a non-empty hashtable with an iterator from an empty " + "hashtable or a default-constructed iterator."); + } + const bool a_is_end = ctrl_a == nullptr; + const bool b_is_end = ctrl_b == nullptr; + if (a_is_end || b_is_end) { + ABSL_INTERNAL_LOG(FATAL, + "Invalid iterator comparison. Comparing iterator with " + "an end() iterator from a different hashtable."); + } + ABSL_INTERNAL_LOG(FATAL, + "Invalid iterator comparison. Comparing non-end() " + "iterators from different hashtables."); + } ABSL_HARDENING_ASSERT( AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) && "Invalid iterator comparison. The iterators may be from different " @@ -1463,9 +1487,10 @@ class raw_hash_set { } friend bool operator==(const iterator& a, const iterator& b) { - AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_); AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr()); AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr()); + AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_, + a.generation_ptr(), b.generation_ptr()); return a.ctrl_ == b.ctrl_; } friend bool operator!=(const iterator& a, const iterator& b) { @@ -1499,6 +1524,8 @@ class raw_hash_set { if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; } + // TODO(b/254649633): use non-null control for default-constructed iterators + // so that we can distinguish between them and end iterators in debug mode. 
ctrl_t* ctrl_ = nullptr; // To avoid uninitialized member warnings, put slot_ in an anonymous union. // The member is not initialized on singleton and end iterators. diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc index e33fda20..f7ec1456 100644 --- a/absl/container/internal/raw_hash_set_test.cc +++ b/absl/container/internal/raw_hash_set_test.cc @@ -2078,6 +2078,19 @@ TEST(TableDeathTest, InvalidIteratorAsserts) { "erased or .*the table might have rehashed."); } +// Invalid iterator use can trigger heap-use-after-free in asan, +// use-of-uninitialized-value in msan, or invalidated iterator assertions. +constexpr const char* kInvalidIteratorDeathMessage = + "heap-use-after-free|use-of-uninitialized-value|invalidated " + "iterator|Invalid iterator"; + +// MSVC doesn't support | in regex. +#if defined(_MSC_VER) +constexpr bool kMsvc = true; +#else +constexpr bool kMsvc = false; +#endif + TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) { if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled."; @@ -2105,15 +2118,18 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) { iter1 = t1.begin(); iter2 = t2.begin(); const char* const kContainerDiffDeathMessage = - "Invalid iterator comparison. The iterators may be from different " - ".*containers or the container might have rehashed."; + SwisstableGenerationsEnabled() + ? "Invalid iterator comparison.*non-end" + : "Invalid iterator comparison. The iterators may be from different " + ".*containers or the container might have rehashed."; EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage); EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage); for (int i = 0; i < 10; ++i) t1.insert(i); // There should have been a rehash in t1. + if (kMsvc) return; // MSVC doesn't support | in regex. EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), - kContainerDiffDeathMessage); + kInvalidIteratorDeathMessage); } #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) @@ -2258,21 +2274,9 @@ TEST(Table, AlignOne) { } } -// Invalid iterator use can trigger heap-use-after-free in asan, -// use-of-uninitialized-value in msan, or invalidated iterator assertions. -constexpr const char* kInvalidIteratorDeathMessage = - "heap-use-after-free|use-of-uninitialized-value|invalidated " - "iterator|Invalid iterator"; - -#if defined(__clang__) && defined(_MSC_VER) -constexpr bool kLexan = true; -#else -constexpr bool kLexan = false; -#endif - TEST(Iterator, InvalidUseCrashesWithSanitizers) { if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled."; - if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp."; + if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp."; IntTable t; // Start with 1 element so that `it` is never an end iterator. 
@@ -2288,7 +2292,7 @@ TEST(Iterator, InvalidUseCrashesWithSanitizers) { TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) { if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled."; - if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp."; + if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp."; IntTable t; t.reserve(10); @@ -2349,6 +2353,30 @@ TEST(Table, InvalidReferenceUseCrashesWithSanitizers) { } } +TEST(Iterator, InvalidComparisonDifferentTables) { + if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled."; + + IntTable t1, t2; + IntTable::iterator default_constructed_iter; + // TODO(b/254649633): Currently, we can't detect when end iterators from + // different empty tables are compared. If we allocate generations separately + // from control bytes, then we could do so. + // EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()), + // "Invalid iterator comparison.*empty hashtable"); + // EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter), + // "Invalid iterator comparison.*default-const..."); + t1.insert(0); + EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()), + "Invalid iterator comparison.*empty hashtable"); + EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter), + "Invalid iterator comparison.*default-constructed"); + t2.insert(0); + EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()), + "Invalid iterator comparison.*end.. iterator"); + EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()), + "Invalid iterator comparison.*non-end"); +} + } // namespace } // namespace container_internal ABSL_NAMESPACE_END diff --git a/absl/crc/internal/crc32_x86_arm_combined_simd.h b/absl/crc/internal/crc32_x86_arm_combined_simd.h index fb643986..39e53dd0 100644 --- a/absl/crc/internal/crc32_x86_arm_combined_simd.h +++ b/absl/crc/internal/crc32_x86_arm_combined_simd.h @@ -33,7 +33,7 @@ #include <x86intrin.h> #define ABSL_CRC_INTERNAL_HAVE_X86_SIMD -#elif defined(_MSC_VER) && defined(__AVX__) +#elif defined(_MSC_VER) && !defined(__clang__) && defined(__AVX__) // MSVC AVX (/arch:AVX) implies SSE 4.2 and PCLMULQDQ. 
#include <intrin.h> diff --git a/absl/hash/hash_test.cc b/absl/hash/hash_test.cc index 5b556180..4ed6e50a 100644 --- a/absl/hash/hash_test.cc +++ b/absl/hash/hash_test.cc @@ -17,6 +17,7 @@ #include <algorithm> #include <array> #include <bitset> +#include <cstdint> #include <cstring> #include <deque> #include <forward_list> @@ -1241,14 +1242,24 @@ TEST(HashTest, DoesNotUseImplicitConversionsToBool) { TEST(HashOf, MatchesHashForSingleArgument) { std::string s = "forty two"; - int i = 42; double d = 42.0; std::tuple<int, int> t{4, 2}; + int i = 42; + int neg_i = -42; + int16_t i16 = 42; + int16_t neg_i16 = -42; + int8_t i8 = 42; + int8_t neg_i8 = -42; EXPECT_EQ(absl::HashOf(s), absl::Hash<std::string>{}(s)); - EXPECT_EQ(absl::HashOf(i), absl::Hash<int>{}(i)); EXPECT_EQ(absl::HashOf(d), absl::Hash<double>{}(d)); EXPECT_EQ(absl::HashOf(t), (absl::Hash<std::tuple<int, int>>{}(t))); + EXPECT_EQ(absl::HashOf(i), absl::Hash<int>{}(i)); + EXPECT_EQ(absl::HashOf(neg_i), absl::Hash<int>{}(neg_i)); + EXPECT_EQ(absl::HashOf(i16), absl::Hash<int16_t>{}(i16)); + EXPECT_EQ(absl::HashOf(neg_i16), absl::Hash<int16_t>{}(neg_i16)); + EXPECT_EQ(absl::HashOf(i8), absl::Hash<int8_t>{}(i8)); + EXPECT_EQ(absl::HashOf(neg_i8), absl::Hash<int8_t>{}(neg_i8)); } TEST(HashOf, MatchesHashOfTupleForMultipleArguments) { diff --git a/absl/hash/internal/hash.h b/absl/hash/internal/hash.h index ccf4cc1a..eb50e2c7 100644 --- a/absl/hash/internal/hash.h +++ b/absl/hash/internal/hash.h @@ -969,7 +969,8 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> { // The result should be the same as running the whole algorithm, but faster. template <typename T, absl::enable_if_t<IntegralFastPath<T>::value, int> = 0> static size_t hash(T value) { - return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value))); + return static_cast<size_t>( + Mix(Seed(), static_cast<std::make_unsigned_t<T>>(value))); } // Overload of MixingHashState::hash() diff --git a/absl/log/BUILD.bazel b/absl/log/BUILD.bazel index e9e411ff..378e1f3b 100644 --- a/absl/log/BUILD.bazel +++ b/absl/log/BUILD.bazel @@ -546,6 +546,7 @@ cc_test( deps = [ ":check", ":log", + "//absl/base:log_severity", "//absl/base:strerror", "//absl/flags:program_name", "//absl/log/internal:test_helpers", diff --git a/absl/log/CMakeLists.txt b/absl/log/CMakeLists.txt index fb1b59f5..78adbf1d 100644 --- a/absl/log/CMakeLists.txt +++ b/absl/log/CMakeLists.txt @@ -1014,6 +1014,7 @@ absl_cc_test( absl::flags_program_name absl::log absl::log_internal_test_helpers + absl::log_severity absl::strerror absl::strings absl::str_format diff --git a/absl/log/internal/check_op.h b/absl/log/internal/check_op.h index 4907b89b..20b01b5e 100644 --- a/absl/log/internal/check_op.h +++ b/absl/log/internal/check_op.h @@ -371,7 +371,9 @@ inline constexpr unsigned short GetReferenceableValue( // NOLINT return t; } inline constexpr int GetReferenceableValue(int t) { return t; } -inline unsigned int GetReferenceableValue(unsigned int t) { return t; } +inline constexpr unsigned int GetReferenceableValue(unsigned int t) { + return t; +} inline constexpr long GetReferenceableValue(long t) { return t; } // NOLINT inline constexpr unsigned long GetReferenceableValue( // NOLINT unsigned long t) { // NOLINT diff --git a/absl/log/stripping_test.cc b/absl/log/stripping_test.cc index d6a6606e..8b711ec7 100644 --- a/absl/log/stripping_test.cc +++ b/absl/log/stripping_test.cc @@ -49,6 +49,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/internal/strerror.h" +#include 
"absl/base/log_severity.h" #include "absl/flags/internal/program_name.h" #include "absl/log/check.h" #include "absl/log/internal/test_helpers.h" @@ -57,6 +58,10 @@ #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" +// Set a flag that controls whether we actually execute fatal statements, but +// prevent the compiler from optimizing it out. +static volatile bool kReallyDie = false; + namespace { using ::testing::_; using ::testing::Eq; @@ -304,7 +309,10 @@ TEST_F(StrippingTest, Fatal) { // as would happen if we used a literal. We might (or might not) leave it // lying around later; that's what the tests are for! const std::string needle = absl::Base64Escape("StrippingTest.Fatal"); - EXPECT_DEATH_IF_SUPPORTED(LOG(FATAL) << "U3RyaXBwaW5nVGVzdC5GYXRhbA==", ""); + // We don't care if the LOG statement is actually executed, we're just + // checking that it's stripped. + if (kReallyDie) LOG(FATAL) << "U3RyaXBwaW5nVGVzdC5GYXRhbA=="; + std::unique_ptr<FILE, std::function<void(FILE*)>> exe = OpenTestExecutable(); ASSERT_THAT(exe, NotNull()); if (absl::LogSeverity::kFatal >= kAbslMinLogLevel) { diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel index ccaee796..9c89ac49 100644 --- a/absl/synchronization/BUILD.bazel +++ b/absl/synchronization/BUILD.bazel @@ -53,6 +53,7 @@ cc_library( cc_library( name = "kernel_timeout_internal", + srcs = ["internal/kernel_timeout.cc"], hdrs = ["internal/kernel_timeout.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, @@ -60,12 +61,25 @@ cc_library( "//absl/synchronization:__pkg__", ], deps = [ - "//absl/base:core_headers", + "//absl/base:config", "//absl/base:raw_logging_internal", "//absl/time", ], ) +cc_test( + name = "kernel_timeout_internal_test", + srcs = ["internal/kernel_timeout_test.cc"], + copts = ABSL_TEST_COPTS, + linkopts = ABSL_DEFAULT_LINKOPTS, + deps = [ + ":kernel_timeout_internal", + "//absl/base:config", + "//absl/time", + "@com_google_googletest//:gtest_main", + ], +) + cc_library( name = "synchronization", srcs = [ diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt index f64653bb..5eb9b566 100644 --- a/absl/synchronization/CMakeLists.txt +++ b/absl/synchronization/CMakeLists.txt @@ -39,14 +39,30 @@ absl_cc_library( kernel_timeout_internal HDRS "internal/kernel_timeout.h" + SRCS + "internal/kernel_timeout.cc" COPTS ${ABSL_DEFAULT_COPTS} DEPS - absl::core_headers + absl::config absl::raw_logging_internal absl::time ) +absl_cc_test( + NAME + kernel_timeout_internal_test + SRCS + "internal/kernel_timeout_test.cc" + COPTS + ${ABSL_TEST_COPTS} + DEPS + absl::kernel_timeout_internal + absl::config + absl::time + GTest::gmock_main +) + absl_cc_library( NAME synchronization diff --git a/absl/synchronization/internal/create_thread_identity.cc b/absl/synchronization/internal/create_thread_identity.cc index 44e6129b..f1ddbbb9 100644 --- a/absl/synchronization/internal/create_thread_identity.cc +++ b/absl/synchronization/internal/create_thread_identity.cc @@ -17,6 +17,7 @@ // This file is a no-op if the required LowLevelAlloc support is missing. 
#include "absl/base/internal/low_level_alloc.h" +#include "absl/synchronization/internal/waiter.h" #ifndef ABSL_LOW_LEVEL_ALLOC_MISSING #include <string.h> @@ -71,6 +72,9 @@ static intptr_t RoundUp(intptr_t addr, intptr_t align) { void OneTimeInitThreadIdentity(base_internal::ThreadIdentity* identity) { PerThreadSem::Init(identity); + identity->ticker.store(0, std::memory_order_relaxed); + identity->wait_start.store(0, std::memory_order_relaxed); + identity->is_idle.store(false, std::memory_order_relaxed); } static void ResetThreadIdentityBetweenReuse( diff --git a/absl/synchronization/internal/kernel_timeout.cc b/absl/synchronization/internal/kernel_timeout.cc new file mode 100644 index 00000000..4015bd0c --- /dev/null +++ b/absl/synchronization/internal/kernel_timeout.cc @@ -0,0 +1,168 @@ +// Copyright 2023 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/synchronization/internal/kernel_timeout.h" + +#include <algorithm> +#include <cstdint> +#include <ctime> +#include <limits> + +#include "absl/base/config.h" +#include "absl/time/time.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace synchronization_internal { + +#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL +constexpr uint64_t KernelTimeout::kNoTimeout; +constexpr int64_t KernelTimeout::kMaxNanos; +#endif + +KernelTimeout::KernelTimeout(absl::Time t) { + // `absl::InfiniteFuture()` is a common "no timeout" value and cheaper to + // compare than convert. + if (t == absl::InfiniteFuture()) { + rep_ = kNoTimeout; + return; + } + + int64_t unix_nanos = absl::ToUnixNanos(t); + + // A timeout that lands before the unix epoch is converted to 0. + // In theory implementations should expire these timeouts immediately. + if (unix_nanos < 0) { + unix_nanos = 0; + } + + // Values greater than or equal to kMaxNanos are converted to infinite. + if (unix_nanos >= kMaxNanos) { + rep_ = kNoTimeout; + return; + } + + rep_ = static_cast<uint64_t>(unix_nanos) << 1; +} + +KernelTimeout::KernelTimeout(absl::Duration d) { + // `absl::InfiniteDuration()` is a common "no timeout" value and cheaper to + // compare than convert. + if (d == absl::InfiniteDuration()) { + rep_ = kNoTimeout; + return; + } + + int64_t nanos = absl::ToInt64Nanoseconds(d); + + // Negative durations are normalized to 0. + // In theory implementations should expire these timeouts immediately. + if (nanos < 0) { + nanos = 0; + } + + // Values greater than or equal to kMaxNanos are converted to infinite. + if (nanos >= kMaxNanos) { + rep_ = kNoTimeout; + return; + } + + rep_ = (static_cast<uint64_t>(nanos) << 1) | uint64_t{1}; +} + +int64_t KernelTimeout::MakeAbsNanos() const { + if (!has_timeout()) { + return kMaxNanos; + } + + int64_t nanos = RawNanos(); + + if (is_relative_timeout()) { + int64_t now = ToUnixNanos(absl::Now()); + if (nanos > kMaxNanos - now) { + // Overflow. 
+ nanos = kMaxNanos; + } else { + nanos += now; + } + } else if (nanos == 0) { + // Some callers have assumed that 0 means no timeout, so instead we return a + // time of 1 nanosecond after the epoch. + nanos = 1; + } + + return nanos; +} + +struct timespec KernelTimeout::MakeAbsTimespec() const { + return absl::ToTimespec(absl::Nanoseconds(MakeAbsNanos())); +} + +struct timespec KernelTimeout::MakeRelativeTimespec() const { + if (!has_timeout()) { + return absl::ToTimespec(absl::Nanoseconds(kMaxNanos)); + } + if (is_relative_timeout()) { + return absl::ToTimespec(absl::Nanoseconds(RawNanos())); + } + + int64_t nanos = RawNanos(); + int64_t now = ToUnixNanos(absl::Now()); + if (now > nanos) { + // Convert past values to 0 to be safe. + nanos = 0; + } else { + nanos -= now; + } + return absl::ToTimespec(absl::Nanoseconds(nanos)); +} + +KernelTimeout::DWord KernelTimeout::InMillisecondsFromNow() const { + constexpr DWord kInfinite = std::numeric_limits<DWord>::max(); + + if (!has_timeout()) { + return kInfinite; + } + + const int64_t nanos = RawNanos(); + constexpr uint64_t kNanosInMillis = uint64_t{1000000}; + + if (is_relative_timeout()) { + uint64_t ms = static_cast<uint64_t>(nanos) / kNanosInMillis; + if (ms > static_cast<uint64_t>(kInfinite)) { + ms = static_cast<uint64_t>(kInfinite); + } + return static_cast<DWord>(ms); + } + + int64_t now = ToUnixNanos(absl::Now()); + if (nanos >= now) { + // Round up so that now + ms_from_now >= nanos. + constexpr uint64_t kMaxValueNanos = + std::numeric_limits<int64_t>::max() - kNanosInMillis + 1; + uint64_t ms_from_now = + (std::min(kMaxValueNanos, static_cast<uint64_t>(nanos - now)) + + kNanosInMillis - 1) / + kNanosInMillis; + if (ms_from_now > kInfinite) { + return kInfinite; + } + return static_cast<DWord>(ms_from_now); + } + return DWord{0}; +} + +} // namespace synchronization_internal +ABSL_NAMESPACE_END +} // namespace absl diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h index f5c2c0ef..1f4d82cd 100644 --- a/absl/synchronization/internal/kernel_timeout.h +++ b/absl/synchronization/internal/kernel_timeout.h @@ -11,26 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// - -// An optional absolute timeout, with nanosecond granularity, -// compatible with absl::Time. Suitable for in-register -// parameter-passing (e.g. syscalls.) -// Constructible from a absl::Time (for a timeout to be respected) or {} -// (for "no timeout".) -// This is a private low-level API for use by a handful of low-level -// components. Higher-level components should build APIs based on -// absl::Time and absl::Duration. #ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_ #define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_ -#include <time.h> - #include <algorithm> #include <cstdint> +#include <ctime> #include <limits> +#include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/time/clock.h" #include "absl/time/time.h" @@ -41,56 +31,59 @@ namespace synchronization_internal { class Waiter; +// An optional timeout, with nanosecond granularity. +// +// This is a private low-level API for use by a handful of low-level +// components. Higher-level components should build APIs based on +// absl::Time and absl::Duration. class KernelTimeout { public: - // A timeout that should expire at <t>. 
Any value, in the full - // InfinitePast() to InfiniteFuture() range, is valid here and will be - // respected. - explicit KernelTimeout(absl::Time t) : ns_(MakeNs(t)) {} - // No timeout. - KernelTimeout() : ns_(0) {} + // Construct an absolute timeout that should expire at `t`. + explicit KernelTimeout(absl::Time t); + + // Construct a relative timeout that should expire after `d`. + explicit KernelTimeout(absl::Duration d); + + // Infinite timeout. + constexpr KernelTimeout() : rep_(kNoTimeout) {} + + // A more explicit factory for those who prefer it. + // Equivalent to `KernelTimeout()`. + static constexpr KernelTimeout Never() { return KernelTimeout(); } - // A more explicit factory for those who prefer it. Equivalent to {}. - static KernelTimeout Never() { return {}; } + // Returns true if there is a timeout that will eventually expire. + // Returns false if the timeout is infinite. + bool has_timeout() const { return rep_ != kNoTimeout; } - // We explicitly do not support other custom formats: timespec, int64_t nanos. - // Unify on this and absl::Time, please. + // If `has_timeout()` is true, returns true if the timeout was provided as an + // `absl::Time`. The return value is undefined if `has_timeout()` is false + // because all indefinite timeouts are equivalent. + bool is_absolute_timeout() const { return (rep_ & 1) == 0; } - bool has_timeout() const { return ns_ != 0; } + // If `has_timeout()` is true, returns true if the timeout was provided as an + // `absl::Duration`. The return value is undefined if `has_timeout()` is false + // because all indefinite timeouts are equivalent. + bool is_relative_timeout() const { return (rep_ & 1) == 1; } - // Convert to parameter for sem_timedwait/futex/similar. Only for approved - // users. Do not call if !has_timeout. + // Convert to `struct timespec` for interfaces that expect an absolute + // timeout. If !has_timeout() or is_relative_timeout(), attempts to convert to + // a reasonable absolute timeout, but callers should to test has_timeout() and + // is_relative_timeout() and prefer to use a more appropriate interface. struct timespec MakeAbsTimespec() const; - // Convert to unix epoch nanos. Do not call if !has_timeout. + // Convert to `struct timespec` for interfaces that expect a relative + // timeout. If !has_timeout() or is_absolute_timeout(), attempts to convert to + // a reasonable relative timeout, but callers should to test has_timeout() and + // is_absolute_timeout() and prefer to use a more appropriate interface. + struct timespec MakeRelativeTimespec() const; + + // Convert to unix epoch nanos for interfaces that expect an absolute timeout + // in nanoseconds. If !has_timeout() or is_relative_timeout(), attempts to + // convert to a reasonable absolute timeout, but callers should to test + // has_timeout() and is_relative_timeout() and prefer to use a more + // appropriate interface. int64_t MakeAbsNanos() const; - private: - // internal rep, not user visible: ns after unix epoch. - // zero = no timeout. - // Negative we treat as an unlikely (and certainly expired!) but valid - // timeout. - int64_t ns_; - - static int64_t MakeNs(absl::Time t) { - // optimization--InfiniteFuture is common "no timeout" value - // and cheaper to compare than convert. - if (t == absl::InfiniteFuture()) return 0; - int64_t x = ToUnixNanos(t); - - // A timeout that lands exactly on the epoch (x=0) needs to be respected, - // so we alter it unnoticably to 1. 
Negative timeouts are in - // theory supported, but handled poorly by the kernel (long - // delays) so push them forward too; since all such times have - // already passed, it's indistinguishable. - if (x <= 0) x = 1; - // A time larger than what can be represented to the kernel is treated - // as no timeout. - if (x == (std::numeric_limits<int64_t>::max)()) x = 0; - return x; - } - -#ifdef _WIN32 // Converts to milliseconds from now, or INFINITE when // !has_timeout(). For use by SleepConditionVariableSRW on // Windows. Callers should recognize that the return value is a @@ -100,68 +93,29 @@ class KernelTimeout { // so we define our own DWORD and INFINITE instead of getting them from // <intsafe.h> and <WinBase.h>. typedef unsigned long DWord; // NOLINT - DWord InMillisecondsFromNow() const { - constexpr DWord kInfinite = (std::numeric_limits<DWord>::max)(); - if (!has_timeout()) { - return kInfinite; - } - // The use of absl::Now() to convert from absolute time to - // relative time means that absl::Now() cannot use anything that - // depends on KernelTimeout (for example, Mutex) on Windows. - int64_t now = ToUnixNanos(absl::Now()); - if (ns_ >= now) { - // Round up so that Now() + ms_from_now >= ns_. - constexpr uint64_t max_nanos = - (std::numeric_limits<int64_t>::max)() - 999999u; - uint64_t ms_from_now = - ((std::min)(max_nanos, static_cast<uint64_t>(ns_ - now)) + 999999u) / - 1000000u; - if (ms_from_now > kInfinite) { - return kInfinite; - } - return static_cast<DWord>(ms_from_now); - } - return 0; - } - - friend class Waiter; -#endif -}; + DWord InMillisecondsFromNow() const; -inline struct timespec KernelTimeout::MakeAbsTimespec() const { - int64_t n = ns_; - static const int64_t kNanosPerSecond = 1000 * 1000 * 1000; - if (n == 0) { - ABSL_RAW_LOG( - ERROR, "Tried to create a timespec from a non-timeout; never do this."); - // But we'll try to continue sanely. no-timeout ~= saturated timeout. - n = (std::numeric_limits<int64_t>::max)(); - } - - // Kernel APIs validate timespecs as being at or after the epoch, - // despite the kernel time type being signed. However, no one can - // tell the difference between a timeout at or before the epoch (since - // all such timeouts have expired!) - if (n < 0) n = 0; - - struct timespec abstime; - int64_t seconds = (std::min)(n / kNanosPerSecond, - int64_t{(std::numeric_limits<time_t>::max)()}); - abstime.tv_sec = static_cast<time_t>(seconds); - abstime.tv_nsec = static_cast<decltype(abstime.tv_nsec)>(n % kNanosPerSecond); - return abstime; -} - -inline int64_t KernelTimeout::MakeAbsNanos() const { - if (ns_ == 0) { - ABSL_RAW_LOG( - ERROR, "Tried to create a timeout from a non-timeout; never do this."); - // But we'll try to continue sanely. no-timeout ~= saturated timeout. - return (std::numeric_limits<int64_t>::max)(); - } - - return ns_; -} + private: + // Internal representation. + // - If the value is kNoTimeout, then the timeout is infinite, and + // has_timeout() will return true. + // - If the low bit is 0, then the high 63 bits is number of nanoseconds + // after the unix epoch. + // - If the low bit is 1, then the high 63 bits is a relative duration in + // nanoseconds. + uint64_t rep_; + + // Returns the number of nanoseconds stored in the internal representation. + // Together with is_absolute_timeout() and is_relative_timeout(), the return + // value is used to compute when the timeout should occur. 
+ int64_t RawNanos() const { return static_cast<int64_t>(rep_ >> 1); } + + // A value that represents no timeout (or an infinite timeout). + static constexpr uint64_t kNoTimeout = (std::numeric_limits<uint64_t>::max)(); + + // The maximum value that can be stored in the high 63 bits. + static constexpr int64_t kMaxNanos = (std::numeric_limits<int64_t>::max)(); +}; } // namespace synchronization_internal ABSL_NAMESPACE_END diff --git a/absl/synchronization/internal/kernel_timeout_test.cc b/absl/synchronization/internal/kernel_timeout_test.cc new file mode 100644 index 00000000..a89ae220 --- /dev/null +++ b/absl/synchronization/internal/kernel_timeout_test.cc @@ -0,0 +1,278 @@ +// Copyright 2023 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "absl/synchronization/internal/kernel_timeout.h" + +#include <limits> + +#include "gtest/gtest.h" +#include "absl/base/config.h" +#include "absl/time/clock.h" +#include "absl/time/time.h" + +namespace { + +#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ + defined(ABSL_HAVE_MEMORY_SANITIZER) || \ + defined(ABSL_HAVE_THREAD_SANITIZER) || defined(__ANDROID__) +constexpr absl::Duration kTimingBound = absl::Milliseconds(5); +#else +constexpr absl::Duration kTimingBound = absl::Microseconds(250); +#endif + +using absl::synchronization_internal::KernelTimeout; + +TEST(KernelTimeout, FiniteTimes) { + constexpr absl::Duration kDurationsToTest[] = { + absl::ZeroDuration(), + absl::Nanoseconds(1), + absl::Microseconds(1), + absl::Milliseconds(1), + absl::Seconds(1), + absl::Minutes(1), + absl::Hours(1), + absl::Hours(1000), + -absl::Nanoseconds(1), + -absl::Microseconds(1), + -absl::Milliseconds(1), + -absl::Seconds(1), + -absl::Minutes(1), + -absl::Hours(1), + -absl::Hours(1000), + }; + + for (auto duration : kDurationsToTest) { + const absl::Time now = absl::Now(); + const absl::Time when = now + duration; + SCOPED_TRACE(duration); + KernelTimeout t(when); + EXPECT_TRUE(t.has_timeout()); + EXPECT_TRUE(t.is_absolute_timeout()); + EXPECT_FALSE(t.is_relative_timeout()); + EXPECT_EQ(absl::TimeFromTimespec(t.MakeAbsTimespec()), when); + // MakeRelativeTimespec() doesn't quite round trip when using an absolute + // time, but it should get pretty close. Past times are converted to zero + // durations. + EXPECT_LE( + absl::AbsDuration(absl::DurationFromTimespec(t.MakeRelativeTimespec()) - + std::max(duration, absl::ZeroDuration())), + kTimingBound); + EXPECT_EQ(absl::FromUnixNanos(t.MakeAbsNanos()), when); + EXPECT_LE(absl::AbsDuration(absl::Milliseconds(t.InMillisecondsFromNow()) - + std::max(duration, absl::ZeroDuration())), + absl::Milliseconds(5)); + } +} + +TEST(KernelTimeout, InfiniteFuture) { + KernelTimeout t(absl::InfiniteFuture()); + EXPECT_FALSE(t.has_timeout()); + // Callers are expected to check has_timeout() instead of using the methods + // below, but we do try to do something reasonable if they don't. 
We may not + // be able to round-trip back to absl::InfiniteDuration() or + // absl::InfiniteFuture(), but we should return a very large value. + EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()), + absl::Now() + absl::Hours(100000)); + EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::Hours(100000)); + EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()), + absl::Now() + absl::Hours(100000)); + EXPECT_EQ(t.InMillisecondsFromNow(), + std::numeric_limits<KernelTimeout::DWord>::max()); +} + +TEST(KernelTimeout, DefaultConstructor) { + // The default constructor is equivalent to absl::InfiniteFuture(). + KernelTimeout t; + EXPECT_FALSE(t.has_timeout()); + // Callers are expected to check has_timeout() instead of using the methods + // below, but we do try to do something reasonable if they don't. We may not + // be able to round-trip back to absl::InfiniteDuration() or + // absl::InfiniteFuture(), but we should return a very large value. + EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()), + absl::Now() + absl::Hours(100000)); + EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::Hours(100000)); + EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()), + absl::Now() + absl::Hours(100000)); + EXPECT_EQ(t.InMillisecondsFromNow(), + std::numeric_limits<KernelTimeout::DWord>::max()); +} + +TEST(KernelTimeout, TimeMaxNanos) { + // Time >= kMaxNanos should behave as no timeout. + KernelTimeout t(absl::FromUnixNanos(std::numeric_limits<int64_t>::max())); + EXPECT_FALSE(t.has_timeout()); + // Callers are expected to check has_timeout() instead of using the methods + // below, but we do try to do something reasonable if they don't. We may not + // be able to round-trip back to absl::InfiniteDuration() or + // absl::InfiniteFuture(), but we should return a very large value. + EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()), + absl::Now() + absl::Hours(100000)); + EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::Hours(100000)); + EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()), + absl::Now() + absl::Hours(100000)); + EXPECT_EQ(t.InMillisecondsFromNow(), + std::numeric_limits<KernelTimeout::DWord>::max()); +} + +TEST(KernelTimeout, Never) { + // KernelTimeout::Never() is equivalent to absl::InfiniteFuture(). + KernelTimeout t = KernelTimeout::Never(); + EXPECT_FALSE(t.has_timeout()); + // Callers are expected to check has_timeout() instead of using the methods + // below, but we do try to do something reasonable if they don't. We may not + // be able to round-trip back to absl::InfiniteDuration() or + // absl::InfiniteFuture(), but we should return a very large value. 
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()), + absl::Now() + absl::Hours(100000)); + EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::Hours(100000)); + EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()), + absl::Now() + absl::Hours(100000)); + EXPECT_EQ(t.InMillisecondsFromNow(), + std::numeric_limits<KernelTimeout::DWord>::max()); +} + +TEST(KernelTimeout, InfinitePast) { + KernelTimeout t(absl::InfinitePast()); + EXPECT_TRUE(t.has_timeout()); + EXPECT_TRUE(t.is_absolute_timeout()); + EXPECT_FALSE(t.is_relative_timeout()); + EXPECT_LE(absl::TimeFromTimespec(t.MakeAbsTimespec()), + absl::FromUnixNanos(1)); + EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::ZeroDuration()); + EXPECT_LE(absl::FromUnixNanos(t.MakeAbsNanos()), absl::FromUnixNanos(1)); + EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0}); +} + +TEST(KernelTimeout, FiniteDurations) { + constexpr absl::Duration kDurationsToTest[] = { + absl::ZeroDuration(), + absl::Nanoseconds(1), + absl::Microseconds(1), + absl::Milliseconds(1), + absl::Seconds(1), + absl::Minutes(1), + absl::Hours(1), + absl::Hours(1000), + }; + + for (auto duration : kDurationsToTest) { + SCOPED_TRACE(duration); + KernelTimeout t(duration); + EXPECT_TRUE(t.has_timeout()); + EXPECT_FALSE(t.is_absolute_timeout()); + EXPECT_TRUE(t.is_relative_timeout()); + EXPECT_LE(absl::AbsDuration(absl::Now() + duration - + absl::TimeFromTimespec(t.MakeAbsTimespec())), + absl::Milliseconds(5)); + EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()), duration); + EXPECT_LE(absl::AbsDuration(absl::Now() + duration - + absl::FromUnixNanos(t.MakeAbsNanos())), + absl::Milliseconds(5)); + EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration, + absl::Milliseconds(5)); + } +} + +TEST(KernelTimeout, NegativeDurations) { + constexpr absl::Duration kDurationsToTest[] = { + -absl::ZeroDuration(), + -absl::Nanoseconds(1), + -absl::Microseconds(1), + -absl::Milliseconds(1), + -absl::Seconds(1), + -absl::Minutes(1), + -absl::Hours(1), + -absl::Hours(1000), + -absl::InfiniteDuration(), + }; + + for (auto duration : kDurationsToTest) { + // Negative durations should all be converted to zero durations or "now". + SCOPED_TRACE(duration); + KernelTimeout t(duration); + EXPECT_TRUE(t.has_timeout()); + EXPECT_FALSE(t.is_absolute_timeout()); + EXPECT_TRUE(t.is_relative_timeout()); + EXPECT_LE(absl::AbsDuration(absl::Now() - + absl::TimeFromTimespec(t.MakeAbsTimespec())), + absl::Milliseconds(5)); + EXPECT_EQ(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::ZeroDuration()); + EXPECT_LE( + absl::AbsDuration(absl::Now() - absl::FromUnixNanos(t.MakeAbsNanos())), + absl::Milliseconds(5)); + EXPECT_EQ(t.InMillisecondsFromNow(), KernelTimeout::DWord{0}); + } +} + +TEST(KernelTimeout, InfiniteDuration) { + KernelTimeout t(absl::InfiniteDuration()); + EXPECT_FALSE(t.has_timeout()); + // Callers are expected to check has_timeout() instead of using the methods + // below, but we do try to do something reasonable if they don't. We may not + // be able to round-trip back to absl::InfiniteDuration() or + // absl::InfiniteFuture(), but we should return a very large value. 
+ EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()), + absl::Now() + absl::Hours(100000)); + EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::Hours(100000)); + EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()), + absl::Now() + absl::Hours(100000)); + EXPECT_EQ(t.InMillisecondsFromNow(), + std::numeric_limits<KernelTimeout::DWord>::max()); +} + +TEST(KernelTimeout, DurationMaxNanos) { + // Duration >= kMaxNanos should behave as no timeout. + KernelTimeout t(absl::Nanoseconds(std::numeric_limits<int64_t>::max())); + EXPECT_FALSE(t.has_timeout()); + // Callers are expected to check has_timeout() instead of using the methods + // below, but we do try to do something reasonable if they don't. We may not + // be able to round-trip back to absl::InfiniteDuration() or + // absl::InfiniteFuture(), but we should return a very large value. + EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()), + absl::Now() + absl::Hours(100000)); + EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::Hours(100000)); + EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()), + absl::Now() + absl::Hours(100000)); + EXPECT_EQ(t.InMillisecondsFromNow(), + std::numeric_limits<KernelTimeout::DWord>::max()); +} + +TEST(KernelTimeout, OverflowNanos) { + // Test what happens when KernelTimeout is constructed with an absl::Duration + // that would overflow now_nanos + duration. + int64_t now_nanos = absl::ToUnixNanos(absl::Now()); + int64_t limit = std::numeric_limits<int64_t>::max() - now_nanos; + absl::Duration duration = absl::Nanoseconds(limit) + absl::Seconds(1); + KernelTimeout t(duration); + EXPECT_TRUE(t.has_timeout()); + // Timeouts should still be far in the future. + EXPECT_GT(absl::TimeFromTimespec(t.MakeAbsTimespec()), + absl::Now() + absl::Hours(100000)); + EXPECT_GT(absl::DurationFromTimespec(t.MakeRelativeTimespec()), + absl::Hours(100000)); + EXPECT_GT(absl::FromUnixNanos(t.MakeAbsNanos()), + absl::Now() + absl::Hours(100000)); + EXPECT_LE(absl::Milliseconds(t.InMillisecondsFromNow()) - duration, + absl::Milliseconds(5)); +} + +} // namespace diff --git a/absl/synchronization/internal/per_thread_sem.cc b/absl/synchronization/internal/per_thread_sem.cc index 469e8f32..c9b8dc1e 100644 --- a/absl/synchronization/internal/per_thread_sem.cc +++ b/absl/synchronization/internal/per_thread_sem.cc @@ -40,13 +40,6 @@ std::atomic<int> *PerThreadSem::GetThreadBlockedCounter() { return identity->blocked_count_ptr; } -void PerThreadSem::Init(base_internal::ThreadIdentity *identity) { - new (Waiter::GetWaiter(identity)) Waiter(); - identity->ticker.store(0, std::memory_order_relaxed); - identity->wait_start.store(0, std::memory_order_relaxed); - identity->is_idle.store(false, std::memory_order_relaxed); -} - void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) { const int ticker = identity->ticker.fetch_add(1, std::memory_order_relaxed) + 1; @@ -54,7 +47,7 @@ void PerThreadSem::Tick(base_internal::ThreadIdentity *identity) { const bool is_idle = identity->is_idle.load(std::memory_order_relaxed); if (wait_start && (ticker - wait_start > Waiter::kIdlePeriods) && !is_idle) { // Wakeup the waiting thread since it is time for it to become idle. 
- Waiter::GetWaiter(identity)->Poke(); + ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)(identity); } } @@ -64,11 +57,22 @@ ABSL_NAMESPACE_END extern "C" { +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)( + absl::base_internal::ThreadIdentity *identity) { + new (absl::synchronization_internal::Waiter::GetWaiter(identity)) + absl::synchronization_internal::Waiter(); +} + ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)( absl::base_internal::ThreadIdentity *identity) { absl::synchronization_internal::Waiter::GetWaiter(identity)->Post(); } +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)( + absl::base_internal::ThreadIdentity *identity) { + absl::synchronization_internal::Waiter::GetWaiter(identity)->Poke(); +} + ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)( absl::synchronization_internal::KernelTimeout t) { bool timeout = false; diff --git a/absl/synchronization/internal/per_thread_sem.h b/absl/synchronization/internal/per_thread_sem.h index 90a88809..144ab3cd 100644 --- a/absl/synchronization/internal/per_thread_sem.h +++ b/absl/synchronization/internal/per_thread_sem.h @@ -64,7 +64,7 @@ class PerThreadSem { private: // Create the PerThreadSem associated with "identity". Initializes count=0. // REQUIRES: May only be called by ThreadIdentity. - static void Init(base_internal::ThreadIdentity* identity); + static inline void Init(base_internal::ThreadIdentity* identity); // Increments "identity"'s count. static inline void Post(base_internal::ThreadIdentity* identity); @@ -91,12 +91,21 @@ ABSL_NAMESPACE_END // By changing our extension points to be extern "C", we dodge this // check. extern "C" { +void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)( + absl::base_internal::ThreadIdentity* identity); void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)( absl::base_internal::ThreadIdentity* identity); bool ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemWait)( absl::synchronization_internal::KernelTimeout t); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPoke)( + absl::base_internal::ThreadIdentity* identity); } // extern "C" +void absl::synchronization_internal::PerThreadSem::Init( + absl::base_internal::ThreadIdentity* identity) { + ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemInit)(identity); +} + void absl::synchronization_internal::PerThreadSem::Post( absl::base_internal::ThreadIdentity* identity) { ABSL_INTERNAL_C_SYMBOL(AbslInternalPerThreadSemPost)(identity); diff --git a/absl/types/compare.h b/absl/types/compare.h index 78aa26de..1a965e97 100644 --- a/absl/types/compare.h +++ b/absl/types/compare.h @@ -44,29 +44,28 @@ namespace compare_internal { using value_type = int8_t; -template <typename T> -struct Fail { - static_assert(sizeof(T) < 0, "Only literal `0` is allowed."); -}; +class OnlyLiteralZero { + // A private type which cannot be named to explicitly cast to it. + struct MatchLiteralZero; -// We need the NullPtrT template to avoid triggering the modernize-use-nullptr -// ClangTidy warning in user code. -template <typename NullPtrT = std::nullptr_t> -struct OnlyLiteralZero { - constexpr OnlyLiteralZero(NullPtrT) noexcept {} // NOLINT + public: + // Accept only literal zero since it can be implicitly converted to a pointer + // type. nullptr constants will be caught by the other constructor which + // accepts a nullptr_t. 
+ constexpr OnlyLiteralZero(MatchLiteralZero *) noexcept {} // NOLINT // Fails compilation when `nullptr` or integral type arguments other than // `int` are passed. This constructor doesn't accept `int` because literal `0` // has type `int`. Literal `0` arguments will be implicitly converted to // `std::nullptr_t` and accepted by the above constructor, while other `int` // arguments will fail to be converted and cause compilation failure. - template < - typename T, - typename = typename std::enable_if< - std::is_same<T, std::nullptr_t>::value || - (std::is_integral<T>::value && !std::is_same<T, int>::value)>::type, - typename = typename Fail<T>::type> - OnlyLiteralZero(T); // NOLINT + template <typename T, typename = typename std::enable_if< + std::is_same<T, std::nullptr_t>::value || + (std::is_integral<T>::value && + !std::is_same<T, int>::value)>::type> + OnlyLiteralZero(T) { // NOLINT + static_assert(sizeof(T) < 0, "Only literal `0` is allowed."); + } }; enum class eq : value_type { @@ -163,18 +162,18 @@ class weak_equality // Comparisons friend constexpr bool operator==( - weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept { + weak_equality v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ == 0; } friend constexpr bool operator!=( - weak_equality v, compare_internal::OnlyLiteralZero<>) noexcept { + weak_equality v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ != 0; } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, weak_equality v) noexcept { return 0 == v.value_; } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, weak_equality v) noexcept { return 0 != v.value_; } @@ -214,18 +213,18 @@ class strong_equality } // Comparisons friend constexpr bool operator==( - strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept { + strong_equality v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ == 0; } friend constexpr bool operator!=( - strong_equality v, compare_internal::OnlyLiteralZero<>) noexcept { + strong_equality v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ != 0; } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, strong_equality v) noexcept { return 0 == v.value_; } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, strong_equality v) noexcept { return 0 != v.value_; } @@ -277,50 +276,50 @@ class partial_ordering } // Comparisons friend constexpr bool operator==( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + partial_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.is_ordered() && v.value_ == 0; } friend constexpr bool operator!=( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + partial_ordering v, compare_internal::OnlyLiteralZero) noexcept { return !v.is_ordered() || v.value_ != 0; } friend constexpr bool operator<( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + partial_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.is_ordered() && v.value_ < 0; } friend constexpr bool operator<=( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + partial_ordering v, compare_internal::OnlyLiteralZero) noexcept { return 
v.is_ordered() && v.value_ <= 0; } friend constexpr bool operator>( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + partial_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.is_ordered() && v.value_ > 0; } friend constexpr bool operator>=( - partial_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + partial_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.is_ordered() && v.value_ >= 0; } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept { return v.is_ordered() && 0 == v.value_; } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept { return !v.is_ordered() || 0 != v.value_; } - friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator<(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept { return v.is_ordered() && 0 < v.value_; } - friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept { return v.is_ordered() && 0 <= v.value_; } - friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator>(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept { return v.is_ordered() && 0 > v.value_; } - friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero, partial_ordering v) noexcept { return v.is_ordered() && 0 >= v.value_; } @@ -369,50 +368,50 @@ class weak_ordering } // Comparisons friend constexpr bool operator==( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + weak_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ == 0; } friend constexpr bool operator!=( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + weak_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ != 0; } friend constexpr bool operator<( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + weak_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ < 0; } friend constexpr bool operator<=( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + weak_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ <= 0; } friend constexpr bool operator>( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + weak_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ > 0; } friend constexpr bool operator>=( - weak_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + weak_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ >= 0; } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept { return 0 == v.value_; } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept { return 0 != v.value_; } - friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator<(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept { return 0 < v.value_; } - friend constexpr bool 
operator<=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept { return 0 <= v.value_; } - friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator>(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept { return 0 > v.value_; } - friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero, weak_ordering v) noexcept { return 0 >= v.value_; } @@ -468,50 +467,50 @@ class strong_ordering } // Comparisons friend constexpr bool operator==( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + strong_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ == 0; } friend constexpr bool operator!=( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + strong_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ != 0; } friend constexpr bool operator<( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + strong_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ < 0; } friend constexpr bool operator<=( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + strong_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ <= 0; } friend constexpr bool operator>( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + strong_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ > 0; } friend constexpr bool operator>=( - strong_ordering v, compare_internal::OnlyLiteralZero<>) noexcept { + strong_ordering v, compare_internal::OnlyLiteralZero) noexcept { return v.value_ >= 0; } - friend constexpr bool operator==(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator==(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept { return 0 == v.value_; } - friend constexpr bool operator!=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator!=(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept { return 0 != v.value_; } - friend constexpr bool operator<(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator<(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept { return 0 < v.value_; } - friend constexpr bool operator<=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator<=(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept { return 0 <= v.value_; } - friend constexpr bool operator>(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator>(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept { return 0 > v.value_; } - friend constexpr bool operator>=(compare_internal::OnlyLiteralZero<>, + friend constexpr bool operator>=(compare_internal::OnlyLiteralZero, strong_ordering v) noexcept { return 0 >= v.value_; } |
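Editor's note: the new `kernel_timeout.cc`/`kernel_timeout.h` in this patch replace the old "nanoseconds after the unix epoch, zero means no timeout" representation with a tagged `uint64_t`: the all-ones value is the "no timeout" sentinel, and otherwise the low bit distinguishes absolute (0) from relative (1) timeouts, with the nanosecond count in the high 63 bits. The following is a minimal standalone sketch of that encoding, not Abseil code; the class and method names are illustrative only and assume the same sentinel and low-bit convention described in the patch's header comments.

```cpp
// Sketch of the tag-bit timeout encoding used by the new KernelTimeout.
#include <cstdint>
#include <limits>

class TimeoutRep {
 public:
  // All-ones sentinel: "no timeout" / infinite.
  static constexpr uint64_t kNoTimeout =
      (std::numeric_limits<uint64_t>::max)();

  static TimeoutRep Absolute(int64_t unix_nanos) {
    // Times before the epoch have already expired; clamp them to 0.
    if (unix_nanos < 0) unix_nanos = 0;
    // Low bit 0 marks an absolute timeout.
    return TimeoutRep(static_cast<uint64_t>(unix_nanos) << 1);
  }

  static TimeoutRep Relative(int64_t nanos) {
    // Negative durations are normalized to 0 ("expire immediately").
    if (nanos < 0) nanos = 0;
    // Low bit 1 marks a relative timeout.
    return TimeoutRep((static_cast<uint64_t>(nanos) << 1) | uint64_t{1});
  }

  static constexpr TimeoutRep Never() { return TimeoutRep(kNoTimeout); }

  bool has_timeout() const { return rep_ != kNoTimeout; }
  // The absolute/relative distinction is meaningful only if has_timeout().
  bool is_absolute() const { return (rep_ & 1) == 0; }
  bool is_relative() const { return (rep_ & 1) == 1; }
  // Nanoseconds stored in the high 63 bits (epoch-relative or duration).
  int64_t raw_nanos() const { return static_cast<int64_t>(rep_ >> 1); }

 private:
  explicit constexpr TimeoutRep(uint64_t rep) : rep_(rep) {}
  uint64_t rep_;
};
```

This mirrors why the patch can keep the default constructor `constexpr` (it only stores the sentinel) while the `absl::Time` and `absl::Duration` constructors move to the new `.cc` file, and why helpers such as `MakeAbsNanos()` and `MakeRelativeTimespec()` must first consult the tag bit before converting.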