Diffstat (limited to 'absl/synchronization')
-rw-r--r--  absl/synchronization/BUILD.bazel                   |  16
-rw-r--r--  absl/synchronization/CMakeLists.txt                |  17
-rw-r--r--  absl/synchronization/internal/futex.h              |  37
-rw-r--r--  absl/synchronization/internal/graphcycles.cc       |  68
-rw-r--r--  absl/synchronization/internal/kernel_timeout.h     |  30
-rw-r--r--  absl/synchronization/internal/thread_pool.h        |   9
-rw-r--r--  absl/synchronization/lifetime_test.cc              |   6
-rw-r--r--  absl/synchronization/mutex.cc                      | 159
-rw-r--r--  absl/synchronization/mutex.h                       | 131
-rw-r--r--  absl/synchronization/mutex_method_pointer_test.cc  | 138
-rw-r--r--  absl/synchronization/mutex_test.cc                 |  20
-rw-r--r--  absl/synchronization/notification.cc               |   1
-rw-r--r--  absl/synchronization/notification.h                |   1
13 files changed, 456 insertions, 177 deletions
diff --git a/absl/synchronization/BUILD.bazel b/absl/synchronization/BUILD.bazel
index 64d3b929..ccaee796 100644
--- a/absl/synchronization/BUILD.bazel
+++ b/absl/synchronization/BUILD.bazel
@@ -200,6 +200,7 @@ cc_library(
     deps = [
         ":synchronization",
         "//absl/base:core_headers",
+        "//absl/functional:any_invocable",
     ],
 )
 
@@ -223,6 +224,18 @@ cc_test(
     ],
 )
 
+cc_test(
+    name = "mutex_method_pointer_test",
+    srcs = ["mutex_method_pointer_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":synchronization",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
 cc_library(
     name = "mutex_benchmark_common",
     testonly = 1,
@@ -259,6 +272,7 @@ cc_test(
     srcs = ["notification_test.cc"],
    copts = ABSL_TEST_COPTS,
    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["no_test_lexan"],
     deps = [
         ":synchronization",
         "//absl/time",
@@ -285,7 +299,7 @@ cc_library(
 
 cc_test(
     name = "per_thread_sem_test",
-    size = "medium",
+    size = "large",
     copts = ABSL_TEST_COPTS,
     linkopts = ABSL_DEFAULT_LINKOPTS,
     tags = [
diff --git a/absl/synchronization/CMakeLists.txt b/absl/synchronization/CMakeLists.txt
index 9335c264..f64653bb 100644
--- a/absl/synchronization/CMakeLists.txt
+++ b/absl/synchronization/CMakeLists.txt
@@ -136,8 +136,9 @@ absl_cc_library(
   COPTS
     ${ABSL_DEFAULT_COPTS}
   DEPS
-    absl::synchronization
+    absl::any_invocable
     absl::core_headers
+    absl::synchronization
   TESTONLY
 )
 
@@ -162,6 +163,20 @@ absl_cc_test(
 
 absl_cc_test(
   NAME
+    mutex_method_pointer_test
+  SRCS
+    "mutex_method_pointer_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::synchronization
+    GTest::gmock_main
+)
+
+absl_cc_test(
+  NAME
     notification_test
   SRCS
     "notification_test.cc"
diff --git a/absl/synchronization/internal/futex.h b/absl/synchronization/internal/futex.h
index 06fbd6d0..cb97da09 100644
--- a/absl/synchronization/internal/futex.h
+++ b/absl/synchronization/internal/futex.h
@@ -87,7 +87,7 @@ class FutexImpl {
  public:
   static int WaitUntil(std::atomic<int32_t> *v, int32_t val,
                        KernelTimeout t) {
-    int err = 0;
+    long err = 0;  // NOLINT(runtime/int)
     if (t.has_timeout()) {
       // https://locklessinc.com/articles/futex_cheat_sheet/
       // Unlike FUTEX_WAIT, FUTEX_WAIT_BITSET uses absolute time.
@@ -105,41 +105,44 @@ class FutexImpl {
                     FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val, nullptr);
     }
     if (ABSL_PREDICT_FALSE(err != 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 
   static int WaitBitsetAbsoluteTimeout(std::atomic<int32_t> *v, int32_t val,
                                        int32_t bits,
                                        const struct timespec *abstime) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
-                      nullptr, bits);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, val, abstime,
+                       nullptr, bits);
     if (ABSL_PREDICT_FALSE(err != 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 
   static int Wake(std::atomic<int32_t> *v, int32_t count) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAKE | FUTEX_PRIVATE_FLAG, count);
     if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 
   // FUTEX_WAKE_BITSET
   static int WakeBitset(std::atomic<int32_t> *v, int32_t count, int32_t bits) {
-    int err = syscall(SYS_futex, reinterpret_cast<int32_t *>(v),
-                      FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
-                      nullptr, bits);
+    // NOLINTNEXTLINE(runtime/int)
+    long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
+                       FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, count, nullptr,
+                       nullptr, bits);
     if (ABSL_PREDICT_FALSE(err < 0)) {
-      err = -errno;
+      return -errno;
     }
-    return err;
+    return 0;
   }
 };
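Both fixes above are mechanical but worth noting: syscall(2) returns long, not int, and the wrappers now return 0 or -errno explicitly instead of reusing err. A minimal standalone sketch of the same convention, assuming Linux and a hypothetical FutexWait helper (not part of Abseil):

    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #include <atomic>
    #include <cerrno>
    #include <cstdint>

    // Hypothetical helper mirroring the patched FutexImpl pattern: capture
    // the syscall result in a `long` (its true return type), and collapse
    // the result to 0 on success or -errno on failure.
    int FutexWait(std::atomic<int32_t>* v, int32_t expected) {
      long err = syscall(SYS_futex, reinterpret_cast<int32_t*>(v),
                         FUTEX_WAIT | FUTEX_PRIVATE_FLAG, expected, nullptr);
      if (err != 0) {
        return -errno;  // e.g. -EAGAIN if *v != expected, -EINTR on a signal
      }
      return 0;  // the value matched and we were woken
    }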
diff --git a/absl/synchronization/internal/graphcycles.cc b/absl/synchronization/internal/graphcycles.cc
index 27fec216..feec4581 100644
--- a/absl/synchronization/internal/graphcycles.cc
+++ b/absl/synchronization/internal/graphcycles.cc
@@ -181,9 +181,9 @@ class NodeSet {
     return true;
   }
 
-  void erase(uint32_t v) {
+  void erase(int32_t v) {
     uint32_t i = FindIndex(v);
-    if (static_cast<uint32_t>(table_[i]) == v) {
+    if (table_[i] == v) {
       table_[i] = kDel;
     }
   }
@@ -195,7 +195,7 @@ class NodeSet {
   for (int32_t elem, _cursor = 0; (eset).Next(&_cursor, &elem); )
   bool Next(int32_t* cursor, int32_t* elem) {
     while (static_cast<uint32_t>(*cursor) < table_.size()) {
-      int32_t v = table_[*cursor];
+      int32_t v = table_[static_cast<uint32_t>(*cursor)];
       (*cursor)++;
       if (v >= 0) {
         *elem = v;
@@ -210,24 +210,26 @@ class NodeSet {
   Vec<int32_t> table_;
   uint32_t occupied_;  // Count of non-empty slots (includes deleted slots)
 
-  static uint32_t Hash(uint32_t a) { return a * 41; }
+  static uint32_t Hash(int32_t a) { return static_cast<uint32_t>(a * 41); }
 
   // Return index for storing v.  May return an empty index or deleted index
-  int FindIndex(int32_t v) const {
+  uint32_t FindIndex(int32_t v) const {
     // Search starting at hash index.
     const uint32_t mask = table_.size() - 1;
     uint32_t i = Hash(v) & mask;
-    int deleted_index = -1;  // If >= 0, index of first deleted element we see
+    uint32_t deleted_index = 0;  // index of first deleted element we see
+    bool seen_deleted_element = false;
     while (true) {
       int32_t e = table_[i];
       if (v == e) {
         return i;
       } else if (e == kEmpty) {
         // Return any previously encountered deleted slot.
-        return (deleted_index >= 0) ? deleted_index : i;
-      } else if (e == kDel && deleted_index < 0) {
+        return seen_deleted_element ? deleted_index : i;
+      } else if (e == kDel && !seen_deleted_element) {
         // Keep searching since v might be present later.
         deleted_index = i;
+        seen_deleted_element = true;
       }
       i = (i + 1) & mask;  // Linear probing; quadratic is slightly slower.
     }
@@ -268,7 +270,7 @@ inline GraphId MakeId(int32_t index, uint32_t version) {
 }
 
 inline int32_t NodeIndex(GraphId id) {
-  return static_cast<uint32_t>(id.handle & 0xfffffffful);
+  return static_cast<int32_t>(id.handle);
 }
 
 inline uint32_t NodeVersion(GraphId id) {
@@ -298,7 +300,7 @@ class PointerMap {
   int32_t Find(void* ptr) {
     auto masked = base_internal::HidePtr(ptr);
     for (int32_t i = table_[Hash(ptr)]; i != -1;) {
-      Node* n = (*nodes_)[i];
+      Node* n = (*nodes_)[static_cast<uint32_t>(i)];
       if (n->masked_ptr == masked) return i;
       i = n->next_hash;
     }
@@ -307,7 +309,7 @@ class PointerMap {
 
   void Add(void* ptr, int32_t i) {
     int32_t* head = &table_[Hash(ptr)];
-    (*nodes_)[i]->next_hash = *head;
+    (*nodes_)[static_cast<uint32_t>(i)]->next_hash = *head;
     *head = i;
   }
 
@@ -317,7 +319,7 @@ class PointerMap {
     auto masked = base_internal::HidePtr(ptr);
     for (int32_t* slot = &table_[Hash(ptr)]; *slot != -1; ) {
       int32_t index = *slot;
-      Node* n = (*nodes_)[index];
+      Node* n = (*nodes_)[static_cast<uint32_t>(index)];
       if (n->masked_ptr == masked) {
         *slot = n->next_hash;  // Remove n from linked list
         n->next_hash = -1;
@@ -358,7 +360,7 @@ struct GraphCycles::Rep {
 };
 
 static Node* FindNode(GraphCycles::Rep* rep, GraphId id) {
-  Node* n = rep->nodes_[NodeIndex(id)];
+  Node* n = rep->nodes_[static_cast<uint32_t>(NodeIndex(id))];
   return (n->version == NodeVersion(id)) ? n : nullptr;
 }
 
@@ -393,7 +395,7 @@ bool GraphCycles::CheckInvariants() const {
       ABSL_RAW_LOG(FATAL, "Duplicate occurrence of rank %d", nx->rank);
     }
     HASH_FOR_EACH(y, nx->out) {
-      Node* ny = r->nodes_[y];
+      Node* ny = r->nodes_[static_cast<uint32_t>(y)];
       if (nx->rank >= ny->rank) {
         ABSL_RAW_LOG(FATAL, "Edge %u->%d has bad rank assignment %d->%d", x, y,
                      nx->rank, ny->rank);
@@ -406,14 +408,14 @@ bool GraphCycles::CheckInvariants() const {
 GraphId GraphCycles::GetId(void* ptr) {
   int32_t i = rep_->ptrmap_.Find(ptr);
   if (i != -1) {
-    return MakeId(i, rep_->nodes_[i]->version);
+    return MakeId(i, rep_->nodes_[static_cast<uint32_t>(i)]->version);
   } else if (rep_->free_nodes_.empty()) {
     Node* n =
         new (base_internal::LowLevelAlloc::AllocWithArena(sizeof(Node), arena))
             Node;
     n->version = 1;  // Avoid 0 since it is used by InvalidGraphId()
     n->visited = false;
-    n->rank = rep_->nodes_.size();
+    n->rank = static_cast<int32_t>(rep_->nodes_.size());
     n->masked_ptr = base_internal::HidePtr(ptr);
     n->nstack = 0;
     n->priority = 0;
@@ -425,7 +427,7 @@ GraphId GraphCycles::GetId(void* ptr) {
     // a permutation of [0,rep_->nodes_.size()-1].
     int32_t r = rep_->free_nodes_.back();
     rep_->free_nodes_.pop_back();
-    Node* n = rep_->nodes_[r];
+    Node* n = rep_->nodes_[static_cast<uint32_t>(r)];
     n->masked_ptr = base_internal::HidePtr(ptr);
     n->nstack = 0;
     n->priority = 0;
@@ -439,12 +441,12 @@ void GraphCycles::RemoveNode(void* ptr) {
   if (i == -1) {
     return;
   }
-  Node* x = rep_->nodes_[i];
+  Node* x = rep_->nodes_[static_cast<uint32_t>(i)];
   HASH_FOR_EACH(y, x->out) {
-    rep_->nodes_[y]->in.erase(i);
+    rep_->nodes_[static_cast<uint32_t>(y)]->in.erase(i);
   }
   HASH_FOR_EACH(y, x->in) {
-    rep_->nodes_[y]->out.erase(i);
+    rep_->nodes_[static_cast<uint32_t>(y)]->out.erase(i);
   }
   x->in.clear();
   x->out.clear();
@@ -520,7 +522,7 @@ bool GraphCycles::InsertEdge(GraphId idx, GraphId idy) {
     // Since we do not call Reorder() on this path, clear any visited
     // markers left by ForwardDFS.
     for (const auto& d : r->deltaf_) {
-      r->nodes_[d]->visited = false;
+      r->nodes_[static_cast<uint32_t>(d)]->visited = false;
     }
     return false;
   }
@@ -538,14 +540,14 @@ static bool ForwardDFS(GraphCycles::Rep* r, int32_t n, int32_t upper_bound) {
   while (!r->stack_.empty()) {
     n = r->stack_.back();
     r->stack_.pop_back();
-    Node* nn = r->nodes_[n];
+    Node* nn = r->nodes_[static_cast<uint32_t>(n)];
     if (nn->visited) continue;
     nn->visited = true;
     r->deltaf_.push_back(n);
 
     HASH_FOR_EACH(w, nn->out) {
-      Node* nw = r->nodes_[w];
+      Node* nw = r->nodes_[static_cast<uint32_t>(w)];
       if (nw->rank == upper_bound) {
         return false;  // Cycle
       }
@@ -564,14 +566,14 @@ static void BackwardDFS(GraphCycles::Rep* r, int32_t n, int32_t lower_bound) {
   while (!r->stack_.empty()) {
     n = r->stack_.back();
     r->stack_.pop_back();
-    Node* nn = r->nodes_[n];
+    Node* nn = r->nodes_[static_cast<uint32_t>(n)];
     if (nn->visited) continue;
     nn->visited = true;
     r->deltab_.push_back(n);
 
     HASH_FOR_EACH(w, nn->in) {
-      Node* nw = r->nodes_[w];
+      Node* nw = r->nodes_[static_cast<uint32_t>(w)];
       if (!nw->visited && lower_bound < nw->rank) {
         r->stack_.push_back(w);
       }
@@ -596,7 +598,7 @@ static void Reorder(GraphCycles::Rep* r) {
 
   // Assign the ranks in order to the collected list.
   for (uint32_t i = 0; i < r->list_.size(); i++) {
-    r->nodes_[r->list_[i]]->rank = r->merged_[i];
+    r->nodes_[static_cast<uint32_t>(r->list_[i])]->rank = r->merged_[i];
   }
 }
 
@@ -604,7 +606,8 @@ static void Sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
   struct ByRank {
     const Vec<Node*>* nodes;
     bool operator()(int32_t a, int32_t b) const {
-      return (*nodes)[a]->rank < (*nodes)[b]->rank;
+      return (*nodes)[static_cast<uint32_t>(a)]->rank <
+             (*nodes)[static_cast<uint32_t>(b)]->rank;
     }
   };
   ByRank cmp;
@@ -616,8 +619,10 @@ static void MoveToList(
     GraphCycles::Rep* r, Vec<int32_t>* src, Vec<int32_t>* dst) {
   for (auto& v : *src) {
     int32_t w = v;
-    v = r->nodes_[w]->rank;         // Replace v entry with its rank
-    r->nodes_[w]->visited = false;  // Prepare for future DFS calls
+    // Replace v entry with its rank
+    v = r->nodes_[static_cast<uint32_t>(w)]->rank;
+    // Prepare for future DFS calls
+    r->nodes_[static_cast<uint32_t>(w)]->visited = false;
     dst->push_back(w);
   }
 }
@@ -647,7 +652,8 @@ int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
     }
 
     if (path_len < max_path_len) {
-      path[path_len] = MakeId(n, rep_->nodes_[n]->version);
+      path[path_len] =
+          MakeId(n, rep_->nodes_[static_cast<uint32_t>(n)]->version);
     }
     path_len++;
     r->stack_.push_back(-1);  // Will remove tentative path entry
@@ -656,7 +662,7 @@ int GraphCycles::FindPath(GraphId idx, GraphId idy, int max_path_len,
       return path_len;
     }
 
-    HASH_FOR_EACH(w, r->nodes_[n]->out) {
+    HASH_FOR_EACH(w, r->nodes_[static_cast<uint32_t>(n)]->out) {
      if (seen.insert(w)) {
        r->stack_.push_back(w);
      }
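Most hunks in graphcycles.cc are signed/unsigned cast fixes. The one structural change is in NodeSet::FindIndex, where the -1 "no deleted slot seen" sentinel becomes an explicit boolean so the slot index can stay unsigned. A self-contained sketch of that probing scheme, with hypothetical kEmpty/kDel sentinels and a power-of-two table size assumed:

    #include <cstdint>
    #include <vector>

    constexpr int32_t kEmpty = -1;  // slot never used (assumed sentinel)
    constexpr int32_t kDel = -2;    // slot deleted (assumed sentinel)

    // Returns the slot holding v, or the first reusable slot if v is absent.
    uint32_t FindIndex(const std::vector<int32_t>& table, int32_t v) {
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      uint32_t i = (static_cast<uint32_t>(v) * 41u) & mask;  // hash
      uint32_t deleted_index = 0;  // meaningful only if the flag below is set
      bool seen_deleted_element = false;
      while (true) {
        int32_t e = table[i];
        if (e == v) return i;
        if (e == kEmpty) return seen_deleted_element ? deleted_index : i;
        if (e == kDel && !seen_deleted_element) {
          deleted_index = i;  // remember it, but keep probing in case v exists
          seen_deleted_element = true;
        }
        i = (i + 1) & mask;  // linear probing
      }
    }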
diff --git a/absl/synchronization/internal/kernel_timeout.h b/absl/synchronization/internal/kernel_timeout.h
index bbd4d2d7..f5c2c0ef 100644
--- a/absl/synchronization/internal/kernel_timeout.h
+++ b/absl/synchronization/internal/kernel_timeout.h
@@ -19,8 +19,8 @@
 // Constructible from a absl::Time (for a timeout to be respected) or {}
 // (for "no timeout".)
 // This is a private low-level API for use by a handful of low-level
-// components that are friends of this class. Higher-level components
-// should build APIs based on absl::Time and absl::Duration.
+// components. Higher-level components should build APIs based on
+// absl::Time and absl::Duration.
 
 #ifndef ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
 #define ABSL_SYNCHRONIZATION_INTERNAL_KERNEL_TIMEOUT_H_
 
@@ -28,6 +28,7 @@
 #include <time.h>
 
 #include <algorithm>
+#include <cstdint>
 #include <limits>
 
 #include "absl/base/internal/raw_logging.h"
@@ -38,7 +39,6 @@ namespace absl {
 ABSL_NAMESPACE_BEGIN
 namespace synchronization_internal {
 
-class Futex;
 class Waiter;
 
 class KernelTimeout {
@@ -60,7 +60,10 @@ class KernelTimeout {
 
   // Convert to parameter for sem_timedwait/futex/similar.  Only for approved
   // users.  Do not call if !has_timeout.
-  struct timespec MakeAbsTimespec();
+  struct timespec MakeAbsTimespec() const;
+
+  // Convert to unix epoch nanos.  Do not call if !has_timeout.
+  int64_t MakeAbsNanos() const;
 
  private:
   // internal rep, not user visible: ns after unix epoch.
@@ -111,7 +114,8 @@ class KernelTimeout {
     constexpr uint64_t max_nanos =
         (std::numeric_limits<int64_t>::max)() - 999999u;
     uint64_t ms_from_now =
-        (std::min<uint64_t>(max_nanos, ns_ - now) + 999999u) / 1000000u;
+        ((std::min)(max_nanos, static_cast<uint64_t>(ns_ - now)) + 999999u) /
+        1000000u;
     if (ms_from_now > kInfinite) {
       return kInfinite;
     }
@@ -119,13 +123,12 @@ class KernelTimeout {
     }
     return 0;
   }
-#endif
 
-  friend class Futex;
   friend class Waiter;
+#endif
 };
 
-inline struct timespec KernelTimeout::MakeAbsTimespec() {
+inline struct timespec KernelTimeout::MakeAbsTimespec() const {
   int64_t n = ns_;
   static const int64_t kNanosPerSecond = 1000 * 1000 * 1000;
   if (n == 0) {
@@ -149,6 +152,17 @@ inline struct timespec KernelTimeout::MakeAbsTimespec() {
   return abstime;
 }
 
+inline int64_t KernelTimeout::MakeAbsNanos() const {
+  if (ns_ == 0) {
+    ABSL_RAW_LOG(
+        ERROR, "Tried to create a timeout from a non-timeout; never do this.");
+    // But we'll try to continue sanely.  no-timeout ~= saturated timeout.
+    return (std::numeric_limits<int64_t>::max)();
+  }
+
+  return ns_;
+}
+
 }  // namespace synchronization_internal
 ABSL_NAMESPACE_END
 }  // namespace absl
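The new MakeAbsNanos() exposes the deadline as raw unix-epoch nanoseconds for waiters that want it, saturating to the int64_t maximum in the "no timeout" case. Its sibling MakeAbsTimespec() is essentially a divide/modulo split of the same value; a sketch of that conversion as a hypothetical free function:

    #include <time.h>

    #include <cstdint>

    // Hypothetical helper: split an absolute unix-epoch nanosecond count into
    // the timespec form expected by futex/sem_timedwait-style interfaces.
    struct timespec AbsNanosToTimespec(int64_t ns) {
      constexpr int64_t kNanosPerSecond = 1000 * 1000 * 1000;
      struct timespec abstime;
      abstime.tv_sec = static_cast<time_t>(ns / kNanosPerSecond);
      abstime.tv_nsec = static_cast<long>(ns % kNanosPerSecond);
      return abstime;
    }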
diff --git a/absl/synchronization/internal/thread_pool.h b/absl/synchronization/internal/thread_pool.h
index 0cb96dac..5eb0bb60 100644
--- a/absl/synchronization/internal/thread_pool.h
+++ b/absl/synchronization/internal/thread_pool.h
@@ -20,9 +20,11 @@
 #include <functional>
 #include <queue>
 #include <thread>  // NOLINT(build/c++11)
+#include <utility>
 #include <vector>
 
 #include "absl/base/thread_annotations.h"
+#include "absl/functional/any_invocable.h"
 #include "absl/synchronization/mutex.h"
 
 namespace absl {
@@ -33,6 +35,7 @@ namespace synchronization_internal {
 class ThreadPool {
  public:
   explicit ThreadPool(int num_threads) {
+    threads_.reserve(num_threads);
     for (int i = 0; i < num_threads; ++i) {
       threads_.push_back(std::thread(&ThreadPool::WorkLoop, this));
     }
@@ -54,7 +57,7 @@ class ThreadPool {
   }
 
   // Schedule a function to be run on a ThreadPool thread immediately.
-  void Schedule(std::function<void()> func) {
+  void Schedule(absl::AnyInvocable<void()> func) {
     assert(func != nullptr);
     absl::MutexLock l(&mu_);
     queue_.push(std::move(func));
@@ -67,7 +70,7 @@ class ThreadPool {
 
   void WorkLoop() {
     while (true) {
-      std::function<void()> func;
+      absl::AnyInvocable<void()> func;
       {
         absl::MutexLock l(&mu_);
         mu_.Await(absl::Condition(this, &ThreadPool::WorkAvailable));
@@ -82,7 +85,7 @@ class ThreadPool {
   }
 
   absl::Mutex mu_;
-  std::queue<std::function<void()>> queue_ ABSL_GUARDED_BY(mu_);
+  std::queue<absl::AnyInvocable<void()>> queue_ ABSL_GUARDED_BY(mu_);
   std::vector<std::thread> threads_;
 };
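The reason for switching the queue from std::function to absl::AnyInvocable is that std::function requires copyable targets, while AnyInvocable also accepts move-only callables. A short usage sketch against the internal, test-only pool above (the Demo function is hypothetical):

    #include <memory>
    #include <utility>

    #include "absl/synchronization/internal/thread_pool.h"

    void Demo(absl::synchronization_internal::ThreadPool& pool) {
      auto big = std::make_unique<int>(42);
      // The lambda owns `big`, so it is move-only. std::function<void()>
      // could not store it, but absl::AnyInvocable<void()> can.
      pool.Schedule([big = std::move(big)]() { /* use *big */ });
    }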
diff --git a/absl/synchronization/lifetime_test.cc b/absl/synchronization/lifetime_test.cc
index cc973a32..e6274232 100644
--- a/absl/synchronization/lifetime_test.cc
+++ b/absl/synchronization/lifetime_test.cc
@@ -123,10 +123,10 @@ class OnDestruction {
 };
 
 // These tests require that the compiler correctly supports C++11 constant
-// initialization... but MSVC has a known regression since v19.10:
+// initialization... but MSVC has a known regression since v19.10 till v19.25:
 // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
-// TODO(epastor): Limit the affected range once MSVC fixes this bug.
-#if defined(__clang__) || !(defined(_MSC_VER) && _MSC_VER > 1900)
+#if defined(__clang__) || \
+    !(defined(_MSC_VER) && _MSC_VER > 1900 && _MSC_VER < 1925)
 // kConstInit
 // Test early usage. (Declaration comes first; definitions must appear after
 // the test runner.)
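The #if above narrows the workaround so that only MSVC versions after 19.00 and before 19.25 skip the tests. What the guarded tests exercise is the constant-initialization pattern below (real Abseil API; the variable name is illustrative):

    #include "absl/base/attributes.h"
    #include "absl/base/const_init.h"
    #include "absl/synchronization/mutex.h"

    // A mutex with static storage duration that is constant-initialized, so
    // it is safe to use even from other initializers that run before main().
    ABSL_CONST_INIT static absl::Mutex global_mu(absl::kConstInit);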
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 52e2455d..064ccb74 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -36,6 +36,9 @@
 #include <algorithm>
 #include <atomic>
 #include <cinttypes>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
 #include <thread>  // NOLINT(build/c++11)
 
 #include "absl/base/attributes.h"
@@ -51,6 +54,7 @@
 #include "absl/base/internal/sysinfo.h"
 #include "absl/base/internal/thread_identity.h"
 #include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/optimization.h"
 #include "absl/base/port.h"
 #include "absl/debugging/stacktrace.h"
 #include "absl/debugging/symbolize.h"
@@ -134,25 +138,42 @@ enum DelayMode { AGGRESSIVE, GENTLE };
 
 struct ABSL_CACHELINE_ALIGNED MutexGlobals {
   absl::once_flag once;
   int spinloop_iterations = 0;
-  int32_t mutex_sleep_limit[2] = {};
+  int32_t mutex_sleep_spins[2] = {};
+  absl::Duration mutex_sleep_time;
 };
 
+absl::Duration MeasureTimeToYield() {
+  absl::Time before = absl::Now();
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+  return absl::Now() - before;
+}
+
 const MutexGlobals &GetMutexGlobals() {
   ABSL_CONST_INIT static MutexGlobals data;
   absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
     const int num_cpus = absl::base_internal::NumCPUs();
     data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
-    // If this a uniprocessor, only yield/sleep.  Otherwise, if the mode is
+    // If this a uniprocessor, only yield/sleep.
+    // Real-time threads are often unable to yield, so the sleep time needs
+    // to be long enough to keep the calling thread asleep until scheduling
+    // happens.
+    // If this is multiprocessor, allow spinning. If the mode is
     // aggressive then spin many times before yielding.  If the mode is
     // gentle then spin only a few times before yielding.  Aggressive spinning
     // is used to ensure that an Unlock() call, which must get the spin lock
     // for any thread to make progress gets it without undue delay.
     if (num_cpus > 1) {
-      data.mutex_sleep_limit[AGGRESSIVE] = 5000;
-      data.mutex_sleep_limit[GENTLE] = 250;
+      data.mutex_sleep_spins[AGGRESSIVE] = 5000;
+      data.mutex_sleep_spins[GENTLE] = 250;
+      data.mutex_sleep_time = absl::Microseconds(10);
     } else {
-      data.mutex_sleep_limit[AGGRESSIVE] = 0;
-      data.mutex_sleep_limit[GENTLE] = 0;
+      data.mutex_sleep_spins[AGGRESSIVE] = 0;
+      data.mutex_sleep_spins[GENTLE] = 0;
+      data.mutex_sleep_time = MeasureTimeToYield() * 5;
+      data.mutex_sleep_time =
+          std::min(data.mutex_sleep_time, absl::Milliseconds(1));
+      data.mutex_sleep_time =
+          std::max(data.mutex_sleep_time, absl::Microseconds(10));
     }
   });
   return data;
@@ -163,7 +184,8 @@ namespace synchronization_internal {
 // Returns the Mutex delay on iteration `c` depending on the given `mode`.
 // The returned value should be used as `c` for the next call to `MutexDelay`.
 int MutexDelay(int32_t c, int mode) {
-  const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
+  const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
+  const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
   if (c < limit) {
     // Spin.
     c++;
@@ -176,7 +198,7 @@ int MutexDelay(int32_t c, int mode) {
       c++;
     } else {
       // Then wait.
-      absl::SleepFor(absl::Microseconds(10));
+      absl::SleepFor(sleep_time);
       c = 0;
     }
     ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
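With this hunk, multiprocessors keep the fixed 10-microsecond sleep, while uniprocessors derive it from five times the measured cost of a yield, bounded to [10us, 1ms] so that real-time threads that cannot yield still sleep long enough for the scheduler to run. The successive std::min/std::max pair is just a clamp; written directly as a hypothetical helper:

    #include <algorithm>

    #include "absl/time/time.h"

    absl::Duration CalibratedSleepTime(absl::Duration time_to_yield) {
      // 5x the measured yield cost, clamped to [10us, 1ms]; equivalent to
      // the successive std::min/std::max in GetMutexGlobals().
      return std::clamp(time_to_yield * 5, absl::Microseconds(10),
                        absl::Milliseconds(1));
    }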
"" : e->name), buffer); @@ -486,7 +514,8 @@ struct SynchWaitParams { cvmu(cvmu_arg), thread(thread_arg), cv_word(cv_word_arg), - contention_start_cycles(base_internal::CycleClock::Now()) {} + contention_start_cycles(base_internal::CycleClock::Now()), + should_submit_contention_data(false) {} const Mutex::MuHow how; // How this thread needs to wait. const Condition *cond; // The condition that this thread is waiting for. @@ -504,6 +533,7 @@ struct SynchWaitParams { int64_t contention_start_cycles; // Time (in cycles) when this thread started // to contend for the mutex. + bool should_submit_contention_data; }; struct SynchLocksHeld { @@ -562,10 +592,15 @@ static SynchLocksHeld *Synch_GetAllLocks() { void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { if (mu) { ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); - } - PerThreadSem::Post(w->thread_identity()); - if (mu) { + // We miss synchronization around passing PerThreadSynch between threads + // since it happens inside of the Mutex code, so we need to ignore all + // accesses to the object. + ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN(); + PerThreadSem::Post(w->thread_identity()); + ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END(); ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0); + } else { + PerThreadSem::Post(w->thread_identity()); } } @@ -1120,7 +1155,7 @@ void Mutex::TryRemove(PerThreadSynch *s) { // if the wait extends past the absolute time specified, even if "s" is still // on the mutex queue. In this case, remove "s" from the queue and return // true, otherwise return false. -ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) { +void Mutex::Block(PerThreadSynch *s) { while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) { if (!DecrementSynchSem(this, s, s->waitp->timeout)) { // After a timeout, we go into a spin loop until we remove ourselves @@ -1273,15 +1308,17 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen, char sym[kSymLen]; int len = 0; for (int i = 0; i != n; i++) { + if (len >= maxlen) + return buf; + size_t count = static_cast<size_t>(maxlen - len); if (symbolize) { if (!symbolizer(pcs[i], sym, kSymLen)) { sym[0] = '\0'; } - snprintf(buf + len, maxlen - len, "%s\t@ %p %s\n", - (i == 0 ? "\n" : ""), - pcs[i], sym); + snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? 
"\n" : ""), pcs[i], + sym); } else { - snprintf(buf + len, maxlen - len, " %p", pcs[i]); + snprintf(buf + len, count, " %p", pcs[i]); } len += strlen(&buf[len]); } @@ -1366,12 +1403,12 @@ static GraphId DeadlockCheck(Mutex *mu) { bool symbolize = number_of_reported_deadlocks <= 2; ABSL_RAW_LOG(ERROR, "Potential Mutex deadlock: %s", CurrentStackString(b->buf, sizeof (b->buf), symbolize)); - int len = 0; + size_t len = 0; for (int j = 0; j != all_locks->n; j++) { void* pr = deadlock_graph->Ptr(all_locks->locks[j].id); if (pr != nullptr) { snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr); - len += static_cast<int>(strlen(&b->buf[len])); + len += strlen(&b->buf[len]); } } ABSL_RAW_LOG(ERROR, @@ -1467,7 +1504,7 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) { return false; } -ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() { +void Mutex::Lock() { ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); GraphId id = DebugOnlyDeadlockCheck(this); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1485,7 +1522,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() { ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); } -ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() { +void Mutex::ReaderLock() { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); GraphId id = DebugOnlyDeadlockCheck(this); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1598,7 +1635,7 @@ bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) { return res; } -ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() { +bool Mutex::TryLock() { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); intptr_t v = mu_.load(std::memory_order_relaxed); if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire @@ -1627,7 +1664,7 @@ ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() { return false; } -ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() { +bool Mutex::ReaderTryLock() { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock | __tsan_mutex_try_lock); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1673,7 +1710,7 @@ ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() { return false; } -ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() { +void Mutex::Unlock() { ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); DebugOnlyLockLeave(this); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1725,7 +1762,7 @@ static bool ExactlyOneReader(intptr_t v) { return (v & kMuMultipleWaitersMask) == 0; } -ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() { +void Mutex::ReaderUnlock() { ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock); DebugOnlyLockLeave(this); intptr_t v = mu_.load(std::memory_order_relaxed); @@ -1755,7 +1792,7 @@ static intptr_t ClearDesignatedWakerMask(int flag) { case 1: // blocked; turn off the designated waker bit return ~static_cast<intptr_t>(kMuDesig); } - ABSL_INTERNAL_UNREACHABLE; + ABSL_UNREACHABLE(); } // Conditionally ignores the existence of waiting writers if a reader that has @@ -1769,7 +1806,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) { case 1: // blocked; pretend there are no waiting writers return ~static_cast<intptr_t>(kMuWrWait); } - ABSL_INTERNAL_UNREACHABLE; + ABSL_UNREACHABLE(); } // Internal version of LockWhen(). See LockSlowWithDeadline() @@ -1790,8 +1827,8 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, // operation tsan considers that we've already released the mutex. bool res = false; #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE - const int flags = read_lock ? __tsan_mutex_read_lock : 0; - const int tryflags = flags | (trylock ? 
@@ -1904,7 +1941,7 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) {
   // Test for either of two situations that should not occur in v:
   //    kMuWriter and kMuReader
   //    kMuWrWait and !kMuWait
-  const uintptr_t w = v ^ kMuWait;
+  const uintptr_t w = static_cast<uintptr_t>(v ^ kMuWait);
   // By flipping that bit, we can now test for:
   //    kMuWriter and kMuReader in w
   //    kMuWrWait and kMuWait in w
@@ -2331,21 +2368,26 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
   }  // end of for(;;)-loop
 
   if (wake_list != kPerThreadSynchNull) {
-    int64_t wait_cycles = 0;
+    int64_t total_wait_cycles = 0;
+    int64_t max_wait_cycles = 0;
     int64_t now = base_internal::CycleClock::Now();
     do {
-      // Sample lock contention events only if the waiter was trying to acquire
+      // Profile lock contention events only if the waiter was trying to acquire
       // the lock, not waiting on a condition variable or Condition.
      if (!wake_list->cond_waiter) {
-        wait_cycles += (now - wake_list->waitp->contention_start_cycles);
+        int64_t cycles_waited =
+            (now - wake_list->waitp->contention_start_cycles);
+        total_wait_cycles += cycles_waited;
+        if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
         wake_list->waitp->contention_start_cycles = now;
+        wake_list->waitp->should_submit_contention_data = true;
       }
       wake_list = Wakeup(wake_list);              // wake waiters
     } while (wake_list != kPerThreadSynchNull);
-    if (wait_cycles > 0) {
-      mutex_tracer("slow release", this, wait_cycles);
+    if (total_wait_cycles > 0) {
+      mutex_tracer("slow release", this, total_wait_cycles);
       ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
-      submit_profile_data(wait_cycles);
+      submit_profile_data(total_wait_cycles);
       ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
     }
   }
@@ -2746,25 +2788,31 @@ static bool Dereference(void *arg) {
   return *(static_cast<bool *>(arg));
 }
 
-Condition::Condition() {}  // null constructor, used for kTrue only
-const Condition Condition::kTrue;
+ABSL_CONST_INIT const Condition Condition::kTrue;
 
 Condition::Condition(bool (*func)(void *), void *arg)
     : eval_(&CallVoidPtrFunction),
-      function_(func),
-      method_(nullptr),
-      arg_(arg) {}
+      arg_(arg) {
+  static_assert(sizeof(&func) <= sizeof(callback_),
+                "An overlarge function pointer passed to Condition.");
+  StoreCallback(func);
+}
 
 bool Condition::CallVoidPtrFunction(const Condition *c) {
-  return (*c->function_)(c->arg_);
+  using FunctionPointer = bool (*)(void *);
+  FunctionPointer function_pointer;
+  std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
+  return (*function_pointer)(c->arg_);
 }
 
 Condition::Condition(const bool *cond)
     : eval_(CallVoidPtrFunction),
-      function_(Dereference),
-      method_(nullptr),
       // const_cast is safe since Dereference does not modify arg
-      arg_(const_cast<bool *>(cond)) {}
+      arg_(const_cast<bool *>(cond)) {
+  using FunctionPointer = bool (*)(void *);
+  const FunctionPointer dereference = Dereference;
+  StoreCallback(dereference);
+}
 
 bool Condition::Eval() const {
   // eval_ == null for kTrue
@@ -2772,14 +2820,15 @@ bool Condition::Eval() const {
 }
 
 bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
-  if (a == nullptr) {
+  // kTrue logic.
+  if (a == nullptr || a->eval_ == nullptr) {
     return b == nullptr || b->eval_ == nullptr;
+  } else if (b == nullptr || b->eval_ == nullptr) {
+    return false;
   }
-  if (b == nullptr || b->eval_ == nullptr) {
-    return a->eval_ == nullptr;
-  }
-  return a->eval_ == b->eval_ && a->function_ == b->function_ &&
-         a->arg_ == b->arg_ && a->method_ == b->method_;
+  // Check equality of the representative fields.
+  return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
+         !memcmp(a->callback_, b->callback_, sizeof(a->callback_));
 }
 
 ABSL_NAMESPACE_END
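Condition now stores whichever callback it receives by copying its bytes into the raw callback_ buffer (declared in mutex.h later in this change) and copying them back out before the call. memcpy is the portable way to type-erase a pointer-to-member, which cannot legally be reinterpret_cast to an unrelated class's member type. The round trip in miniature, with a hypothetical Widget type:

    #include <cstring>

    struct Widget {
      bool ready() { return true; }
    };

    int main() {
      using MethodPtr = bool (Widget::*)();
      // Zero-initialize so comparisons of unused trailing bytes (as in
      // GuaranteedEqual) are well defined.
      char storage[sizeof(MethodPtr)] = {0};

      MethodPtr in = &Widget::ready;
      std::memcpy(storage, &in, sizeof(in));    // StoreCallback()

      MethodPtr out;
      std::memcpy(&out, storage, sizeof(out));  // ReadCallback()
      Widget w;
      return (w.*out)() ? 0 : 1;
    }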
diff --git a/absl/synchronization/mutex.h b/absl/synchronization/mutex.h
index b69b7089..f793cc0e 100644
--- a/absl/synchronization/mutex.h
+++ b/absl/synchronization/mutex.h
@@ -60,6 +60,8 @@
 
 #include <atomic>
 #include <cstdint>
+#include <cstring>
+#include <iterator>
 #include <string>
 
 #include "absl/base/const_init.h"
@@ -612,12 +614,12 @@ class ABSL_SCOPED_LOCKABLE WriterMutexLock {
 // Condition
 // -----------------------------------------------------------------------------
 //
-// As noted above, `Mutex` contains a number of member functions which take a
-// `Condition` as an argument; clients can wait for conditions to become `true`
-// before attempting to acquire the mutex. These sections are known as
-// "condition critical" sections. To use a `Condition`, you simply need to
-// construct it, and use within an appropriate `Mutex` member function;
-// everything else in the `Condition` class is an implementation detail.
+// `Mutex` contains a number of member functions which take a `Condition` as an
+// argument; clients can wait for conditions to become `true` before attempting
+// to acquire the mutex. These sections are known as "condition critical"
+// sections. To use a `Condition`, you simply need to construct it, and use
+// within an appropriate `Mutex` member function; everything else in the
+// `Condition` class is an implementation detail.
 //
 // A `Condition` is specified as a function pointer which returns a boolean.
 // `Condition` functions should be pure functions -- their results should depend
@@ -727,7 +729,7 @@ class Condition {
       : Condition(obj, static_cast<bool (T::*)() const>(&T::operator())) {}
 
   // A Condition that always returns `true`.
-  static const Condition kTrue;
+  ABSL_CONST_INIT static const Condition kTrue;
 
   // Evaluates the condition.
   bool Eval() const;
@@ -742,22 +744,54 @@ class Condition {
   static bool GuaranteedEqual(const Condition *a, const Condition *b);
 
  private:
-  typedef bool (*InternalFunctionType)(void * arg);
-  typedef bool (Condition::*InternalMethodType)();
-  typedef bool (*InternalMethodCallerType)(void * arg,
-                                           InternalMethodType internal_method);
-
-  bool (*eval_)(const Condition*);  // Actual evaluator
-  InternalFunctionType function_;   // function taking pointer returning bool
-  InternalMethodType method_;       // method returning bool
-  void *arg_;                       // arg of function_ or object of method_
-
-  Condition();        // null constructor used only to create kTrue
+  // Sizing an allocation for a method pointer can be subtle. In the Itanium
+  // specifications, a method pointer has a predictable, uniform size. On the
+  // other hand, MSVC ABI, method pointer sizes vary based on the
+  // inheritance of the class. Specifically, method pointers from classes with
+  // multiple inheritance are bigger than those of classes with single
+  // inheritance. Other variations also exist.
+
+#ifndef _MSC_VER
+  // Allocation for a function pointer or method pointer.
+  // The {0} initializer ensures that all unused bytes of this buffer are
+  // always zeroed out.  This is necessary, because GuaranteedEqual() compares
+  // all of the bytes, unaware of which bytes are relevant to a given `eval_`.
+  using MethodPtr = bool (Condition::*)();
+  char callback_[sizeof(MethodPtr)] = {0};
+#else
+  // It is well known that the larget MSVC pointer-to-member is 24 bytes. This
+  // may be the largest known pointer-to-member of any platform. For this
+  // reason we will allocate 24 bytes for MSVC platform toolchains.
+  char callback_[24] = {0};
+#endif
+
+  // Function with which to evaluate callbacks and/or arguments.
+  bool (*eval_)(const Condition*) = nullptr;
+
+  // Either an argument for a function call or an object for a method call.
+  void *arg_ = nullptr;
 
   // Various functions eval_ can point to:
   static bool CallVoidPtrFunction(const Condition*);
   template <typename T>
   static bool CastAndCallFunction(const Condition* c);
   template <typename T>
   static bool CastAndCallMethod(const Condition* c);
+
+  // Helper methods for storing, validating, and reading callback arguments.
+  template <typename T>
+  inline void StoreCallback(T callback) {
+    static_assert(
+        sizeof(callback) <= sizeof(callback_),
+        "An overlarge pointer was passed as a callback to Condition.");
+    std::memcpy(callback_, &callback, sizeof(callback));
+  }
+
+  template <typename T>
+  inline void ReadCallback(T *callback) const {
+    std::memcpy(callback, callback_, sizeof(*callback));
+  }
+
+  // Used only to create kTrue.
+  constexpr Condition() = default;
 };
 
 // -----------------------------------------------------------------------------
@@ -949,56 +983,61 @@ inline CondVar::CondVar() : cv_(0) {}
 // static
 template <typename T>
 bool Condition::CastAndCallMethod(const Condition *c) {
-  typedef bool (T::*MemberType)();
-  MemberType rm = reinterpret_cast<MemberType>(c->method_);
-  T *x = static_cast<T *>(c->arg_);
-  return (x->*rm)();
+  T *object = static_cast<T *>(c->arg_);
+  bool (T::*method_pointer)();
+  c->ReadCallback(&method_pointer);
+  return (object->*method_pointer)();
 }
 
 // static
 template <typename T>
 bool Condition::CastAndCallFunction(const Condition *c) {
-  typedef bool (*FuncType)(T *);
-  FuncType fn = reinterpret_cast<FuncType>(c->function_);
-  T *x = static_cast<T *>(c->arg_);
-  return (*fn)(x);
+  bool (*function)(T *);
+  c->ReadCallback(&function);
+  T *argument = static_cast<T *>(c->arg_);
+  return (*function)(argument);
}
 
 template <typename T>
 inline Condition::Condition(bool (*func)(T *), T *arg)
     : eval_(&CastAndCallFunction<T>),
-      function_(reinterpret_cast<InternalFunctionType>(func)),
-      method_(nullptr),
-      arg_(const_cast<void *>(static_cast<const void *>(arg))) {}
+      arg_(const_cast<void *>(static_cast<const void *>(arg))) {
+  static_assert(sizeof(&func) <= sizeof(callback_),
+                "An overlarge function pointer was passed to Condition.");
+  StoreCallback(func);
+}
 
 template <typename T>
 inline Condition::Condition(T *object,
                             bool (absl::internal::identity<T>::type::*method)())
     : eval_(&CastAndCallMethod<T>),
-      function_(nullptr),
-      method_(reinterpret_cast<InternalMethodType>(method)),
-      arg_(object) {}
+      arg_(object) {
+  static_assert(sizeof(&method) <= sizeof(callback_),
+                "An overlarge method pointer was passed to Condition.");
+  StoreCallback(method);
+}
 
 template <typename T>
 inline Condition::Condition(const T *object,
                             bool (absl::internal::identity<T>::type::*method)()
                                 const)
     : eval_(&CastAndCallMethod<T>),
-      function_(nullptr),
-      method_(reinterpret_cast<InternalMethodType>(method)),
-      arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {}
+      arg_(reinterpret_cast<void *>(const_cast<T *>(object))) {
+  StoreCallback(method);
+}
 
-// Register a hook for profiling support.
+// Register hooks for profiling support.
 //
 // The function pointer registered here will be called whenever a mutex is
 // contended.  The callback is given the cycles for which waiting happened (as
 // measured by //absl/base/internal/cycleclock.h, and which may not
 // be real "cycle" counts.)
 //
-// Calls to this function do not race or block, but there is no ordering
-// guaranteed between calls to this function and call to the provided hook.
-// In particular, the previously registered hook may still be called for some
-// time after this function returns.
+// There is no ordering guarantee between when the hook is registered and when
+// callbacks will begin.  Only a single profiler can be installed in a running
+// binary; if this function is called a second time with a different function
+// pointer, the value is ignored (and will cause an assertion failure in debug
+// mode.)
 void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
 
 // Register a hook for Mutex tracing.
@@ -1011,13 +1050,11 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles));
 //
 // The only event name currently sent is "slow release".
 //
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+// This has the same ordering and single-use limitations as
+// RegisterMutexProfiler() above.
 void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
                                     int64_t wait_cycles));
 
-// TODO(gfalcon): Combine RegisterMutexProfiler() and RegisterMutexTracer()
-// into a single interface, since they are only ever called in pairs.
-
 // Register a hook for CondVar tracing.
 //
 // The function pointer registered here will be called here on various CondVar
@@ -1028,7 +1065,8 @@ void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj,
 // Events that can be sent are "Wait", "Unwait", "Signal wakeup", and
 // "SignalAll wakeup".
 //
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+// This has the same ordering and single-use limitations as
+// RegisterMutexProfiler() above.
 void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
 
 // Register a hook for symbolizing stack traces in deadlock detector reports.
@@ -1038,7 +1076,8 @@ void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv));
 // false if symbolizing failed, or true if a NUL-terminated symbol was written
 // to 'out.'
 //
-// This has the same memory ordering concerns as RegisterMutexProfiler() above.
+// This has the same ordering and single-use limitations as
+// RegisterMutexProfiler() above.
 //
 // DEPRECATED: The default symbolizer function is absl::Symbolize() and the
 // ability to register a different hook for symbolizing stack traces will be
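None of this alters the public surface: the Condition constructor signatures are unchanged, so existing callers compile as before. Typical usage of the method-pointer form (hypothetical Counter class):

    #include "absl/synchronization/mutex.h"

    // Await() blocks until the wrapped const method returns true,
    // re-evaluating it whenever the mutex is released by another thread.
    class Counter {
     public:
      void WaitUntilPositive() {
        absl::MutexLock lock(&mu_);
        mu_.Await(absl::Condition(this, &Counter::IsPositive));
      }
      void Increment() {
        absl::MutexLock lock(&mu_);
        ++value_;
      }

     private:
      bool IsPositive() const { return value_ > 0; }
      absl::Mutex mu_;
      int value_ = 0;
    };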
diff --git a/absl/synchronization/mutex_method_pointer_test.cc b/absl/synchronization/mutex_method_pointer_test.cc
new file mode 100644
index 00000000..1ec801a0
--- /dev/null
+++ b/absl/synchronization/mutex_method_pointer_test.cc
@@ -0,0 +1,138 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/synchronization/mutex.h"
+
+#include <cstdlib>
+#include <string>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace {
+
+class IncompleteClass;
+
+#ifdef _MSC_VER
+// These tests verify expectations about sizes of MSVC pointers to methods.
+// Pointers to methods are distinguished by whether their class hierachies
+// contain single inheritance, multiple inheritance, or virtual inheritence.
+
+// Declare classes of the various MSVC inheritance types.
+class __single_inheritance SingleInheritance{};
+class __multiple_inheritance MultipleInheritance;
+class __virtual_inheritance VirtualInheritance;
+
+TEST(MutexMethodPointerTest, MicrosoftMethodPointerSize) {
+  void (SingleInheritance::*single_inheritance_method_pointer)();
+  void (MultipleInheritance::*multiple_inheritance_method_pointer)();
+  void (VirtualInheritance::*virtual_inheritance_method_pointer)();
+
+#if defined(_M_IX86) || defined(_M_ARM)
+  static_assert(sizeof(single_inheritance_method_pointer) == 4,
+                "Unexpected sizeof(single_inheritance_method_pointer).");
+  static_assert(sizeof(multiple_inheritance_method_pointer) == 8,
+                "Unexpected sizeof(multiple_inheritance_method_pointer).");
+  static_assert(sizeof(virtual_inheritance_method_pointer) == 12,
+                "Unexpected sizeof(virtual_inheritance_method_pointer).");
+#elif defined(_M_X64) || defined(__aarch64__)
+  static_assert(sizeof(single_inheritance_method_pointer) == 8,
+                "Unexpected sizeof(single_inheritance_method_pointer).");
+  static_assert(sizeof(multiple_inheritance_method_pointer) == 16,
+                "Unexpected sizeof(multiple_inheritance_method_pointer).");
+  static_assert(sizeof(virtual_inheritance_method_pointer) == 16,
+                "Unexpected sizeof(virtual_inheritance_method_pointer).");
+#endif
+  void (IncompleteClass::*incomplete_class_method_pointer)();
+  static_assert(sizeof(incomplete_class_method_pointer) >=
+                    sizeof(virtual_inheritance_method_pointer),
+                "Failed invariant: sizeof(incomplete_class_method_pointer) >= "
+                "sizeof(virtual_inheritance_method_pointer)!");
+}
+
+class Callback {
+  bool x = true;
+
+ public:
+  Callback() {}
+  bool method() {
+    x = !x;
+    return x;
+  }
+};
+
+class M2 {
+  bool x = true;
+
+ public:
+  M2() {}
+  bool method2() {
+    x = !x;
+    return x;
+  }
+};
+
+class MultipleInheritance : public Callback, public M2 {};
+
+TEST(MutexMethodPointerTest, ConditionWithMultipleInheritanceMethod) {
+  // This test ensures that Condition can deal with method pointers from
+  // classes with multiple inheritance.
+  MultipleInheritance object = MultipleInheritance();
+  absl::Condition condition(&object, &MultipleInheritance::method);
+  EXPECT_FALSE(condition.Eval());
+  EXPECT_TRUE(condition.Eval());
+}
+
+class __virtual_inheritance VirtualInheritance : virtual public Callback {
+  bool x = false;
+
+ public:
+  VirtualInheritance() {}
+  bool method() {
+    x = !x;
+    return x;
+  }
+};
+
+TEST(MutexMethodPointerTest, ConditionWithVirtualInheritanceMethod) {
+  // This test ensures that Condition can deal with method pointers from
+  // classes with virtual inheritance.
+  VirtualInheritance object = VirtualInheritance();
+  absl::Condition condition(&object, &VirtualInheritance::method);
+  EXPECT_TRUE(condition.Eval());
+  EXPECT_FALSE(condition.Eval());
+}
+#endif  // #ifdef _MSC_VER
+
+TEST(MutexMethodPointerTest, ConditionWithIncompleteClassMethod) {
+  using IncompleteClassMethodPointer = void (IncompleteClass::*)();
+
+  union CallbackSlot {
+    void (*anonymous_function_pointer)();
+    IncompleteClassMethodPointer incomplete_class_method_pointer;
+  };
+
+  static_assert(sizeof(CallbackSlot) >= sizeof(IncompleteClassMethodPointer),
+                "The callback slot is not big enough for method pointers.");
+  static_assert(
+      sizeof(CallbackSlot) == sizeof(IncompleteClassMethodPointer),
+      "The callback slot is not big enough for anonymous function pointers.");
+
+#if defined(_MSC_VER)
+  static_assert(sizeof(IncompleteClassMethodPointer) <= 24,
+                "The pointer to a method of an incomplete class is too big.");
+#endif
+}
+
+}  // namespace
diff --git a/absl/synchronization/mutex_test.cc b/absl/synchronization/mutex_test.cc
index 99bb0175..34751cb1 100644
--- a/absl/synchronization/mutex_test.cc
+++ b/absl/synchronization/mutex_test.cc
@@ -295,8 +295,9 @@ static void TestTime(TestContext *cxt, int c, bool use_cv) {
                    "TestTime failed");
     }
     elapsed = absl::Now() - start;
-    ABSL_RAW_CHECK(absl::Seconds(0.9) <= elapsed &&
-                   elapsed <= absl::Seconds(2.0), "TestTime failed");
+    ABSL_RAW_CHECK(
+        absl::Seconds(0.9) <= elapsed && elapsed <= absl::Seconds(2.0),
+        "TestTime failed");
     ABSL_RAW_CHECK(cxt->g0 == cxt->threads, "TestTime failed");
 
   } else if (c == 1) {
@@ -343,7 +344,7 @@ static void TestMuTime(TestContext *cxt, int c) { TestTime(cxt, c, false); }
 static void TestCVTime(TestContext *cxt, int c) { TestTime(cxt, c, true); }
 
 static void EndTest(int *c0, int *c1, absl::Mutex *mu, absl::CondVar *cv,
-                    const std::function<void(int)>& cb) {
+                    const std::function<void(int)> &cb) {
   mu->Lock();
   int c = (*c0)++;
   mu->Unlock();
@@ -366,9 +367,9 @@ static int RunTestCommon(TestContext *cxt, void (*test)(TestContext *cxt, int),
   cxt->threads = threads;
   absl::synchronization_internal::ThreadPool tp(threads);
   for (int i = 0; i != threads; i++) {
-    tp.Schedule(std::bind(&EndTest, &c0, &c1, &mu2, &cv2,
-                          std::function<void(int)>(
-                              std::bind(test, cxt, std::placeholders::_1))));
+    tp.Schedule(std::bind(
+        &EndTest, &c0, &c1, &mu2, &cv2,
+        std::function<void(int)>(std::bind(test, cxt, std::placeholders::_1))));
   }
   mu2.Lock();
   while (c1 != threads) {
@@ -682,14 +683,14 @@ struct LockWhenTestStruct {
   bool waiting = false;
 };
 
-static bool LockWhenTestIsCond(LockWhenTestStruct* s) {
+static bool LockWhenTestIsCond(LockWhenTestStruct *s) {
   s->mu2.Lock();
   s->waiting = true;
   s->mu2.Unlock();
   return s->cond;
 }
 
-static void LockWhenTestWaitForIsCond(LockWhenTestStruct* s) {
+static void LockWhenTestWaitForIsCond(LockWhenTestStruct *s) {
   s->mu1.LockWhen(absl::Condition(&LockWhenTestIsCond, s));
   s->mu1.Unlock();
 }
@@ -1694,8 +1695,7 @@ TEST(Mutex, Timed) {
 TEST(Mutex, CVTime) {
   int threads = 10;  // Use a fixed thread count of 10
   int iterations = 1;
-  EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1),
-            threads * iterations);
+  EXPECT_EQ(RunTest(&TestCVTime, threads, iterations, 1), threads * iterations);
 }
 
 TEST(Mutex, MuTime) {
diff --git a/absl/synchronization/notification.cc b/absl/synchronization/notification.cc
index e91b9038..165ba669 100644
--- a/absl/synchronization/notification.cc
+++ b/absl/synchronization/notification.cc
@@ -16,7 +16,6 @@
 
 #include <atomic>
 
-#include "absl/base/attributes.h"
 #include "absl/base/internal/raw_logging.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/time/time.h"
diff --git a/absl/synchronization/notification.h b/absl/synchronization/notification.h
index 4bec2689..8986d9a4 100644
--- a/absl/synchronization/notification.h
+++ b/absl/synchronization/notification.h
@@ -53,7 +53,6 @@
 #include <atomic>
 
 #include "absl/base/attributes.h"
-#include "absl/base/macros.h"
 #include "absl/synchronization/mutex.h"
 #include "absl/time/time.h"