| author    | Benjamin Barenblat <bbaren@google.com>                                  | 2023-09-07 13:16:09 -0400 |
| committer | Benjamin Barenblat <bbaren@google.com>                                  | 2023-09-07 13:16:09 -0400 |
| commit    | 6fdbff8bbce2a1debdc060df381f39e3dcfb65af (patch)                         | |
| tree      | 71f1ef38477a65d5cce472fc042c90087c2bb351 /absl/synchronization/mutex.cc | |
| parent    | 8d4a80fe37176b1170d7dce0772dea9584ec3e32 (diff)                          | |
| parent    | 29bf8085f3bf17b84d30e34b3d7ff8248fda404e (diff)                          | |
Merge new upstream LTS 20230802.0
Diffstat (limited to 'absl/synchronization/mutex.cc')
| -rw-r--r-- | absl/synchronization/mutex.cc | 818 |
1 file changed, 407 insertions, 411 deletions
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc index 064ccb74..3aa5560a 100644 --- a/absl/synchronization/mutex.cc +++ b/absl/synchronization/mutex.cc @@ -35,10 +35,9 @@ #include <algorithm> #include <atomic> -#include <cinttypes> #include <cstddef> +#include <cstdlib> #include <cstring> -#include <iterator> #include <thread> // NOLINT(build/c++11) #include "absl/base/attributes.h" @@ -55,7 +54,6 @@ #include "absl/base/internal/thread_identity.h" #include "absl/base/internal/tsan_mutex_interface.h" #include "absl/base/optimization.h" -#include "absl/base/port.h" #include "absl/debugging/stacktrace.h" #include "absl/debugging/symbolize.h" #include "absl/synchronization/internal/graphcycles.h" @@ -63,6 +61,7 @@ #include "absl/time/time.h" using absl::base_internal::CurrentThreadIdentityIfPresent; +using absl::base_internal::CycleClock; using absl::base_internal::PerThreadSynch; using absl::base_internal::SchedulingGuard; using absl::base_internal::ThreadIdentity; @@ -98,18 +97,15 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(int64_t wait_cycles)> submit_profile_data; ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)( - const char *msg, const void *obj, int64_t wait_cycles)> + const char* msg, const void* obj, int64_t wait_cycles)> mutex_tracer; ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES - absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)> - cond_var_tracer; -ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook< - bool (*)(const void *pc, char *out, int out_size)> - symbolizer(absl::Symbolize); +absl::base_internal::AtomicHook<void (*)(const char* msg, const void* cv)> + cond_var_tracer; } // namespace -static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, +static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu, bool locking, bool trylock, bool read_lock); @@ -117,19 +113,15 @@ void RegisterMutexProfiler(void (*fn)(int64_t wait_cycles)) { submit_profile_data.Store(fn); } -void RegisterMutexTracer(void (*fn)(const char *msg, const void *obj, +void RegisterMutexTracer(void (*fn)(const char* msg, const void* obj, int64_t wait_cycles)) { mutex_tracer.Store(fn); } -void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) { +void RegisterCondVarTracer(void (*fn)(const char* msg, const void* cv)) { cond_var_tracer.Store(fn); } -void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) { - symbolizer.Store(fn); -} - namespace { // Represents the strategy for spin and yield. // See the comment in GetMutexGlobals() for more information. @@ -148,25 +140,24 @@ absl::Duration MeasureTimeToYield() { return absl::Now() - before; } -const MutexGlobals &GetMutexGlobals() { +const MutexGlobals& GetMutexGlobals() { ABSL_CONST_INIT static MutexGlobals data; absl::base_internal::LowLevelCallOnce(&data.once, [&]() { - const int num_cpus = absl::base_internal::NumCPUs(); - data.spinloop_iterations = num_cpus > 1 ? 1500 : 0; - // If this a uniprocessor, only yield/sleep. - // Real-time threads are often unable to yield, so the sleep time needs - // to be long enough to keep the calling thread asleep until scheduling - // happens. - // If this is multiprocessor, allow spinning. If the mode is - // aggressive then spin many times before yielding. If the mode is - // gentle then spin only a few times before yielding. 
Aggressive spinning - // is used to ensure that an Unlock() call, which must get the spin lock - // for any thread to make progress gets it without undue delay. - if (num_cpus > 1) { + if (absl::base_internal::NumCPUs() > 1) { + // If this is multiprocessor, allow spinning. If the mode is + // aggressive then spin many times before yielding. If the mode is + // gentle then spin only a few times before yielding. Aggressive spinning + // is used to ensure that an Unlock() call, which must get the spin lock + // for any thread to make progress gets it without undue delay. + data.spinloop_iterations = 1500; data.mutex_sleep_spins[AGGRESSIVE] = 5000; data.mutex_sleep_spins[GENTLE] = 250; data.mutex_sleep_time = absl::Microseconds(10); } else { + // If this a uniprocessor, only yield/sleep. Real-time threads are often + // unable to yield, so the sleep time needs to be long enough to keep + // the calling thread asleep until scheduling happens. + data.spinloop_iterations = 0; data.mutex_sleep_spins[AGGRESSIVE] = 0; data.mutex_sleep_spins[GENTLE] = 0; data.mutex_sleep_time = MeasureTimeToYield() * 5; @@ -219,8 +210,7 @@ static void AtomicSetBits(std::atomic<intptr_t>* pv, intptr_t bits, v = pv->load(std::memory_order_relaxed); } while ((v & bits) != bits && ((v & wait_until_clear) != 0 || - !pv->compare_exchange_weak(v, v | bits, - std::memory_order_release, + !pv->compare_exchange_weak(v, v | bits, std::memory_order_release, std::memory_order_relaxed))); } @@ -235,8 +225,7 @@ static void AtomicClearBits(std::atomic<intptr_t>* pv, intptr_t bits, v = pv->load(std::memory_order_relaxed); } while ((v & bits) != 0 && ((v & wait_until_clear) != 0 || - !pv->compare_exchange_weak(v, v & ~bits, - std::memory_order_release, + !pv->compare_exchange_weak(v, v & ~bits, std::memory_order_release, std::memory_order_relaxed))); } @@ -247,7 +236,7 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock deadlock_graph_mu( absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); // Graph used to detect deadlocks. -ABSL_CONST_INIT static GraphCycles *deadlock_graph +ABSL_CONST_INIT static GraphCycles* deadlock_graph ABSL_GUARDED_BY(deadlock_graph_mu) ABSL_PT_GUARDED_BY(deadlock_graph_mu); //------------------------------------------------------------------ @@ -291,7 +280,7 @@ enum { // Event flags // Properties of the events. static const struct { int flags; - const char *msg; + const char* msg; } event_properties[] = { {SYNCH_F_LCK_W | SYNCH_F_TRY, "TryLock succeeded "}, {0, "TryLock failed "}, @@ -316,12 +305,12 @@ ABSL_CONST_INIT static absl::base_internal::SpinLock synch_event_mu( // Can't be too small, as it's used for deadlock detection information. static constexpr uint32_t kNSynchEvent = 1031; -static struct SynchEvent { // this is a trivial hash table for the events +static struct SynchEvent { // this is a trivial hash table for the events // struct is freed when refcount reaches 0 int refcount ABSL_GUARDED_BY(synch_event_mu); // buckets have linear, 0-terminated chains - SynchEvent *next ABSL_GUARDED_BY(synch_event_mu); + SynchEvent* next ABSL_GUARDED_BY(synch_event_mu); // Constant after initialization uintptr_t masked_addr; // object at this address is called "name" @@ -329,13 +318,13 @@ static struct SynchEvent { // this is a trivial hash table for the events // No explicit synchronization used. Instead we assume that the // client who enables/disables invariants/logging on a Mutex does so // while the Mutex is not being concurrently accessed by others. 
- void (*invariant)(void *arg); // called on each event - void *arg; // first arg to (*invariant)() - bool log; // logging turned on + void (*invariant)(void* arg); // called on each event + void* arg; // first arg to (*invariant)() + bool log; // logging turned on // Constant after initialization - char name[1]; // actually longer---NUL-terminated string -} * synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu); + char name[1]; // actually longer---NUL-terminated string +}* synch_event[kNSynchEvent] ABSL_GUARDED_BY(synch_event_mu); // Ensure that the object at "addr" has a SynchEvent struct associated with it, // set "bits" in the word there (waiting until lockbit is clear before doing @@ -344,11 +333,11 @@ static struct SynchEvent { // this is a trivial hash table for the events // the string name is copied into it. // When used with a mutex, the caller should also ensure that kMuEvent // is set in the mutex word, and similarly for condition variables and kCVEvent. -static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr, - const char *name, intptr_t bits, +static SynchEvent* EnsureSynchEvent(std::atomic<intptr_t>* addr, + const char* name, intptr_t bits, intptr_t lockbit) { uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent; - SynchEvent *e; + SynchEvent* e; // first look for existing SynchEvent struct.. synch_event_mu.Lock(); for (e = synch_event[h]; @@ -360,9 +349,9 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr, name = ""; } size_t l = strlen(name); - e = reinterpret_cast<SynchEvent *>( + e = reinterpret_cast<SynchEvent*>( base_internal::LowLevelAlloc::Alloc(sizeof(*e) + l)); - e->refcount = 2; // one for return value, one for linked list + e->refcount = 2; // one for return value, one for linked list e->masked_addr = base_internal::HidePtr(addr); e->invariant = nullptr; e->arg = nullptr; @@ -372,19 +361,19 @@ static SynchEvent *EnsureSynchEvent(std::atomic<intptr_t> *addr, AtomicSetBits(addr, bits, lockbit); synch_event[h] = e; } else { - e->refcount++; // for return value + e->refcount++; // for return value } synch_event_mu.Unlock(); return e; } // Deallocate the SynchEvent *e, whose refcount has fallen to zero. -static void DeleteSynchEvent(SynchEvent *e) { +static void DeleteSynchEvent(SynchEvent* e) { base_internal::LowLevelAlloc::Free(e); } // Decrement the reference count of *e, or do nothing if e==null. -static void UnrefSynchEvent(SynchEvent *e) { +static void UnrefSynchEvent(SynchEvent* e) { if (e != nullptr) { synch_event_mu.Lock(); bool del = (--(e->refcount) == 0); @@ -398,11 +387,11 @@ static void UnrefSynchEvent(SynchEvent *e) { // Forget the mapping from the object (Mutex or CondVar) at address addr // to SynchEvent object, and clear "bits" in its word (waiting until lockbit // is clear before doing so). -static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits, +static void ForgetSynchEvent(std::atomic<intptr_t>* addr, intptr_t bits, intptr_t lockbit) { uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent; - SynchEvent **pe; - SynchEvent *e; + SynchEvent** pe; + SynchEvent* e; synch_event_mu.Lock(); for (pe = &synch_event[h]; (e = *pe) != nullptr && e->masked_addr != base_internal::HidePtr(addr); @@ -423,9 +412,9 @@ static void ForgetSynchEvent(std::atomic<intptr_t> *addr, intptr_t bits, // Return a refcounted reference to the SynchEvent of the object at address // "addr", if any. The pointer returned is valid until the UnrefSynchEvent() is // called. 
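As a side note on the refcounted event table described in the comment above: EnsureSynchEvent()/GetSynchEvent() manage a small fixed-size hash table of chained, refcounted entries keyed by the (hidden) object address. The standalone sketch below shows only the lookup-and-reference step; ToyEvent, toy_events, and ToyGetEvent are illustrative names, and the real code additionally holds synch_event_mu, hides the key with HidePtr(), and allocates entries with LowLevelAlloc.

```cpp
#include <cstdint>

constexpr uint32_t kToyNBuckets = 1031;  // same bucket count as kNSynchEvent

struct ToyEvent {
  int refcount = 0;          // one reference per returned pointer, plus one
                             // for membership in the bucket chain
  ToyEvent* next = nullptr;  // linear, null-terminated chain
  uintptr_t key = 0;         // the real table stores HidePtr(addr) here
};

ToyEvent* toy_events[kToyNBuckets] = {};

// Returns a referenced entry for addr, or nullptr if none exists. The caller
// must later drop the reference; the real code frees an entry only once it is
// both unlinked from its chain and no longer referenced.
ToyEvent* ToyGetEvent(const void* addr) {
  const uintptr_t key = reinterpret_cast<uintptr_t>(addr);
  const uint32_t h = static_cast<uint32_t>(key % kToyNBuckets);
  ToyEvent* e = toy_events[h];
  while (e != nullptr && e->key != key) e = e->next;
  if (e != nullptr) e->refcount++;
  return e;
}
```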
-static SynchEvent *GetSynchEvent(const void *addr) { +static SynchEvent* GetSynchEvent(const void* addr) { uint32_t h = reinterpret_cast<uintptr_t>(addr) % kNSynchEvent; - SynchEvent *e; + SynchEvent* e; synch_event_mu.Lock(); for (e = synch_event[h]; e != nullptr && e->masked_addr != base_internal::HidePtr(addr); @@ -440,17 +429,17 @@ static SynchEvent *GetSynchEvent(const void *addr) { // Called when an event "ev" occurs on a Mutex of CondVar "obj" // if event recording is on -static void PostSynchEvent(void *obj, int ev) { - SynchEvent *e = GetSynchEvent(obj); +static void PostSynchEvent(void* obj, int ev) { + SynchEvent* e = GetSynchEvent(obj); // logging is on if event recording is on and either there's no event struct, // or it explicitly says to log if (e == nullptr || e->log) { - void *pcs[40]; + void* pcs[40]; int n = absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 1); // A buffer with enough space for the ASCII for all the PCs, even on a // 64-bit machine. char buffer[ABSL_ARRAYSIZE(pcs) * 24]; - int pos = snprintf(buffer, sizeof (buffer), " @"); + int pos = snprintf(buffer, sizeof(buffer), " @"); for (int i = 0; i != n; i++) { int b = snprintf(&buffer[pos], sizeof(buffer) - static_cast<size_t>(pos), " %p", pcs[i]); @@ -472,13 +461,13 @@ static void PostSynchEvent(void *obj, int ev) { // get false positive race reports later. // Reuse EvalConditionAnnotated to properly call into user code. struct local { - static bool pred(SynchEvent *ev) { + static bool pred(SynchEvent* ev) { (*ev->invariant)(ev->arg); return false; } }; Condition cond(&local::pred, e); - Mutex *mu = static_cast<Mutex *>(obj); + Mutex* mu = static_cast<Mutex*>(obj); const bool locking = (flags & SYNCH_F_UNLOCK) == 0; const bool trylock = (flags & SYNCH_F_TRY) != 0; const bool read_lock = (flags & SYNCH_F_R) != 0; @@ -504,32 +493,32 @@ static void PostSynchEvent(void *obj, int ev) { // PerThreadSynch struct points at the most recent SynchWaitParams struct when // the thread is on a Mutex's waiter queue. struct SynchWaitParams { - SynchWaitParams(Mutex::MuHow how_arg, const Condition *cond_arg, - KernelTimeout timeout_arg, Mutex *cvmu_arg, - PerThreadSynch *thread_arg, - std::atomic<intptr_t> *cv_word_arg) + SynchWaitParams(Mutex::MuHow how_arg, const Condition* cond_arg, + KernelTimeout timeout_arg, Mutex* cvmu_arg, + PerThreadSynch* thread_arg, + std::atomic<intptr_t>* cv_word_arg) : how(how_arg), cond(cond_arg), timeout(timeout_arg), cvmu(cvmu_arg), thread(thread_arg), cv_word(cv_word_arg), - contention_start_cycles(base_internal::CycleClock::Now()), + contention_start_cycles(CycleClock::Now()), should_submit_contention_data(false) {} const Mutex::MuHow how; // How this thread needs to wait. - const Condition *cond; // The condition that this thread is waiting for. - // In Mutex, this field is set to zero if a timeout - // expires. + const Condition* cond; // The condition that this thread is waiting for. + // In Mutex, this field is set to zero if a timeout + // expires. KernelTimeout timeout; // timeout expiry---absolute time // In Mutex, this field is set to zero if a timeout // expires. - Mutex *const cvmu; // used for transfer from cond var to mutex - PerThreadSynch *const thread; // thread that is waiting + Mutex* const cvmu; // used for transfer from cond var to mutex + PerThreadSynch* const thread; // thread that is waiting // If not null, thread should be enqueued on the CondVar whose state // word is cv_word instead of queueing normally on the Mutex. 
- std::atomic<intptr_t> *cv_word; + std::atomic<intptr_t>* cv_word; int64_t contention_start_cycles; // Time (in cycles) when this thread started // to contend for the mutex. @@ -537,12 +526,12 @@ struct SynchWaitParams { }; struct SynchLocksHeld { - int n; // number of valid entries in locks[] - bool overflow; // true iff we overflowed the array at some point + int n; // number of valid entries in locks[] + bool overflow; // true iff we overflowed the array at some point struct { - Mutex *mu; // lock acquired - int32_t count; // times acquired - GraphId id; // deadlock_graph id of acquired lock + Mutex* mu; // lock acquired + int32_t count; // times acquired + GraphId id; // deadlock_graph id of acquired lock } locks[40]; // If a thread overfills the array during deadlock detection, we // continue, discarding information as needed. If no overflow has @@ -552,11 +541,11 @@ struct SynchLocksHeld { // A sentinel value in lists that is not 0. // A 0 value is used to mean "not on a list". -static PerThreadSynch *const kPerThreadSynchNull = - reinterpret_cast<PerThreadSynch *>(1); +static PerThreadSynch* const kPerThreadSynchNull = + reinterpret_cast<PerThreadSynch*>(1); -static SynchLocksHeld *LocksHeldAlloc() { - SynchLocksHeld *ret = reinterpret_cast<SynchLocksHeld *>( +static SynchLocksHeld* LocksHeldAlloc() { + SynchLocksHeld* ret = reinterpret_cast<SynchLocksHeld*>( base_internal::LowLevelAlloc::Alloc(sizeof(SynchLocksHeld))); ret->n = 0; ret->overflow = false; @@ -564,24 +553,24 @@ static SynchLocksHeld *LocksHeldAlloc() { } // Return the PerThreadSynch-struct for this thread. -static PerThreadSynch *Synch_GetPerThread() { - ThreadIdentity *identity = GetOrCreateCurrentThreadIdentity(); +static PerThreadSynch* Synch_GetPerThread() { + ThreadIdentity* identity = GetOrCreateCurrentThreadIdentity(); return &identity->per_thread_synch; } -static PerThreadSynch *Synch_GetPerThreadAnnotated(Mutex *mu) { +static PerThreadSynch* Synch_GetPerThreadAnnotated(Mutex* mu) { if (mu) { ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); } - PerThreadSynch *w = Synch_GetPerThread(); + PerThreadSynch* w = Synch_GetPerThread(); if (mu) { ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0); } return w; } -static SynchLocksHeld *Synch_GetAllLocks() { - PerThreadSynch *s = Synch_GetPerThread(); +static SynchLocksHeld* Synch_GetAllLocks() { + PerThreadSynch* s = Synch_GetPerThread(); if (s->all_locks == nullptr) { s->all_locks = LocksHeldAlloc(); // Freed by ReclaimThreadIdentity. } @@ -589,7 +578,7 @@ static SynchLocksHeld *Synch_GetAllLocks() { } // Post on "w"'s associated PerThreadSem. -void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { +void Mutex::IncrementSynchSem(Mutex* mu, PerThreadSynch* w) { if (mu) { ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); // We miss synchronization around passing PerThreadSynch between threads @@ -605,7 +594,7 @@ void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) { } // Wait on "w"'s associated PerThreadSem; returns false if timeout expired. -bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) { +bool Mutex::DecrementSynchSem(Mutex* mu, PerThreadSynch* w, KernelTimeout t) { if (mu) { ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0); } @@ -626,7 +615,7 @@ bool Mutex::DecrementSynchSem(Mutex *mu, PerThreadSynch *w, KernelTimeout t) { // Mutex code checking that the "waitp" field has not been reused. void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() { // Fix the per-thread state only if it exists. 
- ThreadIdentity *identity = CurrentThreadIdentityIfPresent(); + ThreadIdentity* identity = CurrentThreadIdentityIfPresent(); if (identity != nullptr) { identity->per_thread_synch.suppress_fatal_errors = true; } @@ -635,21 +624,6 @@ void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() { std::memory_order_release); } -// --------------------------time support - -// Return the current time plus the timeout. Use the same clock as -// PerThreadSem::Wait() for consistency. Unfortunately, we don't have -// such a choice when a deadline is given directly. -static absl::Time DeadlineFromTimeout(absl::Duration timeout) { -#ifndef _WIN32 - struct timeval tv; - gettimeofday(&tv, nullptr); - return absl::TimeFromTimeval(tv) + timeout; -#else - return absl::Now() + timeout; -#endif -} - // --------------------------Mutexes // In the layout below, the msb of the bottom byte is currently unused. Also, @@ -660,24 +634,29 @@ static absl::Time DeadlineFromTimeout(absl::Duration timeout) { // bit-twiddling trick in Mutex::Unlock(). // o kMuWriter / kMuReader == kMuWrWait / kMuWait, // to enable the bit-twiddling trick in CheckForMutexCorruption(). -static const intptr_t kMuReader = 0x0001L; // a reader holds the lock -static const intptr_t kMuDesig = 0x0002L; // there's a designated waker -static const intptr_t kMuWait = 0x0004L; // threads are waiting -static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock -static const intptr_t kMuEvent = 0x0010L; // record this mutex's events +static const intptr_t kMuReader = 0x0001L; // a reader holds the lock +// There's a designated waker. // INVARIANT1: there's a thread that was blocked on the mutex, is // no longer, yet has not yet acquired the mutex. If there's a // designated waker, all threads can avoid taking the slow path in // unlock because the designated waker will subsequently acquire // the lock and wake someone. To maintain INVARIANT1 the bit is // set when a thread is unblocked(INV1a), and threads that were -// unblocked reset the bit when they either acquire or re-block -// (INV1b). -static const intptr_t kMuWrWait = 0x0020L; // runnable writer is waiting - // for a reader -static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list -static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits -static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count +// unblocked reset the bit when they either acquire or re-block (INV1b). +static const intptr_t kMuDesig = 0x0002L; +static const intptr_t kMuWait = 0x0004L; // threads are waiting +static const intptr_t kMuWriter = 0x0008L; // a writer holds the lock +static const intptr_t kMuEvent = 0x0010L; // record this mutex's events +// Runnable writer is waiting for a reader. +// If set, new readers will not lock the mutex to avoid writer starvation. +// Note: if a reader has higher priority than the writer, it will still lock +// the mutex ahead of the waiting writer, but in a very inefficient manner: +// the reader will first queue itself and block, but then the last unlocking +// reader will wake it. 
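To make the bit layout introduced above easier to follow, here is a minimal standalone sketch of a lock word with the same shape: flag bits in the low byte, a reader count above them, and the starvation-avoidance rule that new readers back off while a writer holds the lock or is waiting. The kToy* constants and ToyReaderTryLock are illustrative only, not the real Mutex fast path.

```cpp
#include <atomic>
#include <cstdint>

constexpr intptr_t kToyReader = 0x0001;  // a reader holds the lock
constexpr intptr_t kToyWriter = 0x0008;  // a writer holds the lock
constexpr intptr_t kToyWrWait = 0x0020;  // a runnable writer is waiting
constexpr intptr_t kToyOne = 0x0100;     // one reader's worth of count

// New readers decline to acquire while a writer holds the lock or is waiting,
// which is the writer-starvation rule the kMuWrWait comment describes.
bool ToyReaderTryLock(std::atomic<intptr_t>& word) {
  intptr_t v = word.load(std::memory_order_relaxed);
  while ((v & (kToyWriter | kToyWrWait)) == 0) {
    if (word.compare_exchange_weak(v, (v | kToyReader) + kToyOne,
                                   std::memory_order_acquire,
                                   std::memory_order_relaxed)) {
      return true;  // reader count lives above the flag byte
    }
    // On failure the CAS refreshed v; re-check the flags and retry.
  }
  return false;  // defer to the writer
}
```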
+static const intptr_t kMuWrWait = 0x0020L; +static const intptr_t kMuSpin = 0x0040L; // spinlock protects wait list +static const intptr_t kMuLow = 0x00ffL; // mask all mutex bits +static const intptr_t kMuHigh = ~kMuLow; // mask pointer/reader count // Hack to make constant values available to gdb pretty printer enum { @@ -773,8 +752,8 @@ Mutex::~Mutex() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); } -void Mutex::EnableDebugLog(const char *name) { - SynchEvent *e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin); +void Mutex::EnableDebugLog(const char* name) { + SynchEvent* e = EnsureSynchEvent(&this->mu_, name, kMuEvent, kMuSpin); e->log = true; UnrefSynchEvent(e); } @@ -783,11 +762,10 @@ void EnableMutexInvariantDebugging(bool enabled) { synch_check_invariants.store(enabled, std::memory_order_release); } -void Mutex::EnableInvariantDebugging(void (*invariant)(void *), - void *arg) { +void Mutex::EnableInvariantDebugging(void (*invariant)(void*), void* arg) { if (synch_check_invariants.load(std::memory_order_acquire) && invariant != nullptr) { - SynchEvent *e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin); + SynchEvent* e = EnsureSynchEvent(&this->mu_, nullptr, kMuEvent, kMuSpin); e->invariant = invariant; e->arg = arg; UnrefSynchEvent(e); @@ -803,15 +781,15 @@ void SetMutexDeadlockDetectionMode(OnDeadlockCycle mode) { // waiters with the same condition, type of lock, and thread priority. // // Requires that x and y be waiting on the same Mutex queue. -static bool MuEquivalentWaiter(PerThreadSynch *x, PerThreadSynch *y) { +static bool MuEquivalentWaiter(PerThreadSynch* x, PerThreadSynch* y) { return x->waitp->how == y->waitp->how && x->priority == y->priority && Condition::GuaranteedEqual(x->waitp->cond, y->waitp->cond); } // Given the contents of a mutex word containing a PerThreadSynch pointer, // return the pointer. -static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) { - return reinterpret_cast<PerThreadSynch *>(v & kMuHigh); +static inline PerThreadSynch* GetPerThreadSynch(intptr_t v) { + return reinterpret_cast<PerThreadSynch*>(v & kMuHigh); } // The next several routines maintain the per-thread next and skip fields @@ -869,17 +847,17 @@ static inline PerThreadSynch *GetPerThreadSynch(intptr_t v) { // except those in the added node and the former "head" node. This implies // that the new node is added after head, and so must be the new head or the // new front of the queue. -static PerThreadSynch *Skip(PerThreadSynch *x) { - PerThreadSynch *x0 = nullptr; - PerThreadSynch *x1 = x; - PerThreadSynch *x2 = x->skip; +static PerThreadSynch* Skip(PerThreadSynch* x) { + PerThreadSynch* x0 = nullptr; + PerThreadSynch* x1 = x; + PerThreadSynch* x2 = x->skip; if (x2 != nullptr) { // Each iteration attempts to advance sequence (x0,x1,x2) to next sequence // such that x1 == x0->skip && x2 == x1->skip while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) { - x0->skip = x2; // short-circuit skip from x0 to x2 + x0->skip = x2; // short-circuit skip from x0 to x2 } - x->skip = x1; // short-circuit skip from x to result + x->skip = x1; // short-circuit skip from x to result } return x1; } @@ -888,7 +866,7 @@ static PerThreadSynch *Skip(PerThreadSynch *x) { // The latter is going to be removed out of order, because of a timeout. // Check whether "ancestor" has a skip field pointing to "to_be_removed", // and fix it if it does. 
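The skip-chain maintenance that Skip() and FixSkip() perform above can be hard to picture from the diff alone. Below is the same walk on a stripped-down node type: within a chain of equivalent waiters each skip pointer jumps toward the end of the chain, and the walk compresses the pointers it passes so later walks are shorter. ToyWaiter and ToySkip are illustrative names for this sketch.

```cpp
struct ToyWaiter {
  ToyWaiter* next = nullptr;
  ToyWaiter* skip = nullptr;  // only ever points within the same equivalence chain
};

// Returns the last waiter in x's chain, short-circuiting skip pointers on the way.
ToyWaiter* ToySkip(ToyWaiter* x) {
  ToyWaiter* x0 = nullptr;
  ToyWaiter* x1 = x;
  ToyWaiter* x2 = x->skip;
  if (x2 != nullptr) {
    // Advance (x0, x1, x2) while keeping x1 == x0->skip and x2 == x1->skip.
    while ((x0 = x1, x1 = x2, x2 = x2->skip) != nullptr) {
      x0->skip = x2;  // short-circuit skip from x0 to x2
    }
    x->skip = x1;  // x can now jump straight to the end of its chain
  }
  return x1;
}
```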
-static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) { +static void FixSkip(PerThreadSynch* ancestor, PerThreadSynch* to_be_removed) { if (ancestor->skip == to_be_removed) { // ancestor->skip left dangling if (to_be_removed->skip != nullptr) { ancestor->skip = to_be_removed->skip; // can skip past to_be_removed @@ -900,7 +878,7 @@ static void FixSkip(PerThreadSynch *ancestor, PerThreadSynch *to_be_removed) { } } -static void CondVarEnqueue(SynchWaitParams *waitp); +static void CondVarEnqueue(SynchWaitParams* waitp); // Enqueue thread "waitp->thread" on a waiter queue. // Called with mutex spinlock held if head != nullptr @@ -921,8 +899,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp); // returned. This mechanism is used by CondVar to queue a thread on the // condition variable queue instead of the mutex queue in implementing Wait(). // In this case, Enqueue() can return nullptr (if head==nullptr). -static PerThreadSynch *Enqueue(PerThreadSynch *head, - SynchWaitParams *waitp, intptr_t mu, int flags) { +static PerThreadSynch* Enqueue(PerThreadSynch* head, SynchWaitParams* waitp, + intptr_t mu, int flags) { // If we have been given a cv_word, call CondVarEnqueue() and return // the previous head of the Mutex waiter queue. if (waitp->cv_word != nullptr) { @@ -930,42 +908,43 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head, return head; } - PerThreadSynch *s = waitp->thread; + PerThreadSynch* s = waitp->thread; ABSL_RAW_CHECK( s->waitp == nullptr || // normal case s->waitp == waitp || // Fer()---transfer from condition variable s->suppress_fatal_errors, "detected illegal recursion into Mutex code"); s->waitp = waitp; - s->skip = nullptr; // maintain skip invariant (see above) - s->may_skip = true; // always true on entering queue - s->wake = false; // not being woken + s->skip = nullptr; // maintain skip invariant (see above) + s->may_skip = true; // always true on entering queue + s->wake = false; // not being woken s->cond_waiter = ((flags & kMuIsCond) != 0); +#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM + int64_t now_cycles = CycleClock::Now(); + if (s->next_priority_read_cycles < now_cycles) { + // Every so often, update our idea of the thread's priority. + // pthread_getschedparam() is 5% of the block/wakeup time; + // CycleClock::Now() is 0.5%. + int policy; + struct sched_param param; + const int err = pthread_getschedparam(pthread_self(), &policy, ¶m); + if (err != 0) { + ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err); + } else { + s->priority = param.sched_priority; + s->next_priority_read_cycles = + now_cycles + static_cast<int64_t>(CycleClock::Frequency()); + } + } +#endif if (head == nullptr) { // s is the only waiter s->next = s; // it's the only entry in the cycle s->readers = mu; // reader count is from mu word s->maybe_unlocking = false; // no one is searching an empty list head = s; // s is new head } else { - PerThreadSynch *enqueue_after = nullptr; // we'll put s after this element + PerThreadSynch* enqueue_after = nullptr; // we'll put s after this element #ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM - int64_t now_cycles = base_internal::CycleClock::Now(); - if (s->next_priority_read_cycles < now_cycles) { - // Every so often, update our idea of the thread's priority. - // pthread_getschedparam() is 5% of the block/wakeup time; - // base_internal::CycleClock::Now() is 0.5%. 
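The hunk above moves the priority read in Enqueue() and rate-limits it: pthread_getschedparam() is comparatively expensive, so the thread's priority is refreshed at most about once per CycleClock second. The POSIX-only sketch below shows the same caching pattern with std::chrono standing in for CycleClock; CachedPriority and ReadPriorityRateLimited are illustrative names.

```cpp
#include <pthread.h>
#include <sched.h>

#include <chrono>
#include <cstdio>

struct CachedPriority {
  int priority = 0;
  std::chrono::steady_clock::time_point next_read{};  // when to refresh again
};

int ReadPriorityRateLimited(CachedPriority& cache) {
  const auto now = std::chrono::steady_clock::now();
  if (now >= cache.next_read) {
    int policy;
    sched_param param;
    const int err = pthread_getschedparam(pthread_self(), &policy, &param);
    if (err != 0) {
      std::fprintf(stderr, "pthread_getschedparam failed: %d\n", err);
    } else {
      cache.priority = param.sched_priority;
      cache.next_read = now + std::chrono::seconds(1);  // ~one CycleClock second
    }
  }
  return cache.priority;
}
```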
- int policy; - struct sched_param param; - const int err = pthread_getschedparam(pthread_self(), &policy, ¶m); - if (err != 0) { - ABSL_RAW_LOG(ERROR, "pthread_getschedparam failed: %d", err); - } else { - s->priority = param.sched_priority; - s->next_priority_read_cycles = - now_cycles + - static_cast<int64_t>(base_internal::CycleClock::Frequency()); - } - } if (s->priority > head->priority) { // s's priority is above head's // try to put s in priority-fifo order, or failing that at the front. if (!head->maybe_unlocking) { @@ -975,20 +954,20 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head, // Within a skip chain, all waiters have the same priority, so we can // skip forward through the chains until we find one with a lower // priority than the waiter to be enqueued. - PerThreadSynch *advance_to = head; // next value of enqueue_after + PerThreadSynch* advance_to = head; // next value of enqueue_after do { enqueue_after = advance_to; // (side-effect: optimizes skip chain) advance_to = Skip(enqueue_after->next); } while (s->priority <= advance_to->priority); - // termination guaranteed because s->priority > head->priority - // and head is the end of a skip chain + // termination guaranteed because s->priority > head->priority + // and head is the end of a skip chain } else if (waitp->how == kExclusive && Condition::GuaranteedEqual(waitp->cond, nullptr)) { // An unlocker could be scanning the queue, but we know it will recheck // the queue front for writers that have no condition, which is what s // is, so an insert at front is safe. - enqueue_after = head; // add after head, at front + enqueue_after = head; // add after head, at front } } #endif @@ -1013,12 +992,12 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head, enqueue_after->skip = enqueue_after->next; } if (MuEquivalentWaiter(s, s->next)) { // s->may_skip is known to be true - s->skip = s->next; // s may skip to its successor + s->skip = s->next; // s may skip to its successor } - } else { // enqueue not done any other way, so - // we're inserting s at the back + } else { // enqueue not done any other way, so + // we're inserting s at the back // s will become new head; copy data from head into it - s->next = head->next; // add s after head + s->next = head->next; // add s after head head->next = s; s->readers = head->readers; // reader count is from previous head s->maybe_unlocking = head->maybe_unlocking; // same for unlock hint @@ -1037,17 +1016,17 @@ static PerThreadSynch *Enqueue(PerThreadSynch *head, // whose last element is head. The new head element is returned, or null // if the list is made empty. // Dequeue is called with both spinlock and Mutex held. -static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) { - PerThreadSynch *w = pw->next; - pw->next = w->next; // snip w out of list - if (head == w) { // we removed the head +static PerThreadSynch* Dequeue(PerThreadSynch* head, PerThreadSynch* pw) { + PerThreadSynch* w = pw->next; + pw->next = w->next; // snip w out of list + if (head == w) { // we removed the head head = (pw == w) ? 
nullptr : pw; // either emptied list, or pw is new head } else if (pw != head && MuEquivalentWaiter(pw, pw->next)) { // pw can skip to its new successor if (pw->next->skip != nullptr) { // either skip to its successors skip target pw->skip = pw->next->skip; - } else { // or to pw's successor + } else { // or to pw's successor pw->skip = pw->next; } } @@ -1060,27 +1039,27 @@ static PerThreadSynch *Dequeue(PerThreadSynch *head, PerThreadSynch *pw) { // singly-linked list wake_list in the order found. Assumes that // there is only one such element if the element has how == kExclusive. // Return the new head. -static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head, - PerThreadSynch *pw, - PerThreadSynch **wake_tail) { - PerThreadSynch *orig_h = head; - PerThreadSynch *w = pw->next; +static PerThreadSynch* DequeueAllWakeable(PerThreadSynch* head, + PerThreadSynch* pw, + PerThreadSynch** wake_tail) { + PerThreadSynch* orig_h = head; + PerThreadSynch* w = pw->next; bool skipped = false; do { - if (w->wake) { // remove this element + if (w->wake) { // remove this element ABSL_RAW_CHECK(pw->skip == nullptr, "bad skip in DequeueAllWakeable"); // we're removing pw's successor so either pw->skip is zero or we should // already have removed pw since if pw->skip!=null, pw has the same // condition as w. head = Dequeue(head, pw); - w->next = *wake_tail; // keep list terminated - *wake_tail = w; // add w to wake_list; - wake_tail = &w->next; // next addition to end + w->next = *wake_tail; // keep list terminated + *wake_tail = w; // add w to wake_list; + wake_tail = &w->next; // next addition to end if (w->waitp->how == kExclusive) { // wake at most 1 writer break; } - } else { // not waking this one; skip - pw = Skip(w); // skip as much as possible + } else { // not waking this one; skip + pw = Skip(w); // skip as much as possible skipped = true; } w = pw->next; @@ -1098,7 +1077,7 @@ static PerThreadSynch *DequeueAllWakeable(PerThreadSynch *head, // Try to remove thread s from the list of waiters on this mutex. // Does nothing if s is not on the waiter list. -void Mutex::TryRemove(PerThreadSynch *s) { +void Mutex::TryRemove(PerThreadSynch* s) { SchedulingGuard::ScopedDisable disable_rescheduling; intptr_t v = mu_.load(std::memory_order_relaxed); // acquire spinlock & lock @@ -1106,16 +1085,16 @@ void Mutex::TryRemove(PerThreadSynch *s) { mu_.compare_exchange_strong(v, v | kMuSpin | kMuWriter, std::memory_order_acquire, std::memory_order_relaxed)) { - PerThreadSynch *h = GetPerThreadSynch(v); + PerThreadSynch* h = GetPerThreadSynch(v); if (h != nullptr) { - PerThreadSynch *pw = h; // pw is w's predecessor - PerThreadSynch *w; + PerThreadSynch* pw = h; // pw is w's predecessor + PerThreadSynch* w; if ((w = pw->next) != s) { // search for thread, do { // processing at least one element // If the current element isn't equivalent to the waiter to be // removed, we can skip the entire chain. if (!MuEquivalentWaiter(s, w)) { - pw = Skip(w); // so skip all that won't match + pw = Skip(w); // so skip all that won't match // we don't have to worry about dangling skip fields // in the threads we skipped; none can point to s // because they are in a different equivalence class. @@ -1127,7 +1106,7 @@ void Mutex::TryRemove(PerThreadSynch *s) { // process the first thread again. 
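For reference while reading Dequeue() and TryRemove() above: the waiter queue is a circular singly linked list in which head is the most recently enqueued waiter and head->next is the oldest. A stripped-down removal, without the skip-pointer bookkeeping, looks like the sketch below (ToyNode and ToyDequeue are illustrative names).

```cpp
struct ToyNode {
  ToyNode* next = nullptr;
};

// Removes pw->next from the circular list whose newest element is head.
// Returns the new head, or nullptr if the list became empty.
ToyNode* ToyDequeue(ToyNode* head, ToyNode* pw) {
  ToyNode* w = pw->next;
  pw->next = w->next;                 // snip w out of the cycle
  if (head == w) {                    // we removed the newest element
    head = (pw == w) ? nullptr : pw;  // emptied the list, or pw is the new head
  }
  return head;
}
```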
} while ((w = pw->next) != s && pw != h); } - if (w == s) { // found thread; remove it + if (w == s) { // found thread; remove it // pw->skip may be non-zero here; the loop above ensured that // no ancestor of s can skip to s, so removal is safe anyway. h = Dequeue(h, pw); @@ -1136,16 +1115,15 @@ void Mutex::TryRemove(PerThreadSynch *s) { } } intptr_t nv; - do { // release spinlock and lock + do { // release spinlock and lock v = mu_.load(std::memory_order_relaxed); nv = v & (kMuDesig | kMuEvent); if (h != nullptr) { nv |= kMuWait | reinterpret_cast<intptr_t>(h); - h->readers = 0; // we hold writer lock + h->readers = 0; // we hold writer lock h->maybe_unlocking = false; // finished unlocking } - } while (!mu_.compare_exchange_weak(v, nv, - std::memory_order_release, + } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release, std::memory_order_relaxed)); } } @@ -1155,7 +1133,7 @@ void Mutex::TryRemove(PerThreadSynch *s) { // if the wait extends past the absolute time specified, even if "s" is still // on the mutex queue. In this case, remove "s" from the queue and return // true, otherwise return false. -void Mutex::Block(PerThreadSynch *s) { +void Mutex::Block(PerThreadSynch* s) { while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) { if (!DecrementSynchSem(this, s, s->waitp->timeout)) { // After a timeout, we go into a spin loop until we remove ourselves @@ -1174,7 +1152,7 @@ void Mutex::Block(PerThreadSynch *s) { // is not on the queue. this->TryRemove(s); } - s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied + s->waitp->timeout = KernelTimeout::Never(); // timeout is satisfied s->waitp->cond = nullptr; // condition no longer relevant for wakeups } } @@ -1184,8 +1162,8 @@ void Mutex::Block(PerThreadSynch *s) { } // Wake thread w, and return the next thread in the list. -PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) { - PerThreadSynch *next = w->next; +PerThreadSynch* Mutex::Wakeup(PerThreadSynch* w) { + PerThreadSynch* next = w->next; w->next = nullptr; w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); IncrementSynchSem(this, w); @@ -1193,7 +1171,7 @@ PerThreadSynch *Mutex::Wakeup(PerThreadSynch *w) { return next; } -static GraphId GetGraphIdLocked(Mutex *mu) +static GraphId GetGraphIdLocked(Mutex* mu) ABSL_EXCLUSIVE_LOCKS_REQUIRED(deadlock_graph_mu) { if (!deadlock_graph) { // (re)create the deadlock graph. deadlock_graph = @@ -1203,7 +1181,7 @@ static GraphId GetGraphIdLocked(Mutex *mu) return deadlock_graph->GetId(mu); } -static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) { +static GraphId GetGraphId(Mutex* mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) { deadlock_graph_mu.Lock(); GraphId id = GetGraphIdLocked(mu); deadlock_graph_mu.Unlock(); @@ -1213,7 +1191,7 @@ static GraphId GetGraphId(Mutex *mu) ABSL_LOCKS_EXCLUDED(deadlock_graph_mu) { // Record a lock acquisition. This is used in debug mode for deadlock // detection. The held_locks pointer points to the relevant data // structure for each case. -static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { +static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) { int n = held_locks->n; int i = 0; while (i != n && held_locks->locks[i].id != id) { @@ -1237,7 +1215,7 @@ static void LockEnter(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { // eventually followed by a call to LockLeave(mu, id, x) by the same thread. 
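LockEnter() and LockLeave() above maintain each thread's table of held locks for the deadlock detector: a small fixed array of (lock id, acquisition count) entries plus an overflow flag that records when information had to be discarded. A simplified version of that bookkeeping, with ToyHeldLocks standing in for SynchLocksHeld, might look like this.

```cpp
#include <cstdint>

struct ToyHeldLocks {
  int n = 0;              // number of valid entries in locks[]
  bool overflow = false;  // true iff the array overflowed at some point
  struct {
    uintptr_t id;  // deadlock-graph id of the acquired lock
    int32_t count; // times acquired
  } locks[40];
};

void ToyLockEnter(uintptr_t id, ToyHeldLocks& held) {
  for (int i = 0; i != held.n; i++) {
    if (held.locks[i].id == id) {  // re-acquisition of a lock already held
      held.locks[i].count++;
      return;
    }
  }
  if (held.n == 40) {
    held.overflow = true;  // table full: remember that information was lost
  } else {
    held.locks[held.n].id = id;
    held.locks[held.n].count = 1;
    held.n++;
  }
}

void ToyLockLeave(uintptr_t id, ToyHeldLocks& held) {
  for (int i = 0; i != held.n; i++) {
    if (held.locks[i].id == id) {
      if (--held.locks[i].count == 0) {
        held.locks[i] = held.locks[held.n - 1];  // swap-remove the entry
        held.n--;
      }
      return;
    }
  }
  // Not found: either the table overflowed earlier, or the caller is releasing
  // a lock it does not hold (the real code reports the latter as a fatal error).
}
```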
// It does not process the event if is not needed when deadlock detection is // disabled. -static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { +static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld* held_locks) { int n = held_locks->n; int i = 0; while (i != n && held_locks->locks[i].id != id) { @@ -1252,11 +1230,11 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { i++; } if (i == n) { // mu missing means releasing unheld lock - SynchEvent *mu_events = GetSynchEvent(mu); + SynchEvent* mu_events = GetSynchEvent(mu); ABSL_RAW_LOG(FATAL, "thread releasing lock it does not hold: %p %s; " , - static_cast<void *>(mu), + static_cast<void*>(mu), mu_events == nullptr ? "" : mu_events->name); } } @@ -1273,7 +1251,7 @@ static void LockLeave(Mutex* mu, GraphId id, SynchLocksHeld *held_locks) { } // Call LockEnter() if in debug mode and deadlock detection is enabled. -static inline void DebugOnlyLockEnter(Mutex *mu) { +static inline void DebugOnlyLockEnter(Mutex* mu) { if (kDebugMode) { if (synch_deadlock_detection.load(std::memory_order_acquire) != OnDeadlockCycle::kIgnore) { @@ -1283,7 +1261,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu) { } // Call LockEnter() if in debug mode and deadlock detection is enabled. -static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) { +static inline void DebugOnlyLockEnter(Mutex* mu, GraphId id) { if (kDebugMode) { if (synch_deadlock_detection.load(std::memory_order_acquire) != OnDeadlockCycle::kIgnore) { @@ -1293,7 +1271,7 @@ static inline void DebugOnlyLockEnter(Mutex *mu, GraphId id) { } // Call LockLeave() if in debug mode and deadlock detection is enabled. -static inline void DebugOnlyLockLeave(Mutex *mu) { +static inline void DebugOnlyLockLeave(Mutex* mu) { if (kDebugMode) { if (synch_deadlock_detection.load(std::memory_order_acquire) != OnDeadlockCycle::kIgnore) { @@ -1302,9 +1280,9 @@ static inline void DebugOnlyLockLeave(Mutex *mu) { } } -static char *StackString(void **pcs, int n, char *buf, int maxlen, +static char* StackString(void** pcs, int n, char* buf, int maxlen, bool symbolize) { - static const int kSymLen = 200; + static constexpr int kSymLen = 200; char sym[kSymLen]; int len = 0; for (int i = 0; i != n; i++) { @@ -1312,7 +1290,7 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen, return buf; size_t count = static_cast<size_t>(maxlen - len); if (symbolize) { - if (!symbolizer(pcs[i], sym, kSymLen)) { + if (!absl::Symbolize(pcs[i], sym, kSymLen)) { sym[0] = '\0'; } snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i], @@ -1325,15 +1303,17 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen, return buf; } -static char *CurrentStackString(char *buf, int maxlen, bool symbolize) { - void *pcs[40]; +static char* CurrentStackString(char* buf, int maxlen, bool symbolize) { + void* pcs[40]; return StackString(pcs, absl::GetStackTrace(pcs, ABSL_ARRAYSIZE(pcs), 2), buf, maxlen, symbolize); } namespace { -enum { kMaxDeadlockPathLen = 10 }; // maximum length of a deadlock cycle; - // a path this long would be remarkable +enum { + kMaxDeadlockPathLen = 10 +}; // maximum length of a deadlock cycle; + // a path this long would be remarkable // Buffers required to report a deadlock. // We do not allocate them on stack to avoid large stack frame. 
struct DeadlockReportBuffers { @@ -1343,11 +1323,11 @@ struct DeadlockReportBuffers { struct ScopedDeadlockReportBuffers { ScopedDeadlockReportBuffers() { - b = reinterpret_cast<DeadlockReportBuffers *>( + b = reinterpret_cast<DeadlockReportBuffers*>( base_internal::LowLevelAlloc::Alloc(sizeof(*b))); } ~ScopedDeadlockReportBuffers() { base_internal::LowLevelAlloc::Free(b); } - DeadlockReportBuffers *b; + DeadlockReportBuffers* b; }; // Helper to pass to GraphCycles::UpdateStackTrace. @@ -1358,13 +1338,13 @@ int GetStack(void** stack, int max_depth) { // Called in debug mode when a thread is about to acquire a lock in a way that // may block. -static GraphId DeadlockCheck(Mutex *mu) { +static GraphId DeadlockCheck(Mutex* mu) { if (synch_deadlock_detection.load(std::memory_order_acquire) == OnDeadlockCycle::kIgnore) { return InvalidGraphId(); } - SynchLocksHeld *all_locks = Synch_GetAllLocks(); + SynchLocksHeld* all_locks = Synch_GetAllLocks(); absl::base_internal::SpinLockHolder lock(&deadlock_graph_mu); const GraphId mu_id = GetGraphIdLocked(mu); @@ -1386,8 +1366,8 @@ static GraphId DeadlockCheck(Mutex *mu) { // For each other mutex already held by this thread: for (int i = 0; i != all_locks->n; i++) { const GraphId other_node_id = all_locks->locks[i].id; - const Mutex *other = - static_cast<const Mutex *>(deadlock_graph->Ptr(other_node_id)); + const Mutex* other = + static_cast<const Mutex*>(deadlock_graph->Ptr(other_node_id)); if (other == nullptr) { // Ignore stale lock continue; @@ -1396,7 +1376,7 @@ static GraphId DeadlockCheck(Mutex *mu) { // Add the acquired-before edge to the graph. if (!deadlock_graph->InsertEdge(other_node_id, mu_id)) { ScopedDeadlockReportBuffers scoped_buffers; - DeadlockReportBuffers *b = scoped_buffers.b; + DeadlockReportBuffers* b = scoped_buffers.b; static int number_of_reported_deadlocks = 0; number_of_reported_deadlocks++; // Symbolize only 2 first deadlock report to avoid huge slowdowns. 
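DeadlockCheck() above records an acquired-before edge from every lock the thread already holds to the lock it is about to acquire, and reports a potential deadlock when an edge would close a cycle. The real bookkeeping lives in GraphCycles; the map-and-DFS sketch below (ToyLockGraph is an illustrative name) only shows the shape of that check.

```cpp
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <vector>

class ToyLockGraph {
 public:
  // Records "held is acquired before acquired". Returns false, without adding
  // the edge, if doing so would create a cycle (a potential deadlock).
  bool InsertEdge(uintptr_t held, uintptr_t acquired) {
    if (Reachable(acquired, held)) return false;
    edges_[held].insert(acquired);
    return true;
  }

 private:
  // Depth-first search: is `to` reachable from `from` along existing edges?
  bool Reachable(uintptr_t from, uintptr_t to) {
    if (from == to) return true;
    std::vector<uintptr_t> stack{from};
    std::unordered_set<uintptr_t> seen{from};
    while (!stack.empty()) {
      const uintptr_t cur = stack.back();
      stack.pop_back();
      for (uintptr_t next : edges_[cur]) {
        if (next == to) return true;
        if (seen.insert(next).second) stack.push_back(next);
      }
    }
    return false;
  }

  std::unordered_map<uintptr_t, std::unordered_set<uintptr_t>> edges_;
};
```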
@@ -1407,37 +1387,40 @@ static GraphId DeadlockCheck(Mutex *mu) { for (int j = 0; j != all_locks->n; j++) { void* pr = deadlock_graph->Ptr(all_locks->locks[j].id); if (pr != nullptr) { - snprintf(b->buf + len, sizeof (b->buf) - len, " %p", pr); + snprintf(b->buf + len, sizeof(b->buf) - len, " %p", pr); len += strlen(&b->buf[len]); } } ABSL_RAW_LOG(ERROR, "Acquiring absl::Mutex %p while holding %s; a cycle in the " "historical lock ordering graph has been observed", - static_cast<void *>(mu), b->buf); + static_cast<void*>(mu), b->buf); ABSL_RAW_LOG(ERROR, "Cycle: "); - int path_len = deadlock_graph->FindPath( - mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path); - for (int j = 0; j != path_len; j++) { + int path_len = deadlock_graph->FindPath(mu_id, other_node_id, + ABSL_ARRAYSIZE(b->path), b->path); + for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) { GraphId id = b->path[j]; - Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id)); + Mutex* path_mu = static_cast<Mutex*>(deadlock_graph->Ptr(id)); if (path_mu == nullptr) continue; void** stack; int depth = deadlock_graph->GetStackTrace(id, &stack); snprintf(b->buf, sizeof(b->buf), - "mutex@%p stack: ", static_cast<void *>(path_mu)); + "mutex@%p stack: ", static_cast<void*>(path_mu)); StackString(stack, depth, b->buf + strlen(b->buf), static_cast<int>(sizeof(b->buf) - strlen(b->buf)), symbolize); ABSL_RAW_LOG(ERROR, "%s", b->buf); } + if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) { + ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)"); + } if (synch_deadlock_detection.load(std::memory_order_acquire) == OnDeadlockCycle::kAbort) { deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler ABSL_RAW_LOG(FATAL, "dying due to potential deadlock"); return mu_id; } - break; // report at most one potential deadlock per acquisition + break; // report at most one potential deadlock per acquisition } } @@ -1446,7 +1429,7 @@ static GraphId DeadlockCheck(Mutex *mu) { // Invoke DeadlockCheck() iff we're in debug mode and // deadlock checking has been enabled. -static inline GraphId DebugOnlyDeadlockCheck(Mutex *mu) { +static inline GraphId DebugOnlyDeadlockCheck(Mutex* mu) { if (kDebugMode && synch_deadlock_detection.load(std::memory_order_acquire) != OnDeadlockCycle::kIgnore) { return DeadlockCheck(mu); @@ -1473,13 +1456,13 @@ void Mutex::AssertNotHeld() const { (mu_.load(std::memory_order_relaxed) & (kMuWriter | kMuReader)) != 0 && synch_deadlock_detection.load(std::memory_order_acquire) != OnDeadlockCycle::kIgnore) { - GraphId id = GetGraphId(const_cast<Mutex *>(this)); - SynchLocksHeld *locks = Synch_GetAllLocks(); + GraphId id = GetGraphId(const_cast<Mutex*>(this)); + SynchLocksHeld* locks = Synch_GetAllLocks(); for (int i = 0; i != locks->n; i++) { if (locks->locks[i].id == id) { - SynchEvent *mu_events = GetSynchEvent(this); + SynchEvent* mu_events = GetSynchEvent(this); ABSL_RAW_LOG(FATAL, "thread should not hold mutex %p %s", - static_cast<const void *>(this), + static_cast<const void*>(this), (mu_events == nullptr ? 
"" : mu_events->name)); } } @@ -1492,8 +1475,8 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) { int c = GetMutexGlobals().spinloop_iterations; do { // do/while somewhat faster on AMD intptr_t v = mu->load(std::memory_order_relaxed); - if ((v & (kMuReader|kMuEvent)) != 0) { - return false; // a reader or tracing -> give up + if ((v & (kMuReader | kMuEvent)) != 0) { + return false; // a reader or tracing -> give up } else if (((v & kMuWriter) == 0) && // no holder -> try to acquire mu->compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire, @@ -1510,8 +1493,7 @@ void Mutex::Lock() { intptr_t v = mu_.load(std::memory_order_relaxed); // try fast acquire, then spin loop if ((v & (kMuWriter | kMuReader | kMuEvent)) != 0 || - !mu_.compare_exchange_strong(v, kMuWriter | v, - std::memory_order_acquire, + !mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire, std::memory_order_relaxed)) { // try spin acquire, then slow loop if (!TryAcquireWithSpinning(&this->mu_)) { @@ -1537,7 +1519,7 @@ void Mutex::ReaderLock() { ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0); } -void Mutex::LockWhen(const Condition &cond) { +void Mutex::LockWhen(const Condition& cond) { ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); GraphId id = DebugOnlyDeadlockCheck(this); this->LockSlow(kExclusive, &cond, 0); @@ -1545,21 +1527,26 @@ void Mutex::LockWhen(const Condition &cond) { ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); } -bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) { - return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout)); +bool Mutex::LockWhenWithTimeout(const Condition& cond, absl::Duration timeout) { + ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); + GraphId id = DebugOnlyDeadlockCheck(this); + bool res = LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(timeout), 0); + DebugOnlyLockEnter(this, id); + ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); + return res; } -bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) { +bool Mutex::LockWhenWithDeadline(const Condition& cond, absl::Time deadline) { ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); GraphId id = DebugOnlyDeadlockCheck(this); - bool res = LockSlowWithDeadline(kExclusive, &cond, - KernelTimeout(deadline), 0); + bool res = + LockSlowWithDeadline(kExclusive, &cond, KernelTimeout(deadline), 0); DebugOnlyLockEnter(this, id); ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); return res; } -void Mutex::ReaderLockWhen(const Condition &cond) { +void Mutex::ReaderLockWhen(const Condition& cond) { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); GraphId id = DebugOnlyDeadlockCheck(this); this->LockSlow(kShared, &cond, 0); @@ -1567,12 +1554,17 @@ void Mutex::ReaderLockWhen(const Condition &cond) { ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0); } -bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond, +bool Mutex::ReaderLockWhenWithTimeout(const Condition& cond, absl::Duration timeout) { - return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout)); + ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); + GraphId id = DebugOnlyDeadlockCheck(this); + bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0); + DebugOnlyLockEnter(this, id); + ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0); + return res; } -bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond, +bool Mutex::ReaderLockWhenWithDeadline(const Condition& cond, absl::Time deadline) { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock); GraphId id = 
DebugOnlyDeadlockCheck(this); @@ -1582,23 +1574,34 @@ bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond, return res; } -void Mutex::Await(const Condition &cond) { - if (cond.Eval()) { // condition already true; nothing to do +void Mutex::Await(const Condition& cond) { + if (cond.Eval()) { // condition already true; nothing to do if (kDebugMode) { this->AssertReaderHeld(); } - } else { // normal case + } else { // normal case ABSL_RAW_CHECK(this->AwaitCommon(cond, KernelTimeout::Never()), "condition untrue on return from Await"); } } -bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) { - return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout)); +bool Mutex::AwaitWithTimeout(const Condition& cond, absl::Duration timeout) { + if (cond.Eval()) { // condition already true; nothing to do + if (kDebugMode) { + this->AssertReaderHeld(); + } + return true; + } + + KernelTimeout t{timeout}; + bool res = this->AwaitCommon(cond, t); + ABSL_RAW_CHECK(res || t.has_timeout(), + "condition untrue on return from Await"); + return res; } -bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) { - if (cond.Eval()) { // condition already true; nothing to do +bool Mutex::AwaitWithDeadline(const Condition& cond, absl::Time deadline) { + if (cond.Eval()) { // condition already true; nothing to do if (kDebugMode) { this->AssertReaderHeld(); } @@ -1612,14 +1615,14 @@ bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) { return res; } -bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) { +bool Mutex::AwaitCommon(const Condition& cond, KernelTimeout t) { this->AssertReaderHeld(); MuHow how = (mu_.load(std::memory_order_relaxed) & kMuWriter) ? kExclusive : kShared; ABSL_TSAN_MUTEX_PRE_UNLOCK(this, TsanFlags(how)); - SynchWaitParams waitp( - how, &cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this), - nullptr /*no cv_word*/); + SynchWaitParams waitp(how, &cond, t, nullptr /*no cvmu*/, + Synch_GetPerThreadAnnotated(this), + nullptr /*no cv_word*/); int flags = kMuHasBlocked; if (!Condition::GuaranteedEqual(&cond, nullptr)) { flags |= kMuIsCond; @@ -1639,14 +1642,13 @@ bool Mutex::TryLock() { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); intptr_t v = mu_.load(std::memory_order_relaxed); if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire - mu_.compare_exchange_strong(v, kMuWriter | v, - std::memory_order_acquire, + mu_.compare_exchange_strong(v, kMuWriter | v, std::memory_order_acquire, std::memory_order_relaxed)) { DebugOnlyLockEnter(this); ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_try_lock, 0); return true; } - if ((v & kMuEvent) != 0) { // we're recording events + if ((v & kMuEvent) != 0) { // we're recording events if ((v & kExclusive->slow_need_zero) == 0 && // try fast acquire mu_.compare_exchange_strong( v, (kExclusive->fast_or | v) + kExclusive->fast_add, @@ -1672,7 +1674,7 @@ bool Mutex::ReaderTryLock() { // changing (typically because the reader count changes) under the CAS. We // limit the number of attempts to avoid having to think about livelock. 
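The comment above is the key to ReaderTryLock()'s structure: rather than looping until the compare-and-swap succeeds (and having to argue about livelock when the reader count keeps changing), it bounds the number of attempts and lets the TryLock fail. A generic version of that bounded-retry pattern, with illustrative names, is sketched below.

```cpp
#include <atomic>
#include <cstdint>

constexpr intptr_t kToyWriterBit = 0x0008;
constexpr intptr_t kToyReaderBit = 0x0001;
constexpr intptr_t kToyOneReader = 0x0100;  // one reader's worth of count

bool TryAddReaderBounded(std::atomic<intptr_t>& word) {
  intptr_t v = word.load(std::memory_order_relaxed);
  // compare_exchange_strong refreshes v on failure, so each iteration retries
  // against the current word -- but only a handful of times.
  for (int attempts = 5; attempts != 0 && (v & kToyWriterBit) == 0; --attempts) {
    if (word.compare_exchange_strong(v, (v | kToyReaderBit) + kToyOneReader,
                                     std::memory_order_acquire,
                                     std::memory_order_relaxed)) {
      return true;
    }
  }
  return false;  // give up cleanly rather than reason about livelock
}
```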
int loop_limit = 5; - while ((v & (kMuWriter|kMuWait|kMuEvent)) == 0 && loop_limit != 0) { + while ((v & (kMuWriter | kMuWait | kMuEvent)) == 0 && loop_limit != 0) { if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne, std::memory_order_acquire, std::memory_order_relaxed)) { @@ -1684,7 +1686,7 @@ bool Mutex::ReaderTryLock() { loop_limit--; v = mu_.load(std::memory_order_relaxed); } - if ((v & kMuEvent) != 0) { // we're recording events + if ((v & kMuEvent) != 0) { // we're recording events loop_limit = 5; while ((v & kShared->slow_need_zero) == 0 && loop_limit != 0) { if (mu_.compare_exchange_strong(v, (kMuReader | v) + kMuOne, @@ -1723,7 +1725,7 @@ void Mutex::Unlock() { // should_try_cas is whether we'll try a compare-and-swap immediately. // NOTE: optimized out when kDebugMode is false. bool should_try_cas = ((v & (kMuEvent | kMuWriter)) == kMuWriter && - (v & (kMuWait | kMuDesig)) != kMuWait); + (v & (kMuWait | kMuDesig)) != kMuWait); // But, we can use an alternate computation of it, that compilers // currently don't find on their own. When that changes, this function // can be simplified. @@ -1740,10 +1742,9 @@ void Mutex::Unlock() { static_cast<long long>(v), static_cast<long long>(x), static_cast<long long>(y)); } - if (x < y && - mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter), - std::memory_order_release, - std::memory_order_relaxed)) { + if (x < y && mu_.compare_exchange_strong(v, v & ~(kMuWrWait | kMuWriter), + std::memory_order_release, + std::memory_order_relaxed)) { // fast writer release (writer with no waiters or with designated waker) } else { this->UnlockSlow(nullptr /*no waitp*/); // take slow path @@ -1753,7 +1754,7 @@ void Mutex::Unlock() { // Requires v to represent a reader-locked state. static bool ExactlyOneReader(intptr_t v) { - assert((v & (kMuWriter|kMuReader)) == kMuReader); + assert((v & (kMuWriter | kMuReader)) == kMuReader); assert((v & kMuHigh) != 0); // The more straightforward "(v & kMuHigh) == kMuOne" also works, but // on some architectures the following generates slightly smaller code. @@ -1766,12 +1767,11 @@ void Mutex::ReaderUnlock() { ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock); DebugOnlyLockLeave(this); intptr_t v = mu_.load(std::memory_order_relaxed); - assert((v & (kMuWriter|kMuReader)) == kMuReader); - if ((v & (kMuReader|kMuWait|kMuEvent)) == kMuReader) { + assert((v & (kMuWriter | kMuReader)) == kMuReader); + if ((v & (kMuReader | kMuWait | kMuEvent)) == kMuReader) { // fast reader release (reader with no waiters) - intptr_t clear = ExactlyOneReader(v) ? kMuReader|kMuOne : kMuOne; - if (mu_.compare_exchange_strong(v, v - clear, - std::memory_order_release, + intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne; + if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release, std::memory_order_relaxed)) { ABSL_TSAN_MUTEX_POST_UNLOCK(this, __tsan_mutex_read_lock); return; @@ -1810,7 +1810,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) { } // Internal version of LockWhen(). See LockSlowWithDeadline() -ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond, +ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition* cond, int flags) { ABSL_RAW_CHECK( this->LockSlowWithDeadline(how, cond, KernelTimeout::Never(), flags), @@ -1818,7 +1818,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::LockSlow(MuHow how, const Condition *cond, } // Compute cond->Eval() and tell race detectors that we do it under mutex mu. 
-static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, +static inline bool EvalConditionAnnotated(const Condition* cond, Mutex* mu, bool locking, bool trylock, bool read_lock) { // Delicate annotation dance. @@ -1868,7 +1868,7 @@ static inline bool EvalConditionAnnotated(const Condition *cond, Mutex *mu, // tsan). As the result there is no tsan-visible synchronization between the // addition and this thread. So if we would enable race detection here, // it would race with the predicate initialization. -static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) { +static inline bool EvalConditionIgnored(Mutex* mu, const Condition* cond) { // Memory accesses are already ignored inside of lock/unlock operations, // but synchronization operations are also ignored. When we evaluate the // predicate we must ignore only memory accesses but not synchronization, @@ -1893,7 +1893,7 @@ static inline bool EvalConditionIgnored(Mutex *mu, const Condition *cond) { // obstruct this call // - kMuIsCond indicates that this is a conditional acquire (condition variable, // Await, LockWhen) so contention profiling should be suppressed. -bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond, +bool Mutex::LockSlowWithDeadline(MuHow how, const Condition* cond, KernelTimeout t, int flags) { intptr_t v = mu_.load(std::memory_order_relaxed); bool unlock = false; @@ -1910,9 +1910,9 @@ bool Mutex::LockSlowWithDeadline(MuHow how, const Condition *cond, } unlock = true; } - SynchWaitParams waitp( - how, cond, t, nullptr /*no cvmu*/, Synch_GetPerThreadAnnotated(this), - nullptr /*no cv_word*/); + SynchWaitParams waitp(how, cond, t, nullptr /*no cvmu*/, + Synch_GetPerThreadAnnotated(this), + nullptr /*no cv_word*/); if (!Condition::GuaranteedEqual(cond, nullptr)) { flags |= kMuIsCond; } @@ -1953,20 +1953,20 @@ static void CheckForMutexCorruption(intptr_t v, const char* label) { if (ABSL_PREDICT_TRUE((w & (w << 3) & (kMuWriter | kMuWrWait)) == 0)) return; RAW_CHECK_FMT((v & (kMuWriter | kMuReader)) != (kMuWriter | kMuReader), "%s: Mutex corrupt: both reader and writer lock held: %p", - label, reinterpret_cast<void *>(v)); + label, reinterpret_cast<void*>(v)); RAW_CHECK_FMT((v & (kMuWait | kMuWrWait)) != kMuWrWait, - "%s: Mutex corrupt: waiting writer with no waiters: %p", - label, reinterpret_cast<void *>(v)); + "%s: Mutex corrupt: waiting writer with no waiters: %p", label, + reinterpret_cast<void*>(v)); assert(false); } -void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { +void Mutex::LockSlowLoop(SynchWaitParams* waitp, int flags) { SchedulingGuard::ScopedDisable disable_rescheduling; int c = 0; intptr_t v = mu_.load(std::memory_order_relaxed); if ((v & kMuEvent) != 0) { - PostSynchEvent(this, - waitp->how == kExclusive? SYNCH_EV_LOCK: SYNCH_EV_READERLOCK); + PostSynchEvent( + this, waitp->how == kExclusive ? SYNCH_EV_LOCK : SYNCH_EV_READERLOCK); } ABSL_RAW_CHECK( waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, @@ -1991,11 +1991,11 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { flags |= kMuHasBlocked; c = 0; } - } else { // need to access waiter list + } else { // need to access waiter list bool dowait = false; - if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters + if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters // This thread tries to become the one and only waiter. 
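The enqueue paths that follow all rely on the same packing trick: the mutex word keeps its flag bits in the low byte (kMuLow) and, when there are waiters, a pointer to the head of the waiter list in the remaining bits, which works because the waiter records are aligned so that the low byte of their address is zero. A minimal sketch of that packing, with ToyWaiterNode and the helper functions as illustrative names, is shown below.

```cpp
#include <atomic>
#include <cstdint>

constexpr intptr_t kToyLow = 0x00ff;   // flag bits live in the low byte
constexpr intptr_t kToyWait = 0x0004;  // "there are waiters" flag

struct alignas(256) ToyWaiterNode {  // alignment keeps the pointer's low byte zero
  ToyWaiterNode* next = nullptr;
};

// Try once to install `head` as the waiter list, preserving existing flag bits.
bool InstallWaiterList(std::atomic<intptr_t>& word, ToyWaiterNode* head) {
  intptr_t v = word.load(std::memory_order_relaxed);
  const intptr_t nv =
      (v & kToyLow) | kToyWait | reinterpret_cast<intptr_t>(head);
  return word.compare_exchange_strong(v, nv, std::memory_order_release,
                                      std::memory_order_relaxed);
}

ToyWaiterNode* ExtractWaiterList(intptr_t v) {
  return reinterpret_cast<ToyWaiterNode*>(v & ~kToyLow);  // drop the flag bits
}
```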
- PerThreadSynch *new_h = Enqueue(nullptr, waitp, v, flags); + PerThreadSynch* new_h = Enqueue(nullptr, waitp, v, flags); intptr_t nv = (v & ClearDesignatedWakerMask(flags & kMuHasBlocked) & kMuLow) | kMuWait; @@ -2007,7 +2007,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { v, reinterpret_cast<intptr_t>(new_h) | nv, std::memory_order_release, std::memory_order_relaxed)) { dowait = true; - } else { // attempted Enqueue() failed + } else { // attempted Enqueue() failed // zero out the waitp field set by Enqueue() waitp->thread->waitp = nullptr; } @@ -2020,9 +2020,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) | kMuSpin | kMuReader, std::memory_order_acquire, std::memory_order_relaxed)) { - PerThreadSynch *h = GetPerThreadSynch(v); - h->readers += kMuOne; // inc reader count in waiter - do { // release spinlock + PerThreadSynch* h = GetPerThreadSynch(v); + h->readers += kMuOne; // inc reader count in waiter + do { // release spinlock v = mu_.load(std::memory_order_relaxed); } while (!mu_.compare_exchange_weak(v, (v & ~kMuSpin) | kMuReader, std::memory_order_release, @@ -2032,7 +2032,7 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { waitp->how == kShared)) { break; // we timed out, or condition true, so return } - this->UnlockSlow(waitp); // got lock but condition false + this->UnlockSlow(waitp); // got lock but condition false this->Block(waitp->thread); flags |= kMuHasBlocked; c = 0; @@ -2043,18 +2043,19 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { (v & ClearDesignatedWakerMask(flags & kMuHasBlocked)) | kMuSpin | kMuWait, std::memory_order_acquire, std::memory_order_relaxed)) { - PerThreadSynch *h = GetPerThreadSynch(v); - PerThreadSynch *new_h = Enqueue(h, waitp, v, flags); + PerThreadSynch* h = GetPerThreadSynch(v); + PerThreadSynch* new_h = Enqueue(h, waitp, v, flags); intptr_t wr_wait = 0; ABSL_RAW_CHECK(new_h != nullptr, "Enqueue to list failed"); if (waitp->how == kExclusive && (v & kMuReader) != 0) { - wr_wait = kMuWrWait; // give priority to a waiting writer + wr_wait = kMuWrWait; // give priority to a waiting writer } - do { // release spinlock + do { // release spinlock v = mu_.load(std::memory_order_relaxed); } while (!mu_.compare_exchange_weak( - v, (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait | - reinterpret_cast<intptr_t>(new_h), + v, + (v & (kMuLow & ~kMuSpin)) | kMuWait | wr_wait | + reinterpret_cast<intptr_t>(new_h), std::memory_order_release, std::memory_order_relaxed)); dowait = true; } @@ -2074,9 +2075,9 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, "detected illegal recursion into Mutex code"); if ((v & kMuEvent) != 0) { - PostSynchEvent(this, - waitp->how == kExclusive? SYNCH_EV_LOCK_RETURNING : - SYNCH_EV_READERLOCK_RETURNING); + PostSynchEvent(this, waitp->how == kExclusive + ? SYNCH_EV_LOCK_RETURNING + : SYNCH_EV_READERLOCK_RETURNING); } } @@ -2085,28 +2086,28 @@ void Mutex::LockSlowLoop(SynchWaitParams *waitp, int flags) { // which holds the lock but is not runnable because its condition is false // or it is in the process of blocking on a condition variable; it must requeue // itself on the mutex/condvar to wait for its condition to become true. 
-ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { +ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams* waitp) { SchedulingGuard::ScopedDisable disable_rescheduling; intptr_t v = mu_.load(std::memory_order_relaxed); this->AssertReaderHeld(); CheckForMutexCorruption(v, "Unlock"); if ((v & kMuEvent) != 0) { - PostSynchEvent(this, - (v & kMuWriter) != 0? SYNCH_EV_UNLOCK: SYNCH_EV_READERUNLOCK); + PostSynchEvent( + this, (v & kMuWriter) != 0 ? SYNCH_EV_UNLOCK : SYNCH_EV_READERUNLOCK); } int c = 0; // the waiter under consideration to wake, or zero - PerThreadSynch *w = nullptr; + PerThreadSynch* w = nullptr; // the predecessor to w or zero - PerThreadSynch *pw = nullptr; + PerThreadSynch* pw = nullptr; // head of the list searched previously, or zero - PerThreadSynch *old_h = nullptr; + PerThreadSynch* old_h = nullptr; // a condition that's known to be false. - const Condition *known_false = nullptr; - PerThreadSynch *wake_list = kPerThreadSynchNull; // list of threads to wake - intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a - // later writer could have acquired the lock - // (starvation avoidance) + const Condition* known_false = nullptr; + PerThreadSynch* wake_list = kPerThreadSynchNull; // list of threads to wake + intptr_t wr_wait = 0; // set to kMuWrWait if we wake a reader and a + // later writer could have acquired the lock + // (starvation avoidance) ABSL_RAW_CHECK(waitp == nullptr || waitp->thread->waitp == nullptr || waitp->thread->suppress_fatal_errors, "detected illegal recursion into Mutex code"); @@ -2126,8 +2127,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { } else if ((v & (kMuReader | kMuWait)) == kMuReader && waitp == nullptr) { // fast reader release (reader with no waiters) intptr_t clear = ExactlyOneReader(v) ? kMuReader | kMuOne : kMuOne; - if (mu_.compare_exchange_strong(v, v - clear, - std::memory_order_release, + if (mu_.compare_exchange_strong(v, v - clear, std::memory_order_release, std::memory_order_relaxed)) { return; } @@ -2135,16 +2135,16 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { mu_.compare_exchange_strong(v, v | kMuSpin, std::memory_order_acquire, std::memory_order_relaxed)) { - if ((v & kMuWait) == 0) { // no one to wake + if ((v & kMuWait) == 0) { // no one to wake intptr_t nv; bool do_enqueue = true; // always Enqueue() the first time ABSL_RAW_CHECK(waitp != nullptr, "UnlockSlow is confused"); // about to sleep - do { // must loop to release spinlock as reader count may change + do { // must loop to release spinlock as reader count may change v = mu_.load(std::memory_order_relaxed); // decrement reader count if there are readers - intptr_t new_readers = (v >= kMuOne)? v - kMuOne : v; - PerThreadSynch *new_h = nullptr; + intptr_t new_readers = (v >= kMuOne) ? v - kMuOne : v; + PerThreadSynch* new_h = nullptr; if (do_enqueue) { // If we are enqueuing on a CondVar (waitp->cv_word != nullptr) then // we must not retry here. The initial attempt will always have @@ -2168,21 +2168,20 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { } // release spinlock & our lock; retry if reader-count changed // (writer count cannot change since we hold lock) - } while (!mu_.compare_exchange_weak(v, nv, - std::memory_order_release, + } while (!mu_.compare_exchange_weak(v, nv, std::memory_order_release, std::memory_order_relaxed)); break; } // There are waiters. // Set h to the head of the circular waiter list. 
- PerThreadSynch *h = GetPerThreadSynch(v); + PerThreadSynch* h = GetPerThreadSynch(v); if ((v & kMuReader) != 0 && (h->readers & kMuHigh) > kMuOne) { // a reader but not the last - h->readers -= kMuOne; // release our lock - intptr_t nv = v; // normally just release spinlock + h->readers -= kMuOne; // release our lock + intptr_t nv = v; // normally just release spinlock if (waitp != nullptr) { // but waitp!=nullptr => must queue ourselves - PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond); + PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond); ABSL_RAW_CHECK(new_h != nullptr, "waiters disappeared during Enqueue()!"); nv &= kMuLow; @@ -2200,8 +2199,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { // The lock is becoming free, and there's a waiter if (old_h != nullptr && - !old_h->may_skip) { // we used old_h as a terminator - old_h->may_skip = true; // allow old_h to skip once more + !old_h->may_skip) { // we used old_h as a terminator + old_h->may_skip = true; // allow old_h to skip once more ABSL_RAW_CHECK(old_h->skip == nullptr, "illegal skip from head"); if (h != old_h && MuEquivalentWaiter(old_h, old_h->next)) { old_h->skip = old_h->next; // old_h not head & can skip to successor @@ -2210,7 +2209,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { if (h->next->waitp->how == kExclusive && Condition::GuaranteedEqual(h->next->waitp->cond, nullptr)) { // easy case: writer with no condition; no need to search - pw = h; // wake w, the successor of h (=pw) + pw = h; // wake w, the successor of h (=pw) w = h->next; w->wake = true; // We are waking up a writer. This writer may be racing against @@ -2233,13 +2232,13 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { // waiter has a condition or is a reader. We avoid searching over // waiters we've searched on previous iterations by starting at // old_h if it's set. If old_h==h, there's no one to wakeup at all. - if (old_h == h) { // we've searched before, and nothing's new - // so there's no one to wake. - intptr_t nv = (v & ~(kMuReader|kMuWriter|kMuWrWait)); + if (old_h == h) { // we've searched before, and nothing's new + // so there's no one to wake. + intptr_t nv = (v & ~(kMuReader | kMuWriter | kMuWrWait)); h->readers = 0; - h->maybe_unlocking = false; // finished unlocking - if (waitp != nullptr) { // we must queue ourselves and sleep - PerThreadSynch *new_h = Enqueue(h, waitp, v, kMuIsCond); + h->maybe_unlocking = false; // finished unlocking + if (waitp != nullptr) { // we must queue ourselves and sleep + PerThreadSynch* new_h = Enqueue(h, waitp, v, kMuIsCond); nv &= kMuLow; if (new_h != nullptr) { nv |= kMuWait | reinterpret_cast<intptr_t>(new_h); @@ -2253,12 +2252,12 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { } // set up to walk the list - PerThreadSynch *w_walk; // current waiter during list walk - PerThreadSynch *pw_walk; // previous waiter during list walk + PerThreadSynch* w_walk; // current waiter during list walk + PerThreadSynch* pw_walk; // previous waiter during list walk if (old_h != nullptr) { // we've searched up to old_h before pw_walk = old_h; w_walk = old_h->next; - } else { // no prior search, start at beginning + } else { // no prior search, start at beginning pw_walk = nullptr; // h->next's predecessor may change; don't record it w_walk = h->next; @@ -2284,7 +2283,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { // to walk the path from w_walk to h inclusive. 
(TryRemove() can remove // a waiter anywhere, but it acquires both the spinlock and the Mutex) - old_h = h; // remember we searched to here + old_h = h; // remember we searched to here // Walk the path upto and including h looking for waiters we can wake. while (pw_walk != h) { @@ -2296,24 +2295,24 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { // is in fact true EvalConditionIgnored(this, w_walk->waitp->cond))) { if (w == nullptr) { - w_walk->wake = true; // can wake this waiter + w_walk->wake = true; // can wake this waiter w = w_walk; pw = pw_walk; if (w_walk->waitp->how == kExclusive) { wr_wait = kMuWrWait; - break; // bail if waking this writer + break; // bail if waking this writer } } else if (w_walk->waitp->how == kShared) { // wake if a reader w_walk->wake = true; - } else { // writer with true condition + } else { // writer with true condition wr_wait = kMuWrWait; } - } else { // can't wake; condition false + } else { // can't wake; condition false known_false = w_walk->waitp->cond; // remember last false condition } - if (w_walk->wake) { // we're waking reader w_walk - pw_walk = w_walk; // don't skip similar waiters - } else { // not waking; skip as much as possible + if (w_walk->wake) { // we're waking reader w_walk + pw_walk = w_walk; // don't skip similar waiters + } else { // not waking; skip as much as possible pw_walk = Skip(w_walk); } // If pw_walk == h, then load of pw_walk->next can race with @@ -2340,8 +2339,8 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { h = DequeueAllWakeable(h, pw, &wake_list); intptr_t nv = (v & kMuEvent) | kMuDesig; - // assume no waiters left, - // set kMuDesig for INV1a + // assume no waiters left, + // set kMuDesig for INV1a if (waitp != nullptr) { // we must queue ourselves and sleep h = Enqueue(h, waitp, v, kMuIsCond); @@ -2354,7 +2353,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { if (h != nullptr) { // there are waiters left h->readers = 0; - h->maybe_unlocking = false; // finished unlocking + h->maybe_unlocking = false; // finished unlocking nv |= wr_wait | kMuWait | reinterpret_cast<intptr_t>(h); } @@ -2365,12 +2364,12 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { } // aggressive here; no one can proceed till we do c = synchronization_internal::MutexDelay(c, AGGRESSIVE); - } // end of for(;;)-loop + } // end of for(;;)-loop if (wake_list != kPerThreadSynchNull) { int64_t total_wait_cycles = 0; int64_t max_wait_cycles = 0; - int64_t now = base_internal::CycleClock::Now(); + int64_t now = CycleClock::Now(); do { // Profile lock contention events only if the waiter was trying to acquire // the lock, not waiting on a condition variable or Condition. @@ -2382,7 +2381,7 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) { wake_list->waitp->contention_start_cycles = now; wake_list->waitp->should_submit_contention_data = true; } - wake_list = Wakeup(wake_list); // wake waiters + wake_list = Wakeup(wake_list); // wake waiters } while (wake_list != kPerThreadSynchNull); if (total_wait_cycles > 0) { mutex_tracer("slow release", this, total_wait_cycles); @@ -2410,7 +2409,7 @@ void Mutex::Trans(MuHow how) { // condition variable. If this mutex is free, we simply wake the thread. // It will later acquire the mutex with high probability. Otherwise, we // enqueue thread w on this mutex. 
-void Mutex::Fer(PerThreadSynch *w) { +void Mutex::Fer(PerThreadSynch* w) { SchedulingGuard::ScopedDisable disable_rescheduling; int c = 0; ABSL_RAW_CHECK(w->waitp->cond == nullptr, @@ -2435,9 +2434,9 @@ void Mutex::Fer(PerThreadSynch *w) { IncrementSynchSem(this, w); return; } else { - if ((v & (kMuSpin|kMuWait)) == 0) { // no waiters + if ((v & (kMuSpin | kMuWait)) == 0) { // no waiters // This thread tries to become the one and only waiter. - PerThreadSynch *new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond); + PerThreadSynch* new_h = Enqueue(nullptr, w->waitp, v, kMuIsCond); ABSL_RAW_CHECK(new_h != nullptr, "Enqueue failed"); // we must queue ourselves if (mu_.compare_exchange_strong( @@ -2447,8 +2446,8 @@ void Mutex::Fer(PerThreadSynch *w) { } } else if ((v & kMuSpin) == 0 && mu_.compare_exchange_strong(v, v | kMuSpin | kMuWait)) { - PerThreadSynch *h = GetPerThreadSynch(v); - PerThreadSynch *new_h = Enqueue(h, w->waitp, v, kMuIsCond); + PerThreadSynch* h = GetPerThreadSynch(v); + PerThreadSynch* new_h = Enqueue(h, w->waitp, v, kMuIsCond); ABSL_RAW_CHECK(new_h != nullptr, "Enqueue failed"); // we must queue ourselves do { @@ -2467,19 +2466,18 @@ void Mutex::Fer(PerThreadSynch *w) { void Mutex::AssertHeld() const { if ((mu_.load(std::memory_order_relaxed) & kMuWriter) == 0) { - SynchEvent *e = GetSynchEvent(this); + SynchEvent* e = GetSynchEvent(this); ABSL_RAW_LOG(FATAL, "thread should hold write lock on Mutex %p %s", - static_cast<const void *>(this), - (e == nullptr ? "" : e->name)); + static_cast<const void*>(this), (e == nullptr ? "" : e->name)); } } void Mutex::AssertReaderHeld() const { if ((mu_.load(std::memory_order_relaxed) & (kMuReader | kMuWriter)) == 0) { - SynchEvent *e = GetSynchEvent(this); - ABSL_RAW_LOG( - FATAL, "thread should hold at least a read lock on Mutex %p %s", - static_cast<const void *>(this), (e == nullptr ? "" : e->name)); + SynchEvent* e = GetSynchEvent(this); + ABSL_RAW_LOG(FATAL, + "thread should hold at least a read lock on Mutex %p %s", + static_cast<const void*>(this), (e == nullptr ? "" : e->name)); } } @@ -2490,13 +2488,17 @@ static const intptr_t kCvEvent = 0x0002L; // record events static const intptr_t kCvLow = 0x0003L; // low order bits of CV // Hack to make constant values available to gdb pretty printer -enum { kGdbCvSpin = kCvSpin, kGdbCvEvent = kCvEvent, kGdbCvLow = kCvLow, }; +enum { + kGdbCvSpin = kCvSpin, + kGdbCvEvent = kCvEvent, + kGdbCvLow = kCvLow, +}; static_assert(PerThreadSynch::kAlignment > kCvLow, "PerThreadSynch::kAlignment must be greater than kCvLow"); -void CondVar::EnableDebugLog(const char *name) { - SynchEvent *e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin); +void CondVar::EnableDebugLog(const char* name) { + SynchEvent* e = EnsureSynchEvent(&this->cv_, name, kCvEvent, kCvSpin); e->log = true; UnrefSynchEvent(e); } @@ -2507,25 +2509,23 @@ CondVar::~CondVar() { } } - // Remove thread s from the list of waiters on this condition variable. 
-void CondVar::Remove(PerThreadSynch *s) { +void CondVar::Remove(PerThreadSynch* s) { SchedulingGuard::ScopedDisable disable_rescheduling; intptr_t v; int c = 0; for (v = cv_.load(std::memory_order_relaxed);; v = cv_.load(std::memory_order_relaxed)) { if ((v & kCvSpin) == 0 && // attempt to acquire spinlock - cv_.compare_exchange_strong(v, v | kCvSpin, - std::memory_order_acquire, + cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire, std::memory_order_relaxed)) { - PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow); + PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow); if (h != nullptr) { - PerThreadSynch *w = h; + PerThreadSynch* w = h; while (w->next != s && w->next != h) { // search for thread w = w->next; } - if (w->next == s) { // found thread; remove it + if (w->next == s) { // found thread; remove it w->next = s->next; if (h == s) { h = (w == s) ? nullptr : w; @@ -2534,7 +2534,7 @@ void CondVar::Remove(PerThreadSynch *s) { s->state.store(PerThreadSynch::kAvailable, std::memory_order_release); } } - // release spinlock + // release spinlock cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h), std::memory_order_release); return; @@ -2557,14 +2557,14 @@ void CondVar::Remove(PerThreadSynch *s) { // variable queue just before the mutex is to be unlocked, and (most // importantly) after any call to an external routine that might re-enter the // mutex code. -static void CondVarEnqueue(SynchWaitParams *waitp) { +static void CondVarEnqueue(SynchWaitParams* waitp) { // This thread might be transferred to the Mutex queue by Fer() when // we are woken. To make sure that is what happens, Enqueue() doesn't // call CondVarEnqueue() again but instead uses its normal code. We // must do this before we queue ourselves so that cv_word will be null // when seen by the dequeuer, who may wish immediately to requeue // this thread on another queue. - std::atomic<intptr_t> *cv_word = waitp->cv_word; + std::atomic<intptr_t>* cv_word = waitp->cv_word; waitp->cv_word = nullptr; intptr_t v = cv_word->load(std::memory_order_relaxed); @@ -2577,8 +2577,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) { v = cv_word->load(std::memory_order_relaxed); } ABSL_RAW_CHECK(waitp->thread->waitp == nullptr, "waiting when shouldn't be"); - waitp->thread->waitp = waitp; // prepare ourselves for waiting - PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow); + waitp->thread->waitp = waitp; // prepare ourselves for waiting + PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow); if (h == nullptr) { // add this thread to waiter list waitp->thread->next = waitp->thread; } else { @@ -2591,8 +2591,8 @@ static void CondVarEnqueue(SynchWaitParams *waitp) { std::memory_order_release); } -bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) { - bool rc = false; // return value; true iff we timed-out +bool CondVar::WaitCommon(Mutex* mutex, KernelTimeout t) { + bool rc = false; // return value; true iff we timed-out intptr_t mutex_v = mutex->mu_.load(std::memory_order_relaxed); Mutex::MuHow mutex_how = ((mutex_v & kMuWriter) != 0) ? 
kExclusive : kShared; @@ -2659,27 +2659,25 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) { return rc; } -bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) { - return WaitWithDeadline(mu, DeadlineFromTimeout(timeout)); +bool CondVar::WaitWithTimeout(Mutex* mu, absl::Duration timeout) { + return WaitCommon(mu, KernelTimeout(timeout)); } -bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) { +bool CondVar::WaitWithDeadline(Mutex* mu, absl::Time deadline) { return WaitCommon(mu, KernelTimeout(deadline)); } -void CondVar::Wait(Mutex *mu) { - WaitCommon(mu, KernelTimeout::Never()); -} +void CondVar::Wait(Mutex* mu) { WaitCommon(mu, KernelTimeout::Never()); } // Wake thread w // If it was a timed wait, w will be waiting on w->cv // Otherwise, if it was not a Mutex mutex, w will be waiting on w->sem // Otherwise, w is transferred to the Mutex mutex via Mutex::Fer(). -void CondVar::Wakeup(PerThreadSynch *w) { +void CondVar::Wakeup(PerThreadSynch* w) { if (w->waitp->timeout.has_timeout() || w->waitp->cvmu == nullptr) { // The waiting thread only needs to observe "w->state == kAvailable" to be // released, we must cache "cvmu" before clearing "next". - Mutex *mu = w->waitp->cvmu; + Mutex* mu = w->waitp->cvmu; w->next = nullptr; w->state.store(PerThreadSynch::kAvailable, std::memory_order_release); Mutex::IncrementSynchSem(mu, w); @@ -2696,11 +2694,10 @@ void CondVar::Signal() { for (v = cv_.load(std::memory_order_relaxed); v != 0; v = cv_.load(std::memory_order_relaxed)) { if ((v & kCvSpin) == 0 && // attempt to acquire spinlock - cv_.compare_exchange_strong(v, v | kCvSpin, - std::memory_order_acquire, + cv_.compare_exchange_strong(v, v | kCvSpin, std::memory_order_acquire, std::memory_order_relaxed)) { - PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow); - PerThreadSynch *w = nullptr; + PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow); + PerThreadSynch* w = nullptr; if (h != nullptr) { // remove first waiter w = h->next; if (w == h) { @@ -2709,11 +2706,11 @@ void CondVar::Signal() { h->next = w->next; } } - // release spinlock + // release spinlock cv_.store((v & kCvEvent) | reinterpret_cast<intptr_t>(h), std::memory_order_release); if (w != nullptr) { - CondVar::Wakeup(w); // wake waiter, if there was one + CondVar::Wakeup(w); // wake waiter, if there was one cond_var_tracer("Signal wakeup", this); } if ((v & kCvEvent) != 0) { @@ -2728,7 +2725,7 @@ void CondVar::Signal() { ABSL_TSAN_MUTEX_POST_SIGNAL(nullptr, 0); } -void CondVar::SignalAll () { +void CondVar::SignalAll() { ABSL_TSAN_MUTEX_PRE_SIGNAL(nullptr, 0); intptr_t v; int c = 0; @@ -2742,11 +2739,11 @@ void CondVar::SignalAll () { if ((v & kCvSpin) == 0 && cv_.compare_exchange_strong(v, v & kCvEvent, std::memory_order_acquire, std::memory_order_relaxed)) { - PerThreadSynch *h = reinterpret_cast<PerThreadSynch *>(v & ~kCvLow); + PerThreadSynch* h = reinterpret_cast<PerThreadSynch*>(v & ~kCvLow); if (h != nullptr) { - PerThreadSynch *w; - PerThreadSynch *n = h->next; - do { // for every thread, wake it up + PerThreadSynch* w; + PerThreadSynch* n = h->next; + do { // for every thread, wake it up w = n; n = n->next; CondVar::Wakeup(w); @@ -2774,42 +2771,41 @@ void ReleasableMutexLock::Release() { } #ifdef ABSL_HAVE_THREAD_SANITIZER -extern "C" void __tsan_read1(void *addr); +extern "C" void __tsan_read1(void* addr); #else #define __tsan_read1(addr) // do nothing if TSan not enabled #endif // A function that just returns its argument, dereferenced -static bool 
Dereference(void *arg) { +static bool Dereference(void* arg) { // ThreadSanitizer does not instrument this file for memory accesses. // This function dereferences a user variable that can participate // in a data race, so we need to manually tell TSan about this memory access. __tsan_read1(arg); - return *(static_cast<bool *>(arg)); + return *(static_cast<bool*>(arg)); } ABSL_CONST_INIT const Condition Condition::kTrue; -Condition::Condition(bool (*func)(void *), void *arg) - : eval_(&CallVoidPtrFunction), - arg_(arg) { +Condition::Condition(bool (*func)(void*), void* arg) + : eval_(&CallVoidPtrFunction), arg_(arg) { static_assert(sizeof(&func) <= sizeof(callback_), "An overlarge function pointer passed to Condition."); StoreCallback(func); } -bool Condition::CallVoidPtrFunction(const Condition *c) { - using FunctionPointer = bool (*)(void *); +bool Condition::CallVoidPtrFunction(const Condition* c) { + using FunctionPointer = bool (*)(void*); FunctionPointer function_pointer; std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer)); return (*function_pointer)(c->arg_); } -Condition::Condition(const bool *cond) +Condition::Condition(const bool* cond) : eval_(CallVoidPtrFunction), // const_cast is safe since Dereference does not modify arg - arg_(const_cast<bool *>(cond)) { - using FunctionPointer = bool (*)(void *); + arg_(const_cast<bool*>(cond)) { + using FunctionPointer = bool (*)(void*); const FunctionPointer dereference = Dereference; StoreCallback(dereference); } @@ -2819,7 +2815,7 @@ bool Condition::Eval() const { return (this->eval_ == nullptr) || (*this->eval_)(this); } -bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) { +bool Condition::GuaranteedEqual(const Condition* a, const Condition* b) { // kTrue logic. if (a == nullptr || a->eval_ == nullptr) { return b == nullptr || b->eval_ == nullptr; |