Diffstat (limited to 'absl/synchronization/mutex.cc')
-rw-r--r--  absl/synchronization/mutex.cc  180
1 file changed, 110 insertions, 70 deletions
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index c0268b62..16a8fbff 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -37,6 +37,8 @@
#include <atomic>
#include <cinttypes>
#include <cstddef>
+#include <cstring>
+#include <iterator>
#include <thread> // NOLINT(build/c++11)
#include "absl/base/attributes.h"
@@ -52,6 +54,7 @@
#include "absl/base/internal/sysinfo.h"
#include "absl/base/internal/thread_identity.h"
#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/debugging/stacktrace.h"
#include "absl/debugging/symbolize.h"
@@ -100,9 +103,6 @@ ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<void (*)(
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(const char *msg, const void *cv)>
cond_var_tracer;
-ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<
- bool (*)(const void *pc, char *out, int out_size)>
- symbolizer(absl::Symbolize);
} // namespace
@@ -123,10 +123,6 @@ void RegisterCondVarTracer(void (*fn)(const char *msg, const void *cv)) {
cond_var_tracer.Store(fn);
}
-void RegisterSymbolizer(bool (*fn)(const void *pc, char *out, int out_size)) {
- symbolizer.Store(fn);
-}
-
namespace {
// Represents the strategy for spin and yield.
// See the comment in GetMutexGlobals() for more information.
@@ -135,25 +131,42 @@ enum DelayMode { AGGRESSIVE, GENTLE };
struct ABSL_CACHELINE_ALIGNED MutexGlobals {
absl::once_flag once;
int spinloop_iterations = 0;
- int32_t mutex_sleep_limit[2] = {};
+ int32_t mutex_sleep_spins[2] = {};
+ absl::Duration mutex_sleep_time;
};
+absl::Duration MeasureTimeToYield() {
+ absl::Time before = absl::Now();
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalMutexYield)();
+ return absl::Now() - before;
+}
+
const MutexGlobals &GetMutexGlobals() {
ABSL_CONST_INIT static MutexGlobals data;
absl::base_internal::LowLevelCallOnce(&data.once, [&]() {
const int num_cpus = absl::base_internal::NumCPUs();
data.spinloop_iterations = num_cpus > 1 ? 1500 : 0;
- // If this a uniprocessor, only yield/sleep. Otherwise, if the mode is
+      // If this is a uniprocessor, only yield/sleep.
+ // Real-time threads are often unable to yield, so the sleep time needs
+ // to be long enough to keep the calling thread asleep until scheduling
+ // happens.
+      // If this is a multiprocessor, allow spinning. If the mode is
// aggressive then spin many times before yielding. If the mode is
// gentle then spin only a few times before yielding. Aggressive spinning
// is used to ensure that an Unlock() call, which must get the spin lock
// for any thread to make progress, gets it without undue delay.
if (num_cpus > 1) {
- data.mutex_sleep_limit[AGGRESSIVE] = 5000;
- data.mutex_sleep_limit[GENTLE] = 250;
+ data.mutex_sleep_spins[AGGRESSIVE] = 5000;
+ data.mutex_sleep_spins[GENTLE] = 250;
+ data.mutex_sleep_time = absl::Microseconds(10);
} else {
- data.mutex_sleep_limit[AGGRESSIVE] = 0;
- data.mutex_sleep_limit[GENTLE] = 0;
+ data.mutex_sleep_spins[AGGRESSIVE] = 0;
+ data.mutex_sleep_spins[GENTLE] = 0;
+ data.mutex_sleep_time = MeasureTimeToYield() * 5;
+ data.mutex_sleep_time =
+ std::min(data.mutex_sleep_time, absl::Milliseconds(1));
+ data.mutex_sleep_time =
+ std::max(data.mutex_sleep_time, absl::Microseconds(10));
}
});
return data;
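
For readers of the hunk above: on a uniprocessor the sleep interval is now derived from a measured yield rather than hard-coded at 10us. A minimal standalone sketch of the same computation (not part of the patch; AdaptiveSleepTime and YieldOnce are hypothetical names standing in for the internal yield symbol):

    #include <algorithm>

    #include "absl/time/clock.h"
    #include "absl/time/time.h"

    // Sleep for five yields' worth of time, clamped to [10us, 1ms].
    absl::Duration AdaptiveSleepTime(void (*YieldOnce)()) {
      const absl::Time before = absl::Now();
      YieldOnce();
      const absl::Duration yield_cost = absl::Now() - before;
      return std::min(std::max(yield_cost * 5, absl::Microseconds(10)),
                      absl::Milliseconds(1));
    }
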
@@ -164,7 +177,8 @@ namespace synchronization_internal {
// Returns the Mutex delay on iteration `c` depending on the given `mode`.
// The returned value should be used as `c` for the next call to `MutexDelay`.
int MutexDelay(int32_t c, int mode) {
- const int32_t limit = GetMutexGlobals().mutex_sleep_limit[mode];
+ const int32_t limit = GetMutexGlobals().mutex_sleep_spins[mode];
+ const absl::Duration sleep_time = GetMutexGlobals().mutex_sleep_time;
if (c < limit) {
// Spin.
c++;
@@ -177,7 +191,7 @@ int MutexDelay(int32_t c, int mode) {
c++;
} else {
// Then wait.
- absl::SleepFor(absl::Microseconds(10));
+ absl::SleepFor(sleep_time);
c = 0;
}
ABSL_TSAN_MUTEX_POST_DIVERT(nullptr, 0);
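
MutexDelay returns the updated counter, so call sites feed it back in on every retry: spin for the first mutex_sleep_spins[mode] iterations, then alternate a yield with a sleep of mutex_sleep_time. A hedged sketch of how call sites inside this file use it (TryStep and RetryWithBackoff are hypothetical; MutexDelay and GENTLE are the names from the code above):

    bool TryStep();  // hypothetical stand-in for the operation being retried

    void RetryWithBackoff() {
      int c = 0;
      while (!TryStep()) {
        // Spins first, then alternating yield / SleepFor(mutex_sleep_time).
        c = synchronization_internal::MutexDelay(c, GENTLE);
      }
    }
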
@@ -571,10 +585,15 @@ static SynchLocksHeld *Synch_GetAllLocks() {
void Mutex::IncrementSynchSem(Mutex *mu, PerThreadSynch *w) {
if (mu) {
ABSL_TSAN_MUTEX_PRE_DIVERT(mu, 0);
- }
- PerThreadSem::Post(w->thread_identity());
- if (mu) {
+ // We miss synchronization around passing PerThreadSynch between threads
+ // since it happens inside of the Mutex code, so we need to ignore all
+ // accesses to the object.
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
+ PerThreadSem::Post(w->thread_identity());
+ ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
ABSL_TSAN_MUTEX_POST_DIVERT(mu, 0);
+ } else {
+ PerThreadSem::Post(w->thread_identity());
}
}
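
The annotation pair added above tells TSan to ignore the accesses PerThreadSem::Post makes to the woken thread's PerThreadSynch, since that hand-off is synchronized by the Mutex machinery itself rather than by anything TSan can observe. A minimal sketch of the pattern from absl/base/dynamic_annotations.h (the variable and function here are hypothetical):

    #include "absl/base/dynamic_annotations.h"

    int handed_off_value = 0;  // hypothetical data handed to another thread
                               // under synchronization TSan cannot see

    void PublishWithoutTsanChecking() {
      ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
      handed_off_value = 42;  // races on this access are not reported
      ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END();
    }
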
@@ -609,21 +628,6 @@ void Mutex::InternalAttemptToUseMutexInFatalSignalHandler() {
std::memory_order_release);
}
-// --------------------------time support
-
-// Return the current time plus the timeout. Use the same clock as
-// PerThreadSem::Wait() for consistency. Unfortunately, we don't have
-// such a choice when a deadline is given directly.
-static absl::Time DeadlineFromTimeout(absl::Duration timeout) {
-#ifndef _WIN32
- struct timeval tv;
- gettimeofday(&tv, nullptr);
- return absl::TimeFromTimeval(tv) + timeout;
-#else
- return absl::Now() + timeout;
-#endif
-}
-
// --------------------------Mutexes
// In the layout below, the msb of the bottom byte is currently unused. Also,
@@ -1129,7 +1133,7 @@ void Mutex::TryRemove(PerThreadSynch *s) {
// if the wait extends past the absolute time specified, even if "s" is still
// on the mutex queue. In this case, remove "s" from the queue and return
// true, otherwise return false.
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Block(PerThreadSynch *s) {
+void Mutex::Block(PerThreadSynch *s) {
while (s->state.load(std::memory_order_acquire) == PerThreadSynch::kQueued) {
if (!DecrementSynchSem(this, s, s->waitp->timeout)) {
// After a timeout, we go into a spin loop until we remove ourselves
@@ -1278,7 +1282,7 @@ static inline void DebugOnlyLockLeave(Mutex *mu) {
static char *StackString(void **pcs, int n, char *buf, int maxlen,
bool symbolize) {
- static const int kSymLen = 200;
+ static constexpr int kSymLen = 200;
char sym[kSymLen];
int len = 0;
for (int i = 0; i != n; i++) {
@@ -1286,7 +1290,7 @@ static char *StackString(void **pcs, int n, char *buf, int maxlen,
return buf;
size_t count = static_cast<size_t>(maxlen - len);
if (symbolize) {
- if (!symbolizer(pcs[i], sym, kSymLen)) {
+ if (!absl::Symbolize(pcs[i], sym, kSymLen)) {
sym[0] = '\0';
}
snprintf(buf + len, count, "%s\t@ %p %s\n", (i == 0 ? "\n" : ""), pcs[i],
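
With the RegisterSymbolizer hook gone, StackString calls absl::Symbolize directly. A short usage sketch of that API (PrintSymbol is a hypothetical name; absl::InitializeSymbolizer(argv[0]) must have been called near the start of main for names to resolve):

    #include <cstdio>

    #include "absl/debugging/symbolize.h"

    void PrintSymbol(const void *pc) {
      char name[256];
      if (absl::Symbolize(pc, name, static_cast<int>(sizeof(name)))) {
        std::printf("%p  %s\n", pc, name);
      } else {
        std::printf("%p  (unknown)\n", pc);
      }
    }
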
@@ -1392,7 +1396,7 @@ static GraphId DeadlockCheck(Mutex *mu) {
ABSL_RAW_LOG(ERROR, "Cycle: ");
int path_len = deadlock_graph->FindPath(
mu_id, other_node_id, ABSL_ARRAYSIZE(b->path), b->path);
- for (int j = 0; j != path_len; j++) {
+ for (int j = 0; j != path_len && j != ABSL_ARRAYSIZE(b->path); j++) {
GraphId id = b->path[j];
Mutex *path_mu = static_cast<Mutex *>(deadlock_graph->Ptr(id));
if (path_mu == nullptr) continue;
@@ -1405,6 +1409,9 @@ static GraphId DeadlockCheck(Mutex *mu) {
symbolize);
ABSL_RAW_LOG(ERROR, "%s", b->buf);
}
+ if (path_len > static_cast<int>(ABSL_ARRAYSIZE(b->path))) {
+ ABSL_RAW_LOG(ERROR, "(long cycle; list truncated)");
+ }
if (synch_deadlock_detection.load(std::memory_order_acquire) ==
OnDeadlockCycle::kAbort) {
deadlock_graph_mu.Unlock(); // avoid deadlock in fatal sighandler
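
DeadlockCheck only runs when deadlock detection is enabled, so the truncation guard above is exercised via the public knob in absl/synchronization/mutex.h. A brief usage sketch (EnableDeadlockReporting is a hypothetical wrapper):

    #include "absl/synchronization/mutex.h"

    void EnableDeadlockReporting() {
      // kReport logs lock-ordering cycles like the one printed above;
      // kAbort additionally crashes the process when a cycle is found.
      absl::SetMutexDeadlockDetectionMode(absl::OnDeadlockCycle::kReport);
    }
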
@@ -1478,7 +1485,7 @@ static bool TryAcquireWithSpinning(std::atomic<intptr_t>* mu) {
return false;
}
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
+void Mutex::Lock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
GraphId id = DebugOnlyDeadlockCheck(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1496,7 +1503,7 @@ ABSL_XRAY_LOG_ARGS(1) void Mutex::Lock() {
ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
}
-ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderLock() {
+void Mutex::ReaderLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
GraphId id = DebugOnlyDeadlockCheck(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1520,7 +1527,13 @@ void Mutex::LockWhen(const Condition &cond) {
}
bool Mutex::LockWhenWithTimeout(const Condition &cond, absl::Duration timeout) {
- return LockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kExclusive, &cond,
+ KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+ return res;
}
bool Mutex::LockWhenWithDeadline(const Condition &cond, absl::Time deadline) {
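
LockWhenWithTimeout no longer converts the relative timeout into an absolute deadline via gettimeofday; the Duration is handed straight to KernelTimeout. Callers see the same public API. A hedged usage sketch (the ready flag, the 100ms figure, and WaitBriefly are illustrative only):

    #include "absl/synchronization/mutex.h"
    #include "absl/time/time.h"

    absl::Mutex mu;
    bool ready = false;  // set to true by another thread while holding mu

    bool WaitBriefly() {
      // Acquires mu when `ready` becomes true, or after ~100ms regardless.
      bool ok = mu.LockWhenWithTimeout(absl::Condition(&ready),
                                       absl::Milliseconds(100));
      mu.Unlock();
      return ok;  // true iff the condition held when the lock was acquired
    }
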
@@ -1543,7 +1556,12 @@ void Mutex::ReaderLockWhen(const Condition &cond) {
bool Mutex::ReaderLockWhenWithTimeout(const Condition &cond,
absl::Duration timeout) {
- return ReaderLockWhenWithDeadline(cond, DeadlineFromTimeout(timeout));
+ ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_read_lock);
+ GraphId id = DebugOnlyDeadlockCheck(this);
+ bool res = LockSlowWithDeadline(kShared, &cond, KernelTimeout(timeout), 0);
+ DebugOnlyLockEnter(this, id);
+ ABSL_TSAN_MUTEX_POST_LOCK(this, __tsan_mutex_read_lock, 0);
+ return res;
}
bool Mutex::ReaderLockWhenWithDeadline(const Condition &cond,
@@ -1568,7 +1586,18 @@ void Mutex::Await(const Condition &cond) {
}
bool Mutex::AwaitWithTimeout(const Condition &cond, absl::Duration timeout) {
- return AwaitWithDeadline(cond, DeadlineFromTimeout(timeout));
+ if (cond.Eval()) { // condition already true; nothing to do
+ if (kDebugMode) {
+ this->AssertReaderHeld();
+ }
+ return true;
+ }
+
+ KernelTimeout t{timeout};
+ bool res = this->AwaitCommon(cond, t);
+ ABSL_RAW_CHECK(res || t.has_timeout(),
+ "condition untrue on return from Await");
+ return res;
}
bool Mutex::AwaitWithDeadline(const Condition &cond, absl::Time deadline) {
@@ -1609,7 +1638,7 @@ bool Mutex::AwaitCommon(const Condition &cond, KernelTimeout t) {
return res;
}
-ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
+bool Mutex::TryLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
if ((v & (kMuWriter | kMuReader | kMuEvent)) == 0 && // try fast acquire
@@ -1638,7 +1667,7 @@ ABSL_XRAY_LOG_ARGS(1) bool Mutex::TryLock() {
return false;
}
-ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
+bool Mutex::ReaderTryLock() {
ABSL_TSAN_MUTEX_PRE_LOCK(this,
__tsan_mutex_read_lock | __tsan_mutex_try_lock);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1684,7 +1713,7 @@ ABSL_XRAY_LOG_ARGS(1) bool Mutex::ReaderTryLock() {
return false;
}
-ABSL_XRAY_LOG_ARGS(1) void Mutex::Unlock() {
+void Mutex::Unlock() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1736,7 +1765,7 @@ static bool ExactlyOneReader(intptr_t v) {
return (v & kMuMultipleWaitersMask) == 0;
}
-ABSL_XRAY_LOG_ARGS(1) void Mutex::ReaderUnlock() {
+void Mutex::ReaderUnlock() {
ABSL_TSAN_MUTEX_PRE_UNLOCK(this, __tsan_mutex_read_lock);
DebugOnlyLockLeave(this);
intptr_t v = mu_.load(std::memory_order_relaxed);
@@ -1766,7 +1795,7 @@ static intptr_t ClearDesignatedWakerMask(int flag) {
case 1: // blocked; turn off the designated waker bit
return ~static_cast<intptr_t>(kMuDesig);
}
- ABSL_INTERNAL_UNREACHABLE;
+ ABSL_UNREACHABLE();
}
// Conditionally ignores the existence of waiting writers if a reader that has
@@ -1780,7 +1809,7 @@ static intptr_t IgnoreWaitingWritersMask(int flag) {
case 1: // blocked; pretend there are no waiting writers
return ~static_cast<intptr_t>(kMuWrWait);
}
- ABSL_INTERNAL_UNREACHABLE;
+ ABSL_UNREACHABLE();
}
// Internal version of LockWhen(). See LockSlowWithDeadline()
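
The two hunks above swap ABSL_INTERNAL_UNREACHABLE for ABSL_UNREACHABLE() from absl/base/optimization.h, which is why that header is now included at the top of the file. A minimal sketch of the pattern (the enum and function are hypothetical):

    #include "absl/base/optimization.h"

    enum class WakerState { kUnlocked = 0, kBlocked = 1 };  // hypothetical

    int MaskFor(WakerState s) {
      switch (s) {
        case WakerState::kUnlocked: return 0;
        case WakerState::kBlocked: return 1;
      }
      ABSL_UNREACHABLE();  // all enumerators handled above
    }
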
@@ -2342,22 +2371,26 @@ ABSL_ATTRIBUTE_NOINLINE void Mutex::UnlockSlow(SynchWaitParams *waitp) {
} // end of for(;;)-loop
if (wake_list != kPerThreadSynchNull) {
- int64_t wait_cycles = 0;
+ int64_t total_wait_cycles = 0;
+ int64_t max_wait_cycles = 0;
int64_t now = base_internal::CycleClock::Now();
do {
- // Sample lock contention events only if the waiter was trying to acquire
+ // Profile lock contention events only if the waiter was trying to acquire
// the lock, not waiting on a condition variable or Condition.
if (!wake_list->cond_waiter) {
- wait_cycles += (now - wake_list->waitp->contention_start_cycles);
+ int64_t cycles_waited =
+ (now - wake_list->waitp->contention_start_cycles);
+ total_wait_cycles += cycles_waited;
+ if (max_wait_cycles == 0) max_wait_cycles = cycles_waited;
wake_list->waitp->contention_start_cycles = now;
wake_list->waitp->should_submit_contention_data = true;
}
wake_list = Wakeup(wake_list); // wake waiters
} while (wake_list != kPerThreadSynchNull);
- if (wait_cycles > 0) {
- mutex_tracer("slow release", this, wait_cycles);
+ if (total_wait_cycles > 0) {
+ mutex_tracer("slow release", this, total_wait_cycles);
ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
- submit_profile_data(wait_cycles);
+ submit_profile_data(total_wait_cycles);
ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
}
}
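
The contention figures above are CycleClock ticks, not wall time: total_wait_cycles sums the waiting time of every waiter woken in this release, while max_wait_cycles records the wait of the first waiter woken in the batch. For scale, a hedged sketch of converting such a count to a duration (CyclesToDuration is a hypothetical helper; CycleClock is an internal API):

    #include <cstdint>

    #include "absl/base/internal/cycleclock.h"
    #include "absl/time/time.h"

    absl::Duration CyclesToDuration(int64_t cycles) {
      // Frequency() is in cycles per second.
      return absl::Seconds(static_cast<double>(cycles) /
                           absl::base_internal::CycleClock::Frequency());
    }
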
@@ -2630,7 +2663,7 @@ bool CondVar::WaitCommon(Mutex *mutex, KernelTimeout t) {
}
bool CondVar::WaitWithTimeout(Mutex *mu, absl::Duration timeout) {
- return WaitWithDeadline(mu, DeadlineFromTimeout(timeout));
+ return WaitCommon(mu, KernelTimeout(timeout));
}
bool CondVar::WaitWithDeadline(Mutex *mu, absl::Time deadline) {
@@ -2758,25 +2791,31 @@ static bool Dereference(void *arg) {
return *(static_cast<bool *>(arg));
}
-Condition::Condition() {} // null constructor, used for kTrue only
-const Condition Condition::kTrue;
+ABSL_CONST_INIT const Condition Condition::kTrue;
Condition::Condition(bool (*func)(void *), void *arg)
: eval_(&CallVoidPtrFunction),
- function_(func),
- method_(nullptr),
- arg_(arg) {}
+ arg_(arg) {
+ static_assert(sizeof(&func) <= sizeof(callback_),
+ "An overlarge function pointer passed to Condition.");
+ StoreCallback(func);
+}
bool Condition::CallVoidPtrFunction(const Condition *c) {
- return (*c->function_)(c->arg_);
+ using FunctionPointer = bool (*)(void *);
+ FunctionPointer function_pointer;
+ std::memcpy(&function_pointer, c->callback_, sizeof(function_pointer));
+ return (*function_pointer)(c->arg_);
}
Condition::Condition(const bool *cond)
: eval_(CallVoidPtrFunction),
- function_(Dereference),
- method_(nullptr),
// const_cast is safe since Dereference does not modify arg
- arg_(const_cast<bool *>(cond)) {}
+ arg_(const_cast<bool *>(cond)) {
+ using FunctionPointer = bool (*)(void *);
+ const FunctionPointer dereference = Dereference;
+ StoreCallback(dereference);
+}
bool Condition::Eval() const {
// eval_ == null for kTrue
@@ -2784,14 +2823,15 @@ bool Condition::Eval() const {
}
bool Condition::GuaranteedEqual(const Condition *a, const Condition *b) {
- if (a == nullptr) {
+ // kTrue logic.
+ if (a == nullptr || a->eval_ == nullptr) {
return b == nullptr || b->eval_ == nullptr;
+ } else if (b == nullptr || b->eval_ == nullptr) {
+ return false;
}
- if (b == nullptr || b->eval_ == nullptr) {
- return a->eval_ == nullptr;
- }
- return a->eval_ == b->eval_ && a->function_ == b->function_ &&
- a->arg_ == b->arg_ && a->method_ == b->method_;
+ // Check equality of the representative fields.
+ return a->eval_ == b->eval_ && a->arg_ == b->arg_ &&
+ !memcmp(a->callback_, b->callback_, sizeof(a->callback_));
}
ABSL_NAMESPACE_END
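
The Condition changes replace the old function_/method_ members with a raw callback_ byte buffer: the pointer is copied in with memcpy, copied back out before the call, and compared bytewise in GuaranteedEqual. A minimal standalone sketch of that storage scheme (SmallCallback is hypothetical; the real class also sizes the buffer for pointer-to-member callbacks):

    #include <cstring>

    class SmallCallback {
     public:
      explicit SmallCallback(bool (*fn)(void *)) {
        static_assert(sizeof(fn) <= sizeof(storage_), "callback too large");
        std::memcpy(storage_, &fn, sizeof(fn));  // store the pointer's bytes
      }
      bool Invoke(void *arg) const {
        bool (*fn)(void *);
        std::memcpy(&fn, storage_, sizeof(fn));  // recover the pointer, no UB
        return fn(arg);
      }
      bool SameCallee(const SmallCallback &other) const {
        return std::memcmp(storage_, other.storage_, sizeof(storage_)) == 0;
      }

     private:
      char storage_[sizeof(bool (*)(void *))] = {};
    };
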