This fixes the lost wakeup (from a bug in signal stealing) with a change
in the usage of g_signals[] in the condition variable internal state.
It also completely eliminates the concept and handling of signal stealing,
as well as the need for signalers to block to wait for waiters to wake up
every time there is a G1/G2 switch. This greatly reduces the average and
maximum latency for pthread_cond_signal.

The g_signals[] field now contains a signal count that is relative to the
current g1_start value. Since it is a 32-bit field, and the LSB is still
reserved (though currently unused), it has a 31-bit value that corresponds
to the low 31 bits of the sequence number in g1_start. (Since g1_start
also has an LSB flag, this means bits 31:1 in g_signals correspond to
bits 31:1 in g1_start, plus the current signal count.)

By making the signal count relative to g1_start, there is no longer any
ambiguity or A/B/A issue, and thus any checks before blocking, including
the futex call itself, are guaranteed not to block if a G1/G2 switch
occurs, even if the signal count remains the same. This allows initially
safely blocking in G2 until the switch to G1 occurs, and then
transitioning from G1 to a new G1 or G2, while always being able to
distinguish the state change. This removes the race condition and A/B/A
problems that otherwise occurred if a late (pre-empted) waiter were to
resume just as the futex call attempted to block on g_signals, since
there was otherwise no last opportunity to re-check things like whether
the current G1 group was already closed.

By fixing these issues, the signal stealing code can be eliminated, since
there is no concept of signal stealing anymore. The code to block for all
waiters to exit g_refs can also be removed, since any waiters that are
still in the g_refs region can be guaranteed to safely wake up and exit.
If there are still any left at this time, they are all sent one final
futex wakeup to ensure that they are not blocked any longer, but there is
no need for the signaler to block and wait for them to wake up and exit
the g_refs region.

The signal count is then effectively "zeroed", but since it is now
relative to g1_start, this is done by advancing it to a new value that
can be observed by any pending blocking waiters. Any late waiters can
always tell the difference, and can thus just cleanly exit if they are in
a stale G1 or G2. They can never steal a signal from the current G1 if
they are not in the current G1, since the signal value that has to match
in the cmpxchg has the low 31 bits of the g1_start value contained in it,
and that is checked first, so it won't match if there has been a G1/G2
change.

Note: the 31-bit sequence number used in g_signals is designed to handle
wrap-around when checking the signal count, but if the entire 31-bit
wrap-around (2 billion signals) occurs while there is still a late waiter
that has not yet resumed, and it happens to then match the current
g1_start low bits, and the pre-emption occurs after the normal "closed
group" checks (which are 64-bit) but before the waiter reaches the futex
syscall and signal-consuming code, then an A/B/A issue could still result
and cause an incorrect assumption about whether it should block. This
particular scenario seems unlikely in practice. Note that once awake from
the futex, the waiter would notice the closed group before consuming the
signal (since that is still a 64-bit check that would not be aliased by
the wrap-around in g_signals), so the biggest impact would be blocking on
the futex until the next full wakeup from a G1/G2 switch.

Signed-off-by: Frank Barrus <frankbarrus_sw@shaggy.cc>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
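To make the relative-count encoding concrete, here is a small standalone C
sketch (group_word and the demo in main are illustrative assumptions, not
part of the glibc sources). It shows the property the commit message relies
on: a G1/G2 switch moves the g1_start-derived base of the word forward, so
the value a late waiter expects no longer matches even when the relative
signal count is unchanged, and a futex wait on it returns instead of
sleeping through the switch.

/* Illustrative sketch only -- not part of glibc.  Encodes a group word the
   way the commit message describes: bits 31:1 hold the low bits of g1_start
   plus the pending signal count; bit 0 stays reserved.  */
#include <stdint.h>
#include <stdio.h>

static unsigned int
group_word (uint64_t g1_start_seq, unsigned int signal_count)
{
  return (unsigned int) ((g1_start_seq + signal_count) << 1);
}

int
main (void)
{
  /* A waiter snapshots the word while its group (starting at sequence 1000)
     has no pending signals; a futex wait would pass this as the expected
     value.  */
  unsigned int expected = group_word (1000, 0);

  /* Same group, still no signals: the futex wait would block.  */
  printf ("no change:    %s\n",
          group_word (1000, 0) == expected ? "blocks" : "returns");

  /* After a G1/G2 switch g1_start advanced to 1024.  The relative count is
     still 0, but the word differs, so the futex wait returns immediately
     instead of missing the wakeup.  */
  printf ("after switch: %s\n",
          group_word (1024, 0) == expected ? "blocks" : "returns");
  return 0;
}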
/* pthread_cond_common -- shared code for condition variable.
   Copyright (C) 2016-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <atomic.h>
#include <atomic_wide_counter.h>
#include <stdint.h>
#include <pthread.h>

/* We need 3 least-significant bits on __wrefs for something else.
   This also matches __atomic_wide_counter requirements: The highest
   value we add is __PTHREAD_COND_MAX_GROUP_SIZE << 2 to __g1_start
   (the two extra bits are for the lock in the two LSBs of
   __g1_start).  */
#define __PTHREAD_COND_MAX_GROUP_SIZE ((unsigned) 1 << 29)

static inline uint64_t
__condvar_load_wseq_relaxed (pthread_cond_t *cond)
{
  return __atomic_wide_counter_load_relaxed (&cond->__data.__wseq);
}

static inline uint64_t
__condvar_fetch_add_wseq_acquire (pthread_cond_t *cond, unsigned int val)
{
  return __atomic_wide_counter_fetch_add_acquire (&cond->__data.__wseq, val);
}

static inline uint64_t
__condvar_load_g1_start_relaxed (pthread_cond_t *cond)
{
  return __atomic_wide_counter_load_relaxed (&cond->__data.__g1_start);
}

static inline void
__condvar_add_g1_start_relaxed (pthread_cond_t *cond, unsigned int val)
{
  __atomic_wide_counter_add_relaxed (&cond->__data.__g1_start, val);
}

#if __HAVE_64B_ATOMICS == 1

static inline uint64_t
__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val)
{
  return atomic_fetch_xor_release (&cond->__data.__wseq.__value64, val);
}

#else /* !__HAVE_64B_ATOMICS */

/* The xor operation needs to be an atomic read-modify-write.  The write
   itself is not an issue as it affects just the lower-order half but not bits
   used in the add operation.  To make the full fetch-and-xor atomic, we
   exploit that concurrently, the value can increase by at most 1<<31 (*): The
   xor operation is only called while having acquired the lock, so not more
   than __PTHREAD_COND_MAX_GROUP_SIZE waiters can enter concurrently and thus
   increment __wseq.  Therefore, if the xor operation observes a value of
   __wseq, then the value it applies the modification to later on can be
   derived.  */

static uint64_t __attribute__ ((unused))
__condvar_fetch_xor_wseq_release (pthread_cond_t *cond, unsigned int val)
{
  /* First, get the current value.  See __atomic_wide_counter_load_relaxed.  */
  unsigned int h, l, h2;
  do
    {
      h = atomic_load_acquire (&cond->__data.__wseq.__value32.__high);
      l = atomic_load_acquire (&cond->__data.__wseq.__value32.__low);
      h2 = atomic_load_relaxed (&cond->__data.__wseq.__value32.__high);
    }
  while (h != h2);
  if (((l >> 31) > 0) && ((h >> 31) == 0))
    h++;
  h &= ~((unsigned int) 1 << 31);
  l &= ~((unsigned int) 1 << 31);

  /* Now modify.  Due to the coherence rules, the prior load will read a value
     earlier in modification order than the following fetch-xor.
     This uses release MO to make the full operation have release semantics
     (all other operations access the lower-order half).  */
  unsigned int l2
    = (atomic_fetch_xor_release (&cond->__data.__wseq.__value32.__low, val)
       & ~((unsigned int) 1 << 31));
  if (l2 < l)
    /* The lower-order half overflowed in the meantime.  This happened exactly
       once due to the limit on concurrent waiters (see above).  */
    h++;
  return ((uint64_t) h << 31) + l2;
}

#endif /* !__HAVE_64B_ATOMICS */

/* The lock that signalers use.  See pthread_cond_wait_common for uses.
   The lock is our normal three-state lock: not acquired (0) / acquired (1) /
   acquired-with-futex_wake-request (2).  However, we need to preserve the
   other bits in the unsigned int used for the lock, and therefore it is a
   little more complex.  */
static void __attribute__ ((unused))
__condvar_acquire_lock (pthread_cond_t *cond, int private)
{
  unsigned int s = atomic_load_relaxed (&cond->__data.__g1_orig_size);
  while ((s & 3) == 0)
    {
      if (atomic_compare_exchange_weak_acquire (&cond->__data.__g1_orig_size,
                                                &s, s | 1))
        return;
      /* TODO Spinning and back-off.  */
    }
  /* We can't change from not acquired to acquired, so try to change to
     acquired-with-futex-wake-request and do a futex wait if we cannot change
     from not acquired.  */
  while (1)
    {
      while ((s & 3) != 2)
        {
          if (atomic_compare_exchange_weak_acquire
              (&cond->__data.__g1_orig_size, &s, (s & ~(unsigned int) 3) | 2))
            {
              if ((s & 3) == 0)
                return;
              break;
            }
          /* TODO Back off.  */
        }
      futex_wait_simple (&cond->__data.__g1_orig_size,
                         (s & ~(unsigned int) 3) | 2, private);
      /* Reload so we see a recent value.  */
      s = atomic_load_relaxed (&cond->__data.__g1_orig_size);
    }
}

/* See __condvar_acquire_lock.  */
static void __attribute__ ((unused))
__condvar_release_lock (pthread_cond_t *cond, int private)
{
  if ((atomic_fetch_and_release (&cond->__data.__g1_orig_size,
                                 ~(unsigned int) 3) & 3)
      == 2)
    futex_wake (&cond->__data.__g1_orig_size, 1, private);
}

/* Only use this when having acquired the lock.  */
static unsigned int __attribute__ ((unused))
__condvar_get_orig_size (pthread_cond_t *cond)
{
  return atomic_load_relaxed (&cond->__data.__g1_orig_size) >> 2;
}

/* Only use this when having acquired the lock.  */
static void __attribute__ ((unused))
__condvar_set_orig_size (pthread_cond_t *cond, unsigned int size)
{
  /* We have acquired the lock, but might get one concurrent update due to a
     lock state change from acquired to acquired-with-futex_wake-request.
     The store with relaxed MO is fine because there will be no further
     changes to the lock bits nor the size, and we will subsequently release
     the lock with release MO.  */
  unsigned int s;
  s = (atomic_load_relaxed (&cond->__data.__g1_orig_size) & 3)
      | (size << 2);
  if ((atomic_exchange_relaxed (&cond->__data.__g1_orig_size, s) & 3)
      != (s & 3))
    atomic_store_relaxed (&cond->__data.__g1_orig_size, (size << 2) | 2);
}

/* Returns FUTEX_SHARED or FUTEX_PRIVATE based on the provided __wrefs
   value.  */
static int __attribute__ ((unused))
__condvar_get_private (int flags)
{
  if ((flags & __PTHREAD_COND_SHARED_MASK) == 0)
    return FUTEX_PRIVATE;
  else
    return FUTEX_SHARED;
}

/* This closes G1 (whose index is in G1INDEX), waits for all futex waiters to
   leave G1, converts G1 into a fresh G2, and then switches group roles so that
   the former G2 becomes the new G1 ending at the current __wseq value when we
   eventually make the switch (WSEQ is just an observation of __wseq by the
   signaler).
   If G2 is empty, it will not switch groups because then it would create an
   empty G1 which would require switching groups again on the next signal.
   Returns false iff groups were not switched because G2 was empty.  */
static bool __attribute__ ((unused))
__condvar_quiesce_and_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
                                 unsigned int *g1index, int private)
{
  unsigned int g1 = *g1index;

  /* If there is no waiter in G2, we don't do anything.  The expression may
     look odd but remember that __g_size might hold a negative value, so
     putting the expression this way avoids relying on implementation-defined
     behavior.
     Note that this works correctly for a zero-initialized condvar too.  */
  unsigned int old_orig_size = __condvar_get_orig_size (cond);
  uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond) >> 1;
  if (((unsigned) (wseq - old_g1_start - old_orig_size)
       + cond->__data.__g_size[g1 ^ 1]) == 0)
    return false;

  /* Now try to close and quiesce G1.  We have to consider the following kinds
     of waiters:
     * Waiters from less recent groups than G1 are not affected because
       nothing will change for them apart from __g1_start getting larger.
     * New waiters arriving concurrently with the group switching will all go
       into G2 until we atomically make the switch.  Waiters existing in G2
       are not affected.
     * Waiters in G1 will be closed out immediately by the advancing of
       __g_signals to the next "lowseq" (low 31 bits of the new g1_start),
       which will prevent waiters from blocking using a futex on
       __g_signals since it provides enough signals for all possible
       remaining waiters.  As a result, they can each consume a signal
       and they will eventually remove their group reference.  */

  /* Update __g1_start, which finishes closing this group.  The value we add
     will never be negative because old_orig_size can only be zero when we
     switch groups the first time after a condvar was initialized, in which
     case G1 will be at index 1 and we will add a value of 1.
     Relaxed MO is fine because the change comes with no additional
     constraints that others would have to observe.  */
  __condvar_add_g1_start_relaxed (cond,
                                  (old_orig_size << 1) + (g1 == 1 ? 1 : - 1));

  unsigned int lowseq = ((old_g1_start + old_orig_size) << 1) & ~1U;

  /* If any waiters still hold group references (and thus could be blocked),
     then wake them all up now and prevent any running ones from blocking.
     This is effectively a catch-all for any possible current or future
     bugs that can allow the group size to reach 0 before all G1 waiters
     have been awakened or at least given signals to consume, or any
     other case that can leave blocked (or about to block) older waiters.  */
  if ((atomic_fetch_or_release (cond->__data.__g_refs + g1, 0) >> 1) > 0)
    {
      /* First advance signals to the end of the group (i.e. enough signals
         for the entire G1 group) to ensure that waiters which have not
         yet blocked in the futex will not block.
         Note that in the vast majority of cases, this should never
         actually be necessary, since __g_signals will have enough
         signals for the remaining g_refs waiters.  As an optimization,
         we could check this first before proceeding, although that
         could still leave the potential for futex lost wakeup bugs
         if the signal count was non-zero but the futex wakeup
         was somehow lost.  */
      atomic_store_release (cond->__data.__g_signals + g1, lowseq);

      futex_wake (cond->__data.__g_signals + g1, INT_MAX, private);
    }

  /* At this point, the old G1 is now a valid new G2 (but not in use yet).
     No old waiter can grab a signal or acquire a reference without
     noticing that __g1_start is larger.
     We can now publish the group switch by flipping the G2 index in __wseq.
     Release MO so that this synchronizes with the acquire MO operation
     waiters use to obtain a position in the waiter sequence.  */
  wseq = __condvar_fetch_xor_wseq_release (cond, 1) >> 1;
  g1 ^= 1;
  *g1index ^= 1;

  /* Now advance the new G1 g_signals to the new lowseq, giving it
     an effective signal count of 0 to start.  */
  atomic_store_release (cond->__data.__g_signals + g1, lowseq);

  /* These values are just observed by signalers, and thus protected by the
     lock.  */
  unsigned int orig_size = wseq - (old_g1_start + old_orig_size);
  __condvar_set_orig_size (cond, orig_size);
  /* Use an addition so that we do not lose track of cancellations in what
     was previously G2.  */
  cond->__data.__g_size[g1] += orig_size;

  /* The new G1's size may be zero because of cancellations during its time
     as G2.  If this happens, there are no waiters that have to receive a
     signal, so we do not need to add any and return false.  */
  if (cond->__data.__g_size[g1] == 0)
    return false;

  return true;
}