Mirror of git://sourceware.org/git/glibc.git, synced 2025-03-06 20:58:33 +01:00
nptl: Fix indentation
In my previous change I turned a nested loop into a simple loop. I'm doing the resulting indentation changes in a separate commit to make the diff on the previous commit easier to review.

Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
Parent: 929a4764ac
Commit: ee6c14ed59
1 changed file with 53 additions and 53 deletions
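The commit message refers to the parent commit, which collapsed a nested wait loop into a single loop; this commit only re-indents the affected block. As a rough illustration of that kind of refactor, here is a minimal standalone sketch, not the glibc code: try_consume, should_block and block are hypothetical stand-ins for the real signal-consumption and futex-wait steps.

/* Illustrative sketch only (not glibc code): the shape of the refactor
   described in the commit message.  */
#include <stdbool.h>
#include <stdio.h>

static int attempts;

static bool
try_consume (void)
{
  /* Pretend the third attempt succeeds.  */
  return ++attempts >= 3;
}

static bool
should_block (void)
{
  /* Pretend blocking is needed after every failed attempt.  */
  return true;
}

static void
block (void)
{
  puts ("blocking...");
}

/* Before: an inner retry loop nested in the outer wait loop.  The inner
   loop retries the consume step until it succeeds or decides to block.  */
static void
wait_nested (void)
{
  while (1)
    {
      while (1)
        {
          if (try_consume ())
            return;
          if (should_block ())
            break;      /* Leave the inner loop to block, then retry.  */
        }
      block ();
    }
}

/* After: the same logic as a single loop; every line of the body loses
   one indentation level.  */
static void
wait_single (void)
{
  while (1)
    {
      if (try_consume ())
        return;
      if (should_block ())
        block ();
    }
}

int
main (void)
{
  wait_nested ();
  attempts = 0;
  wait_single ();
  return 0;
}

Folding the inner loop away is what drops one indentation level from every line in the hunk below.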
@@ -383,65 +383,65 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,

This hunk is whitespace-only: each line is removed and re-added with one level of indentation dropped, following the parent commit that replaced the nested loop with a single loop. The re-indented code reads:

    }

  while (1)
    {
      /* Now wait until a signal is available in our group or it is closed.
         Acquire MO so that if we observe (signals == lowseq) after group
         switching in __condvar_quiesce_and_switch_g1, we synchronize with that
         store and will see the prior update of __g1_start done while switching
         groups too.  */
      unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g);
      uint64_t g1_start = __condvar_load_g1_start_relaxed (cond);
      unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U;

      if (seq < (g1_start >> 1))
        {
          /* If the group is closed already,
             then this waiter originally had enough extra signals to
             consume, up until the time its group was closed.  */
          break;
        }

      /* If there is an available signal, don't block.
         If __g1_start has advanced at all, then we must be in G1
         by now, perhaps in the process of switching back to an older
         G2, but in either case we're allowed to consume the available
         signal and should not block anymore.  */
      if ((int)(signals - lowseq) >= 2)
        {
          /* Try to grab a signal.  See above for MO.  (if we do another loop
             iteration we need to see the correct value of g1_start) */
          if (atomic_compare_exchange_weak_acquire (
                cond->__data.__g_signals + g,
                &signals, signals - 2))
            break;
          else
            continue;
        }

      // Now block.
      struct _pthread_cleanup_buffer buffer;
      struct _condvar_cleanup_buffer cbuffer;
      cbuffer.wseq = wseq;
      cbuffer.cond = cond;
      cbuffer.mutex = mutex;
      cbuffer.private = private;
      __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer);

      err = __futex_abstimed_wait_cancelable64 (
        cond->__data.__g_signals + g, signals, clockid, abstime, private);

      __pthread_cleanup_pop (&buffer, 0);

      if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
        {
          /* If we timed out, we effectively cancel waiting.  */
          __condvar_cancel_waiting (cond, seq, g, private);
          result = err;
          break;
        }
    }

  /* Confirm that we have been woken.  We do that before acquiring the mutex
     to allow for execution of pthread_cond_destroy while having acquired the
     mutex.  */
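The signal-grab step in the hunk uses a weak compare-and-swap and simply continues the loop when it fails. Below is a minimal sketch of that retry idiom written against standard C11 atomics rather than glibc's internal atomic_* wrappers; the counter name, its initial value, and the step of 2 are illustrative only.

/* Illustrative sketch of a weak compare-exchange retry loop (not glibc
   code).  Signals are counted in steps of 2, as in the hunk above.  */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int g_signals = 4;

/* Try to consume one "signal".  Returns 1 on success, 0 if none is
   available.  */
static int
consume_signal (void)
{
  unsigned int signals
    = atomic_load_explicit (&g_signals, memory_order_acquire);
  while (signals >= 2)
    {
      /* A weak CAS may fail spuriously or because another thread changed
         the value first; either way SIGNALS is reloaded and we retry.  */
      if (atomic_compare_exchange_weak_explicit (&g_signals, &signals,
                                                 signals - 2,
                                                 memory_order_acquire,
                                                 memory_order_relaxed))
        return 1;
    }
  return 0;
}

int
main (void)
{
  while (consume_signal ())
    puts ("consumed a signal");
  return 0;
}

A weak CAS must always be paired with a retry path, which is what the continue in the hunk provides: on failure the loop re-reads __g_signals and re-evaluates whether to grab a signal or block.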