author    Malte Skarupke <malteskarupke@fastmail.fm>    2024-12-04 08:05:40 -0500
committer Carlos O'Donell <carlos@redhat.com>    2025-01-17 14:56:58 -0500
commit    91bb902f58264a2fd50fbce8f39a9a290dd23706
tree      908d07252271d0325dbbceebd40ae67d104aaddc
parent    4b79e27a5073c02f6bff9aa8f4791230a0ab1867
nptl: Use all of g1_start and g_signals
The LSB of g_signals was unused. The LSB of g1_start was used to indicate which group is G2, and to make pthread_cond_wait always go to sleep if a waiter is in G2. A comment earlier in the file says that this is not correct to do:

"Waiters cannot determine whether they are currently in G2 or G1 -- but they do not have to because all they are interested in is whether there are available signals"

I either had to update that comment or get rid of the check, and I chose to get rid of the check. In fact, I don't quite know why it was there: there will never be available signals for group G2, so we didn't need the special case, and even if there were, a wake here would just be a spurious wake. The check might have caught some cases where the count has wrapped around, but it wouldn't do so reliably (and even if it did, why would you want to force a sleep in that case?), and we don't support that many concurrent waiters anyway. Getting rid of the check allows us to use one more bit, making us more robust to wraparound.

Signed-off-by: Malte Skarupke <malteskarupke@fastmail.fm>
Reviewed-by: Carlos O'Donell <carlos@redhat.com>
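To make the encoding change concrete, here is a minimal, self-contained C sketch; old_pack, old_seq, and old_g2_index are hypothetical names invented for illustration, not glibc's actual helpers. It contrasts the old scheme, which packed G2's group index into the LSB of __g1_start, with the new scheme, where __g1_start is a plain sequence number and the LSB is freed up:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical sketch, not glibc's real helpers: how the old scheme packed
   the index of G2 into the LSB of __g1_start, versus the new scheme that
   uses the full 64-bit value as a plain sequence number.  */

static uint64_t old_pack (uint64_t seq, unsigned g2_index)
{
  /* Old encoding: sequence in bits 63..1, G2's group index in bit 0.  */
  return (seq << 1) | (g2_index & 1);
}

static uint64_t old_seq (uint64_t packed)
{
  return packed >> 1;
}

static unsigned old_g2_index (uint64_t packed)
{
  return packed & 1;
}

int main (void)
{
  uint64_t packed = old_pack (1000, 1);
  printf ("old: seq=%llu g2=%u\n",
          (unsigned long long) old_seq (packed), old_g2_index (packed));

  /* New encoding: __g1_start is just the sequence, so closing a group is
     a plain addition and the spare bit doubles the range before the
     counter wraps.  */
  uint64_t g1_start = 1000;
  unsigned old_orig_size = 24;  /* assumed size of the group being closed */
  g1_start += old_orig_size;
  printf ("new: seq=%llu\n", (unsigned long long) g1_start);
  return 0;
}

Under the old encoding every update had to shift by one and patch the LSB (the "(old_orig_size << 1) + (g1 == 1 ? 1 : - 1)" term removed in the diff below); under the new one, closing a group is a single addition of old_orig_size.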
Diffstat (limited to 'nptl/pthread_cond_common.c')
-rw-r--r--  nptl/pthread_cond_common.c | 26
1 file changed, 10 insertions(+), 16 deletions(-)
diff --git a/nptl/pthread_cond_common.c b/nptl/pthread_cond_common.c
index b8737b31ca..2708d26295 100644
--- a/nptl/pthread_cond_common.c
+++ b/nptl/pthread_cond_common.c
@@ -208,9 +208,9 @@ __condvar_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
behavior.
Note that this works correctly for a zero-initialized condvar too. */
unsigned int old_orig_size = __condvar_get_orig_size (cond);
- uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond) >> 1;
- if (((unsigned) (wseq - old_g1_start - old_orig_size)
- + cond->__data.__g_size[g1 ^ 1]) == 0)
+ uint64_t old_g1_start = __condvar_load_g1_start_relaxed (cond);
+ uint64_t new_g1_start = old_g1_start + old_orig_size;
+ if (((unsigned) (wseq - new_g1_start) + cond->__data.__g_size[g1 ^ 1]) == 0)
return false;
/* We have to consider the following kinds of waiters:
@@ -221,16 +221,10 @@ __condvar_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
are not affected.
* Waiters in G1 have already received a signal and been woken. */
- /* Update __g1_start, which closes this group. The value we add will never
- be negative because old_orig_size can only be zero when we switch groups
- the first time after a condvar was initialized, in which case G1 will be
- at index 1 and we will add a value of 1. Relaxed MO is fine because the
- change comes with no additional constraints that others would have to
- observe. */
- __condvar_add_g1_start_relaxed (cond,
- (old_orig_size << 1) + (g1 == 1 ? 1 : - 1));
-
- unsigned int lowseq = ((old_g1_start + old_orig_size) << 1) & ~1U;
+ /* Update __g1_start, which closes this group. Relaxed MO is fine because
+ the change comes with no additional constraints that others would have
+ to observe. */
+ __condvar_add_g1_start_relaxed (cond, old_orig_size);
/* At this point, the old G1 is now a valid new G2 (but not in use yet).
No old waiter can grab a signal or acquire a reference without
@@ -242,13 +236,13 @@ __condvar_switch_g1 (pthread_cond_t *cond, uint64_t wseq,
g1 ^= 1;
*g1index ^= 1;
- /* Now advance the new G1 g_signals to the new lowseq, giving it
+ /* Now advance the new G1 g_signals to the new g1_start, giving it
an effective signal count of 0 to start. */
- atomic_store_release (cond->__data.__g_signals + g1, lowseq);
+ atomic_store_release (cond->__data.__g_signals + g1, (unsigned)new_g1_start);
/* These values are just observed by signalers, and thus protected by the
lock. */
- unsigned int orig_size = wseq - (old_g1_start + old_orig_size);
+ unsigned int orig_size = wseq - new_g1_start;
__condvar_set_orig_size (cond, orig_size);
/* Use an addition to not lose track of cancellations in what was
previously G2. */
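As a reading aid for the hunks above, the following minimal C sketch walks through the new arithmetic with assumed numbers (wseq, old_g1_start, old_orig_size, and g_size_g2 stand in for the condvar's fields): the switch to a new G1 only proceeds when no waiters remain in what will become G2, and the new G1's g_signals is seeded with the low 32 bits of new_g1_start so its effective signal count starts at zero.

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint64_t wseq = 1024;          /* next waiter sequence number */
  uint64_t old_g1_start = 1000;  /* where the closing G1 began */
  unsigned old_orig_size = 24;   /* waiters that entered that G1 */
  unsigned g_size_g2 = 0;        /* remaining, unsignaled G2 waiters */

  /* Waiters in G2 are those with sequence >= new_g1_start, plus those
     recorded in g_size; the unsigned truncation keeps the subtraction
     correct even after the 64-bit sequence outgrows the 32-bit view.  */
  uint64_t new_g1_start = old_g1_start + old_orig_size;
  if ((unsigned) (wseq - new_g1_start) + g_size_g2 == 0)
    puts ("no waiters in G2: do not switch groups");

  /* Seed the new G1's g_signals with the low bits of new_g1_start,
     giving it an effective signal count of 0 to start.  */
  unsigned g_signals = (unsigned) new_g1_start;
  printf ("g_signals seeded to %u\n", g_signals);
  return 0;
}

Because both g_signals and the emptiness test use plain unsigned arithmetic, the difference wseq - new_g1_start stays correct modulo 2^32, which is the wraparound robustness the commit message refers to when it reclaims the spare bit.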