author     Michael Jeanson <mjeanson@efficios.com>   2024-07-10 15:48:11 -0400
committer  Michael Jeanson <mjeanson@efficios.com>   2025-01-10 20:19:28 +0000
commit     0e411c5d3098982d67cd2d7a233eaa6c977a1869 (patch)
tree       e69eef11d9d92dc76c6115eef5512c3aae6878aa /csu
parent     c813c1490d5d8640a94fced10fc7674a48737b96 (diff)
Add generic 'extra TLS'
Add the logic to append an 'extra TLS' block in the TLS block allocator
with a generic stub implementation. The duplicated code in 'csu/libc-tls.c'
and 'elf/dl-tls.c' is to handle both statically linked applications and the
ELF dynamic loader.

Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Reviewed-by: Florian Weimer <fweimer@redhat.com>
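The generic stub implementation itself lives outside the 'csu' directory, so it
is not shown in this diff. Based only on the call sites below and the commit
message (on the generic target the block is unused and both values are '0'),
a minimal, hypothetical sketch of what such a stub header could look like is:

    /* Hypothetical sketch of a generic <dl-extra_tls.h>-style stub; the real
       header is not part of this csu/ diff.  The generic target reserves no
       extra TLS, so size and alignment report zero and the offset is simply
       discarded.  */
    #include <stddef.h>

    static inline size_t
    _dl_extra_tls_get_size (void)
    {
      return 0;   /* No extra TLS block on the generic target.  */
    }

    static inline size_t
    _dl_extra_tls_get_align (void)
    {
      return 0;   /* No alignment requirement when the block is empty.  */
    }

    static inline void
    _dl_extra_tls_set_offset (ptrdiff_t tls_offset)
    {
      /* Nothing to record: no consumer of the offset on this target.  */
    }

On Linux, per the commit message, these hooks are expected to describe the rseq
area instead of returning zero.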
Diffstat (limited to 'csu')
-rw-r--r--   csu/libc-tls.c   82
1 file changed, 73 insertions(+), 9 deletions(-)
diff --git a/csu/libc-tls.c b/csu/libc-tls.c
index 15f470aa87..5ffebc6fc2 100644
--- a/csu/libc-tls.c
+++ b/csu/libc-tls.c
@@ -20,12 +20,14 @@
#include <errno.h>
#include <ldsodefs.h>
#include <tls.h>
+#include <dl-tls.h>
#include <unistd.h>
#include <stdio.h>
#include <sys/param.h>
#include <array_length.h>
#include <pthreadP.h>
#include <dl-call_tls_init_tp.h>
+#include <dl-extra_tls.h>
#ifdef SHARED
#error makefile bug, this file is for static only
@@ -110,6 +112,7 @@ __libc_setup_tls (void)
size_t filesz = 0;
void *initimage = NULL;
size_t align = 0;
+ size_t tls_blocks_size = 0;
size_t max_align = TCB_ALIGNMENT;
size_t tcb_offset;
const ElfW(Phdr) *phdr;
@@ -135,22 +138,89 @@ __libc_setup_tls (void)
/* Calculate the size of the static TLS surplus, with 0 auditors. */
_dl_tls_static_surplus_init (0);
+ /* Extra TLS block for internal usage to append at the end of the TLS blocks
+ (in allocation order). The address at which the block is allocated must
+ be aligned to 'extra_tls_align'. The size of the block as returned by
+ '_dl_extra_tls_get_size ()' is always a multiple of the alignment.
+
+ On Linux systems this is where the rseq area will be allocated. On other
+ systems it is currently unused and both values will be '0'. */
+ size_t extra_tls_size = _dl_extra_tls_get_size ();
+ size_t extra_tls_align = _dl_extra_tls_get_align ();
+
+ /* Increase the maximum alignment with the extra TLS alignment requirements
+ if necessary. */
+ max_align = MAX (max_align, extra_tls_align);
+
/* We have to set up the TCB block which also (possibly) contains
'errno'. Therefore we avoid 'malloc' which might touch 'errno'.
Instead we use 'sbrk' which would only use 'errno' if it fails.
In this case we are right away out of memory and the user gets
what she/he deserves. */
#if TLS_TCB_AT_TP
+ /* In this layout the TLS blocks are located before the thread pointer. */
+
+ /* Record the size of the combined TLS blocks.
+
+ First reserve space for 'memsz' while respecting both its alignment
+ requirements and those of the extra TLS blocks. Then add the size of
+ the extra TLS block. Both values respect the extra TLS alignment
+ requirements and so does the resulting size and the offset that will
+ be derived from it. */
+ tls_blocks_size = roundup (memsz, MAX (align, extra_tls_align) ?: 1)
+ + extra_tls_size;
+
+ /* Record the extra TLS block offset from the thread pointer.
+
+ With TLS_TCB_AT_TP the TLS blocks are allocated before the thread pointer
+ in reverse order. Our block is added last which results in it being the
+ first in the static TLS block, thus record the most negative offset.
+
+ The alignment requirements of the pointer resulting from this offset and
+ the thread pointer are enforced by 'max_align' which is used to align the
+ tcb_offset. */
+ _dl_extra_tls_set_offset (-tls_blocks_size);
+
/* Align the TCB offset to the maximum alignment, as
_dl_allocate_tls_storage (in elf/dl-tls.c) does using __libc_memalign
and dl_tls_static_align. */
- tcb_offset = roundup (memsz + GLRO(dl_tls_static_surplus), max_align);
+ tcb_offset = roundup (tls_blocks_size + GLRO(dl_tls_static_surplus), max_align);
tlsblock = _dl_early_allocate (tcb_offset + TLS_INIT_TCB_SIZE + max_align);
if (tlsblock == NULL)
_startup_fatal_tls_error ();
#elif TLS_DTV_AT_TP
+ /* In this layout the TLS blocks are located after the thread pointer. */
+
+ /* Record the tcb_offset including the alignment requirements of 'memsz'
+ that comes after it. */
tcb_offset = roundup (TLS_INIT_TCB_SIZE, align ?: 1);
- tlsblock = _dl_early_allocate (tcb_offset + memsz + max_align
+
+ /* Record the size of the combined TLS blocks.
+
+ First reserve space for TLS_INIT_TCB_SIZE and 'memsz' while respecting
+ both its alignment requirements and those of the extra TLS blocks. Then
+ add the size of the extra TLS block. Both values respect the extra TLS
+ alignment requirements and so does the resulting size and the offset that
+ will be derived from it. */
+ tls_blocks_size = roundup (TLS_INIT_TCB_SIZE + memsz,
+ MAX (align, extra_tls_align) ?: 1) + extra_tls_size;
+
+ /* Record the extra TLS block offset from the thread pointer.
+
+ With TLS_DTV_AT_TP the TLS blocks are allocated after the thread pointer in
+ order. Our block is added last which results in it being the last in the
+ static TLS block, thus record the offset as the size of the static TLS
+ block minus the size of our block.
+
+ On some architectures the TLS blocks are offset from the thread pointer,
+ include this offset in the extra TLS block offset.
+
+ The alignment requirements of the pointer resulting from this offset and
+ the thread pointer are enforced by 'max_align' which is used to align the
+ tcb_offset. */
+ _dl_extra_tls_set_offset (tls_blocks_size - extra_tls_size - TLS_TP_OFFSET);
+
+ tlsblock = _dl_early_allocate (tls_blocks_size + max_align
+ TLS_PRE_TCB_SIZE
+ GLRO(dl_tls_static_surplus));
if (tlsblock == NULL)
@@ -209,11 +279,5 @@ __libc_setup_tls (void)
/* static_slotinfo.slotinfo[1].gen = 0; -- Already zero. */
static_slotinfo.slotinfo[1].map = main_map;
- memsz = roundup (memsz, align ?: 1);
-
-#if TLS_DTV_AT_TP
- memsz += tcb_offset;
-#endif
-
- init_static_tls (memsz, MAX (TCB_ALIGNMENT, max_align));
+ init_static_tls (tls_blocks_size, MAX (TCB_ALIGNMENT, max_align));
}
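
Purely as an illustration of the arithmetic in the two layouts above (the
sizes are hypothetical and not taken from the patch or any particular target),
the following standalone program reproduces the 'tls_blocks_size' and extra
TLS offset computations:

    /* Worked example with made-up values: memsz = 120, align = 16,
       extra_tls_size = 32, extra_tls_align = 32, and TLS_INIT_TCB_SIZE and
       TLS_TP_OFFSET taken as 0 (both vary per target).  */
    #include <stdio.h>
    #include <stddef.h>

    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))
    #define MAX(a, b)     ((a) > (b) ? (a) : (b))

    int
    main (void)
    {
      size_t memsz = 120, align = 16;
      size_t extra_tls_size = 32, extra_tls_align = 32;
      size_t tls_init_tcb_size = 0, tls_tp_offset = 0;

      /* TLS_TCB_AT_TP: the TLS blocks sit before the thread pointer; the
         extra block is allocated last, i.e. at the most negative offset.  */
      size_t tcb_at_tp = ROUNDUP (memsz, MAX (align, extra_tls_align)) /* 128 */
                         + extra_tls_size;                             /* 160 */
      printf ("TCB_AT_TP: tls_blocks_size=%zu, extra TLS offset=%td\n",
              tcb_at_tp, -(ptrdiff_t) tcb_at_tp);                   /* -160 */

      /* TLS_DTV_AT_TP: the TLS blocks sit after the thread pointer; the
         extra block is last, so its offset is the combined size minus its
         own size (minus any per-architecture TP offset).  */
      size_t dtv_at_tp = ROUNDUP (tls_init_tcb_size + memsz,
                                  MAX (align, extra_tls_align))        /* 128 */
                         + extra_tls_size;                             /* 160 */
      printf ("DTV_AT_TP: tls_blocks_size=%zu, extra TLS offset=%zu\n",
              dtv_at_tp, dtv_at_tp - extra_tls_size - tls_tp_offset); /* 128 */
      return 0;
    }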