| author | Adhemerval Zanella <adhemerval.zanella@linaro.org> | 2025-02-21 12:53:07 -0300 |
|---|---|---|
| committer | Adhemerval Zanella <adhemerval.zanella@linaro.org> | 2025-03-07 08:46:49 -0300 |
| commit | 5624d626dbf1dfa072941befeaf687cdabba4893 (patch) | |
| tree | 0593e11debd571066fbfe56a5326208730b37cc4 /elf/dl-reloc.c | |
| parent | 815beee47c1c42e1b7bd5a1ce7b6a34c2013d3dc (diff) | |
linux: Add support for PT_GNU_MUTABLE
The segment marks a memory region that should not be sealed when the
GNU_PROPERTY_MEMORY_SEAL attribute is present.  PT_GNU_MUTABLE covers
sections whose names start with ".gnu.mutable"; they are aligned to the
maximum page size and their size is rounded up to the maximum page size.
For instance, the code:
#define GNU_MUTABLE_SECTION_NAME ".gnu.mutable"
unsigned char mutable_array1[64]
__attribute__ ((section (GNU_MUTABLE_SECTION_NAME)))
= { 0 };
unsigned char mutable_array2[32]
__attribute__ ((section (GNU_MUTABLE_SECTION_NAME)))
= { 0 };
places both 'mutable_array1' and 'mutable_array2' in a page-aligned
memory region with the size of one page (the alignment and size can be
changed with the -Wl,-z,max-page-size= linker option).
The linker sets the alignment and size so that the loader can more
easily avoid sealing the area (since mseal only works on areas that are
a multiple of the page size), and so that userland can more simply
change the protection of, or seal, the area after initialization.
Diffstat (limited to 'elf/dl-reloc.c')
| -rw-r--r-- | elf/dl-reloc.c | 29 |
1 file changed, 25 insertions, 4 deletions
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index 2b37676182..d706a57101 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -37,7 +37,6 @@
 # define bump_num_cache_relocations() ((void) 0)
 #endif
 
-
 /* We are trying to perform a static TLS relocation in MAP, but it was
    dynamically loaded.  This can only work if there is enough surplus in
    the static TLS area already allocated for each running thread.  If this
@@ -372,6 +371,29 @@ cannot apply additional memory protection after relocation");
 }
 
 static void
+_dl_mseal_map_2 (const struct link_map *l, ElfW(Addr) map_start,
+                 ElfW(Addr) map_end)
+{
+  ElfW(Addr) mutable_start = 0, mutable_end = 0;
+  if (l->l_mutable_size != 0)
+    {
+      mutable_start = l->l_addr + l->l_mutable_addr;
+      mutable_end = mutable_start + l->l_mutable_size;
+    }
+
+  if (mutable_start >= map_start && mutable_end < map_end)
+    {
+      size_t seg1_size = mutable_start - map_start;
+      size_t seg2_size = map_end - mutable_end;
+      _dl_mseal ((void *) map_start, seg1_size, l->l_name);
+      if (seg2_size != 0)
+        _dl_mseal ((void *) mutable_end, seg2_size, l->l_name);
+    }
+  else
+    _dl_mseal ((void *) map_start, map_end - map_start, l->l_name);
+}
+
+static void
 _dl_mseal_map_1 (struct link_map *l, bool dep)
 {
   if (!GLRO(dl_enable_seal))
@@ -388,8 +410,7 @@ _dl_mseal_map_1 (struct link_map *l, bool dep)
     return;
 
   if (l->l_contiguous)
-    _dl_mseal ((void *) l->l_map_start, l->l_map_end - l->l_map_start,
-               l->l_name);
+    _dl_mseal_map_2 (l, l->l_map_start, l->l_map_end);
   else
     {
       /* We can use the PT_LOAD segments because even if relro splits the
@@ -404,7 +425,7 @@ _dl_mseal_map_1 (struct link_map *l, bool dep)
               ElfW(Addr) mapstart = l->l_addr
                 + (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1));
               ElfW(Addr) allocend = l->l_addr + ph->p_vaddr + ph->p_memsz;
-              _dl_mseal ((void *) mapstart, allocend - mapstart, l->l_name);
+              _dl_mseal_map_2 (l, mapstart, allocend);
             }
           break;
         }
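To make the effect of the carve-out above concrete, here is a hedged,
standalone sketch (not a glibc test case) of how an application could
observe it: after dlopen'ing a DSO that was built with memory sealing
enabled and that contains the .gnu.mutable arrays from the example
above, mprotect on the mutable window is expected to succeed, while
mprotect on the rest of the sealed mapping is expected to fail with
EPERM.  The library name "libmutable.so" and the symbol "regular_data"
are made up for the sketch; "mutable_array1" comes from the example:

  #include <dlfcn.h>
  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  static void
  probe (void *sym, const char *what)
  {
    long page = sysconf (_SC_PAGESIZE);
    void *start;

    if (sym == NULL)
      return;
    start = (void *) ((uintptr_t) sym & ~((uintptr_t) page - 1));

    /* Try to drop write access; mseal makes this fail with EPERM.  */
    if (mprotect (start, page, PROT_READ) == 0)
      {
        printf ("%s: not sealed (mprotect succeeded)\n", what);
        /* Restore the original read-write protection.  */
        mprotect (start, page, PROT_READ | PROT_WRITE);
      }
    else
      printf ("%s: %s\n", what,
              errno == EPERM ? "sealed (mprotect: EPERM)" : "mprotect failed");
  }

  int
  main (void)
  {
    void *h = dlopen ("libmutable.so", RTLD_NOW);
    if (h == NULL)
      return 1;

    probe (dlsym (h, "mutable_array1"), ".gnu.mutable window");
    probe (dlsym (h, "regular_data"), "writable data outside .gnu.mutable");
    return 0;
  }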
