author    Adhemerval Zanella <adhemerval.zanella@linaro.org>  2025-02-21 12:53:07 -0300
committer Adhemerval Zanella <adhemerval.zanella@linaro.org>  2025-03-07 08:46:49 -0300
commit    5624d626dbf1dfa072941befeaf687cdabba4893
tree      0593e11debd571066fbfe56a5326208730b37cc4 /elf
parent    815beee47c1c42e1b7bd5a1ce7b6a34c2013d3dc
linux: Add support for PT_GNU_MUTABLE (branch azanella/memory-seal-v6)
The segment marks a memory region that should not be sealed even
when the GNU_PROPERTY_MEMORY_SEAL attribute is present.  Sections
placed in PT_GNU_MUTABLE have names starting with ".gnu.mutable";
the segment is aligned to the maximum page size and its size is a
multiple of the maximum page size.

For instance, the code:
#define GNU_MUTABLE_SECTION_NAME ".gnu.mutable"
unsigned char mutable_array1[64]
__attribute__ ((section (GNU_MUTABLE_SECTION_NAME)))
= { 0 };
unsigned char mutable_array2[32]
__attribute__ ((section (GNU_MUTABLE_SECTION_NAME)))
= { 0 };
places both 'mutable_array1' and 'mutable_array2' in a page-aligned
memory region whose size is a multiple of the page size (the
alignment and size can be changed with the -Wl,-z,max-page-size=
linker option).

The linker sets the alignment and size to make it easier for the
loader to avoid sealing the area (since mseal only works on areas
that are a multiple of the page size), and to simplify sealing or
changing the protection of the area from userland after
initialization, as sketched below.
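A minimal userland sketch of that flow (an illustration, not part of
this patch): initialize an object placed in the mutable section, then
make it read-only and seal it.  It assumes the linker placed the
section at a page-aligned address as described above, and kernel
headers new enough (Linux 6.10+) to provide SYS_mseal, since glibc
does not export an mseal wrapper:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#define GNU_MUTABLE_SECTION_NAME ".gnu.mutable"

/* Page-aligned and page-sized via the PT_GNU_MUTABLE linker rules;
   assumed to sit at the start of the section.  */
static unsigned char config[64]
  __attribute__ ((section (GNU_MUTABLE_SECTION_NAME))) = { 0 };

int
main (void)
{
  /* The loader skipped this region when sealing the rest of the
     image, so it is still writable here.  */
  memcpy (config, "initialized", sizeof "initialized");

  long pagesize = sysconf (_SC_PAGESIZE);

  /* Once initialization is done, drop write permission, then seal
     the page so the protection can no longer be changed.  */
  if (mprotect (config, pagesize, PROT_READ) != 0)
    perror ("mprotect");
  if (syscall (SYS_mseal, config, pagesize, 0) != 0)
    perror ("mseal");

  return 0;
}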
Diffstat (limited to 'elf')
 elf/dl-load.c    |  5
 elf/dl-reloc.c   | 29
 elf/dl-support.c |  5
 elf/elf.h        |  2
 elf/rtld.c       |  5
 5 files changed, 42 insertions(+), 4 deletions(-)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index f104cc7544..a0d7d30e58 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1220,6 +1220,11 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
 	  l->l_relro_addr = ph->p_vaddr;
 	  l->l_relro_size = ph->p_memsz;
 	  break;
+
+	case PT_GNU_MUTABLE:
+	  l->l_mutable_addr = ph->p_vaddr;
+	  l->l_mutable_size = ph->p_memsz;
+	  break;
 	}
 
   if (__glibc_unlikely (nloadcmds == 0))
diff --git a/elf/dl-reloc.c b/elf/dl-reloc.c
index 2b37676182..d706a57101 100644
--- a/elf/dl-reloc.c
+++ b/elf/dl-reloc.c
@@ -37,7 +37,6 @@
 # define bump_num_cache_relocations() ((void) 0)
 #endif
 
-
 /* We are trying to perform a static TLS relocation in MAP, but it was
    dynamically loaded.  This can only work if there is enough surplus in
    the static TLS area already allocated for each running thread.  If this
@@ -372,6 +371,29 @@ cannot apply additional memory protection after relocation");
 }
 
 static void
+_dl_mseal_map_2 (const struct link_map *l, ElfW(Addr) map_start,
+		 ElfW(Addr) map_end)
+{
+  ElfW(Addr) mutable_start = 0, mutable_end = 0;
+  if (l->l_mutable_size != 0)
+    {
+      mutable_start = l->l_addr + l->l_mutable_addr;
+      mutable_end = mutable_start + l->l_mutable_size;
+    }
+
+  if (mutable_start >= map_start && mutable_end < map_end)
+    {
+      size_t seg1_size = mutable_start - map_start;
+      size_t seg2_size = map_end - mutable_end;
+      _dl_mseal ((void *) map_start, seg1_size, l->l_name);
+      if (seg2_size != 0)
+	_dl_mseal ((void *) mutable_end, seg2_size, l->l_name);
+    }
+  else
+    _dl_mseal ((void *) map_start, map_end - map_start, l->l_name);
+}
+
+static void
 _dl_mseal_map_1 (struct link_map *l, bool dep)
 {
   if (!GLRO(dl_enable_seal))
@@ -388,8 +410,7 @@ _dl_mseal_map_1 (struct link_map *l, bool dep)
     return;
 
   if (l->l_contiguous)
-    _dl_mseal ((void *) l->l_map_start, l->l_map_end - l->l_map_start,
-	       l->l_name);
+    _dl_mseal_map_2 (l, l->l_map_start, l->l_map_end);
   else
     {
       /* We can use the PT_LOAD segments because even if relro splits the
@@ -404,7 +425,7 @@ _dl_mseal_map_1 (struct link_map *l, bool dep)
 	    ElfW(Addr) mapstart = l->l_addr
 	      + (ph->p_vaddr & ~(GLRO(dl_pagesize) - 1));
 	    ElfW(Addr) allocend = l->l_addr + ph->p_vaddr + ph->p_memsz;
-	    _dl_mseal ((void *) mapstart, allocend - mapstart, l->l_name);
+	    _dl_mseal_map_2 (l, mapstart, allocend);
 	  }
       break;
     }
diff --git a/elf/dl-support.c b/elf/dl-support.c
index ab74f3b51c..6227397237 100644
--- a/elf/dl-support.c
+++ b/elf/dl-support.c
@@ -334,6 +334,11 @@ _dl_non_dynamic_init (void)
 	    _dl_main_map.l_relro_addr = ph->p_vaddr;
 	    _dl_main_map.l_relro_size = ph->p_memsz;
 	    break;
+
+	  case PT_GNU_MUTABLE:
+	    _dl_main_map.l_mutable_addr = ph->p_vaddr;
+	    _dl_main_map.l_mutable_size = ph->p_memsz;
+	    break;
 	  }
   /* Process program headers again, but scan them backwards so that
      PT_NOTE can be skipped if PT_GNU_PROPERTY exits.  */
diff --git a/elf/elf.h b/elf/elf.h
--- a/elf/elf.h
+++ b/elf/elf.h
@@ -729,6 +729,7 @@ typedef struct
 #define PT_GNU_RELRO	0x6474e552	/* Read-only after relocation */
 #define PT_GNU_PROPERTY	0x6474e553	/* GNU property */
 #define PT_GNU_SFRAME	0x6474e554	/* SFrame segment.  */
+#define PT_GNU_MUTABLE	0x6474f555	/* Like bss, but not immutable.  */
 #define PT_LOSUNW	0x6ffffffa
 #define PT_SUNWBSS	0x6ffffffa	/* Sun Specific segment */
 #define PT_SUNWSTACK	0x6ffffffb	/* Stack segment */
@@ -1352,6 +1353,7 @@ typedef struct
 
 /* Note section name of program property.  */
 #define NOTE_GNU_PROPERTY_SECTION_NAME ".note.gnu.property"
+#define GNU_MUTABLE_SECTION_NAME ".gnu.mutable"
 
 /* Values used in GNU .note.gnu.property notes (NT_GNU_PROPERTY_TYPE_0).  */
diff --git a/elf/rtld.c b/elf/rtld.c
index 305fbda713..1fba19db75 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -1209,6 +1209,11 @@ rtld_setup_main_map (struct link_map *main_map)
 	  main_map->l_relro_addr = ph->p_vaddr;
 	  main_map->l_relro_size = ph->p_memsz;
 	  break;
+
+	case PT_GNU_MUTABLE:
+	  main_map->l_mutable_addr = ph->p_vaddr;
+	  main_map->l_mutable_size = ph->p_memsz;
+	  break;
 	}
   /* Process program headers again, but scan them backwards so that
      PT_NOTE can be skipped if PT_GNU_PROPERTY exits.  */
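To observe what the dl-load.c, dl-support.c, and rtld.c hunks record
into l_mutable_addr and l_mutable_size, a small inspection sketch
(an assumption for illustration, not part of this patch) can walk the
program headers with dl_iterate_phdr.  PT_GNU_MUTABLE is defined
locally in case the installed <elf.h> predates this change:

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

#ifndef PT_GNU_MUTABLE
# define PT_GNU_MUTABLE 0x6474f555
#endif

/* Print the load address and size of any PT_GNU_MUTABLE segment in
   the main program and each loaded object.  */
static int
show_mutable (struct dl_phdr_info *info, size_t size, void *data)
{
  (void) size;
  (void) data;
  for (size_t i = 0; i < info->dlpi_phnum; i++)
    if (info->dlpi_phdr[i].p_type == PT_GNU_MUTABLE)
      printf ("%s: mutable region at %#lx, size %#lx\n",
	      info->dlpi_name[0] != '\0' ? info->dlpi_name : "(main)",
	      (unsigned long) (info->dlpi_addr + info->dlpi_phdr[i].p_vaddr),
	      (unsigned long) info->dlpi_phdr[i].p_memsz);
  return 0;
}

int
main (void)
{
  dl_iterate_phdr (show_mutable, NULL);
  return 0;
}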
