author     Andrew Turner <andrew@FreeBSD.org>  2022-04-04 15:05:40 +0000
committer  Andrew Turner <andrew@FreeBSD.org>  2022-05-03 14:04:04 +0000
commit     f9bea2bdff9fcb16841e9a6ffb764d8556976708 (patch)
tree       3d4ac1c88907e090c460ea0338a8e15c38216306
parent     fc5ab0227bbaa265aa8e4e0247cf816040ac4b44 (diff)
Have rtld query the page size from the kernel
To allow for a dynamic page size on arm64, have the runtime linker query
the kernel for the current page size.

Reviewed by:	kib
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D34765

(cherry picked from commit e85eaa930862d5b4dc917bc31e8d7254a693635d)
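For reference, a minimal standalone sketch of asking the kernel for the page
size via the CTL_HW/HW_PAGESIZE sysctl; this mirrors the fallback path the old
getpagesize() in rtld_libc.c used (see the diff below) and is illustrative
only, not code from this commit:

/*
 * Illustrative sketch: query the kernel page size with sysctl(3),
 * the same CTL_HW/HW_PAGESIZE request the removed getpagesize()
 * fallback performed.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_HW, HW_PAGESIZE };
	int value;
	size_t size = sizeof(value);

	if (sysctl(mib, 2, &value, &size, NULL, 0) == -1) {
		perror("sysctl hw.pagesize");
		return (1);
	}
	printf("kernel page size: %d bytes\n", value);
	return (0);
}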
-rw-r--r--  libexec/rtld-elf/map_object.c           | 54
-rw-r--r--  libexec/rtld-elf/rtld-libc/rtld_libc.c  | 24
-rw-r--r--  libexec/rtld-elf/rtld.c                 | 35
-rw-r--r--  libexec/rtld-elf/rtld.h                 |  3
4 files changed, 58 insertions(+), 58 deletions(-)
diff --git a/libexec/rtld-elf/map_object.c b/libexec/rtld-elf/map_object.c
index 2da323c115cc..7c637fe1d6ba 100644
--- a/libexec/rtld-elf/map_object.c
+++ b/libexec/rtld-elf/map_object.c
@@ -49,8 +49,7 @@ int __getosreldate(void);
static bool
phdr_in_zero_page(const Elf_Ehdr *hdr)
{
- return (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) <=
- (size_t)PAGE_SIZE);
+ return (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) <= page_size);
}
/*
@@ -134,14 +133,15 @@ map_object(int fd, const char *path, const struct stat *sb)
case PT_LOAD:
segs[++nsegs] = phdr;
- if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
+ if ((segs[nsegs]->p_align & (page_size - 1)) != 0) {
_rtld_error("%s: PT_LOAD segment %d not page-aligned",
path, nsegs);
goto error;
}
if ((segs[nsegs]->p_flags & PF_X) == PF_X) {
text_end = MAX(text_end,
- round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz));
+ rtld_round_page(segs[nsegs]->p_vaddr +
+ segs[nsegs]->p_memsz));
}
break;
@@ -168,18 +168,18 @@ map_object(int fd, const char *path, const struct stat *sb)
break;
case PT_NOTE:
- if (phdr->p_offset > PAGE_SIZE ||
- phdr->p_offset + phdr->p_filesz > PAGE_SIZE) {
- note_map_len = round_page(phdr->p_offset +
- phdr->p_filesz) - trunc_page(phdr->p_offset);
+ if (phdr->p_offset > page_size ||
+ phdr->p_offset + phdr->p_filesz > page_size) {
+ note_map_len = rtld_round_page(phdr->p_offset +
+ phdr->p_filesz) - rtld_trunc_page(phdr->p_offset);
note_map = mmap(NULL, note_map_len, PROT_READ,
- MAP_PRIVATE, fd, trunc_page(phdr->p_offset));
+ MAP_PRIVATE, fd, rtld_trunc_page(phdr->p_offset));
if (note_map == MAP_FAILED) {
_rtld_error("%s: error mapping PT_NOTE (%d)", path, errno);
goto error;
}
note_start = (Elf_Addr)(note_map + phdr->p_offset -
- trunc_page(phdr->p_offset));
+ rtld_trunc_page(phdr->p_offset));
} else {
note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
}
@@ -203,13 +203,13 @@ map_object(int fd, const char *path, const struct stat *sb)
* Map the entire address space of the object, to stake out our
* contiguous region, and to establish the base address for relocation.
*/
- base_vaddr = trunc_page(segs[0]->p_vaddr);
- base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
+ base_vaddr = rtld_trunc_page(segs[0]->p_vaddr);
+ base_vlimit = rtld_round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
mapsize = base_vlimit - base_vaddr;
base_addr = (caddr_t) base_vaddr;
base_flags = __getosreldate() >= P_OSREL_MAP_GUARD ? MAP_GUARD :
MAP_PRIVATE | MAP_ANON | MAP_NOCORE;
- if (npagesizes > 1 && round_page(segs[0]->p_filesz) >= pagesizes[1])
+ if (npagesizes > 1 && rtld_round_page(segs[0]->p_filesz) >= pagesizes[1])
base_flags |= MAP_ALIGNED_SUPER;
if (base_vaddr != 0)
base_flags |= MAP_FIXED | MAP_EXCL;
@@ -228,9 +228,9 @@ map_object(int fd, const char *path, const struct stat *sb)
for (i = 0; i <= nsegs; i++) {
/* Overlay the segment onto the proper region. */
- data_offset = trunc_page(segs[i]->p_offset);
- data_vaddr = trunc_page(segs[i]->p_vaddr);
- data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
+ data_offset = rtld_trunc_page(segs[i]->p_offset);
+ data_vaddr = rtld_trunc_page(segs[i]->p_vaddr);
+ data_vlimit = rtld_round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
data_addr = mapbase + (data_vaddr - base_vaddr);
data_prot = convert_prot(segs[i]->p_flags);
data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
@@ -248,12 +248,12 @@ map_object(int fd, const char *path, const struct stat *sb)
/* Clear any BSS in the last page of the segment. */
clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
clear_addr = mapbase + (clear_vaddr - base_vaddr);
- clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);
+ clear_page = mapbase + (rtld_trunc_page(clear_vaddr) - base_vaddr);
if ((nclear = data_vlimit - clear_vaddr) > 0) {
/* Make sure the end of the segment is writable */
if ((data_prot & PROT_WRITE) == 0 && -1 ==
- mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
+ mprotect(clear_page, page_size, data_prot|PROT_WRITE)) {
_rtld_error("%s: mprotect failed: %s", path,
rtld_strerror(errno));
goto error1;
@@ -263,12 +263,12 @@ map_object(int fd, const char *path, const struct stat *sb)
/* Reset the data protection back */
if ((data_prot & PROT_WRITE) == 0)
- mprotect(clear_page, PAGE_SIZE, data_prot);
+ mprotect(clear_page, page_size, data_prot);
}
/* Overlay the BSS segment onto the proper region. */
bss_vaddr = data_vlimit;
- bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
+ bss_vlimit = rtld_round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
bss_addr = mapbase + (bss_vaddr - base_vaddr);
if (bss_vlimit > bss_vaddr) { /* There is something to do */
if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
@@ -324,14 +324,14 @@ map_object(int fd, const char *path, const struct stat *sb)
obj->tlsinit = mapbase + phtls->p_vaddr;
}
obj->stack_flags = stack_flags;
- obj->relro_page = obj->relocbase + trunc_page(relro_page);
- obj->relro_size = trunc_page(relro_page + relro_size) -
- trunc_page(relro_page);
+ obj->relro_page = obj->relocbase + rtld_trunc_page(relro_page);
+ obj->relro_size = rtld_trunc_page(relro_page + relro_size) -
+ rtld_trunc_page(relro_page);
if (note_start < note_end)
digest_notes(obj, note_start, note_end);
if (note_map != NULL)
munmap(note_map, note_map_len);
- munmap(hdr, PAGE_SIZE);
+ munmap(hdr, page_size);
return (obj);
error1:
@@ -341,7 +341,7 @@ error:
munmap(note_map, note_map_len);
if (!phdr_in_zero_page(hdr))
munmap(phdr, hdr->e_phnum * sizeof(phdr[0]));
- munmap(hdr, PAGE_SIZE);
+ munmap(hdr, page_size);
return (NULL);
}
@@ -391,7 +391,7 @@ get_elf_header(int fd, const char *path, const struct stat *sbp,
return (NULL);
}
- hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_PREFAULT_READ,
+ hdr = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE | MAP_PREFAULT_READ,
fd, 0);
if (hdr == MAP_FAILED) {
_rtld_error("%s: read error: %s", path, rtld_strerror(errno));
@@ -423,7 +423,7 @@ get_elf_header(int fd, const char *path, const struct stat *sbp,
return (hdr);
error:
- munmap(hdr, PAGE_SIZE);
+ munmap(hdr, page_size);
return (NULL);
}
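Note on the helpers used throughout the hunks above: rtld_round_page() and
rtld_trunc_page() (added in rtld.c further down) round to the runtime
page_size instead of the compile-time PAGE_SIZE. A standalone sketch of the
same rounding arithmetic, assuming page_size is a power of two; the example_*
names are illustrative, not the rtld symbols:

/*
 * Illustrative only: runtime page rounding equivalent to
 * roundup2()/rounddown2() with a power-of-two page_size
 * (e.g. 4096 or 16384 on arm64).
 */
#include <stdint.h>
#include <stdio.h>

static size_t page_size = 16384;	/* example: 16K pages */

static uintptr_t
example_round_page(uintptr_t x)
{
	return ((x + page_size - 1) & ~(page_size - 1));
}

static uintptr_t
example_trunc_page(uintptr_t x)
{
	return (x & ~(page_size - 1));
}

int
main(void)
{
	uintptr_t addr = 0x12345;

	/* With 16K pages: trunc -> 0x10000, round -> 0x14000. */
	printf("trunc %#jx -> %#jx, round %#jx -> %#jx\n",
	    (uintmax_t)addr, (uintmax_t)example_trunc_page(addr),
	    (uintmax_t)addr, (uintmax_t)example_round_page(addr));
	return (0);
}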
diff --git a/libexec/rtld-elf/rtld-libc/rtld_libc.c b/libexec/rtld-elf/rtld-libc/rtld_libc.c
index dfad9465f4e8..69e801191db4 100644
--- a/libexec/rtld-elf/rtld-libc/rtld_libc.c
+++ b/libexec/rtld-elf/rtld-libc/rtld_libc.c
@@ -98,31 +98,13 @@ __assert(const char *func, const char *file, int line, const char *failedexpr)
/*
* Avoid pulling in all of pthreads from getpagesize().
* It normally uses libc/gen/auxv.c which pulls in pthread_once().
+ * This relies on init_pagesizes setting page_size so must not be called
+ * before that.
*/
int
getpagesize(void)
{
- int mib[2], value;
- size_t size;
- static int pagesize;
-
- if (pagesize != 0)
- return (pagesize);
-
- if (npagesizes > 0)
- pagesize = pagesizes[0];
-
- if (pagesize == 0) {
- mib[0] = CTL_HW;
- mib[1] = HW_PAGESIZE;
- size = sizeof(value);
- if (sysctl(mib, nitems(mib), &value, &size, NULL, 0) == -1)
- pagesize = PAGE_SIZE;
- else
- pagesize = value;
- }
-
- return (pagesize);
+ return (page_size);
}
extern int __sys___sysctl(const int *name, u_int namelen, void *oldp,
diff --git a/libexec/rtld-elf/rtld.c b/libexec/rtld-elf/rtld.c
index 8920522a50c4..ef600b3e52ca 100644
--- a/libexec/rtld-elf/rtld.c
+++ b/libexec/rtld-elf/rtld.c
@@ -268,6 +268,7 @@ Elf_Addr _rtld_bind(Obj_Entry *obj, Elf_Size reloff);
int npagesizes;
static int osreldate;
size_t *pagesizes;
+size_t page_size;
static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
static int max_stack_flags;
@@ -477,6 +478,18 @@ set_ld_elf_hints_path(void)
ld_elf_hints_path = ld_elf_hints_default;
}
+uintptr_t
+rtld_round_page(uintptr_t x)
+{
+ return (roundup2(x, page_size));
+}
+
+uintptr_t
+rtld_trunc_page(uintptr_t x)
+{
+ return (rounddown2(x, page_size));
+}
+
/*
* Main entry point for dynamic linking. The first argument is the
* stack pointer. The stack is expected to be laid out as described
@@ -1688,10 +1701,10 @@ digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
case PT_LOAD:
if (nsegs == 0) { /* First load segment */
- obj->vaddrbase = trunc_page(ph->p_vaddr);
+ obj->vaddrbase = rtld_trunc_page(ph->p_vaddr);
obj->mapbase = obj->vaddrbase + obj->relocbase;
} else { /* Last load segment */
- obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
+ obj->mapsize = rtld_round_page(ph->p_vaddr + ph->p_memsz) -
obj->vaddrbase;
}
nsegs++;
@@ -1715,9 +1728,9 @@ digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
break;
case PT_GNU_RELRO:
- obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
- obj->relro_size = trunc_page(ph->p_vaddr + ph->p_memsz) -
- trunc_page(ph->p_vaddr);
+ obj->relro_page = obj->relocbase + rtld_trunc_page(ph->p_vaddr);
+ obj->relro_size = rtld_trunc_page(ph->p_vaddr + ph->p_memsz) -
+ rtld_trunc_page(ph->p_vaddr);
break;
case PT_NOTE:
@@ -2366,8 +2379,8 @@ parse_rtld_phdr(Obj_Entry *obj)
break;
case PT_GNU_RELRO:
obj->relro_page = obj->relocbase +
- trunc_page(ph->p_vaddr);
- obj->relro_size = round_page(ph->p_memsz);
+ rtld_trunc_page(ph->p_vaddr);
+ obj->relro_size = rtld_round_page(ph->p_memsz);
break;
case PT_NOTE:
note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
@@ -2499,6 +2512,8 @@ psa_filled:
/* Discard any invalid entries at the end of the array. */
while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0)
npagesizes--;
+
+ page_size = pagesizes[0];
}
/*
@@ -3252,9 +3267,9 @@ reloc_textrel_prot(Obj_Entry *obj, bool before)
l--, ph++) {
if (ph->p_type != PT_LOAD || (ph->p_flags & PF_W) != 0)
continue;
- base = obj->relocbase + trunc_page(ph->p_vaddr);
- sz = round_page(ph->p_vaddr + ph->p_filesz) -
- trunc_page(ph->p_vaddr);
+ base = obj->relocbase + rtld_trunc_page(ph->p_vaddr);
+ sz = rtld_round_page(ph->p_vaddr + ph->p_filesz) -
+ rtld_trunc_page(ph->p_vaddr);
prot = before ? (PROT_READ | PROT_WRITE) :
convert_prot(ph->p_flags);
if (mprotect(base, sz, prot) == -1) {
diff --git a/libexec/rtld-elf/rtld.h b/libexec/rtld-elf/rtld.h
index 0dab41c9b7d6..52ff8de911e2 100644
--- a/libexec/rtld-elf/rtld.h
+++ b/libexec/rtld-elf/rtld.h
@@ -55,6 +55,7 @@ extern int tls_max_index;
extern int npagesizes;
extern size_t *pagesizes;
+extern size_t page_size;
extern int main_argc;
extern char **main_argv;
@@ -382,6 +383,8 @@ void dump_Elf_Rela(Obj_Entry *, const Elf_Rela *, u_long);
/*
* Function declarations.
*/
+uintptr_t rtld_round_page(uintptr_t);
+uintptr_t rtld_trunc_page(uintptr_t);
unsigned long elf_hash(const char *);
const Elf_Sym *find_symdef(unsigned long, const Obj_Entry *,
const Obj_Entry **, int, SymCache *, struct Struct_RtldLockState *);
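For completeness, a small illustrative program showing how the kernel's
supported page sizes can be enumerated from userland with getpagesizes(3);
the smallest entry corresponds to what rtld now caches as page_size (the diff
above sets page_size = pagesizes[0] once the array is filled). This is a
sketch, not part of the commit:

/*
 * Illustrative only: list the supported page sizes; sizes[0] is the
 * base page size, which is what rtld records in page_size.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	size_t sizes[8];
	int i, n;

	n = getpagesizes(sizes, 8);
	if (n == -1) {
		perror("getpagesizes");
		return (1);
	}
	for (i = 0; i < n; i++)
		printf("pagesizes[%d] = %zu\n", i, sizes[i]);
	printf("base page size: %zu\n", sizes[0]);
	return (0);
}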