From 7c34d798a4d6ae46122607d9f54d30bd09006395 Mon Sep 17 00:00:00 2001
From: Simon Schubert <corecode@fs.ei.tum.de>
Date: Thu, 29 Oct 2009 02:38:13 +0100
Subject: [PATCH] imgact_elf: accept .note.ABI-tag sections located anywhere

This follows up on commit d9f47b9b82dc9e762119fd00267eef9a0268680a.

While our current build infrastructure now produces binaries which can
be read properly by the kernel, this does not hold true for any non-base
linker, particularly binutils compiled directly from source, or even the
recent binutils-2.20 import.

Instead of relying on local modifications to the Elf linker scripts,
bite the bullet and make the kernel deal with PT_NOTE sections that lie
outside the first page.
---
 sys/kern/imgact_elf.c | 111 ++++++++++++++++++++++++++++++++----------
 sys/kern/kern_exec.c  |  59 +++++++++++++++-------
 sys/sys/exec.h        |   4 ++
 3 files changed, 129 insertions(+), 45 deletions(-)

diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 4c0b0b9968..2113799b9b 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -249,6 +249,87 @@ elf_check_header(const Elf_Ehdr *hdr)
 	return 0;
 }
 
+static Elf_Brandinfo *
+elf_check_abi_note(struct image_params *imgp, const Elf_Phdr *ph)
+{
+	Elf_Brandinfo *match = NULL;
+	const Elf_Note *tmp_note;
+	struct sf_buf *sfb;
+	const char *page;
+	char *data = NULL;
+	Elf_Off off;
+	size_t firstoff;
+	size_t len;
+	size_t firstlen;
+
+	len = ph->p_filesz;
+	off = ph->p_offset;
+
+	firstoff = off & PAGE_MASK;
+	firstlen = PAGE_SIZE - firstoff;
+
+	if (len < sizeof(Elf_Note) || len > PAGE_SIZE)
+		return NULL;	/* ENOEXEC? */
+
+	if (exec_map_page(imgp, off >> PAGE_SHIFT, &sfb, &page))
+		return NULL;
+
+	/*
+	 * Crosses page boundary?  Is that allowed?
+	 */
+	if (firstlen < len) {
+		data = kmalloc(len, M_TEMP, M_WAITOK);
+
+		bcopy(page + firstoff, data, firstlen);
+
+		exec_unmap_page(sfb);
+		if (exec_map_page(imgp, (off >> PAGE_SHIFT) + 1, &sfb, &page)) {
+			kfree(data, M_TEMP);
+			return NULL;
+		}
+		bcopy(page, data + firstlen, len - firstlen);
+		tmp_note = (void *)data;
+	} else {
+		tmp_note = (const void *)(page + firstoff);
+	}
+
+	while (len >= sizeof(Elf_Note)) {
+		int i;
+		size_t nlen = roundup(tmp_note->n_namesz, sizeof(Elf_Word)) +
+			      roundup(tmp_note->n_descsz, sizeof(Elf_Word)) +
+			      sizeof(Elf_Note);
+
+		if (nlen > len)
+			break;
+
+		if (tmp_note->n_type != 1)
+			goto next;
+
+		for (i = 0; i < MAX_BRANDS; i++) {
+			Elf_Brandinfo *bi = elf_brand_list[i];
+
+			if (bi != NULL && bi->match_abi_note != NULL &&
+			    bi->match_abi_note(tmp_note)) {
+				match = bi;
+				break;
+			}
+		}
+
+		if (match != NULL)
+			break;
+
+next:
+		len -= nlen;
+		tmp_note = (const Elf_Note *)((const char *)tmp_note + nlen);
+	}
+
+	if (data != NULL)
+		kfree(data, M_TEMP);
+	exec_unmap_page(sfb);
+
+	return (match);
+}
+
 static int
 elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp,
 		 vm_offset_t offset, caddr_t vmaddr, size_t memsz,
@@ -545,7 +626,7 @@ exec_elf_imgact(struct image_params *imgp)
 	int error, i;
 	const char *interp = NULL;
 	const Elf_Note *abi_note = NULL;
-	Elf_Brandinfo *brand_info;
+	Elf_Brandinfo *brand_info = NULL;
 	char *path;
 
 	error = 0;
@@ -671,21 +752,8 @@ exec_elf_imgact(struct image_params *imgp)
 			interp = imgp->image_header + phdr[i].p_offset;
 			break;
 		case PT_NOTE:	/* Check for .note.ABI-tag */
-		    {
-			const Elf_Note *tmp_note;
-			/* XXX handle anything outside the first page */
-			if (phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
-				continue;
-			if (phdr[i].p_filesz < sizeof(Elf_Note))
-				continue; /* ENOEXEC? */
-			tmp_note = (const Elf_Note *)(imgp->image_header + phdr[i].p_offset);
-			if (tmp_note->n_type != 1)
-				continue;
-			if (tmp_note->n_namesz + sizeof(Elf_Note) +
-			    tmp_note->n_descsz > phdr[i].p_filesz)
-				continue; /* ENOEXEC? */
-			abi_note = tmp_note;
-		    }
+			if (brand_info == NULL)
+				brand_info = elf_check_abi_note(imgp, &phdr[i]);
 			break;
 		case PT_PHDR: 	/* Program header table info */
 			proghdr = phdr[i].p_vaddr;
@@ -704,8 +772,6 @@ exec_elf_imgact(struct image_params *imgp)
 
 	imgp->entry_addr = entry;
 
-	brand_info = NULL;
-
 	/* We support three types of branding -- (1) the ELF EI_OSABI field
 	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
 	 * branding w/in the ELF header, and (3) path of the `interp_path'
@@ -731,15 +797,6 @@ exec_elf_imgact(struct image_params *imgp)
 
 	/* Search for a recognized ABI. */
 	if (brand_info == NULL && abi_note != NULL) {
-		for (i = 0; i < MAX_BRANDS; i++) {
-			Elf_Brandinfo *bi = elf_brand_list[i];
-
-			if (bi != NULL && bi->match_abi_note != NULL &&
-			    (*bi->match_abi_note)(abi_note)) {
-				brand_info = bi;
-				break;
-			}
-		}
 	}
 
 	/*
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 75ed6ecd12..9974137a4a 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -569,7 +569,8 @@ sys_execve(struct execve_args *uap)
 }
 
 int
-exec_map_first_page(struct image_params *imgp)
+exec_map_page(struct image_params *imgp, vm_pindex_t pageno,
+	      struct sf_buf **psfb, const char **pdata)
 {
 	int rv, i;
 	int initial_pagein;
@@ -577,37 +578,37 @@ exec_map_first_page(struct image_params *imgp)
 	vm_page_t m;
 	vm_object_t object;
 
-	if (imgp->firstpage)
-		exec_unmap_first_page(imgp);
-
 	/*
 	 * The file has to be mappable.
 	 */
 	if ((object = imgp->vp->v_object) == NULL)
 		return (EIO);
 
+	if (pageno >= object->size)
+		return (EIO);
+
 	/*
 	 * We shouldn't need protection for vm_page_grab() but we certainly
 	 * need it for the lookup loop below (lookup/busy race), since
 	 * an interrupt can unbusy and free the page before our busy check.
 	 */
 	crit_enter();
-	m = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+	m = vm_page_grab(object, pageno, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 
 	if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
 		ma[0] = m;
 		initial_pagein = VM_INITIAL_PAGEIN;
-		if (initial_pagein > object->size)
-			initial_pagein = object->size;
+		if (initial_pagein + pageno > object->size)
+			initial_pagein = object->size - pageno;
 		for (i = 1; i < initial_pagein; i++) {
-			if ((m = vm_page_lookup(object, i)) != NULL) {
+			if ((m = vm_page_lookup(object, i + pageno)) != NULL) {
 				if ((m->flags & PG_BUSY) || m->busy)
 					break;
 				if (m->valid)
 					break;
 				vm_page_busy(m);
 			} else {
-				m = vm_page_alloc(object, i, VM_ALLOC_NORMAL);
+				m = vm_page_alloc(object, i + pageno, VM_ALLOC_NORMAL);
 				if (m == NULL)
 					break;
 			}
@@ -623,7 +624,7 @@ exec_map_first_page(struct image_params *imgp)
 		 * used to properly release it.
 		 */
 		rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
-		m = vm_page_lookup(object, 0);
+		m = vm_page_lookup(object, pageno);
 
 		if (rv != VM_PAGER_OK || m == NULL || m->valid == 0) {
 			if (m) {
@@ -638,28 +639,50 @@ exec_map_first_page(struct image_params *imgp)
 	vm_page_wakeup(m);	/* unbusy the page */
 	crit_exit();
 
-	imgp->firstpage = sf_buf_alloc(m, SFB_CPUPRIVATE);
-	imgp->image_header = (void *)sf_buf_kva(imgp->firstpage);
+	*psfb = sf_buf_alloc(m, SFB_CPUPRIVATE);
+	*pdata = (void *)sf_buf_kva(*psfb);
+
+	return (0);
+}
+
+int
+exec_map_first_page(struct image_params *imgp)
+{
+	int err;
+
+	if (imgp->firstpage)
+		exec_unmap_first_page(imgp);
+
+	err = exec_map_page(imgp, 0, &imgp->firstpage, &imgp->image_header);
+
+	if (err)
+		return err;
 
 	return 0;
 }
 
 void
-exec_unmap_first_page(struct image_params *imgp)
+exec_unmap_page(struct sf_buf *sfb)
 {
 	vm_page_t m;
 
 	crit_enter();
-	if (imgp->firstpage != NULL) {
-		m = sf_buf_page(imgp->firstpage);
-		sf_buf_free(imgp->firstpage);
-		imgp->firstpage = NULL;
-		imgp->image_header = NULL;
+	if (sfb != NULL) {
+		m = sf_buf_page(sfb);
+		sf_buf_free(sfb);
 		vm_page_unhold(m);
 	}
 	crit_exit();
 }
 
+void
+exec_unmap_first_page(struct image_params *imgp)
+{
+	exec_unmap_page(imgp->firstpage);
+	imgp->firstpage = NULL;
+	imgp->image_header = NULL;
+}
+
 /*
  * Destroy old address space, and allocate a new stack
  * The new stack is only SGROWSIZ large because it is grown
diff --git a/sys/sys/exec.h b/sys/sys/exec.h
index a5431bf0de..788c6c27fa 100644
--- a/sys/sys/exec.h
+++ b/sys/sys/exec.h
@@ -76,6 +76,10 @@ struct execsw {
 #ifdef _KERNEL
 #include <vm/vm.h>
 
+struct sf_buf;
+int exec_map_page(struct image_params *, vm_pindex_t, struct sf_buf **,
+		  const char **);
+void exec_unmap_page(struct sf_buf *);
 int exec_map_first_page (struct image_params *);
 void exec_unmap_first_page (struct image_params *);
 
-- 
2.41.0