/*
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/imgact_elf.c,v 1.73.2.13 2002/12/28 19:49:41 dillon Exp $
 */
#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/pioctl.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>

#include <cpu/lwbuf.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <sys/lock.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>
#include <sys/mount.h>
#define OLD_EI_BRAND	8
#define truncps(va,ps)	((va) & ~(ps - 1))
#define aligned(a,t)	(truncps((u_long)(a), sizeof(t)) == (u_long)(a))
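/*
 * Worked example (illustrative, assuming 4 KiB pages and LP64, where
 * sizeof(Elf_Addr) == 8):
 *   truncps(0x12345, PAGE_SIZE) == 0x12000
 *   aligned(0x1008, Elf_Addr)   is true  (0x1008 is 8-byte aligned)
 *   aligned(0x100c, Elf_Addr)   is false
 */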
static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
    const char *interp, int32_t *osrel);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry);
static int __elfN(load_section)(struct proc *p,
    struct vmspace *vmspace, struct vnode *vp,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static boolean_t __elfN(check_note)(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel);
static boolean_t check_PT_NOTE(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel, const Elf_Phdr *pnote);
static int elf_legacy_coredump = 0;
static int __elfN(fallback_brand) = -1;
#if defined(__x86_64__)
SYSCTL_NODE(_kern, OID_AUTO, elf64, CTLFLAG_RW, 0, "");
SYSCTL_INT(_debug, OID_AUTO, elf64_legacy_coredump, CTLFLAG_RW,
    &elf_legacy_coredump, 0, "legacy coredump mode");
SYSCTL_INT(_kern_elf64, OID_AUTO, fallback_brand, CTLFLAG_RW,
    &elf64_fallback_brand, 0, "ELF64 brand of last resort");
TUNABLE_INT("kern.elf64.fallback_brand", &elf64_fallback_brand);
#else /* i386 assumed */
SYSCTL_NODE(_kern, OID_AUTO, elf32, CTLFLAG_RW, 0, "");
SYSCTL_INT(_debug, OID_AUTO, elf32_legacy_coredump, CTLFLAG_RW,
    &elf_legacy_coredump, 0, "legacy coredump mode");
SYSCTL_INT(_kern_elf32, OID_AUTO, fallback_brand, CTLFLAG_RW,
    &elf32_fallback_brand, 0, "ELF32 brand of last resort");
TUNABLE_INT("kern.elf32.fallback_brand", &elf32_fallback_brand);
#endif
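/*
 * Illustrative usage (not part of this file): the fallback brand can be
 * seeded from the loader or adjusted at runtime, e.g.
 *
 *   kern.elf64.fallback_brand=0            # /boot/loader.conf tunable
 *   sysctl kern.elf64.fallback_brand=0     # live sysctl(8)
 */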
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

static const char DRAGONFLY_ABI_VENDOR[] = "DragonFly";
static const char FREEBSD_ABI_VENDOR[]   = "FreeBSD";

Elf_Brandnote __elfN(dragonfly_brandnote) = {
	.hdr.n_namesz	= sizeof(DRAGONFLY_ABI_VENDOR),
	.hdr.n_descsz	= sizeof(int32_t),
	.hdr.n_type	= 1,
	.vendor		= DRAGONFLY_ABI_VENDOR,
	.flags		= BN_CAN_FETCH_OSREL,
};

Elf_Brandnote __elfN(freebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
	.hdr.n_descsz	= sizeof(int32_t),
	.hdr.n_type	= 1,
	.vendor		= FREEBSD_ABI_VENDOR,
	.flags		= BN_CAN_FETCH_OSREL,
};
int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS) {
		uprintf("WARNING: %s: could not insert brandinfo entry: %p\n",
		    __func__, entry);
		return (-1);
	}
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}
/*
 * Check if an elf brand is being used anywhere in the system.
 *
 * Used by the linux emulation module unloader.  This isn't safe from
 * races.
 */
struct elf_brand_inuse_info {
	boolean_t rval;
	Elf_Brandinfo *entry;
};

static int elf_brand_inuse_callback(struct proc *p, void *data);

boolean_t
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct elf_brand_inuse_info info;

	info.rval = FALSE;
	info.entry = entry;
	allproc_scan(elf_brand_inuse_callback, &info);
	return (info.rval);
}

static int
elf_brand_inuse_callback(struct proc *p, void *data)
{
	struct elf_brand_inuse_info *info = data;

	if (p->p_sysent == info->entry->sysvec) {
		info->rval = TRUE;
		return (-1);
	}
	return (0);
}
static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_ehsize != sizeof(Elf_Ehdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);
	return (0);
}
static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace, struct vnode *vp,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz,
    size_t filsz, vm_prot_t prot)
{
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	int count;
	size_t copy_len;
	vm_object_t object;
	vm_offset_t file_addr;

	object = vp->v_object;
	error = 0;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > vp->v_filesize || filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}
	map_addr = trunc_page((vm_offset_t)vmaddr);
	file_addr = trunc_page(offset);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page(offset + filsz) - file_addr;
	else
		map_len = round_page(offset + filsz) - file_addr;
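	/*
	 * Worked example (illustrative): 4 KiB pages, offset = 0x1234,
	 * filsz = 0x2000, memsz = 0x3000, vmaddr = 0x40001234:
	 *   file_addr = trunc_page(0x1234)     = 0x1000
	 *   map_addr  = trunc_page(0x40001234) = 0x40001000
	 *   memsz > filsz, so
	 *   map_len   = trunc_page(0x3234) - 0x1000 = 0x2000
	 * i.e. only the fully-initialized pages are file-backed; the
	 * partial last page is folded into the anonymous mapping below.
	 */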
	vm_object_reference(object);

	/* cow flags: don't dump readonly sections in core */
	cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
	    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(&vmspace->vm_map);
	rv = vm_map_insert(&vmspace->vm_map, &count,
			   object,
			   file_addr,		/* file offset */
			   map_addr,		/* virtual start */
			   map_addr + map_len,	/* virtual end */
			   VM_MAPTYPE_NORMAL,
			   prot, VM_PROT_ALL,
			   cow);
	vm_map_unlock(&vmspace->vm_map);
	vm_map_entry_release(count);
	if (rv != KERN_SUCCESS) {
		vm_object_deallocate(object);
		return (EINVAL);
	}

	/* we can stop now if we've covered it all */
	if (memsz == filsz) {
		return (0);
	}
	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page(offset + filsz);
	map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
	map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
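	/*
	 * Continuing the example above (illustrative): offset = 0x1234,
	 * filsz = 0x2000, memsz = 0x3000, vmaddr = 0x40001234 give
	 *   copy_len = 0x3234 - trunc_page(0x3234)       = 0x234
	 *   map_addr = trunc_page(0x40003234)            = 0x40003000
	 *   map_len  = round_page(0x40004234) - map_addr = 0x2000
	 * so two anonymous pages are mapped and the 0x234 initialized
	 * bytes are copied into the first of them.
	 */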
	/* This had damn well better be true! */
	if (map_len != 0) {
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
		vm_map_lock(&vmspace->vm_map);
		rv = vm_map_insert(&vmspace->vm_map, &count,
				   NULL, 0,
				   map_addr, map_addr + map_len,
				   VM_MAPTYPE_NORMAL,
				   VM_PROT_ALL, VM_PROT_ALL,
				   0);
		vm_map_unlock(&vmspace->vm_map);
		vm_map_entry_release(count);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}
	if (copy_len != 0) {
		struct lwbuf *lwb;
		struct lwbuf lwb_cache;
		vm_page_t m;

		m = vm_fault_object_page(object, trunc_page(offset + filsz),
		    VM_PROT_READ, 0, &error);
		if (m) {
			lwb = lwbuf_alloc(m, &lwb_cache);
			error = copyout((caddr_t)lwbuf_kva(lwb),
			    (caddr_t)map_addr, copy_len);
			lwbuf_free(lwb);
			vm_page_unhold(m);
		}
	}

	/*
	 * set it to the specified protection
	 */
	vm_map_protect(&vmspace->vm_map, map_addr, map_addr + map_len, prot,
	    FALSE);

	return (error);
}
/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry)
{
	struct {
		struct nlookupdata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nlookupdata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	struct mount *topmnt;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int error, i, numsegs;

	tempdata = kmalloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;
	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->vp = NULL;

	error = nlookup_init(nd, file, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(nd);
	if (error == 0)
		error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &imgp->vp);
	topmnt = nd->nl_nch.mount;
	nlookup_done(nd);
	if (error)
		goto fail;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp, topmnt);
	if (error) {
		vn_unlock(imgp->vp);
		goto fail;
	}

	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VTEXT flag, too.
	 */
	if (error == 0)
		vsetflags(imgp->vp, VTEXT);
	if (error)
		goto fail;
	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	/* (multiplication of two Elf_Half fields will not overflow) */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		error = ENOEXEC;
		goto fail;
	}

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
			/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			error = __elfN(load_section)(
				    p, vmspace, imgp->vp,
				    phdr[i].p_offset,
				    (caddr_t)phdr[i].p_vaddr +
				    rbase,
				    phdr[i].p_memsz,
				    phdr[i].p_filesz, prot);
			if (error != 0)
				goto fail;

			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr + rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;
fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);
	if (imgp->vp) {
		vput(imgp->vp);
		imgp->vp = NULL;
	}
	kfree(tempdata, M_TEMP);

	return (error);
}
static Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
    int32_t *osrel)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	Elf_Brandinfo *bi;
	boolean_t ret;
	int i;

	/* We support four types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, (3) path of the `interp_path'
	 * field, and (4) the ".note.ABI-tag" ELF section.
	 */
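	/*
	 * Concrete mapping of those four cases (illustrative values, not
	 * taken from this file): a modern FreeBSD binary carries
	 * e_ident[EI_OSABI] == ELFOSABI_FREEBSD (case 1), a FreeBSD 3.x
	 * binary the string "FreeBSD" at e_ident[OLD_EI_BRAND] (case 2),
	 * a dynamic native binary an interpreter path such as
	 * "/usr/libexec/ld-elf.so.2" in PT_INTERP (case 3), and a
	 * ".note.ABI-tag" note naming "DragonFly" plus an osrel
	 * descriptor (case 4, checked first below).
	 */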
	/* Look for an ".note.ABI-tag" ELF section */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL)
			continue;
		if (hdr->e_machine == bi->machine && (bi->flags &
		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
			ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
			if (ret)
				return (bi);
		}
	}
	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
			continue;
		if (hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}
	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
				continue;
			if (hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}
	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
			continue;
		if (hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
			return (bi);
	}
	return (NULL);
}
static int
__CONCAT(exec_,__elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0;
	int32_t osrel = 0;
	int error = 0, i, n;
	const char *interp = NULL, *newinterp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	/*
	 * Do we have a valid ELF header ?
	 *
	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later if a particular
	 * brand doesn't support it.  Both DragonFly platforms do by default.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr))
		return (ENOEXEC);
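	/*
	 * Back-of-the-envelope bound (illustrative): for ELF64,
	 * e_phentsize == sizeof(Elf64_Phdr) == 56 and the common
	 * e_phoff == sizeof(Elf64_Ehdr) == 64, so a 4 KiB first page
	 * holds at most (4096 - 64) / 56 == 72 program headers.
	 */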
	n = 0;
	baddr = 0;
	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {
			if (n == 0)
				baddr = phdr[i].p_vaddr;
			n++;
			continue;
		}
		if (phdr[i].p_type == PT_INTERP) {
			/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
				return (ENOEXEC);
			interp = imgp->image_header + phdr[i].p_offset;
			continue;
		}
	}
	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		return (ENOEXEC);
	}
	if (hdr->e_type == ET_DYN) {
		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0)
			return (ENOEXEC);
		/*
		 * Honour the base load address from the dso if it is
		 * non-zero for some reason.
		 */
		if (baddr == 0)
			et_dyn_addr = ET_DYN_LOAD_ADDR;
		else
			et_dyn_addr = 0;
	} else {
		et_dyn_addr = 0;
	}

	if (interp != NULL && brand_info->interp_newpath != NULL)
		newinterp = brand_info->interp_newpath;
	exec_new_vmspace(imgp, NULL);

	/*
	 * Yeah, I'm paranoid.  There is every reason in the world to get
	 * VTEXT now since from here on out, there are places we can have
	 * a context switch.  Better safe than sorry; I really don't want
	 * the file to change while it's being loaded.
	 */
	vsetflags(imgp->vp, VTEXT);

	vmspace = imgp->proc->p_vmspace;
	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			if (phdr[i].p_memsz == 0)
				break;
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = __elfN(load_section)(
					imgp->proc,
					vmspace,
					imgp->vp,
					phdr[i].p_offset,
					(caddr_t)phdr[i].p_vaddr + et_dyn_addr,
					phdr[i].p_memsz,
					phdr[i].p_filesz,
					prot)) != 0)
				return (error);
			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry.  Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
			    <= phdr[i].p_filesz) {
				proghdr = phdr[i].p_vaddr + hdr->e_phoff +
				    et_dyn_addr;
			}

			seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr + et_dyn_addr - seg_addr);
			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry + et_dyn_addr;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			/*
			 * Check limits.  It should be safe to check the
			 * limits after loading the segment since we do
			 * not actually fault in all the segment's pages.
			 */
			if (data_size >
			    imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
			    text_size > maxtsiz ||
			    total_size >
			    imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
				error = ENOMEM;
				return (error);
			}
			break;
		case PT_PHDR: 	/* Program header table info */
			proghdr = phdr[i].p_vaddr + et_dyn_addr;
			break;
		default:
			break;
		}
	}
	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	addr = ELF_RTLD_ADDR(vmspace);

	imgp->entry_addr = entry;

	imgp->proc->p_sysent = brand_info->sysvec;
	EVENTHANDLER_INVOKE(process_exec, imgp);
	if (interp != NULL) {
		int have_interp = FALSE;

		if (brand_info->emul_path != NULL &&
		    brand_info->emul_path[0] != '\0') {
			path = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
			ksnprintf(path, MAXPATHLEN, "%s%s",
			    brand_info->emul_path, interp);
			error = __elfN(load_file)(imgp->proc, path, &addr,
			    &imgp->entry_addr);
			kfree(path, M_TEMP);
			if (error == 0)
				have_interp = TRUE;
		}
		if (!have_interp && newinterp != NULL) {
			error = __elfN(load_file)(imgp->proc, newinterp,
			    &addr, &imgp->entry_addr);
			if (error == 0)
				have_interp = TRUE;
		}
		if (!have_interp) {
			error = __elfN(load_file)(imgp->proc, interp, &addr,
			    &imgp->entry_addr);
		}
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			return (error);
		}
	}
	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = kmalloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;
	imgp->proc->p_osrel = osrel;

	return (error);
}
int
__elfN(dragonfly_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	pos = base + (imgp->args->argc + imgp->args->envc + 2);
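	/*
	 * Sketch of the vector this walks (illustrative, for argc = 2,
	 * envc = 1):
	 *
	 *   base: argv[0] argv[1] NULL envp[0] NULL <- pos: auxv ...
	 *
	 * argc + envc + 2 skips both pointer arrays and their two NULL
	 * terminators; the argc word itself is pushed below base by the
	 * suword() at the end of this routine.
	 */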
	if (args->execfd != -1)
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	if (imgp->execpathp != 0)
		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	kfree(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
	return (0);
}
/*
 * Code for generating ELF core dumps.
 */

typedef int (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in (incremented) */
	Elf_Phdr *phdr_max;	/* Pointer bound for error check */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t vsize;		/* Total size of all writable segments. */
};

/* Closure for cb_put_fp(). */
struct fp_closure {
	struct vn_hdr *vnh;
	struct vn_hdr *vnh_max;
	int count;
};

typedef struct elf_buf {
	char	*buf;		/* header buffer (NULL during a dry run) */
	size_t	off;		/* current offset into the buffer */
	size_t	off_max;	/* allocated size of the buffer */
} *elf_buf_t;
static void *target_reserve(elf_buf_t target, size_t bytes, int *error);

static int cb_put_phdr(vm_map_entry_t, void *);
static int cb_size_segment(vm_map_entry_t, void *);
static int cb_fpcount_segment(vm_map_entry_t, void *);
static int cb_put_fp(vm_map_entry_t, void *);

static int generic_elf_coredump(struct lwp *, int, struct file *, off_t);
static int each_segment(struct proc *, segment_callback, void *, int);
static int __elfN(corehdr)(struct lwp *, int, struct file *, struct ucred *,
    int, elf_buf_t);
enum putmode { WRITE, DRYRUN };
static int __elfN(puthdr)(struct lwp *, elf_buf_t, int sig, enum putmode,
    int, struct file *);
static int elf_putallnotes(struct lwp *, elf_buf_t, int, enum putmode);
static int __elfN(putnote)(elf_buf_t, const char *, int, const void *, size_t);

static int elf_putsigs(struct lwp *, elf_buf_t);
static int elf_puttextvp(struct proc *, elf_buf_t);
static int elf_putfiles(struct proc *, elf_buf_t, struct file *);
int
__elfN(coredump)(struct lwp *lp, int sig, struct vnode *vp, off_t limit)
{
	struct file *fp;
	int error;

	if ((error = falloc(NULL, &fp, NULL)) != 0)
		return (error);
	fsetcred(fp, lp->lwp_proc->p_ucred);

	/*
	 * XXX fixme.
	 */
	fp->f_type = DTYPE_VNODE;
	fp->f_flag = O_CREAT|O_WRONLY|O_NOFOLLOW;
	fp->f_ops = &vnode_fileops;
	fp->f_data = vp;

	error = generic_elf_coredump(lp, sig, fp, limit);

	fp->f_type = 0;
	fp->f_flag = 0;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	fdrop(fp);
	return (error);
}
static int
generic_elf_coredump(struct lwp *lp, int sig, struct file *fp, off_t limit)
{
	struct proc *p = lp->lwp_proc;
	struct ucred *cred = p->p_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	struct elf_buf target;

	if (!fp)
		kprintf("can't dump core - null fp\n");

	/*
	 * Size the program segments
	 */
	seginfo.count = 0;
	seginfo.vsize = 0;
	each_segment(p, cb_size_segment, &seginfo, 1);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	bzero(&target, sizeof(target));
	__elfN(puthdr)(lp, &target, sig, DRYRUN, seginfo.count, fp);

	if (target.off + seginfo.vsize >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	target.off_max = target.off;
	target.off = 0;
	target.buf = kmalloc(target.off_max, M_TEMP, M_WAITOK|M_ZERO);

	error = __elfN(corehdr)(lp, sig, fp, cred, seginfo.count, &target);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		int i;
		ssize_t nbytes;

		php = (Elf_Phdr *)(target.buf + sizeof(Elf_Ehdr)) + 1;
		for (i = 0; i < seginfo.count; i++) {
			error = fp_write(fp, (caddr_t)php->p_vaddr,
					 php->p_filesz, &nbytes, UIO_USERSPACE);
			if (error != 0)
				break;
			php++;
		}
	}
	kfree(target.buf, M_TEMP);
	return (error);
}
/*
 * A callback for each_segment() to write out the segment's
 * program header entry.
 */
static int
cb_put_phdr(vm_map_entry_t entry, void *closure)
{
	struct phdr_closure *phc = closure;
	Elf_Phdr *phdr = phc->phdr;

	if (phc->phdr == phc->phdr_max)
		return (EINVAL);

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
	return (0);
}
/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static int
cb_size_segment(vm_map_entry_t entry, void *closure)
{
	struct sseg_closure *ssc = closure;

	++ssc->count;
	ssc->vsize += entry->end - entry->start;
	return (0);
}
/*
 * A callback for each_segment() to gather information about
 * the number of text segments.
 */
static int
cb_fpcount_segment(vm_map_entry_t entry, void *closure)
{
	int *count = closure;
	struct vnode *vp;

	if (entry->object.vm_object->type == OBJT_VNODE) {
		vp = (struct vnode *)entry->object.vm_object->handle;
		if ((vp->v_flag & VCKPT) && curproc->p_textvp == vp)
			return (0);
		++*count;
	}
	return (0);
}
static int
cb_put_fp(vm_map_entry_t entry, void *closure)
{
	struct fp_closure *fpc = closure;
	struct vn_hdr *vnh = fpc->vnh;
	Elf_Phdr *phdr = &vnh->vnh_phdr;
	struct vnode *vp;
	int error;

	/*
	 * If an entry represents a vnode then write out a file handle.
	 *
	 * If we are checkpointing a checkpoint-restored program we do
	 * NOT record the filehandle for the old checkpoint vnode (which
	 * is mapped all over the place).  Instead we rely on the fact
	 * that a checkpoint-restored program does not mmap() the checkpt
	 * vnode NOCORE, so its contents will be written out to the
	 * new checkpoint file.  This is necessary because the 'old'
	 * checkpoint file is typically destroyed when a new one is created
	 * and thus cannot be used to restore the new checkpoint.
	 *
	 * Theoretically we could create a chain of checkpoint files and
	 * operate the checkpointing operation kinda like an incremental
	 * checkpoint, but a checkpoint restore would then likely wind up
	 * referencing many prior checkpoint files and that is a bit over
	 * the top for the purpose of the checkpoint API.
	 */
	if (entry->object.vm_object->type == OBJT_VNODE) {
		vp = (struct vnode *)entry->object.vm_object->handle;
		if ((vp->v_flag & VCKPT) && curproc->p_textvp == vp)
			return (0);
		if (vnh == fpc->vnh_max)
			return (EINVAL);
		vnh->vnh_fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
		error = VFS_VPTOFH(vp, &vnh->vnh_fh.fh_fid);
		if (error) {
			char *freepath, *fullpath;

			if (vn_fullpath(curproc, vp, &fullpath, &freepath, 0)) {
				kprintf("Warning: coredump, error %d: cannot store file handle for vnode %p\n", error, vp);
			} else {
				kprintf("Warning: coredump, error %d: cannot store file handle for %s\n", error, fullpath);
				kfree(freepath, M_TEMP);
			}
			error = 0;
		}
		phdr->p_type = PT_LOAD;
		phdr->p_offset = 0;	/* not written to core */
		phdr->p_vaddr = entry->start;
		phdr->p_paddr = 0;
		phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
		phdr->p_align = PAGE_SIZE;
		phdr->p_flags = 0;
		if (entry->protection & VM_PROT_READ)
			phdr->p_flags |= PF_R;
		if (entry->protection & VM_PROT_WRITE)
			phdr->p_flags |= PF_W;
		if (entry->protection & VM_PROT_EXECUTE)
			phdr->p_flags |= PF_X;

		++fpc->vnh;
		++fpc->count;
	}
	return (0);
}
/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static int
each_segment(struct proc *p, segment_callback func, void *closure, int writable)
{
	int error = 0;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; error == 0 && entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if (writable &&
			    (entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if (writable && (entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).
		 *
		 * Currently we only dump normal VM object maps.  We do
		 * not dump submaps or virtual page tables.
		 */
		if (writable && (entry->eflags & MAP_ENTRY_NOCOREDUMP))
			continue;
		if (entry->maptype != VM_MAPTYPE_NORMAL)
			continue;
		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		error = (*func)(entry, closure);
	}
	return (error);
}
/*
 * Allocate space from an elf_buf, returning NULL (and flagging ENOMEM)
 * on overflow.  During a DRYRUN pass buf is NULL and only off advances.
 */
static void *
target_reserve(elf_buf_t target, size_t bytes, int *error)
{
	void *res = NULL;

	if (target->buf) {
		if (target->off + bytes > target->off_max)
			*error = ENOMEM;
		else
			res = target->buf + target->off;
	}
	target->off += bytes;
	return (res);
}
/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(struct lwp *lp, int sig, struct file *fp, struct ucred *cred,
    int numsegs, elf_buf_t target)
{
	int error;
	ssize_t nbytes;

	/*
	 * Fill in the header.  The fp is passed so we can detect and flag
	 * a checkpoint file pointer within the core file itself, because
	 * it may not be restored from the same file handle.
	 */
	error = __elfN(puthdr)(lp, target, sig, WRITE, numsegs, fp);

	/* Write it to the core file. */
	if (error == 0) {
		error = fp_write(fp, target->buf, target->off, &nbytes,
				 UIO_SYSSPACE);
	}
	return (error);
}
static int
__elfN(puthdr)(struct lwp *lp, elf_buf_t target, int sig, enum putmode mode,
    int numsegs, struct file *fp)
{
	struct proc *p = lp->lwp_proc;
	int error = 0;
	size_t phoff;
	size_t noteoff;
	size_t notesz;
	Elf_Ehdr *ehdr;
	Elf_Phdr *phdr;

	ehdr = target_reserve(target, sizeof(Elf_Ehdr), &error);

	phoff = target->off;
	phdr = target_reserve(target, (numsegs + 1) * sizeof(Elf_Phdr), &error);

	noteoff = target->off;
	if (error == 0)
		error = elf_putallnotes(lp, target, sig, mode);
	notesz = target->off - noteoff;
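	/*
	 * Resulting header layout so far (illustrative):
	 *
	 *   [Elf_Ehdr][(numsegs + 1) * Elf_Phdr][notes ...]
	 *   ... then padding to a page boundary ...
	 *   [segment data, written later by the caller]
	 *
	 * In DRYRUN mode nothing above was copied; only target->off grew.
	 */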
	/*
	 * put extra cruft for dumping process state here
	 *  - we really want it be before all the program
	 *    mappings
	 *  - we just need to update the offset accordingly
	 *    and GDB will be none the wiser.
	 */
	if (error == 0)
		error = elf_puttextvp(p, target);
	if (error == 0)
		error = elf_putsigs(lp, target);
	if (error == 0)
		error = elf_putfiles(p, target, fp);

	/*
	 * Align up to a page boundary for the program segments.  The
	 * actual data will be written to the output file, not to elf_buf_t,
	 * so we do not have to do any further bounds checking.
	 */
	target->off = round_page(target->off);
	if (error == 0 && ehdr != NULL) {
		/*
		 * Fill in the ELF header.
		 */
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
		ehdr->e_machine = ELF_ARCH;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;
	}
	if (error == 0 && phdr != NULL) {
		/*
		 * Fill in the program header entries.
		 */
		struct phdr_closure phc;

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.phdr_max = phdr + numsegs;
		phc.offset = target->off;
		each_segment(p, cb_put_phdr, &phc, 1);
	}
	return (error);
}
/*
 * Append core dump notes to target ELF buffer or simply update target size
 * if dryrun selected.
 */
static int
elf_putallnotes(struct lwp *corelp, elf_buf_t target, int sig,
    enum putmode mode)
{
	struct proc *p = corelp->lwp_proc;
	int error;
	struct {
		prstatus_t status;
		prfpregset_t fpregs;
		prpsinfo_t psinfo;
	} *tmpdata;
	prstatus_t *status;
	prfpregset_t *fpregs;
	prpsinfo_t *psinfo;
	struct lwp *lp;

	/*
	 * Allocate temporary storage for notes on heap to avoid stack overflow.
	 */
	if (mode != DRYRUN) {
		tmpdata = kmalloc(sizeof(*tmpdata), M_TEMP, M_ZERO | M_WAITOK);
		status = &tmpdata->status;
		fpregs = &tmpdata->fpregs;
		psinfo = &tmpdata->psinfo;
	} else {
		tmpdata = NULL;
		status = NULL;
		fpregs = NULL;
		psinfo = NULL;
	}
	/*
	 * Append LWP-agnostic note.
	 */
	if (mode != DRYRUN) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm,
			sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments
		 * properly yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
			sizeof(psinfo->pr_psargs));
	}
	error = __elfN(putnote)(target, "CORE", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);
	if (error)
		goto exit;
	/*
	 * Append first note for LWP that triggered core so that it is
	 * the selected one when the debugger starts.
	 */
	if (mode != DRYRUN) {
		status->pr_version = PRSTATUS_VERSION;
		status->pr_statussz = sizeof(prstatus_t);
		status->pr_gregsetsz = sizeof(gregset_t);
		status->pr_fpregsetsz = sizeof(fpregset_t);
		status->pr_osreldate = osreldate;
		status->pr_cursig = sig;
		/*
		 * XXX GDB needs unique pr_pid for each LWP and does not
		 * support pr_pid==0 but lwp_tid can be 0, so hack unique
		 * pr_pid.
		 */
		status->pr_pid = corelp->lwp_tid;
		fill_regs(corelp, &status->pr_reg);
		fill_fpregs(corelp, fpregs);
	}
	error = __elfN(putnote)(target, "CORE", NT_PRSTATUS, status,
	    sizeof *status);
	if (error)
		goto exit;
	error = __elfN(putnote)(target, "CORE", NT_FPREGSET, fpregs,
	    sizeof *fpregs);
	if (error)
		goto exit;
	/*
	 * Then append notes for other LWPs.
	 */
	FOREACH_LWP_IN_PROC(lp, p) {
		if (lp == corelp)
			continue;
		/* skip lwps being created */
		if (lp->lwp_thread == NULL)
			continue;
		if (mode != DRYRUN) {
			status->pr_pid = lp->lwp_tid;
			fill_regs(lp, &status->pr_reg);
			fill_fpregs(lp, fpregs);
		}
		error = __elfN(putnote)(target, "CORE", NT_PRSTATUS,
					status, sizeof *status);
		if (error)
			goto exit;
		error = __elfN(putnote)(target, "CORE", NT_FPREGSET,
					fpregs, sizeof *fpregs);
		if (error)
			goto exit;
	}

exit:
	if (tmpdata != NULL)
		kfree(tmpdata, M_TEMP);
	return (error);
}
/*
 * Generate a note sub-structure.
 *
 * NOTE: 4-byte alignment.
 */
static int
__elfN(putnote)(elf_buf_t target, const char *name, int type,
    const void *desc, size_t descsz)
{
	int error = 0;
	char *dst;
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	dst = target_reserve(target, sizeof(note), &error);
	if (dst != NULL)
		bcopy(&note, dst, sizeof note);
	dst = target_reserve(target, note.n_namesz, &error);
	if (dst != NULL)
		bcopy(name, dst, note.n_namesz);
	target->off = roundup2(target->off, sizeof(Elf_Word));
	dst = target_reserve(target, note.n_descsz, &error);
	if (dst != NULL)
		bcopy(desc, dst, note.n_descsz);
	target->off = roundup2(target->off, sizeof(Elf_Word));
	return (error);
}
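/*
 * Worked example for __elfN(putnote) (illustrative): a "CORE"/NT_PRSTATUS
 * note has n_namesz == 5, so the name "CORE\0" occupies 5 bytes rounded
 * up to 8; the whole record is 12 (header) + 8 (name) +
 * roundup2(descsz, 4) bytes.
 */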
static int
elf_putsigs(struct lwp *lp, elf_buf_t target)
{
	/* XXX lwp handle more than one lwp */
	struct proc *p = lp->lwp_proc;
	int error = 0;
	struct ckpt_siginfo *csi;

	csi = target_reserve(target, sizeof(struct ckpt_siginfo), &error);
	if (csi) {
		csi->csi_ckptpisz = sizeof(struct ckpt_siginfo);
		bcopy(p->p_sigacts, &csi->csi_sigacts, sizeof(*p->p_sigacts));
		bcopy(&p->p_realtimer, &csi->csi_itimerval, sizeof(struct itimerval));
		bcopy(&lp->lwp_sigmask, &csi->csi_sigmask,
		      sizeof(sigset_t));
		csi->csi_sigparent = p->p_sigparent;
	}
	return (error);
}
static int
elf_putfiles(struct proc *p, elf_buf_t target, struct file *ckfp)
{
	int error = 0;
	int i;
	struct ckpt_filehdr *cfh = NULL;
	struct ckpt_fileinfo *cfi;
	struct file *fp;
	struct vnode *vp;

	/*
	 * the duplicated loop is gross, but it was the only way
	 * to eliminate uninitialized variable warnings
	 */
	cfh = target_reserve(target, sizeof(struct ckpt_filehdr), &error);
	if (cfh) {
		cfh->cfh_nfiles = 0;
	}

	/*
	 * ignore STDIN/STDERR/STDOUT.
	 */
	for (i = 3; error == 0 && i < p->p_fd->fd_nfiles; i++) {
		fp = holdfp(p->p_fd, i, -1);
		if (fp == NULL)
			continue;
		/*
		 * XXX Only checkpoint vnodes for now.
		 */
		if (fp->f_type != DTYPE_VNODE) {
			fdrop(fp);
			continue;
		}
		cfi = target_reserve(target, sizeof(struct ckpt_fileinfo),
				     &error);
		if (cfi == NULL) {
			fdrop(fp);
			continue;
		}
		cfi->cfi_index = -1;
		cfi->cfi_type = fp->f_type;
		cfi->cfi_flags = fp->f_flag;
		cfi->cfi_offset = fp->f_offset;
		cfi->cfi_ckflags = 0;
		if (fp == ckfp)
			cfi->cfi_ckflags |= CKFIF_ISCKPTFD;
		/* f_count and f_msgcount should not be saved/restored */
		/* XXX save cred info */
		switch(fp->f_type) {
		case DTYPE_VNODE:
			vp = (struct vnode *)fp->f_data;
			/*
			 * it looks like a bug in ptrace is marking
			 * a non-vnode as a vnode - until we find the
			 * root cause this will at least prevent
			 * further panics from truss
			 */
			if (vp == NULL || vp->v_mount == NULL)
				break;
			cfh->cfh_nfiles++;
			cfi->cfi_index = i;
			cfi->cfi_fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
			error = VFS_VPTOFH(vp, &cfi->cfi_fh.fh_fid);
			break;
		default:
			break;
		}
		fdrop(fp);
	}
	return (error);
}
static int
elf_puttextvp(struct proc *p, elf_buf_t target)
{
	int error = 0;
	int *vn_count;
	struct fp_closure fpc;
	struct ckpt_vminfo *vminfo;

	vminfo = target_reserve(target, sizeof(struct ckpt_vminfo), &error);
	if (vminfo != NULL) {
		vminfo->cvm_dsize = p->p_vmspace->vm_dsize;
		vminfo->cvm_tsize = p->p_vmspace->vm_tsize;
		vminfo->cvm_daddr = p->p_vmspace->vm_daddr;
		vminfo->cvm_taddr = p->p_vmspace->vm_taddr;
	}

	fpc.count = 0;
	vn_count = target_reserve(target, sizeof(int), &error);
	if (target->buf != NULL) {
		fpc.vnh = (struct vn_hdr *)(target->buf + target->off);
		fpc.vnh_max = fpc.vnh +
		    (target->off_max - target->off) / sizeof(struct vn_hdr);
		error = each_segment(p, cb_put_fp, &fpc, 0);
		if (vn_count)
			*vn_count = fpc.count;
	} else {
		error = each_segment(p, cb_fpcount_segment, &fpc.count, 0);
	}
	target->off += fpc.count * sizeof(struct vn_hdr);
	return (error);
}
/*
 * Try to find the appropriate ABI-note section for checknote.  The
 * entire image is searched if necessary, not only the first page.
 */
static boolean_t
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
    int32_t *osrel)
{
	boolean_t valid_note_found;
	const Elf_Phdr *phdr, *pnote;
	const Elf_Ehdr *hdr;
	int i;

	valid_note_found = FALSE;
	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE) {
			pnote = &phdr[i];
			valid_note_found = check_PT_NOTE(imgp, checknote,
			    osrel, pnote);
			if (valid_note_found)
				break;
		}
	}
	return (valid_note_found);
}

static boolean_t
check_PT_NOTE(struct image_params *imgp, Elf_Brandnote *checknote,
    int32_t *osrel, const Elf_Phdr *pnote)
{
	boolean_t limited_to_first_page;
	boolean_t found = FALSE;
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	__ElfN(Off) noteloc, firstloc;
	__ElfN(Size) notesz, firstlen, endbyte;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	const char *page;
	char *data = NULL;
	int n;

	notesz = pnote->p_filesz;
	noteloc = pnote->p_offset;
	endbyte = noteloc + notesz;
	limited_to_first_page = noteloc < PAGE_SIZE && endbyte < PAGE_SIZE;
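	/*
	 * Example (illustrative): a typical ABI note lives early in the
	 * image, e.g. p_offset = 0x254 and p_filesz = 0x18 give
	 * endbyte = 0x26c < PAGE_SIZE, so the note can be parsed straight
	 * from the already-mapped first page.
	 */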
	if (limited_to_first_page) {
		note = (const Elf_Note *)(imgp->image_header + noteloc);
		note_end = (const Elf_Note *)(imgp->image_header + endbyte);
		note0 = note;
	} else {
		firstloc = noteloc & PAGE_MASK;
		firstlen = PAGE_SIZE - firstloc;
		if (notesz < sizeof(Elf_Note) || notesz > PAGE_SIZE)
			return (FALSE);

		lwb = &lwb_cache;
		if (exec_map_page(imgp, noteloc >> PAGE_SHIFT, &lwb, &page))
			return (FALSE);
		if (firstlen < notesz) {	/* crosses page boundary */
			data = kmalloc(notesz, M_TEMP, M_WAITOK);
			bcopy(page + firstloc, data, firstlen);

			exec_unmap_page(lwb);
			lwb = &lwb_cache;
			if (exec_map_page(imgp, (noteloc >> PAGE_SHIFT) + 1,
			    &lwb, &page)) {
				kfree(data, M_TEMP);
				return (FALSE);
			}
			bcopy(page, data + firstlen, notesz - firstlen);
			note = note0 = (const Elf_Note *)(data);
			note_end = (const Elf_Note *)(data + notesz);
		} else {
			note = note0 = (const Elf_Note *)(page + firstloc);
			note_end = (const Elf_Note *)(page + firstloc +
			    notesz);
		}
	}
	for (n = 0; n < 100 && note >= note0 && note < note_end; n++) {
		if (!aligned(note, Elf32_Addr))
			break;
		note_name = (const char *)(note + 1);

		if (note->n_namesz == checknote->hdr.n_namesz
		    && note->n_descsz == checknote->hdr.n_descsz
		    && note->n_type == checknote->hdr.n_type
		    && (strncmp(checknote->vendor, note_name,
			checknote->hdr.n_namesz) == 0)) {
			/* Fetch osreldata from the ".note.ABI-tag" section */
			if ((checknote->flags & BN_CAN_FETCH_OSREL) != 0 &&
			    osrel != NULL)
				*osrel = *(const int32_t *) (note_name +
				    roundup2(checknote->hdr.n_namesz,
					sizeof(Elf32_Addr)));
			found = TRUE;
			break;
		}

		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
	}
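	/*
	 * Each iteration above advances by sizeof(Elf_Note) +
	 * roundup2(n_namesz, 4) + roundup2(n_descsz, 4).  Illustrative:
	 * for a DragonFly ABI note with n_namesz == 10 ("DragonFly\0")
	 * and n_descsz == 4 that is 12 + 12 + 4 == 28 bytes.
	 */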
	if (!limited_to_first_page) {
		if (data != NULL)
			kfree(data, M_TEMP);
		exec_unmap_page(lwb);
	}
	return (found);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
#if defined(__x86_64__)
static struct execsw elf_execsw = {exec_elf64_imgact, "ELF64"};
EXEC_SET_ORDERED(elf64, elf_execsw, SI_ORDER_FIRST);
#else /* i386 assumed */
static struct execsw elf_execsw = {exec_elf32_imgact, "ELF32"};
EXEC_SET_ORDERED(elf32, elf_execsw, SI_ORDER_FIRST);
#endif