2 * Copyright (c) 2000 David O'Brien
3 * Copyright (c) 1995-1996 Søren Schmidt
4 * Copyright (c) 1996 Peter Wemm
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer
12 * in this position and unchanged.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * $FreeBSD: src/sys/kern/imgact_elf.c,v 1.73.2.13 2002/12/28 19:49:41 dillon Exp $
33 #include <sys/param.h>
35 #include <sys/fcntl.h>
37 #include <sys/imgact.h>
38 #include <sys/imgact_elf.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
42 #include <sys/systm.h>
44 #include <sys/nlookup.h>
45 #include <sys/pioctl.h>
46 #include <sys/procfs.h>
47 #include <sys/resourcevar.h>
48 #include <sys/signalvar.h>
50 #include <sys/syscall.h>
51 #include <sys/sysctl.h>
52 #include <sys/sysent.h>
53 #include <sys/vnode.h>
54 #include <sys/eventhandler.h>
56 #include <cpu/lwbuf.h>
59 #include <vm/vm_kern.h>
60 #include <vm/vm_param.h>
63 #include <vm/vm_map.h>
64 #include <vm/vm_object.h>
65 #include <vm/vm_extern.h>
67 #include <machine/elf.h>
68 #include <machine/md_var.h>
69 #include <sys/mount.h>
72 #define OLD_EI_BRAND 8
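/*
 * OLD_EI_BRAND is the e_ident[] index at which FreeBSD 3.x-style string
 * branding was stored; it is compared against a brand's compat_3_brand
 * field in the brand search below.
 */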
73 #define truncps(va,ps) ((va) & ~(ps - 1))
74 #define aligned(a,t) (truncps((u_long)(a), sizeof(t)) == (u_long)(a))
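/*
 * For example, with 4 KiB pages truncps(0x1234, PAGE_SIZE) yields 0x1000,
 * and aligned(p, Elf_Addr) is true only when p is a multiple of
 * sizeof(Elf_Addr); the latter is used to sanity-check the program header
 * table pointer before it is dereferenced.
 */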
76 static int __elfN(check_header)(const Elf_Ehdr *hdr);
77 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
78 const char *interp, int32_t *osrel);
79 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
81 static int __elfN(load_section)(struct proc *p,
82 struct vmspace *vmspace, struct vnode *vp,
83 vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
85 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
86 static boolean_t __elfN(check_note)(struct image_params *imgp,
87 Elf_Brandnote *checknote, int32_t *osrel);
89 static int elf_legacy_coredump = 0;
90 static int __elfN(fallback_brand) = -1;
91 #if defined(__x86_64__)
92 SYSCTL_NODE(_kern, OID_AUTO, elf64, CTLFLAG_RW, 0, "");
93 SYSCTL_INT(_debug, OID_AUTO, elf64_legacy_coredump, CTLFLAG_RW,
94 &elf_legacy_coredump, 0, "legacy coredump mode");
95 SYSCTL_INT(_kern_elf64, OID_AUTO, fallback_brand, CTLFLAG_RW,
96 &elf64_fallback_brand, 0, "ELF64 brand of last resort");
97 TUNABLE_INT("kern.elf64.fallback_brand", &elf64_fallback_brand);
98 #else /* i386 assumed */
99 SYSCTL_NODE(_kern, OID_AUTO, elf32, CTLFLAG_RW, 0, "");
100 SYSCTL_INT(_debug, OID_AUTO, elf32_legacy_coredump, CTLFLAG_RW,
101 &elf_legacy_coredump, 0, "legacy coredump mode");
102 SYSCTL_INT(_kern_elf32, OID_AUTO, fallback_brand, CTLFLAG_RW,
103 &elf32_fallback_brand, 0, "ELF32 brand of last resort");
104 TUNABLE_INT("kern.elf32.fallback_brand", &elf32_fallback_brand);
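/*
 * The fallback brand can be preset from the boot loader (for example a
 * kern.elf32.fallback_brand line in loader.conf) or adjusted at run time
 * through the sysctl above; the default of -1 matches no brand.
 */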
107 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
109 static const char DRAGONFLY_ABI_VENDOR[] = "DragonFly";
110 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
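/*
 * These vendor strings are matched against the name field of a binary's
 * ".note.ABI-tag" note.  On disk such a note is an Elf_Note header
 * (n_namesz, n_descsz, n_type) followed by the NUL-terminated vendor
 * name and a 32-bit osreldate descriptor, which is the layout the
 * brandnote templates below describe.
 */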
112 Elf_Brandnote __elfN(dragonfly_brandnote) = {
113 .hdr.n_namesz = sizeof(DRAGONFLY_ABI_VENDOR),
114 .hdr.n_descsz = sizeof(int32_t),
116 .vendor = DRAGONFLY_ABI_VENDOR,
117 .flags = BN_CAN_FETCH_OSREL,
120 Elf_Brandnote __elfN(freebsd_brandnote) = {
121 .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
122 .hdr.n_descsz = sizeof(int32_t),
124 .vendor = FREEBSD_ABI_VENDOR,
125 .flags = BN_CAN_FETCH_OSREL,
129 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
133 for (i = 0; i < MAX_BRANDS; i++) {
134 if (elf_brand_list[i] == NULL) {
135 elf_brand_list[i] = entry;
139 if (i == MAX_BRANDS) {
140 uprintf("WARNING: %s: could not insert brandinfo entry: %p\n",
148 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
152 for (i = 0; i < MAX_BRANDS; i++) {
153 if (elf_brand_list[i] == entry) {
154 elf_brand_list[i] = NULL;
164 * Check if an elf brand is being used anywhere in the system.
166 * Used by the linux emulation module unloader. This isn't safe from races.
169 struct elf_brand_inuse_info {
171 Elf_Brandinfo *entry;
174 static int elf_brand_inuse_callback(struct proc *p, void *data);
177 __elfN(brand_inuse)(Elf_Brandinfo *entry)
179 struct elf_brand_inuse_info info;
183 	allproc_scan(elf_brand_inuse_callback, &info);
189 elf_brand_inuse_callback(struct proc *p, void *data)
191 struct elf_brand_inuse_info *info = data;
193 if (p->p_sysent == info->entry->sysvec) {
201 __elfN(check_header)(const Elf_Ehdr *hdr)
207 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
208 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
209 hdr->e_ident[EI_VERSION] != EV_CURRENT ||
210 hdr->e_phentsize != sizeof(Elf_Phdr) ||
211 hdr->e_ehsize != sizeof(Elf_Ehdr) ||
212 hdr->e_version != ELF_TARG_VER)
216 * Make sure we have at least one brand for this machine.
219 for (i = 0; i < MAX_BRANDS; i++) {
220 bi = elf_brand_list[i];
221 if (bi != NULL && bi->machine == hdr->e_machine)
231 __elfN(load_section)(struct proc *p, struct vmspace *vmspace, struct vnode *vp,
232 vm_offset_t offset, caddr_t vmaddr, size_t memsz,
233 size_t filsz, vm_prot_t prot)
236 vm_offset_t map_addr;
241 vm_offset_t file_addr;
243 object = vp->v_object;
247 * It's necessary to fail if the filsz + offset taken from the
248 * header is greater than the actual file pager object's size.
249 	 * If we were to allow this, then the vm_map_insert() below would
250 * walk right off the end of the file object and into the ether.
252 * While I'm here, might as well check for something else that
253 * is invalid: filsz cannot be greater than memsz.
255 if ((off_t)filsz + offset > vp->v_filesize || filsz > memsz) {
256 uprintf("elf_load_section: truncated ELF file\n");
260 map_addr = trunc_page((vm_offset_t)vmaddr);
261 file_addr = trunc_page(offset);
264 * We have two choices. We can either clear the data in the last page
265 * of an oversized mapping, or we can start the anon mapping a page
266 * early and copy the initialized data into that first page. We
267 	 * choose the second.
270 map_len = trunc_page(offset+filsz) - file_addr;
272 map_len = round_page(offset+filsz) - file_addr;
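	/*
	 * Example, assuming 4 KiB pages and a segment starting at file
	 * offset 0 with filsz 0x5234: when memsz exceeds filsz only the
	 * whole pages (0x5000 bytes) are mapped from the file here and the
	 * remaining 0x234 bytes are copied into the anonymous mapping set
	 * up further down; when memsz == filsz the full rounded 0x6000
	 * bytes come straight from the file.
	 */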
275 vm_object_reference(object);
277 /* cow flags: don't dump readonly sections in core */
278 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
279 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
281 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
282 vm_map_lock(&vmspace->vm_map);
283 rv = vm_map_insert(&vmspace->vm_map, &count,
285 file_addr, /* file offset */
286 map_addr, /* virtual start */
287 map_addr + map_len,/* virtual end */
291 vm_map_unlock(&vmspace->vm_map);
292 vm_map_entry_release(count);
293 if (rv != KERN_SUCCESS) {
294 vm_object_deallocate(object);
298 /* we can stop now if we've covered it all */
299 if (memsz == filsz) {
306 * We have to get the remaining bit of the file into the first part
307 * of the oversized map segment. This is normally because the .data
308 * segment in the file is extended to provide bss. It's a neat idea
309 * to try and save a page, but it's a pain in the behind to implement.
311 copy_len = (offset + filsz) - trunc_page(offset + filsz);
312 map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
313 map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
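	/*
	 * copy_len is the sub-page tail of the file data (0x234 in the
	 * example above); it is copied by hand into the first page of the
	 * zero-filled mapping created below so the rest of that page reads
	 * as bss.
	 */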
315 /* This had damn well better be true! */
317 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
318 vm_map_lock(&vmspace->vm_map);
319 rv = vm_map_insert(&vmspace->vm_map, &count,
321 map_addr, map_addr + map_len,
323 VM_PROT_ALL, VM_PROT_ALL,
325 vm_map_unlock(&vmspace->vm_map);
326 vm_map_entry_release(count);
327 if (rv != KERN_SUCCESS) {
335 struct lwbuf lwb_cache;
337 m = vm_fault_object_page(object, trunc_page(offset + filsz),
338 VM_PROT_READ, 0, &error);
340 lwb = lwbuf_alloc(m, &lwb_cache);
341 error = copyout((caddr_t)lwbuf_kva(lwb),
342 (caddr_t)map_addr, copy_len);
352 * set it to the specified protection
354 vm_map_protect(&vmspace->vm_map, map_addr, map_addr + map_len, prot,
361 * Load the file "file" into memory. It may be either a shared object
364 * The "addr" reference parameter is in/out. On entry, it specifies
365 * the address where a shared object should be loaded. If the file is
366 * an executable, this value is ignored. On exit, "addr" specifies
367 * where the file was actually loaded.
369 * The "entry" reference parameter is out only. On exit, it specifies
370 * the entry point for the loaded file.
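 * In practice this is how the ELF interpreter (the run-time linker named
 * by a PT_INTERP header) is brought into a new process during exec.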
373 __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry)
376 struct nlookupdata nd;
378 struct image_params image_params;
380 const Elf_Ehdr *hdr = NULL;
381 const Elf_Phdr *phdr = NULL;
382 struct nlookupdata *nd;
383 struct vmspace *vmspace = p->p_vmspace;
385 struct image_params *imgp;
386 struct mount *topmnt;
389 u_long base_addr = 0;
390 int error, i, numsegs;
392 tempdata = kmalloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
394 attr = &tempdata->attr;
395 imgp = &tempdata->image_params;
398 * Initialize part of the common data
402 imgp->firstpage = NULL;
403 imgp->image_header = NULL;
406 error = nlookup_init(nd, file, UIO_SYSSPACE, NLC_FOLLOW);
410 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &imgp->vp);
411 topmnt = nd->nl_nch.mount;
417 * Check permissions, modes, uid, etc on the file, and "open" it.
419 error = exec_check_permissions(imgp, topmnt);
425 error = exec_map_first_page(imgp);
427 * Also make certain that the interpreter stays the same, so set
428 * its VTEXT flag, too.
431 vsetflags(imgp->vp, VTEXT);
436 hdr = (const Elf_Ehdr *)imgp->image_header;
437 if ((error = __elfN(check_header)(hdr)) != 0)
439 if (hdr->e_type == ET_DYN)
441 else if (hdr->e_type == ET_EXEC)
448 /* Only support headers that fit within first page for now */
449 /* (multiplication of two Elf_Half fields will not overflow) */
450 if ((hdr->e_phoff > PAGE_SIZE) ||
451 (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
456 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
457 if (!aligned(phdr, Elf_Addr)) {
462 for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
463 if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
464 /* Loadable segment */
466 if (phdr[i].p_flags & PF_X)
467 prot |= VM_PROT_EXECUTE;
468 if (phdr[i].p_flags & PF_W)
469 prot |= VM_PROT_WRITE;
470 if (phdr[i].p_flags & PF_R)
471 prot |= VM_PROT_READ;
473 error = __elfN(load_section)(
474 p, vmspace, imgp->vp,
476 (caddr_t)phdr[i].p_vaddr +
479 phdr[i].p_filesz, prot);
483 			 * Establish the base address if this is the first segment.
487 base_addr = trunc_page(phdr[i].p_vaddr + rbase);
492 *entry = (unsigned long)hdr->e_entry + rbase;
496 exec_unmap_first_page(imgp);
501 kfree(tempdata, M_TEMP);
506 static Elf_Brandinfo *
507 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
510 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
515 /* We support four types of branding -- (1) the ELF EI_OSABI field
516 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
517 * branding within the ELF header, (3) path of the `interp_path' field,
518 * and (4) the ".note.ABI-tag" ELF section.
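 *
 * The checks below run in rough order of precedence: a matching ABI note
 * wins, then header branding, then the interpreter path, and finally the
 * configured fallback brand.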
521 /* Look for an ".note.ABI-tag" ELF section */
522 for (i = 0; i < MAX_BRANDS; i++) {
523 bi = elf_brand_list[i];
527 if (hdr->e_machine == bi->machine && (bi->flags &
528 (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
529 ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
535 /* If the executable has a brand, search for it in the brand list. */
536 for (i = 0; i < MAX_BRANDS; i++) {
537 bi = elf_brand_list[i];
539 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
541 if (hdr->e_machine == bi->machine &&
542 (hdr->e_ident[EI_OSABI] == bi->brand ||
543 strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
544 bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
548 /* Lacking a known brand, search for a recognized interpreter. */
549 if (interp != NULL) {
550 for (i = 0; i < MAX_BRANDS; i++) {
551 bi = elf_brand_list[i];
553 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
555 if (hdr->e_machine == bi->machine &&
556 strcmp(interp, bi->interp_path) == 0)
561 /* Lacking a recognized interpreter, try the default brand */
562 for (i = 0; i < MAX_BRANDS; i++) {
563 bi = elf_brand_list[i];
565 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
567 if (hdr->e_machine == bi->machine &&
568 __elfN(fallback_brand) == bi->brand)
575 __CONCAT(exec_,__elfN(imgact))(struct image_params *imgp)
577 const Elf_Ehdr *hdr = (const Elf_Ehdr *) imgp->image_header;
578 const Elf_Phdr *phdr;
579 Elf_Auxargs *elf_auxargs;
580 struct vmspace *vmspace;
582 u_long text_size = 0, data_size = 0, total_size = 0;
583 u_long text_addr = 0, data_addr = 0;
584 u_long seg_size, seg_addr;
585 u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0;
588 const char *interp = NULL, *newinterp = NULL;
589 Elf_Brandinfo *brand_info;
593 	 * Do we have a valid ELF header?
595 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later if a particular
596 * brand doesn't support it. Both DragonFly platforms do by default.
598 if (__elfN(check_header)(hdr) != 0 ||
599 (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
603 * From here on down, we return an errno, not -1, as we've
604 * detected an ELF file.
607 if ((hdr->e_phoff > PAGE_SIZE) ||
608 (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
609 /* Only support headers in first page for now */
612 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
613 if (!aligned(phdr, Elf_Addr))
617 for (i = 0; i < hdr->e_phnum; i++) {
618 if (phdr[i].p_type == PT_LOAD) {
620 baddr = phdr[i].p_vaddr;
624 if (phdr[i].p_type == PT_INTERP) {
625 /* Path to interpreter */
626 if (phdr[i].p_filesz > MAXPATHLEN ||
627 phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
629 interp = imgp->image_header + phdr[i].p_offset;
634 brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel);
635 if (brand_info == NULL) {
636 uprintf("ELF binary type \"%u\" not known.\n",
637 hdr->e_ident[EI_OSABI]);
640 if (hdr->e_type == ET_DYN) {
641 if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0)
644 * Honour the base load address from the dso if it is
645 * non-zero for some reason.
648 et_dyn_addr = ET_DYN_LOAD_ADDR;
654 if (interp != NULL && brand_info->interp_newpath != NULL)
655 newinterp = brand_info->interp_newpath;
657 exec_new_vmspace(imgp, NULL);
660 * Yeah, I'm paranoid. There is every reason in the world to get
661 * VTEXT now since from here on out, there are places we can have
662 * a context switch. Better safe than sorry; I really don't want
663 * the file to change while it's being loaded.
665 vsetflags(imgp->vp, VTEXT);
667 vmspace = imgp->proc->p_vmspace;
669 for (i = 0; i < hdr->e_phnum; i++) {
670 switch (phdr[i].p_type) {
672 case PT_LOAD: /* Loadable segment */
673 if (phdr[i].p_memsz == 0)
676 if (phdr[i].p_flags & PF_X)
677 prot |= VM_PROT_EXECUTE;
678 if (phdr[i].p_flags & PF_W)
679 prot |= VM_PROT_WRITE;
680 if (phdr[i].p_flags & PF_R)
681 prot |= VM_PROT_READ;
683 if ((error = __elfN(load_section)(
688 (caddr_t)phdr[i].p_vaddr + et_dyn_addr,
695 * If this segment contains the program headers,
696 * remember their virtual address for the AT_PHDR
697 			 * aux entry. Static binaries don't usually include a PT_PHDR entry.
700 if (phdr[i].p_offset == 0 &&
701 hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
703 proghdr = phdr[i].p_vaddr + hdr->e_phoff +
706 seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
707 seg_size = round_page(phdr[i].p_memsz +
708 phdr[i].p_vaddr + et_dyn_addr - seg_addr);
711 * Is this .text or .data? We can't use
712 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
713 * alpha terribly and possibly does other bad
714 * things so we stick to the old way of figuring
715 * it out: If the segment contains the program
716 			 * entry point, it's a text segment; otherwise it is a data segment.
719 * Note that obreak() assumes that data_addr +
720 * data_size == end of data load area, and the ELF
721 * file format expects segments to be sorted by
722 * address. If multiple data segments exist, the
723 * last one will be used.
725 if (hdr->e_entry >= phdr[i].p_vaddr &&
726 hdr->e_entry < (phdr[i].p_vaddr +
728 text_size = seg_size;
729 text_addr = seg_addr;
730 entry = (u_long)hdr->e_entry + et_dyn_addr;
732 data_size = seg_size;
733 data_addr = seg_addr;
735 total_size += seg_size;
738 * Check limits. It should be safe to check the
739 * limits after loading the segment since we do
740 * not actually fault in all the segment's pages.
743 imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur ||
744 text_size > maxtsiz ||
746 imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
751 case PT_PHDR: /* Program header table info */
752 proghdr = phdr[i].p_vaddr + et_dyn_addr;
759 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
760 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
761 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
762 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
764 addr = ELF_RTLD_ADDR(vmspace);
766 imgp->entry_addr = entry;
768 imgp->proc->p_sysent = brand_info->sysvec;
769 EVENTHANDLER_INVOKE(process_exec, imgp);
771 if (interp != NULL) {
772 int have_interp = FALSE;
773 if (brand_info->emul_path != NULL &&
774 brand_info->emul_path[0] != '\0') {
775 path = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
776 ksnprintf(path, MAXPATHLEN, "%s%s",
777 brand_info->emul_path, interp);
778 error = __elfN(load_file)(imgp->proc, path, &addr,
784 if (!have_interp && newinterp != NULL) {
785 error = __elfN(load_file)(imgp->proc, newinterp,
786 &addr, &imgp->entry_addr);
791 error = __elfN(load_file)(imgp->proc, interp, &addr,
795 uprintf("ELF interpreter %s not found\n", interp);
802 * Construct auxargs table (used by the fixup routine)
804 elf_auxargs = kmalloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
805 elf_auxargs->execfd = -1;
806 elf_auxargs->phdr = proghdr;
807 elf_auxargs->phent = hdr->e_phentsize;
808 elf_auxargs->phnum = hdr->e_phnum;
809 elf_auxargs->pagesz = PAGE_SIZE;
810 elf_auxargs->base = addr;
811 elf_auxargs->flags = 0;
812 elf_auxargs->entry = entry;
814 imgp->auxargs = elf_auxargs;
815 imgp->interpreted = 0;
816 imgp->proc->p_osrel = osrel;
822 __elfN(dragonfly_fixup)(register_t **stack_base, struct image_params *imgp)
824 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
828 base = (Elf_Addr *)*stack_base;
829 pos = base + (imgp->args->argc + imgp->args->envc + 2);
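	/*
	 * base points at the argv pointer array on the new stack; the
	 * "+ 2" skips the argv and envp vectors plus their two NULL
	 * terminators so the auxiliary vector lands directly after them.
	 */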
831 if (args->execfd != -1)
832 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
833 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
834 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
835 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
836 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
837 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
838 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
839 AUXARGS_ENTRY(pos, AT_BASE, args->base);
840 if (imgp->execpathp != 0)
841 AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
842 AUXARGS_ENTRY(pos, AT_NULL, 0);
844 kfree(imgp->auxargs, M_TEMP);
845 imgp->auxargs = NULL;
848 suword(base, (long)imgp->args->argc);
849 *stack_base = (register_t *)base;
854 * Code for generating ELF core dumps.
857 typedef int (*segment_callback)(vm_map_entry_t, void *);
859 /* Closure for cb_put_phdr(). */
860 struct phdr_closure {
861 Elf_Phdr *phdr; /* Program header to fill in (incremented) */
862 Elf_Phdr *phdr_max; /* Pointer bound for error check */
863 Elf_Off offset; /* Offset of segment in core file */
866 /* Closure for cb_size_segment(). */
867 struct sseg_closure {
868 int count; /* Count of writable segments. */
869 size_t vsize; /* Total size of all writable segments. */
872 /* Closure for cb_put_fp(). */
875 struct vn_hdr *vnh_max;
880 typedef struct elf_buf {
886 static void *target_reserve(elf_buf_t target, size_t bytes, int *error);
888 static int cb_put_phdr (vm_map_entry_t, void *);
889 static int cb_size_segment (vm_map_entry_t, void *);
890 static int cb_fpcount_segment(vm_map_entry_t, void *);
891 static int cb_put_fp(vm_map_entry_t, void *);
894 static int each_segment (struct proc *, segment_callback, void *, int);
895 static int __elfN(corehdr)(struct lwp *, int, struct file *, struct ucred *,
897 enum putmode { WRITE, DRYRUN };
898 static int __elfN(puthdr)(struct lwp *, elf_buf_t, int sig, enum putmode,
900 static int elf_putallnotes(struct lwp *, elf_buf_t, int, enum putmode);
901 static int __elfN(putnote)(elf_buf_t, const char *, int, const void *, size_t);
903 static int elf_putsigs(struct lwp *, elf_buf_t);
904 static int elf_puttextvp(struct proc *, elf_buf_t);
905 static int elf_putfiles(struct proc *, elf_buf_t, struct file *);
908 __elfN(coredump)(struct lwp *lp, int sig, struct vnode *vp, off_t limit)
913 if ((error = falloc(NULL, &fp, NULL)) != 0)
915 fsetcred(fp, lp->lwp_proc->p_ucred);
920 fp->f_type = DTYPE_VNODE;
921 fp->f_flag = O_CREAT|O_WRONLY|O_NOFOLLOW;
922 fp->f_ops = &vnode_fileops;
926 error = generic_elf_coredump(lp, sig, fp, limit);
930 fp->f_ops = &badfileops;
937 generic_elf_coredump(struct lwp *lp, int sig, struct file *fp, off_t limit)
939 struct proc *p = lp->lwp_proc;
940 struct ucred *cred = p->p_ucred;
942 struct sseg_closure seginfo;
943 struct elf_buf target;
946 kprintf("can't dump core - null fp\n");
949 * Size the program segments
953 each_segment(p, cb_size_segment, &seginfo, 1);
956 * Calculate the size of the core file header area by making
957 * a dry run of generating it. Nothing is written, but the
958 * size is calculated.
960 bzero(&target, sizeof(target));
961 __elfN(puthdr)(lp, &target, sig, DRYRUN, seginfo.count, fp);
963 if (target.off + seginfo.vsize >= limit)
967 	 * Allocate memory for building the header, fill it up, and write it out.
970 target.off_max = target.off;
972 target.buf = kmalloc(target.off_max, M_TEMP, M_WAITOK|M_ZERO);
974 error = __elfN(corehdr)(lp, sig, fp, cred, seginfo.count, &target);
976 /* Write the contents of all of the writable segments. */
982 php = (Elf_Phdr *)(target.buf + sizeof(Elf_Ehdr)) + 1;
983 for (i = 0; i < seginfo.count; i++) {
984 error = fp_write(fp, (caddr_t)php->p_vaddr,
985 php->p_filesz, &nbytes, UIO_USERSPACE);
991 kfree(target.buf, M_TEMP);
997 * A callback for each_segment() to write out the segment's
998 * program header entry.
1001 cb_put_phdr(vm_map_entry_t entry, void *closure)
1003 struct phdr_closure *phc = closure;
1004 Elf_Phdr *phdr = phc->phdr;
1006 if (phc->phdr == phc->phdr_max)
1009 phc->offset = round_page(phc->offset);
1011 phdr->p_type = PT_LOAD;
1012 phdr->p_offset = phc->offset;
1013 phdr->p_vaddr = entry->start;
1015 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1016 phdr->p_align = PAGE_SIZE;
1018 if (entry->protection & VM_PROT_READ)
1019 phdr->p_flags |= PF_R;
1020 if (entry->protection & VM_PROT_WRITE)
1021 phdr->p_flags |= PF_W;
1022 if (entry->protection & VM_PROT_EXECUTE)
1023 phdr->p_flags |= PF_X;
1025 phc->offset += phdr->p_filesz;
1031 * A callback for each_segment() to gather information about
1032 * the number of segments and their total size.
1035 cb_size_segment(vm_map_entry_t entry, void *closure)
1037 struct sseg_closure *ssc = closure;
1040 ssc->vsize += entry->end - entry->start;
1045 * A callback for each_segment() to gather information about
1046 * the number of vnode-backed (text) segments.
1049 cb_fpcount_segment(vm_map_entry_t entry, void *closure)
1051 int *count = closure;
1054 if (entry->object.vm_object->type == OBJT_VNODE) {
1055 vp = (struct vnode *)entry->object.vm_object->handle;
1056 if ((vp->v_flag & VCKPT) && curproc->p_textvp == vp)
1064 cb_put_fp(vm_map_entry_t entry, void *closure)
1066 struct fp_closure *fpc = closure;
1067 struct vn_hdr *vnh = fpc->vnh;
1068 Elf_Phdr *phdr = &vnh->vnh_phdr;
1073 * If an entry represents a vnode then write out a file handle.
1075 * If we are checkpointing a checkpoint-restored program we do
1076 * NOT record the filehandle for the old checkpoint vnode (which
1077 * is mapped all over the place). Instead we rely on the fact
1078 * that a checkpoint-restored program does not mmap() the checkpt
1079 * vnode NOCORE, so its contents will be written out to the
1080 * new checkpoint file. This is necessary because the 'old'
1081 * checkpoint file is typically destroyed when a new one is created
1082 * and thus cannot be used to restore the new checkpoint.
1084 * Theoretically we could create a chain of checkpoint files and
1085 * operate the checkpointing operation kinda like an incremental
1086 * checkpoint, but a checkpoint restore would then likely wind up
1087 * referencing many prior checkpoint files and that is a bit over
1088 * the top for the purpose of the checkpoint API.
1090 if (entry->object.vm_object->type == OBJT_VNODE) {
1091 vp = (struct vnode *)entry->object.vm_object->handle;
1092 if ((vp->v_flag & VCKPT) && curproc->p_textvp == vp)
1094 if (vnh == fpc->vnh_max)
1098 vnh->vnh_fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
1099 error = VFS_VPTOFH(vp, &vnh->vnh_fh.fh_fid);
1101 char *freepath, *fullpath;
1103 if (vn_fullpath(curproc, vp, &fullpath, &freepath, 0)) {
1104 kprintf("Warning: coredump, error %d: cannot store file handle for vnode %p\n", error, vp);
1106 kprintf("Warning: coredump, error %d: cannot store file handle for %s\n", error, fullpath);
1107 kfree(freepath, M_TEMP);
1112 phdr->p_type = PT_LOAD;
1113 phdr->p_offset = 0; /* not written to core */
1114 phdr->p_vaddr = entry->start;
1116 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1117 phdr->p_align = PAGE_SIZE;
1119 if (entry->protection & VM_PROT_READ)
1120 phdr->p_flags |= PF_R;
1121 if (entry->protection & VM_PROT_WRITE)
1122 phdr->p_flags |= PF_W;
1123 if (entry->protection & VM_PROT_EXECUTE)
1124 phdr->p_flags |= PF_X;
1132 * For each writable segment in the process's memory map, call the given
1133 * function with a pointer to the map entry and some arbitrary
1134 * caller-supplied data.
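 *
 * With 'writable' non-zero only mappings eligible for dumping are visited
 * (inaccessible and NOCORE entries are skipped); with it zero every normal
 * mapping is offered, which is how the checkpoint code enumerates
 * vnode-backed segments for file handles.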
1137 each_segment(struct proc *p, segment_callback func, void *closure, int writable)
1140 vm_map_t map = &p->p_vmspace->vm_map;
1141 vm_map_entry_t entry;
1143 for (entry = map->header.next; error == 0 && entry != &map->header;
1144 entry = entry->next) {
1148 		 * Don't dump inaccessible mappings, deal with legacy coredump mode.
1151 * Note that read-only segments related to the elf binary
1152 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1153 * need to arbitrarily ignore such segments.
1155 if (elf_legacy_coredump) {
1156 if (writable && (entry->protection & VM_PROT_RW) != VM_PROT_RW)
1159 if (writable && (entry->protection & VM_PROT_ALL) == 0)
1164 		 * Don't include a memory segment in the coredump if
1165 		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in madvise(2).
1168 * Currently we only dump normal VM object maps. We do
1169 * not dump submaps or virtual page tables.
1171 if (writable && (entry->eflags & MAP_ENTRY_NOCOREDUMP))
1173 if (entry->maptype != VM_MAPTYPE_NORMAL)
1175 if ((obj = entry->object.vm_object) == NULL)
1178 /* Find the deepest backing object. */
1179 while (obj->backing_object != NULL)
1180 obj = obj->backing_object;
1182 /* Ignore memory-mapped devices and such things. */
1183 if (obj->type != OBJT_DEFAULT &&
1184 obj->type != OBJT_SWAP &&
1185 obj->type != OBJT_VNODE)
1188 error = (*func)(entry, closure);
1195 target_reserve(elf_buf_t target, size_t bytes, int *error)
1200 if (target->off + bytes > target->off_max)
1203 res = target->buf + target->off;
1205 target->off += bytes;
1210 * Write the core file header to the file, including padding up to
1211 * the page boundary.
1214 __elfN(corehdr)(struct lwp *lp, int sig, struct file *fp, struct ucred *cred,
1215 int numsegs, elf_buf_t target)
1221 * Fill in the header. The fp is passed so we can detect and flag
1222 * a checkpoint file pointer within the core file itself, because
1223 * it may not be restored from the same file handle.
1225 error = __elfN(puthdr)(lp, target, sig, WRITE, numsegs, fp);
1227 /* Write it to the core file. */
1229 error = fp_write(fp, target->buf, target->off, &nbytes,
1236 __elfN(puthdr)(struct lwp *lp, elf_buf_t target, int sig, enum putmode mode,
1237 int numsegs, struct file *fp)
1239 struct proc *p = lp->lwp_proc;
1247 ehdr = target_reserve(target, sizeof(Elf_Ehdr), &error);
1249 phoff = target->off;
1250 phdr = target_reserve(target, (numsegs + 1) * sizeof(Elf_Phdr), &error);
1252 noteoff = target->off;
1254 elf_putallnotes(lp, target, sig, mode);
1255 notesz = target->off - noteoff;
1258 * put extra cruft for dumping process state here
1259 	 * - we really want it to be before all the program mappings
1261 * - we just need to update the offset accordingly
1262 * and GDB will be none the wiser.
1265 error = elf_puttextvp(p, target);
1267 error = elf_putsigs(lp, target);
1269 error = elf_putfiles(p, target, fp);
1272 * Align up to a page boundary for the program segments. The
1273 	 * actual data will be written to the output file, not to elf_buf_t,
1274 * so we do not have to do any further bounds checking.
1276 target->off = round_page(target->off);
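	/*
	 * The header area is now laid out as: Elf_Ehdr, the program header
	 * table (one PT_NOTE entry plus one PT_LOAD per dumped segment),
	 * the note data, and the checkpoint records (vm info, text vnodes,
	 * signal state, open files), padded to a page boundary.  The
	 * segment contents themselves are written after this area by the
	 * caller.
	 */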
1277 if (error == 0 && ehdr != NULL) {
1279 * Fill in the ELF header.
1281 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1282 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1283 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1284 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1285 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1286 ehdr->e_ident[EI_DATA] = ELF_DATA;
1287 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1288 ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
1289 ehdr->e_ident[EI_ABIVERSION] = 0;
1290 ehdr->e_ident[EI_PAD] = 0;
1291 ehdr->e_type = ET_CORE;
1292 ehdr->e_machine = ELF_ARCH;
1293 ehdr->e_version = EV_CURRENT;
1295 ehdr->e_phoff = phoff;
1297 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1298 ehdr->e_phentsize = sizeof(Elf_Phdr);
1299 ehdr->e_phnum = numsegs + 1;
1300 ehdr->e_shentsize = sizeof(Elf_Shdr);
1302 ehdr->e_shstrndx = SHN_UNDEF;
1304 if (error == 0 && phdr != NULL) {
1306 * Fill in the program header entries.
1308 struct phdr_closure phc;
1310 		/* The note segment. */
1311 phdr->p_type = PT_NOTE;
1312 phdr->p_offset = noteoff;
1315 phdr->p_filesz = notesz;
1321 /* All the writable segments from the program. */
1323 phc.phdr_max = phdr + numsegs;
1324 phc.offset = target->off;
1325 each_segment(p, cb_put_phdr, &phc, 1);
1331 * Append core dump notes to the target ELF buffer, or simply update the
1332 * target size if a dry run was selected.
1335 elf_putallnotes(struct lwp *corelp, elf_buf_t target, int sig,
1338 struct proc *p = corelp->lwp_proc;
1342 prfpregset_t fpregs;
1346 prfpregset_t *fpregs;
1351 	 * Allocate temporary storage for the notes on the heap to avoid stack overflow.
1353 if (mode != DRYRUN) {
1354 tmpdata = kmalloc(sizeof(*tmpdata), M_TEMP, M_ZERO | M_WAITOK);
1355 status = &tmpdata->status;
1356 fpregs = &tmpdata->fpregs;
1357 psinfo = &tmpdata->psinfo;
1366 * Append LWP-agnostic note.
1368 if (mode != DRYRUN) {
1369 psinfo->pr_version = PRPSINFO_VERSION;
1370 psinfo->pr_psinfosz = sizeof(prpsinfo_t);
1371 strlcpy(psinfo->pr_fname, p->p_comm,
1372 sizeof(psinfo->pr_fname));
1374 		 * XXX - We don't fill in the command line arguments properly yet.
1377 strlcpy(psinfo->pr_psargs, p->p_comm,
1378 sizeof(psinfo->pr_psargs));
1381 __elfN(putnote)(target, "CORE", NT_PRPSINFO, psinfo, sizeof *psinfo);
1386 * Append first note for LWP that triggered core so that it is
1387 * the selected one when the debugger starts.
1389 if (mode != DRYRUN) {
1390 status->pr_version = PRSTATUS_VERSION;
1391 status->pr_statussz = sizeof(prstatus_t);
1392 status->pr_gregsetsz = sizeof(gregset_t);
1393 status->pr_fpregsetsz = sizeof(fpregset_t);
1394 status->pr_osreldate = osreldate;
1395 status->pr_cursig = sig;
1397 		 * XXX GDB needs a unique pr_pid for each LWP and does
1398 		 * not support pr_pid==0, but lwp_tid can be 0, so hack a unique pr_pid.
1401 status->pr_pid = corelp->lwp_tid;
1402 fill_regs(corelp, &status->pr_reg);
1403 fill_fpregs(corelp, fpregs);
1406 __elfN(putnote)(target, "CORE", NT_PRSTATUS, status, sizeof *status);
1410 __elfN(putnote)(target, "CORE", NT_FPREGSET, fpregs, sizeof *fpregs);
1415 * Then append notes for other LWPs.
1417 FOREACH_LWP_IN_PROC(lp, p) {
1420 /* skip lwps being created */
1421 if (lp->lwp_thread == NULL)
1423 if (mode != DRYRUN) {
1424 status->pr_pid = lp->lwp_tid;
1425 fill_regs(lp, &status->pr_reg);
1426 fill_fpregs(lp, fpregs);
1428 error = __elfN(putnote)(target, "CORE", NT_PRSTATUS,
1429 status, sizeof *status);
1432 error = __elfN(putnote)(target, "CORE", NT_FPREGSET,
1433 fpregs, sizeof *fpregs);
1439 if (tmpdata != NULL)
1440 kfree(tmpdata, M_TEMP);
1445 * Generate a note sub-structure.
1447 * NOTE: 4-byte alignment.
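 * Each record is the Elf_Note header, the name padded to a 4-byte
 * boundary, then the descriptor padded the same way; e.g. the name
 * "CORE" (n_namesz 5) occupies 8 bytes on disk.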
1450 __elfN(putnote)(elf_buf_t target, const char *name, int type,
1451 const void *desc, size_t descsz)
1457 note.n_namesz = strlen(name) + 1;
1458 note.n_descsz = descsz;
1460 dst = target_reserve(target, sizeof(note), &error);
1462 		bcopy(&note, dst, sizeof note);
1463 dst = target_reserve(target, note.n_namesz, &error);
1465 bcopy(name, dst, note.n_namesz);
1466 target->off = roundup2(target->off, sizeof(Elf_Word));
1467 dst = target_reserve(target, note.n_descsz, &error);
1469 bcopy(desc, dst, note.n_descsz);
1470 target->off = roundup2(target->off, sizeof(Elf_Word));
1476 elf_putsigs(struct lwp *lp, elf_buf_t target)
1478 /* XXX lwp handle more than one lwp */
1479 struct proc *p = lp->lwp_proc;
1481 struct ckpt_siginfo *csi;
1483 csi = target_reserve(target, sizeof(struct ckpt_siginfo), &error);
1485 csi->csi_ckptpisz = sizeof(struct ckpt_siginfo);
1486 bcopy(p->p_sigacts, &csi->csi_sigacts, sizeof(*p->p_sigacts));
1487 bcopy(&p->p_realtimer, &csi->csi_itimerval, sizeof(struct itimerval));
1488 bcopy(&lp->lwp_sigmask, &csi->csi_sigmask,
1490 csi->csi_sigparent = p->p_sigparent;
1496 elf_putfiles(struct proc *p, elf_buf_t target, struct file *ckfp)
1500 struct ckpt_filehdr *cfh = NULL;
1501 struct ckpt_fileinfo *cfi;
1505 * the duplicated loop is gross, but it was the only way
1506 * to eliminate uninitialized variable warnings
1508 cfh = target_reserve(target, sizeof(struct ckpt_filehdr), &error);
1510 cfh->cfh_nfiles = 0;
1514 * ignore STDIN/STDERR/STDOUT.
1516 for (i = 3; error == 0 && i < p->p_fd->fd_nfiles; i++) {
1517 fp = holdfp(p->p_fd, i, -1);
1521 * XXX Only checkpoint vnodes for now.
1523 if (fp->f_type != DTYPE_VNODE) {
1527 cfi = target_reserve(target, sizeof(struct ckpt_fileinfo),
1533 cfi->cfi_index = -1;
1534 cfi->cfi_type = fp->f_type;
1535 cfi->cfi_flags = fp->f_flag;
1536 cfi->cfi_offset = fp->f_offset;
1537 cfi->cfi_ckflags = 0;
1540 cfi->cfi_ckflags |= CKFIF_ISCKPTFD;
1541 /* f_count and f_msgcount should not be saved/restored */
1542 /* XXX save cred info */
1544 switch(fp->f_type) {
1546 vp = (struct vnode *)fp->f_data;
1548 * it looks like a bug in ptrace is marking
1549 * a non-vnode as a vnode - until we find the
1550 * root cause this will at least prevent
1551 * further panics from truss
1553 if (vp == NULL || vp->v_mount == NULL)
1557 cfi->cfi_fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
1558 error = VFS_VPTOFH(vp, &cfi->cfi_fh.fh_fid);
1569 elf_puttextvp(struct proc *p, elf_buf_t target)
1573 struct fp_closure fpc;
1574 struct ckpt_vminfo *vminfo;
1576 vminfo = target_reserve(target, sizeof(struct ckpt_vminfo), &error);
1577 if (vminfo != NULL) {
1578 vminfo->cvm_dsize = p->p_vmspace->vm_dsize;
1579 vminfo->cvm_tsize = p->p_vmspace->vm_tsize;
1580 vminfo->cvm_daddr = p->p_vmspace->vm_daddr;
1581 vminfo->cvm_taddr = p->p_vmspace->vm_taddr;
1585 vn_count = target_reserve(target, sizeof(int), &error);
1586 if (target->buf != NULL) {
1587 fpc.vnh = (struct vn_hdr *)(target->buf + target->off);
1588 fpc.vnh_max = fpc.vnh +
1589 (target->off_max - target->off) / sizeof(struct vn_hdr);
1590 error = each_segment(p, cb_put_fp, &fpc, 0);
1592 *vn_count = fpc.count;
1594 error = each_segment(p, cb_fpcount_segment, &fpc.count, 0);
1596 target->off += fpc.count * sizeof(struct vn_hdr);
1601 * Try to find the appropriate ABI-note section for checknote and
1602 * fetch the binary's osreldate from the ELF OSABI note.  Only the
1603 * first page of the image is searched, the same as for headers.
1606 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
1609 const Elf_Note *note, *note0, *note_end;
1610 const Elf_Phdr *phdr, *pnote;
1611 const Elf_Ehdr *hdr;
1612 const char *note_name;
1616 hdr = (const Elf_Ehdr *)imgp->image_header;
1617 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
1619 for (i = 0; i < hdr->e_phnum; i++) {
1620 if (phdr[i].p_type == PT_NOTE) {
1626 if (pnote == NULL || pnote->p_offset >= PAGE_SIZE ||
1627 pnote->p_offset + pnote->p_filesz >= PAGE_SIZE)
1630 note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
1631 note_end = (const Elf_Note *)(imgp->image_header +
1632 pnote->p_offset + pnote->p_filesz);
1633 for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
1634 if (!aligned(note, Elf32_Addr))
1636 if (note->n_namesz != checknote->hdr.n_namesz ||
1637 note->n_descsz != checknote->hdr.n_descsz ||
1638 note->n_type != checknote->hdr.n_type)
1640 note_name = (const char *)(note + 1);
1641 if (strncmp(checknote->vendor, note_name,
1642 checknote->hdr.n_namesz) != 0)
1646 		 * Fetch the binary's osreldate
1647 		 * from the ELF OSABI note if necessary.
1649 if ((checknote->flags & BN_CAN_FETCH_OSREL) != 0 &&
1651 *osrel = *(const int32_t *) (note_name +
1652 roundup2(checknote->hdr.n_namesz,
1653 sizeof(Elf32_Addr)));
1657 note = (const Elf_Note *)((const char *)(note + 1) +
1658 roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1659 roundup2(note->n_descsz, sizeof(Elf32_Addr)));
1666 * Tell kern_execve.c about it, with a little help from the linker.
1668 #if defined(__x86_64__)
1669 static struct execsw elf_execsw = {exec_elf64_imgact, "ELF64"};
1670 EXEC_SET_ORDERED(elf64, elf_execsw, SI_ORDER_FIRST);
1671 #else /* i386 assumed */
1672 static struct execsw elf_execsw = {exec_elf32_imgact, "ELF32"};
1673 EXEC_SET_ORDERED(elf32, elf_execsw, SI_ORDER_FIRST);