Commit | Line | Data |
---|---|---|
984263bc | 1 | /*- |
315b8b8b | 2 | * Copyright (c) 2000 David O'Brien |
08d72226 | 3 | * Copyright (c) 1995-1996 Søren Schmidt |
984263bc MD |
4 | * Copyright (c) 1996 Peter Wemm |
5 | * All rights reserved. | |
6 | * | |
7 | * Redistribution and use in source and binary forms, with or without | |
8 | * modification, are permitted provided that the following conditions | |
9 | * are met: | |
10 | * 1. Redistributions of source code must retain the above copyright | |
11 | * notice, this list of conditions and the following disclaimer | |
12 | * in this position and unchanged. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. The name of the author may not be used to endorse or promote products | |
4d9022e3 | 17 | * derived from this software without specific prior written permission |
984263bc MD |
18 | * |
19 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
20 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
21 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
22 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
23 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
24 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
28 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
29 | * | |
30 | * $FreeBSD: src/sys/kern/imgact_elf.c,v 1.73.2.13 2002/12/28 19:49:41 dillon Exp $ | |
31 | */ | |
32 | ||
33 | #include <sys/param.h> | |
34 | #include <sys/exec.h> | |
35 | #include <sys/fcntl.h> | |
731100e5 | 36 | #include <sys/file.h> |
984263bc MD |
37 | #include <sys/imgact.h> |
38 | #include <sys/imgact_elf.h> | |
39 | #include <sys/kernel.h> | |
40 | #include <sys/malloc.h> | |
41 | #include <sys/mman.h> | |
dadab5e9 MD |
42 | #include <sys/systm.h> |
43 | #include <sys/proc.h> | |
fad57d0e | 44 | #include <sys/nlookup.h> |
984263bc | 45 | #include <sys/pioctl.h> |
984263bc MD |
46 | #include <sys/procfs.h> |
47 | #include <sys/resourcevar.h> | |
48 | #include <sys/signalvar.h> | |
49 | #include <sys/stat.h> | |
50 | #include <sys/syscall.h> | |
51 | #include <sys/sysctl.h> | |
52 | #include <sys/sysent.h> | |
984263bc | 53 | #include <sys/vnode.h> |
8ba5f7ef | 54 | #include <sys/eventhandler.h> |
984263bc | 55 | |
5c5185ae SG |
56 | #include <cpu/lwbuf.h> |
57 | ||
984263bc MD |
58 | #include <vm/vm.h> |
59 | #include <vm/vm_kern.h> | |
60 | #include <vm/vm_param.h> | |
61 | #include <vm/pmap.h> | |
62 | #include <sys/lock.h> | |
63 | #include <vm/vm_map.h> | |
64 | #include <vm/vm_object.h> | |
65 | #include <vm/vm_extern.h> | |
66 | ||
67 | #include <machine/elf.h> | |
68 | #include <machine/md_var.h> | |
731100e5 MD |
69 | #include <sys/mount.h> |
70 | #include <sys/ckpt.h> | |
984263bc | 71 | |
315b8b8b | 72 | #define OLD_EI_BRAND 8 |
3f7b7260 | 73 | #define truncps(va,ps) rounddown2(va, ps) |
315b8b8b | 74 | #define aligned(a,t) (truncps((u_long)(a), sizeof(t)) == (u_long)(a)) |
984263bc | 75 | |
315b8b8b JM |
76 | static int __elfN(check_header)(const Elf_Ehdr *hdr); |
77 | static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp, | |
78 | const char *interp, int32_t *osrel); | |
79 | static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr, | |
402ed7e1 | 80 | u_long *entry); |
315b8b8b | 81 | static int __elfN(load_section)(struct proc *p, |
984263bc MD |
82 | struct vmspace *vmspace, struct vnode *vp, |
83 | vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz, | |
402ed7e1 | 84 | vm_prot_t prot); |
315b8b8b | 85 | static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp); |
f2000797 JM |
86 | static boolean_t __elfN(bsd_trans_osrel)(const Elf_Note *note, |
87 | int32_t *osrel); | |
315b8b8b JM |
88 | static boolean_t __elfN(check_note)(struct image_params *imgp, |
89 | Elf_Brandnote *checknote, int32_t *osrel); | |
565fefef JM |
90 | static vm_prot_t __elfN(trans_prot)(Elf_Word); |
91 | static Elf_Word __elfN(untrans_prot)(vm_prot_t); | |
9d35f29f JM |
92 | static boolean_t check_PT_NOTE(struct image_params *imgp, |
93 | Elf_Brandnote *checknote, int32_t *osrel, const Elf_Phdr * pnote); | |
99aed3e4 JM |
94 | static boolean_t extract_interpreter(struct image_params *imgp, |
95 | const Elf_Phdr *pinterpreter, char *data); | |
9f95d105 | 96 | static u_long pie_base_hint(struct proc *p); |
984263bc | 97 | |
984263bc | 98 | static int elf_legacy_coredump = 0; |
315b8b8b | 99 | static int __elfN(fallback_brand) = -1; |
9f95d105 | 100 | static int elf_pie_base_mmap = 0; |
1799f7b1 | 101 | #if defined(__x86_64__) |
315b8b8b JM |
102 | SYSCTL_NODE(_kern, OID_AUTO, elf64, CTLFLAG_RW, 0, ""); |
103 | SYSCTL_INT(_debug, OID_AUTO, elf64_legacy_coredump, CTLFLAG_RW, | |
104 | &elf_legacy_coredump, 0, "legacy coredump mode"); | |
105 | SYSCTL_INT(_kern_elf64, OID_AUTO, fallback_brand, CTLFLAG_RW, | |
106 | &elf64_fallback_brand, 0, "ELF64 brand of last resort"); | |
107 | TUNABLE_INT("kern.elf64.fallback_brand", &elf64_fallback_brand); | |
9f95d105 MD |
108 | SYSCTL_INT(_kern_elf64, OID_AUTO, pie_base_mmap, CTLFLAG_RW, |
109 | &elf_pie_base_mmap, 0, | |
110 | "choose a base address for PIE as if it is mapped with mmap()"); | |
111 | TUNABLE_INT("kern.elf64.pie_base_mmap", &elf_pie_base_mmap); | |
315b8b8b JM |
112 | #else /* i386 assumed */ |
113 | SYSCTL_NODE(_kern, OID_AUTO, elf32, CTLFLAG_RW, 0, ""); | |
114 | SYSCTL_INT(_debug, OID_AUTO, elf32_legacy_coredump, CTLFLAG_RW, | |
115 | &elf_legacy_coredump, 0, "legacy coredump mode"); | |
116 | SYSCTL_INT(_kern_elf32, OID_AUTO, fallback_brand, CTLFLAG_RW, | |
117 | &elf32_fallback_brand, 0, "ELF32 brand of last resort"); | |
118 | TUNABLE_INT("kern.elf32.fallback_brand", &elf32_fallback_brand); | |
9f95d105 MD |
119 | SYSCTL_INT(_kern_elf32, OID_AUTO, pie_base_mmap, CTLFLAG_RW, |
120 | &elf_pie_base_mmap, 0, | |
121 | "choose a base address for PIE as if it is mapped with mmap()"); | |
122 | TUNABLE_INT("kern.elf32.pie_base_mmap", &elf_pie_base_mmap); | |
1799f7b1 | 123 | #endif |
984263bc | 124 | |
315b8b8b | 125 | static Elf_Brandinfo *elf_brand_list[MAX_BRANDS]; |
984263bc | 126 | |
315b8b8b | 127 | static const char DRAGONFLY_ABI_VENDOR[] = "DragonFly"; |
8d0415e1 | 128 | |
315b8b8b JM |
129 | Elf_Brandnote __elfN(dragonfly_brandnote) = { |
130 | .hdr.n_namesz = sizeof(DRAGONFLY_ABI_VENDOR), | |
131 | .hdr.n_descsz = sizeof(int32_t), | |
132 | .hdr.n_type = 1, | |
133 | .vendor = DRAGONFLY_ABI_VENDOR, | |
f2000797 JM |
134 | .flags = BN_TRANSLATE_OSREL, |
135 | .trans_osrel = __elfN(bsd_trans_osrel), | |
315b8b8b | 136 | }; |
8d0415e1 | 137 | |
984263bc | 138 | int |
315b8b8b | 139 | __elfN(insert_brand_entry)(Elf_Brandinfo *entry) |
984263bc MD |
140 | { |
141 | int i; | |
142 | ||
315b8b8b | 143 | for (i = 0; i < MAX_BRANDS; i++) { |
984263bc MD |
144 | if (elf_brand_list[i] == NULL) { |
145 | elf_brand_list[i] = entry; | |
146 | break; | |
147 | } | |
148 | } | |
315b8b8b JM |
149 | if (i == MAX_BRANDS) { |
150 | uprintf("WARNING: %s: could not insert brandinfo entry: %p\n", | |
151 | __func__, entry); | |
152 | return (-1); | |
153 | } | |
154 | return (0); | |
984263bc MD |
155 | } |
156 | ||
157 | int | |
315b8b8b | 158 | __elfN(remove_brand_entry)(Elf_Brandinfo *entry) |
984263bc MD |
159 | { |
160 | int i; | |
161 | ||
315b8b8b | 162 | for (i = 0; i < MAX_BRANDS; i++) { |
984263bc MD |
163 | if (elf_brand_list[i] == entry) { |
164 | elf_brand_list[i] = NULL; | |
165 | break; | |
166 | } | |
167 | } | |
168 | if (i == MAX_BRANDS) | |
315b8b8b JM |
169 | return (-1); |
170 | return (0); | |
984263bc MD |
171 | } |
172 | ||
0e5797fe MD |
173 | /* |
174 | * Check if an elf brand is being used anywhere in the system. | |
175 | * | |
7bd34050 | 176 | * Used by the linux emulation module unloader. This isn't safe from |
0e5797fe MD |
177 | * races. |
178 | */ | |
179 | struct elf_brand_inuse_info { | |
180 | int rval; | |
181 | Elf_Brandinfo *entry; | |
182 | }; | |
183 | ||
184 | static int elf_brand_inuse_callback(struct proc *p, void *data); | |
185 | ||
984263bc | 186 | int |
315b8b8b | 187 | __elfN(brand_inuse)(Elf_Brandinfo *entry) |
984263bc | 188 | { |
0e5797fe | 189 | struct elf_brand_inuse_info info; |
984263bc | 190 | |
0e5797fe MD |
191 | info.rval = FALSE; |
192 | info.entry = entry; | |
586c4308 | 193 | allproc_scan(elf_brand_inuse_callback, &info, 0); |
0e5797fe MD |
194 | return (info.rval); |
195 | } | |
196 | ||
197 | static | |
198 | int | |
199 | elf_brand_inuse_callback(struct proc *p, void *data) | |
200 | { | |
201 | struct elf_brand_inuse_info *info = data; | |
984263bc | 202 | |
0e5797fe MD |
203 | if (p->p_sysent == info->entry->sysvec) { |
204 | info->rval = TRUE; | |
315b8b8b | 205 | return (-1); |
0e5797fe | 206 | } |
315b8b8b | 207 | return (0); |
984263bc MD |
208 | } |
209 | ||
210 | static int | |
315b8b8b | 211 | __elfN(check_header)(const Elf_Ehdr *hdr) |
984263bc | 212 | { |
315b8b8b JM |
213 | Elf_Brandinfo *bi; |
214 | int i; | |
215 | ||
984263bc MD |
216 | if (!IS_ELF(*hdr) || |
217 | hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || | |
218 | hdr->e_ident[EI_DATA] != ELF_TARG_DATA || | |
2cc7c579 DR |
219 | hdr->e_ident[EI_VERSION] != EV_CURRENT || |
220 | hdr->e_phentsize != sizeof(Elf_Phdr) || | |
221 | hdr->e_ehsize != sizeof(Elf_Ehdr) || | |
222 | hdr->e_version != ELF_TARG_VER) | |
315b8b8b | 223 | return (ENOEXEC); |
7c34d798 SS |
224 | |
225 | /* | |
315b8b8b | 226 | * Make sure we have at least one brand for this machine. |
7c34d798 | 227 | */ |
7c34d798 | 228 | |
315b8b8b JM |
229 | for (i = 0; i < MAX_BRANDS; i++) { |
230 | bi = elf_brand_list[i]; | |
231 | if (bi != NULL && bi->machine == hdr->e_machine) | |
7c34d798 | 232 | break; |
7c34d798 | 233 | } |
315b8b8b JM |
234 | if (i == MAX_BRANDS) |
235 | return (ENOEXEC); | |
7c34d798 | 236 | |
315b8b8b | 237 | return (0); |
7c34d798 SS |
238 | } |
239 | ||
984263bc | 240 | static int |
315b8b8b | 241 | __elfN(load_section)(struct proc *p, struct vmspace *vmspace, struct vnode *vp, |
57f7b636 MD |
242 | vm_offset_t offset, caddr_t vmaddr, size_t memsz, |
243 | size_t filsz, vm_prot_t prot) | |
984263bc MD |
244 | { |
245 | size_t map_len; | |
246 | vm_offset_t map_addr; | |
247 | int error, rv, cow; | |
a108bf71 | 248 | int count; |
ce94514e | 249 | int shared; |
984263bc MD |
250 | size_t copy_len; |
251 | vm_object_t object; | |
252 | vm_offset_t file_addr; | |
984263bc | 253 | |
7540ab49 | 254 | object = vp->v_object; |
984263bc MD |
255 | error = 0; |
256 | ||
ce94514e MD |
257 | /* |
258 | * In most cases we will be able to use a shared lock on the | |
259 | * object we are inserting into the map. The lock will be | |
260 | * upgraded in situations where new VM pages must be allocated. | |
261 | */ | |
501747bf MD |
262 | vm_object_hold_shared(object); |
263 | shared = 1; | |
b12defdc | 264 | |
984263bc MD |
265 | /* |
266 | * It's necessary to fail if the filsz + offset taken from the | |
267 | * header is greater than the actual file pager object's size. | |
268 | * If we were to allow this, then the vm_map_find() below would | |
269 | * walk right off the end of the file object and into the ether. | |
270 | * | |
271 | * While I'm here, might as well check for something else that | |
272 | * is invalid: filsz cannot be greater than memsz. | |
273 | */ | |
57f7b636 | 274 | if ((off_t)filsz + offset > vp->v_filesize || filsz > memsz) { |
984263bc | 275 | uprintf("elf_load_section: truncated ELF file\n"); |
b12defdc | 276 | vm_object_drop(object); |
984263bc MD |
277 | return (ENOEXEC); |
278 | } | |
279 | ||
280 | map_addr = trunc_page((vm_offset_t)vmaddr); | |
281 | file_addr = trunc_page(offset); | |
282 | ||
283 | /* | |
284 | * We have two choices. We can either clear the data in the last page | |
285 | * of an oversized mapping, or we can start the anon mapping a page | |
286 | * early and copy the initialized data into that first page. We | |
288 | * choose the second. |
288 | */ | |
289 | if (memsz > filsz) | |
290 | map_len = trunc_page(offset+filsz) - file_addr; | |
291 | else | |
292 | map_len = round_page(offset+filsz) - file_addr; | |
293 | ||
294 | if (map_len != 0) { | |
b12defdc | 295 | vm_object_reference_locked(object); |
984263bc MD |
296 | |
297 | /* cow flags: don't dump readonly sections in core */ | |
ce94514e MD |
298 | cow = MAP_COPY_ON_WRITE | MAP_PREFAULT; |
299 | if ((prot & VM_PROT_WRITE) == 0) | |
300 | cow |= MAP_DISABLE_COREDUMP; | |
301 | if (shared == 0) | |
302 | cow |= MAP_PREFAULT_RELOCK; | |
984263bc | 303 | |
a108bf71 | 304 | count = vm_map_entry_reserve(MAP_RESERVE_COUNT); |
984263bc | 305 | vm_map_lock(&vmspace->vm_map); |
a108bf71 | 306 | rv = vm_map_insert(&vmspace->vm_map, &count, |
0adbcbd6 | 307 | object, NULL, |
64b5a8a5 | 308 | file_addr, NULL, /* file offset */ |
984263bc MD |
309 | map_addr, /* virtual start */ |
310 | map_addr + map_len,/* virtual end */ | |
1b874851 | 311 | VM_MAPTYPE_NORMAL, |
3091de50 MD |
312 | VM_SUBSYS_IMGACT, |
313 | prot, VM_PROT_ALL, cow); | |
984263bc | 314 | vm_map_unlock(&vmspace->vm_map); |
a108bf71 | 315 | vm_map_entry_release(count); |
ce94514e MD |
316 | |
317 | /* | |
318 | * NOTE: Object must have a hold ref when calling | |
319 | * vm_object_deallocate(). | |
320 | */ | |
984263bc | 321 | if (rv != KERN_SUCCESS) { |
5b329e62 | 322 | vm_object_deallocate_locked(object); |
b12defdc | 323 | vm_object_drop(object); |
315b8b8b | 324 | return (EINVAL); |
984263bc MD |
325 | } |
326 | ||
327 | /* we can stop now if we've covered it all */ | |
328 | if (memsz == filsz) { | |
b12defdc | 329 | vm_object_drop(object); |
315b8b8b | 330 | return (0); |
984263bc MD |
331 | } |
332 | } | |
333 | ||
984263bc MD |
334 | /* |
335 | * We have to get the remaining bit of the file into the first part | |
336 | * of the oversized map segment. This is normally because the .data | |
337 | * segment in the file is extended to provide bss. It's a neat idea | |
338 | * to try and save a page, but it's a pain in the behind to implement. | |
339 | */ | |
340 | copy_len = (offset + filsz) - trunc_page(offset + filsz); | |
341 | map_addr = trunc_page((vm_offset_t)vmaddr + filsz); | |
342 | map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr; | |
343 | ||
344 | /* This had damn well better be true! */ | |
345 | if (map_len != 0) { | |
a108bf71 | 346 | count = vm_map_entry_reserve(MAP_RESERVE_COUNT); |
984263bc | 347 | vm_map_lock(&vmspace->vm_map); |
a108bf71 | 348 | rv = vm_map_insert(&vmspace->vm_map, &count, |
0adbcbd6 | 349 | NULL, NULL, |
64b5a8a5 | 350 | 0, NULL, |
0adbcbd6 MD |
351 | map_addr, |
352 | map_addr + map_len, | |
1b874851 | 353 | VM_MAPTYPE_NORMAL, |
3091de50 MD |
354 | VM_SUBSYS_IMGACT, |
355 | VM_PROT_ALL, VM_PROT_ALL, 0); | |
984263bc | 356 | vm_map_unlock(&vmspace->vm_map); |
a108bf71 | 357 | vm_map_entry_release(count); |
984263bc | 358 | if (rv != KERN_SUCCESS) { |
b12defdc | 359 | vm_object_drop(object); |
315b8b8b | 360 | return (EINVAL); |
984263bc MD |
361 | } |
362 | } | |
363 | ||
364 | if (copy_len != 0) { | |
5c5185ae | 365 | struct lwbuf *lwb; |
7a683a24 | 366 | struct lwbuf lwb_cache; |
ce94514e | 367 | vm_page_t m; |
18b3f457 MD |
368 | |
369 | m = vm_fault_object_page(object, trunc_page(offset + filsz), | |
501747bf | 370 | VM_PROT_READ, 0, &shared, &error); |
2734d278 | 371 | vm_object_drop(object); |
18b3f457 | 372 | if (m) { |
7a683a24 | 373 | lwb = lwbuf_alloc(m, &lwb_cache); |
5c5185ae | 374 | error = copyout((caddr_t)lwbuf_kva(lwb), |
18b3f457 | 375 | (caddr_t)map_addr, copy_len); |
5c5185ae | 376 | lwbuf_free(lwb); |
18b3f457 | 377 | vm_page_unhold(m); |
984263bc | 378 | } |
2734d278 MD |
379 | } else { |
380 | vm_object_drop(object); | |
984263bc MD |
381 | } |
382 | ||
383 | /* | |
384 | * set it to the specified protection | |
385 | */ | |
2734d278 MD |
386 | if (error == 0) { |
387 | vm_map_protect(&vmspace->vm_map, | |
388 | map_addr, map_addr + map_len, | |
389 | prot, FALSE); | |
390 | } | |
315b8b8b | 391 | return (error); |
984263bc MD |
392 | } |
393 | ||
394 | /* | |
395 | * Load the file "file" into memory. It may be either a shared object | |
396 | * or an executable. | |
397 | * | |
398 | * The "addr" reference parameter is in/out. On entry, it specifies | |
399 | * the address where a shared object should be loaded. If the file is | |
400 | * an executable, this value is ignored. On exit, "addr" specifies | |
401 | * where the file was actually loaded. | |
402 | * | |
403 | * The "entry" reference parameter is out only. On exit, it specifies | |
404 | * the entry point for the loaded file. | |
405 | */ | |
406 | static int | |
315b8b8b | 407 | __elfN(load_file)(struct proc *p, const char *file, u_long *addr, u_long *entry) |
984263bc MD |
408 | { |
409 | struct { | |
fad57d0e | 410 | struct nlookupdata nd; |
de9bb133 | 411 | struct vattr_lite lva; |
984263bc MD |
412 | struct image_params image_params; |
413 | } *tempdata; | |
414 | const Elf_Ehdr *hdr = NULL; | |
415 | const Elf_Phdr *phdr = NULL; | |
fad57d0e | 416 | struct nlookupdata *nd; |
984263bc | 417 | struct vmspace *vmspace = p->p_vmspace; |
de9bb133 | 418 | struct vattr_lite *lvap; |
984263bc | 419 | struct image_params *imgp; |
246693ac | 420 | struct mount *topmnt; |
984263bc MD |
421 | vm_prot_t prot; |
422 | u_long rbase; | |
423 | u_long base_addr = 0; | |
424 | int error, i, numsegs; | |
425 | ||
efda3bd0 | 426 | tempdata = kmalloc(sizeof(*tempdata), M_TEMP, M_WAITOK); |
984263bc | 427 | nd = &tempdata->nd; |
de9bb133 | 428 | lvap = &tempdata->lva; |
984263bc MD |
429 | imgp = &tempdata->image_params; |
430 | ||
431 | /* | |
432 | * Initialize part of the common data | |
433 | */ | |
434 | imgp->proc = p; | |
de9bb133 | 435 | imgp->lvap = lvap; |
984263bc | 436 | imgp->firstpage = NULL; |
18f40545 | 437 | imgp->image_header = NULL; |
fad57d0e | 438 | imgp->vp = NULL; |
984263bc | 439 | |
fad57d0e MD |
440 | error = nlookup_init(nd, file, UIO_SYSSPACE, NLC_FOLLOW); |
441 | if (error == 0) | |
442 | error = nlookup(nd); | |
443 | if (error == 0) | |
12cdc371 MD |
444 | error = cache_vget(&nd->nl_nch, nd->nl_cred, |
445 | LK_SHARED, &imgp->vp); | |
246693ac | 446 | topmnt = nd->nl_nch.mount; |
fad57d0e MD |
447 | nlookup_done(nd); |
448 | if (error) | |
984263bc | 449 | goto fail; |
984263bc MD |
450 | |
451 | /* | |
452 | * Check permissions, modes, uid, etc on the file, and "open" it. | |
453 | */ | |
246693ac | 454 | error = exec_check_permissions(imgp, topmnt); |
984263bc | 455 | if (error) { |
a11aaa81 | 456 | vn_unlock(imgp->vp); |
984263bc MD |
457 | goto fail; |
458 | } | |
459 | ||
460 | error = exec_map_first_page(imgp); | |
461 | /* | |
462 | * Also make certain that the interpreter stays the same, so set | |
463 | * its VTEXT flag, too. | |
464 | */ | |
465 | if (error == 0) | |
2247fe02 | 466 | vsetflags(imgp->vp, VTEXT); |
a11aaa81 | 467 | vn_unlock(imgp->vp); |
984263bc MD |
468 | if (error) |
469 | goto fail; | |
470 | ||
471 | hdr = (const Elf_Ehdr *)imgp->image_header; | |
315b8b8b | 472 | if ((error = __elfN(check_header)(hdr)) != 0) |
984263bc MD |
473 | goto fail; |
474 | if (hdr->e_type == ET_DYN) | |
475 | rbase = *addr; | |
476 | else if (hdr->e_type == ET_EXEC) | |
477 | rbase = 0; | |
478 | else { | |
479 | error = ENOEXEC; | |
480 | goto fail; | |
481 | } | |
482 | ||
315b8b8b JM |
483 | /* Only support headers that fit within first page for now */ |
484 | /* (multiplication of two Elf_Half fields will not overflow) */ | |
984263bc | 485 | if ((hdr->e_phoff > PAGE_SIZE) || |
2cc7c579 | 486 | (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) { |
984263bc MD |
487 | error = ENOEXEC; |
488 | goto fail; | |
489 | } | |
490 | ||
491 | phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); | |
72453240 JM |
492 | if (!aligned(phdr, Elf_Addr)) { |
493 | error = ENOEXEC; | |
494 | goto fail; | |
495 | } | |
984263bc MD |
496 | |
497 | for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) { | |
c350568d JM |
498 | if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) { |
499 | /* Loadable segment */ | |
565fefef | 500 | prot = __elfN(trans_prot)(phdr[i].p_flags); |
315b8b8b | 501 | error = __elfN(load_section)( |
fad57d0e | 502 | p, vmspace, imgp->vp, |
dadab5e9 MD |
503 | phdr[i].p_offset, |
504 | (caddr_t)phdr[i].p_vaddr + | |
505 | rbase, | |
506 | phdr[i].p_memsz, | |
507 | phdr[i].p_filesz, prot); | |
508 | if (error != 0) | |
984263bc MD |
509 | goto fail; |
510 | /* | |
511 | * Establish the base address if this is the | |
512 | * first segment. | |
513 | */ | |
514 | if (numsegs == 0) | |
515 | base_addr = trunc_page(phdr[i].p_vaddr + rbase); | |
516 | numsegs++; | |
517 | } | |
518 | } | |
519 | *addr = base_addr; | |
315b8b8b | 520 | *entry = (unsigned long)hdr->e_entry + rbase; |
984263bc MD |
521 | |
522 | fail: | |
523 | if (imgp->firstpage) | |
524 | exec_unmap_first_page(imgp); | |
fad57d0e MD |
525 | if (imgp->vp) { |
526 | vrele(imgp->vp); | |
527 | imgp->vp = NULL; | |
528 | } | |
efda3bd0 | 529 | kfree(tempdata, M_TEMP); |
984263bc | 530 | |
315b8b8b | 531 | return (error); |
984263bc MD |
532 | } |
533 | ||
315b8b8b JM |
534 | static Elf_Brandinfo * |
535 | __elfN(get_brandinfo)(struct image_params *imgp, const char *interp, | |
536 | int32_t *osrel) | |
537 | { | |
538 | const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header; | |
539 | Elf_Brandinfo *bi; | |
540 | boolean_t ret; | |
541 | int i; | |
542 | ||
543 | /* We support four types of branding -- (1) the ELF EI_OSABI field | |
544 | * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string | |
545 | * branding within the ELF header, (3) path of the `interp_path' field, | |
546 | * and (4) the ".note.ABI-tag" ELF section. | |
547 | */ | |
548 | ||
549 | /* Look for an ".note.ABI-tag" ELF section */ | |
550 | for (i = 0; i < MAX_BRANDS; i++) { | |
551 | bi = elf_brand_list[i]; | |
552 | ||
553 | if (bi == NULL) | |
554 | continue; | |
555 | if (hdr->e_machine == bi->machine && (bi->flags & | |
556 | (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) { | |
557 | ret = __elfN(check_note)(imgp, bi->brand_note, osrel); | |
558 | if (ret) | |
559 | return (bi); | |
560 | } | |
561 | } | |
562 | ||
563 | /* If the executable has a brand, search for it in the brand list. */ | |
564 | for (i = 0; i < MAX_BRANDS; i++) { | |
565 | bi = elf_brand_list[i]; | |
566 | ||
567 | if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) | |
568 | continue; | |
569 | if (hdr->e_machine == bi->machine && | |
570 | (hdr->e_ident[EI_OSABI] == bi->brand || | |
571 | strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND], | |
572 | bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0)) | |
573 | return (bi); | |
574 | } | |
575 | ||
576 | /* Lacking a known brand, search for a recognized interpreter. */ | |
577 | if (interp != NULL) { | |
578 | for (i = 0; i < MAX_BRANDS; i++) { | |
579 | bi = elf_brand_list[i]; | |
580 | ||
581 | if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) | |
582 | continue; | |
583 | if (hdr->e_machine == bi->machine && | |
584 | strcmp(interp, bi->interp_path) == 0) | |
585 | return (bi); | |
586 | } | |
587 | } | |
984263bc | 588 | |
315b8b8b JM |
589 | /* Lacking a recognized interpreter, try the default brand */ |
590 | for (i = 0; i < MAX_BRANDS; i++) { | |
591 | bi = elf_brand_list[i]; | |
592 | ||
593 | if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY) | |
594 | continue; | |
595 | if (hdr->e_machine == bi->machine && | |
596 | __elfN(fallback_brand) == bi->brand) | |
597 | return (bi); | |
598 | } | |
599 | return (NULL); | |
600 | } | |
08d72226 | 601 | |
984263bc | 602 | static int |
315b8b8b | 603 | __CONCAT(exec_,__elfN(imgact))(struct image_params *imgp) |
984263bc MD |
604 | { |
605 | const Elf_Ehdr *hdr = (const Elf_Ehdr *) imgp->image_header; | |
606 | const Elf_Phdr *phdr; | |
72453240 | 607 | Elf_Auxargs *elf_auxargs; |
984263bc MD |
608 | struct vmspace *vmspace; |
609 | vm_prot_t prot; | |
610 | u_long text_size = 0, data_size = 0, total_size = 0; | |
611 | u_long text_addr = 0, data_addr = 0; | |
612 | u_long seg_size, seg_addr; | |
9f95d105 | 613 | u_long addr, baddr, et_dyn_addr = 0, entry = 0, proghdr = 0; |
315b8b8b | 614 | int32_t osrel = 0; |
72453240 | 615 | int error = 0, i, n; |
99aed3e4 JM |
616 | boolean_t failure; |
617 | char *interp = NULL; | |
618 | const char *newinterp = NULL; | |
315b8b8b | 619 | Elf_Brandinfo *brand_info; |
984263bc MD |
620 | char *path; |
621 | ||
622 | /* | |
623 | * Do we have a valid ELF header ? | |
315b8b8b JM |
624 | * |
625 | * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later if a particular | |
626 | * brand doesn't support it. Both DragonFly platforms do by default. | |
984263bc | 627 | */ |
315b8b8b JM |
628 | if (__elfN(check_header)(hdr) != 0 || |
629 | (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN)) | |
630 | return (-1); | |
984263bc MD |
631 | |
632 | /* | |
633 | * From here on down, we return an errno, not -1, as we've | |
634 | * detected an ELF file. | |
635 | */ | |
636 | ||
637 | if ((hdr->e_phoff > PAGE_SIZE) || | |
638 | (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) { | |
639 | /* Only support headers in first page for now */ | |
315b8b8b | 640 | return (ENOEXEC); |
984263bc | 641 | } |
72453240 JM |
642 | phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); |
643 | if (!aligned(phdr, Elf_Addr)) | |
644 | return (ENOEXEC); | |
645 | n = 0; | |
646 | baddr = 0; | |
315b8b8b | 647 | for (i = 0; i < hdr->e_phnum; i++) { |
72453240 JM |
648 | if (phdr[i].p_type == PT_LOAD) { |
649 | if (n == 0) | |
650 | baddr = phdr[i].p_vaddr; | |
651 | n++; | |
652 | continue; | |
653 | } | |
654 | if (phdr[i].p_type == PT_INTERP) { | |
99aed3e4 JM |
655 | /* |
656 | * If interp is already defined there are more than | |
657 | * one PT_INTERP program headers present. Take only | |
658 | * the first one and ignore the rest. | |
659 | */ | |
660 | if (interp != NULL) | |
661 | continue; | |
662 | ||
663 | if (phdr[i].p_filesz == 0 || | |
664 | phdr[i].p_filesz > PAGE_SIZE || | |
665 | phdr[i].p_filesz > MAXPATHLEN) | |
72453240 | 666 | return (ENOEXEC); |
99aed3e4 JM |
667 | |
668 | interp = kmalloc(phdr[i].p_filesz, M_TEMP, M_WAITOK); | |
669 | failure = extract_interpreter(imgp, &phdr[i], interp); | |
670 | if (failure) { | |
671 | kfree(interp, M_TEMP); | |
672 | return (ENOEXEC); | |
673 | } | |
72453240 JM |
674 | continue; |
675 | } | |
315b8b8b | 676 | } |
984263bc | 677 | |
315b8b8b JM |
678 | brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel); |
679 | if (brand_info == NULL) { | |
72453240 JM |
680 | uprintf("ELF binary type \"%u\" not known.\n", |
681 | hdr->e_ident[EI_OSABI]); | |
99aed3e4 | 682 | if (interp != NULL) |
9f95d105 | 683 | kfree(interp, M_TEMP); |
72453240 | 684 | return (ENOEXEC); |
315b8b8b | 685 | } |
72453240 | 686 | if (hdr->e_type == ET_DYN) { |
99aed3e4 | 687 | if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) { |
9f95d105 MD |
688 | if (interp != NULL) |
689 | kfree(interp, M_TEMP); | |
72453240 | 690 | return (ENOEXEC); |
9f95d105 | 691 | } |
72453240 | 692 | /* |
9f95d105 MD |
693 | * If p_vaddr field of PT_LOAD program header is zero and type of an |
694 | * executable is ET_DYN, then it must be a position independent |
695 | * executable (PIE). In this case the system needs to pick a base | |
696 | * address for us. Set et_dyn_addr to non-zero and choose the actual | |
697 | * address when we are ready. | |
72453240 JM |
698 | */ |
699 | if (baddr == 0) | |
9f95d105 MD |
700 | et_dyn_addr = 1; |
701 | } | |
315b8b8b JM |
702 | |
703 | if (interp != NULL && brand_info->interp_newpath != NULL) | |
704 | newinterp = brand_info->interp_newpath; | |
984263bc | 705 | |
29802dbb | 706 | exec_new_vmspace(imgp, NULL); |
984263bc MD |
707 | |
708 | /* | |
709 | * Yeah, I'm paranoid. There is every reason in the world to get | |
710 | * VTEXT now since from here on out, there are places we can have | |
711 | * a context switch. Better safe than sorry; I really don't want | |
712 | * the file to change while it's being loaded. | |
713 | */ | |
5fd012e0 | 714 | vsetflags(imgp->vp, VTEXT); |
984263bc MD |
715 | |
716 | vmspace = imgp->proc->p_vmspace; | |
9f95d105 MD |
717 | /* Choose the base address for dynamic executables if we need to. */ |
718 | if (et_dyn_addr) | |
719 | et_dyn_addr = pie_base_hint(imgp->proc); | |
984263bc MD |
720 | |
721 | for (i = 0; i < hdr->e_phnum; i++) { | |
315b8b8b | 722 | switch (phdr[i].p_type) { |
984263bc | 723 | case PT_LOAD: /* Loadable segment */ |
c350568d JM |
724 | if (phdr[i].p_memsz == 0) |
725 | break; | |
565fefef | 726 | prot = __elfN(trans_prot)(phdr[i].p_flags); |
984263bc | 727 | |
315b8b8b JM |
728 | if ((error = __elfN(load_section)( |
729 | imgp->proc, | |
72453240 JM |
730 | vmspace, |
731 | imgp->vp, | |
315b8b8b | 732 | phdr[i].p_offset, |
72453240 | 733 | (caddr_t)phdr[i].p_vaddr + et_dyn_addr, |
315b8b8b | 734 | phdr[i].p_memsz, |
72453240 | 735 | phdr[i].p_filesz, |
99aed3e4 JM |
736 | prot)) != 0) { |
737 | if (interp != NULL) | |
738 | kfree (interp, M_TEMP); | |
315b8b8b | 739 | return (error); |
99aed3e4 | 740 | } |
984263bc | 741 | |
22cc9971 DX |
742 | /* |
743 | * If this segment contains the program headers, | |
744 | * remember their virtual address for the AT_PHDR | |
745 | * aux entry. Static binaries don't usually include | |
746 | * a PT_PHDR entry. | |
747 | */ | |
748 | if (phdr[i].p_offset == 0 && | |
749 | hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize | |
750 | <= phdr[i].p_filesz) | |
72453240 JM |
751 | proghdr = phdr[i].p_vaddr + hdr->e_phoff + |
752 | et_dyn_addr; | |
22cc9971 | 753 | |
72453240 | 754 | seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr); |
984263bc | 755 | seg_size = round_page(phdr[i].p_memsz + |
72453240 | 756 | phdr[i].p_vaddr + et_dyn_addr - seg_addr); |
984263bc MD |
757 | |
758 | /* | |
759 | * Is this .text or .data? We can't use | |
760 | * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the | |
761 | * alpha terribly and possibly does other bad | |
762 | * things so we stick to the old way of figuring | |
763 | * it out: If the segment contains the program | |
764 | * entry point, it's a text segment, otherwise it | |
765 | * is a data segment. | |
766 | * | |
767 | * Note that obreak() assumes that data_addr + | |
768 | * data_size == end of data load area, and the ELF | |
769 | * file format expects segments to be sorted by | |
770 | * address. If multiple data segments exist, the | |
771 | * last one will be used. | |
772 | */ | |
773 | if (hdr->e_entry >= phdr[i].p_vaddr && | |
774 | hdr->e_entry < (phdr[i].p_vaddr + | |
775 | phdr[i].p_memsz)) { | |
776 | text_size = seg_size; | |
777 | text_addr = seg_addr; | |
72453240 | 778 | entry = (u_long)hdr->e_entry + et_dyn_addr; |
984263bc MD |
779 | } else { |
780 | data_size = seg_size; | |
781 | data_addr = seg_addr; | |
782 | } | |
783 | total_size += seg_size; | |
784 | ||
785 | /* | |
786 | * Check limits. It should be safe to check the | |
787 | * limits after loading the segment since we do | |
788 | * not actually fault in all the segment's pages. | |
789 | */ | |
790 | if (data_size > | |
791 | imgp->proc->p_rlimit[RLIMIT_DATA].rlim_cur || | |
792 | text_size > maxtsiz || | |
793 | total_size > | |
794 | imgp->proc->p_rlimit[RLIMIT_VMEM].rlim_cur) { | |
99aed3e4 JM |
795 | if (interp != NULL) |
796 | kfree(interp, M_TEMP); | |
984263bc | 797 | error = ENOMEM; |
99aed3e4 | 798 | return (error); |
984263bc | 799 | } |
8d0415e1 | 800 | break; |
984263bc | 801 | case PT_PHDR: /* Program header table info */ |
72453240 | 802 | proghdr = phdr[i].p_vaddr + et_dyn_addr; |
984263bc MD |
803 | break; |
804 | default: | |
805 | break; | |
806 | } | |
807 | } | |
808 | ||
4b566556 | 809 | vmspace->vm_tsize = text_size; /* in bytes */ |
984263bc | 810 | vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr; |
4b566556 | 811 | vmspace->vm_dsize = data_size; /* in bytes */ |
984263bc MD |
812 | vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr; |
813 | ||
814 | addr = ELF_RTLD_ADDR(vmspace); | |
815 | ||
816 | imgp->entry_addr = entry; | |
817 | ||
315b8b8b | 818 | imgp->proc->p_sysent = brand_info->sysvec; |
984263bc | 819 | |
315b8b8b JM |
820 | if (interp != NULL) { |
821 | int have_interp = FALSE; | |
822 | if (brand_info->emul_path != NULL && | |
823 | brand_info->emul_path[0] != '\0') { | |
824 | path = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK); | |
825 | ksnprintf(path, MAXPATHLEN, "%s%s", | |
826 | brand_info->emul_path, interp); | |
827 | error = __elfN(load_file)(imgp->proc, path, &addr, | |
828 | &imgp->entry_addr); | |
829 | kfree(path, M_TEMP); | |
830 | if (error == 0) | |
72453240 | 831 | have_interp = TRUE; |
984263bc | 832 | } |
315b8b8b JM |
833 | if (!have_interp && newinterp != NULL) { |
834 | error = __elfN(load_file)(imgp->proc, newinterp, | |
835 | &addr, &imgp->entry_addr); | |
836 | if (error == 0) | |
72453240 | 837 | have_interp = TRUE; |
8d0415e1 | 838 | } |
315b8b8b JM |
839 | if (!have_interp) { |
840 | error = __elfN(load_file)(imgp->proc, interp, &addr, | |
841 | &imgp->entry_addr); | |
984263bc | 842 | } |
315b8b8b JM |
843 | if (error != 0) { |
844 | uprintf("ELF interpreter %s not found\n", interp); | |
99aed3e4 | 845 | kfree(interp, M_TEMP); |
315b8b8b | 846 | return (error); |
984263bc | 847 | } |
99aed3e4 | 848 | kfree(interp, M_TEMP); |
315b8b8b | 849 | } else |
72453240 | 850 | addr = et_dyn_addr; |
984263bc MD |
851 | |
852 | /* | |
853 | * Construct auxargs table (used by the fixup routine) | |
854 | */ | |
efda3bd0 | 855 | elf_auxargs = kmalloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); |
984263bc MD |
856 | elf_auxargs->execfd = -1; |
857 | elf_auxargs->phdr = proghdr; | |
858 | elf_auxargs->phent = hdr->e_phentsize; | |
859 | elf_auxargs->phnum = hdr->e_phnum; | |
860 | elf_auxargs->pagesz = PAGE_SIZE; | |
861 | elf_auxargs->base = addr; | |
862 | elf_auxargs->flags = 0; | |
863 | elf_auxargs->entry = entry; | |
984263bc MD |
864 | |
865 | imgp->auxargs = elf_auxargs; | |
866 | imgp->interpreted = 0; | |
315b8b8b | 867 | imgp->proc->p_osrel = osrel; |
984263bc | 868 | |
315b8b8b | 869 | return (error); |
984263bc MD |
870 | } |
871 | ||
315b8b8b JM |
int
__elfN(dragonfly_fixup)(register_t **stack_base, struct image_params *imgp)
{
	/*
	 * Lay down the ELF auxiliary argument vector (auxv) on the new
	 * process's user stack, just past argv[] and envp[], then push
	 * argc below the current stack base.  Consumes and frees the
	 * Elf_Auxargs table built by the image activator.
	 *
	 * Returns 0 (callers treat non-zero as failure; none occurs here).
	 */
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	/*
	 * Skip over argv[] (argc entries), envp[] (envc entries) and the
	 * two terminating NULL pointers to find where the auxv begins.
	 */
	pos = base + (imgp->args->argc + imgp->args->envc + 2);

	if (args->execfd != -1)
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	if (imgp->execpathp != 0)
		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
	AUXARGS_ENTRY(pos, AT_OSRELDATE, osreldate);
	AUXARGS_ENTRY(pos, AT_NULL, 0);		/* auxv terminator */

	/* The auxargs table is no longer needed once copied out. */
	kfree(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	/* Push argc onto the user stack, one word below the old base. */
	base--;
	suword64(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
	return (0);
}
984263bc MD |
904 | |
905 | /* | |
906 | * Code for generating ELF core dumps. | |
907 | */ | |
908 | ||
/*
 * Callback signature used by each_segment() when walking the process's
 * VM map entries.
 */
typedef int (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in (incremented) */
	Elf_Phdr *phdr_max;	/* Pointer bound for error check */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t vsize;		/* Total size of all writable segments. */
};

/* Closure for cb_put_fp(). */
struct fp_closure {
	struct vn_hdr *vnh;	/* Next vnode header slot (incremented) */
	struct vn_hdr *vnh_max;	/* Bound for overflow check */
	int count;		/* Number of vnode headers emitted */
	struct stat *sb;	/* NOTE(review): appears unused here */
};

/*
 * Two-phase output buffer for the core file header: in the dry-run pass
 * buf is NULL and only off advances (size calculation); in the write pass
 * buf/off_max bound the actual copy (see target_reserve()).
 */
typedef struct elf_buf {
	char *buf;		/* Header buffer, NULL during dry run */
	size_t off;		/* Current output offset */
	size_t off_max;		/* Buffer capacity (write pass only) */
} *elf_buf_t;

static void *target_reserve(elf_buf_t target, size_t bytes, int *error);

static int cb_put_phdr (vm_map_entry_t, void *);
static int cb_size_segment (vm_map_entry_t, void *);
static int cb_fpcount_segment(vm_map_entry_t, void *);
static int cb_put_fp(vm_map_entry_t, void *);


static int each_segment (struct proc *, segment_callback, void *, int);
static int __elfN(corehdr)(struct lwp *, int, struct file *, struct ucred *,
			int, elf_buf_t);
enum putmode { WRITE, DRYRUN };
static int __elfN(puthdr)(struct lwp *, elf_buf_t, int sig, enum putmode,
			int, struct file *);
static int elf_putallnotes(struct lwp *, elf_buf_t, int, enum putmode);
static int __elfN(putnote)(elf_buf_t, const char *, int, const void *, size_t);

static int elf_putsigs(struct lwp *, elf_buf_t);
static int elf_puttextvp(struct proc *, elf_buf_t);
static int elf_putfiles(struct proc *, elf_buf_t, struct file *);
731100e5 | 958 | |
984263bc | 959 | int |
315b8b8b | 960 | __elfN(coredump)(struct lwp *lp, int sig, struct vnode *vp, off_t limit) |
984263bc | 961 | { |
731100e5 MD |
962 | struct file *fp; |
963 | int error; | |
964 | ||
965 | if ((error = falloc(NULL, &fp, NULL)) != 0) | |
966 | return (error); | |
7d20a8ff | 967 | fsetcred(fp, lp->lwp_proc->p_ucred); |
731100e5 | 968 | |
fad57d0e MD |
969 | /* |
970 | * XXX fixme. | |
971 | */ | |
fbb4eeab | 972 | fp->f_type = DTYPE_VNODE; |
731100e5 | 973 | fp->f_flag = O_CREAT|O_WRONLY|O_NOFOLLOW; |
fad57d0e | 974 | fp->f_ops = &vnode_fileops; |
fbb4eeab | 975 | fp->f_data = vp; |
731100e5 | 976 | |
7d20a8ff | 977 | error = generic_elf_coredump(lp, sig, fp, limit); |
731100e5 | 978 | |
fbb4eeab | 979 | fp->f_type = 0; |
731100e5 MD |
980 | fp->f_flag = 0; |
981 | fp->f_ops = &badfileops; | |
fbb4eeab | 982 | fp->f_data = NULL; |
9f87144f | 983 | fdrop(fp); |
731100e5 MD |
984 | return (error); |
985 | } | |
986 | ||
987 | int | |
7d20a8ff | 988 | generic_elf_coredump(struct lwp *lp, int sig, struct file *fp, off_t limit) |
731100e5 | 989 | { |
7d20a8ff | 990 | struct proc *p = lp->lwp_proc; |
dadab5e9 | 991 | struct ucred *cred = p->p_ucred; |
984263bc MD |
992 | int error = 0; |
993 | struct sseg_closure seginfo; | |
55e44363 | 994 | struct elf_buf target; |
984263bc | 995 | |
731100e5 | 996 | if (!fp) |
6ea70f76 | 997 | kprintf("can't dump core - null fp\n"); |
55e44363 MD |
998 | |
999 | /* | |
1000 | * Size the program segments | |
1001 | */ | |
984263bc | 1002 | seginfo.count = 0; |
55e44363 | 1003 | seginfo.vsize = 0; |
731100e5 | 1004 | each_segment(p, cb_size_segment, &seginfo, 1); |
984263bc MD |
1005 | |
1006 | /* | |
1007 | * Calculate the size of the core file header area by making | |
1008 | * a dry run of generating it. Nothing is written, but the | |
1009 | * size is calculated. | |
1010 | */ | |
55e44363 | 1011 | bzero(&target, sizeof(target)); |
315b8b8b | 1012 | __elfN(puthdr)(lp, &target, sig, DRYRUN, seginfo.count, fp); |
731100e5 | 1013 | |
55e44363 | 1014 | if (target.off + seginfo.vsize >= limit) |
984263bc MD |
1015 | return (EFAULT); |
1016 | ||
1017 | /* | |
1018 | * Allocate memory for building the header, fill it up, | |
1019 | * and write it out. | |
1020 | */ | |
55e44363 MD |
1021 | target.off_max = target.off; |
1022 | target.off = 0; | |
efda3bd0 | 1023 | target.buf = kmalloc(target.off_max, M_TEMP, M_WAITOK|M_ZERO); |
55e44363 | 1024 | |
315b8b8b | 1025 | error = __elfN(corehdr)(lp, sig, fp, cred, seginfo.count, &target); |
984263bc MD |
1026 | |
1027 | /* Write the contents of all of the writable segments. */ | |
1028 | if (error == 0) { | |
1029 | Elf_Phdr *php; | |
984263bc | 1030 | int i; |
a63bf9b6 | 1031 | ssize_t nbytes; |
984263bc | 1032 | |
55e44363 MD |
1033 | php = (Elf_Phdr *)(target.buf + sizeof(Elf_Ehdr)) + 1; |
1034 | for (i = 0; i < seginfo.count; i++) { | |
be7d8f4f | 1035 | error = fp_write(fp, (caddr_t)php->p_vaddr, |
e7440b28 | 1036 | php->p_filesz, &nbytes, UIO_USERSPACE); |
984263bc MD |
1037 | if (error != 0) |
1038 | break; | |
984263bc MD |
1039 | php++; |
1040 | } | |
1041 | } | |
efda3bd0 | 1042 | kfree(target.buf, M_TEMP); |
984263bc | 1043 | |
315b8b8b | 1044 | return (error); |
984263bc MD |
1045 | } |
1046 | ||
/*
 * A callback for each_segment() to write out the segment's
 * program header entry.  Segment file data starts at the next page
 * boundary after the previous segment; phc->offset tracks this.
 * Returns EINVAL if more segments appear than were counted in the
 * sizing pass.
 */
static int
cb_put_phdr(vm_map_entry_t entry, void *closure)
{
	struct phdr_closure *phc = closure;
	Elf_Phdr *phdr = phc->phdr;

	if (phc->phdr == phc->phdr_max)
		return (EINVAL);

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->ba.start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->ba.end - entry->ba.start;
	phdr->p_align = PAGE_SIZE;
	/* Translate VM protection bits into ELF PF_R/PF_W/PF_X flags. */
	phdr->p_flags = __elfN(untrans_prot)(entry->protection);

	phc->offset += phdr->p_filesz;
	++phc->phdr;
	return (0);
}
1074 | ||
1075 | /* | |
1076 | * A callback for each_writable_segment() to gather information about | |
1077 | * the number of segments and their total size. | |
1078 | */ | |
55e44363 | 1079 | static int |
731100e5 | 1080 | cb_size_segment(vm_map_entry_t entry, void *closure) |
984263bc | 1081 | { |
55e44363 | 1082 | struct sseg_closure *ssc = closure; |
984263bc | 1083 | |
55e44363 | 1084 | ++ssc->count; |
67e7cb85 | 1085 | ssc->vsize += entry->ba.end - entry->ba.start; |
315b8b8b | 1086 | return (0); |
984263bc MD |
1087 | } |
1088 | ||
731100e5 MD |
1089 | /* |
1090 | * A callback for each_segment() to gather information about | |
1091 | * the number of text segments. | |
1092 | */ | |
55e44363 | 1093 | static int |
731100e5 MD |
1094 | cb_fpcount_segment(vm_map_entry_t entry, void *closure) |
1095 | { | |
55e44363 | 1096 | int *count = closure; |
12693083 MD |
1097 | struct vnode *vp; |
1098 | ||
9de48ead MD |
1099 | if (entry->ba.object && entry->ba.object->type == OBJT_VNODE) { |
1100 | vp = (struct vnode *)entry->ba.object->handle; | |
12693083 | 1101 | if ((vp->v_flag & VCKPT) && curproc->p_textvp == vp) |
315b8b8b | 1102 | return (0); |
731100e5 | 1103 | ++*count; |
12693083 | 1104 | } |
315b8b8b | 1105 | return (0); |
731100e5 MD |
1106 | } |
1107 | ||
55e44363 | 1108 | static int |
731100e5 MD |
1109 | cb_put_fp(vm_map_entry_t entry, void *closure) |
1110 | { | |
55e44363 | 1111 | struct fp_closure *fpc = closure; |
731100e5 MD |
1112 | struct vn_hdr *vnh = fpc->vnh; |
1113 | Elf_Phdr *phdr = &vnh->vnh_phdr; | |
1114 | struct vnode *vp; | |
1115 | int error; | |
1116 | ||
12693083 MD |
1117 | /* |
1118 | * If an entry represents a vnode then write out a file handle. | |
1119 | * | |
1120 | * If we are checkpointing a checkpoint-restored program we do | |
1121 | * NOT record the filehandle for the old checkpoint vnode (which | |
1122 | * is mapped all over the place). Instead we rely on the fact | |
1123 | * that a checkpoint-restored program does not mmap() the checkpt | |
1124 | * vnode NOCORE, so its contents will be written out to the | |
4f12bfd3 MD |
1125 | * new checkpoint file. This is necessary because the 'old' |
1126 | * checkpoint file is typically destroyed when a new one is created | |
1127 | * and thus cannot be used to restore the new checkpoint. | |
1128 | * | |
1129 | * Theoretically we could create a chain of checkpoint files and | |
1130 | * operate the checkpointing operation kinda like an incremental | |
1131 | * checkpoint, but a checkpoint restore would then likely wind up | |
1132 | * referencing many prior checkpoint files and that is a bit over | |
1133 | * the top for the purpose of the checkpoint API. | |
12693083 | 1134 | */ |
9de48ead MD |
1135 | if (entry->ba.object && entry->ba.object->type == OBJT_VNODE) { |
1136 | vp = (struct vnode *)entry->ba.object->handle; | |
12693083 | 1137 | if ((vp->v_flag & VCKPT) && curproc->p_textvp == vp) |
315b8b8b | 1138 | return (0); |
55e44363 | 1139 | if (vnh == fpc->vnh_max) |
315b8b8b | 1140 | return (EINVAL); |
55e44363 MD |
1141 | |
1142 | if (vp->v_mount) | |
1143 | vnh->vnh_fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid; | |
731100e5 | 1144 | error = VFS_VPTOFH(vp, &vnh->vnh_fh.fh_fid); |
4fe2217f MD |
1145 | if (error) { |
1146 | char *freepath, *fullpath; | |
1147 | ||
8d6a3ef8 MD |
1148 | /* |
1149 | * This is actually a relatively common occurance, | |
1150 | * so don't spew on the console by default. | |
1151 | */ | |
5b4cfb7e | 1152 | if (vn_fullpath(curproc, vp, &fullpath, &freepath, 0)) { |
8d6a3ef8 MD |
1153 | if (bootverbose) |
1154 | kprintf("Warning: coredump, error %d: cannot store file handle for vnode %p\n", error, vp); | |
4fe2217f | 1155 | } else { |
8d6a3ef8 MD |
1156 | if (bootverbose) |
1157 | kprintf("Warning: coredump, error %d: cannot store file handle for %s\n", error, fullpath); | |
efda3bd0 | 1158 | kfree(freepath, M_TEMP); |
4fe2217f MD |
1159 | } |
1160 | error = 0; | |
1161 | } | |
55e44363 | 1162 | |
731100e5 MD |
1163 | phdr->p_type = PT_LOAD; |
1164 | phdr->p_offset = 0; /* not written to core */ | |
67e7cb85 | 1165 | phdr->p_vaddr = entry->ba.start; |
731100e5 | 1166 | phdr->p_paddr = 0; |
67e7cb85 MD |
1167 | phdr->p_filesz = phdr->p_memsz = |
1168 | entry->ba.end - entry->ba.start; | |
731100e5 MD |
1169 | phdr->p_align = PAGE_SIZE; |
1170 | phdr->p_flags = 0; | |
1171 | if (entry->protection & VM_PROT_READ) | |
1172 | phdr->p_flags |= PF_R; | |
1173 | if (entry->protection & VM_PROT_WRITE) | |
1174 | phdr->p_flags |= PF_W; | |
1175 | if (entry->protection & VM_PROT_EXECUTE) | |
1176 | phdr->p_flags |= PF_X; | |
1177 | ++fpc->vnh; | |
1178 | ++fpc->count; | |
1179 | } | |
315b8b8b | 1180 | return (0); |
731100e5 MD |
1181 | } |
1182 | ||
984263bc MD |
1183 | /* |
1184 | * For each writable segment in the process's memory map, call the given | |
1185 | * function with a pointer to the map entry and some arbitrary | |
1186 | * caller-supplied data. | |
1187 | */ | |
55e44363 | 1188 | static int |
731100e5 | 1189 | each_segment(struct proc *p, segment_callback func, void *closure, int writable) |
984263bc | 1190 | { |
55e44363 | 1191 | int error = 0; |
984263bc MD |
1192 | vm_map_t map = &p->p_vmspace->vm_map; |
1193 | vm_map_entry_t entry; | |
1194 | ||
47ec0953 | 1195 | RB_FOREACH(entry, vm_map_rb_tree, &map->rb_root) { |
44293a80 | 1196 | vm_map_backing_t ba; |
984263bc MD |
1197 | vm_object_t obj; |
1198 | ||
1199 | /* | |
1200 | * Don't dump inaccessible mappings, deal with legacy | |
1201 | * coredump mode. | |
1202 | * | |
1203 | * Note that read-only segments related to the elf binary | |
1204 | * are marked MAP_ENTRY_NOCOREDUMP now so we no longer | |
1205 | * need to arbitrarily ignore such segments. | |
1206 | */ | |
1207 | if (elf_legacy_coredump) { | |
731100e5 | 1208 | if (writable && (entry->protection & VM_PROT_RW) != VM_PROT_RW) |
984263bc MD |
1209 | continue; |
1210 | } else { | |
731100e5 | 1211 | if (writable && (entry->protection & VM_PROT_ALL) == 0) |
984263bc MD |
1212 | continue; |
1213 | } | |
1214 | ||
1215 | /* | |
1216 | * Dont include memory segment in the coredump if | |
1217 | * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in | |
1b874851 MD |
1218 | * madvise(2). |
1219 | * | |
1220 | * Currently we only dump normal VM object maps. We do | |
1221 | * not dump submaps or virtual page tables. | |
984263bc | 1222 | */ |
1b874851 MD |
1223 | if (writable && (entry->eflags & MAP_ENTRY_NOCOREDUMP)) |
1224 | continue; | |
1225 | if (entry->maptype != VM_MAPTYPE_NORMAL) | |
984263bc | 1226 | continue; |
984263bc | 1227 | |
b12defdc MD |
1228 | /* |
1229 | * Find the bottom-most object, leaving the base object | |
1230 | * and the bottom-most object held (but only one hold | |
1231 | * if they happen to be the same). | |
1232 | */ | |
9de48ead MD |
1233 | ba = &entry->ba; |
1234 | while (ba->backing_ba) | |
1235 | ba = ba->backing_ba; | |
1236 | obj = ba->object; | |
984263bc | 1237 | |
b12defdc MD |
1238 | /* |
1239 | * The callback only applies to default, swap, or vnode | |
1240 | * objects. Other types of objects such as memory-mapped | |
1241 | * devices are ignored. | |
1242 | */ | |
9de48ead MD |
1243 | if (obj) { |
1244 | vm_object_hold_shared(obj); | |
1245 | ||
1246 | if (obj->type == OBJT_DEFAULT || | |
1247 | obj->type == OBJT_SWAP || | |
1248 | obj->type == OBJT_VNODE) { | |
1249 | error = (*func)(entry, closure); | |
1250 | } | |
1251 | vm_object_drop(obj); | |
b12defdc | 1252 | } |
984263bc | 1253 | } |
315b8b8b | 1254 | return (error); |
55e44363 MD |
1255 | } |
1256 | ||
1257 | static | |
1258 | void * | |
1259 | target_reserve(elf_buf_t target, size_t bytes, int *error) | |
1260 | { | |
1261 | void *res = NULL; | |
1262 | ||
1263 | if (target->buf) { | |
1264 | if (target->off + bytes > target->off_max) | |
1265 | *error = EINVAL; | |
1266 | else | |
1267 | res = target->buf + target->off; | |
1268 | } | |
1269 | target->off += bytes; | |
1270 | return (res); | |
984263bc MD |
1271 | } |
1272 | ||
/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(struct lwp *lp, int sig, struct file *fp, struct ucred *cred,
	    int numsegs, elf_buf_t target)
{
	int error;
	ssize_t nbytes;

	/*
	 * Fill in the header.  The fp is passed so we can detect and flag
	 * a checkpoint file pointer within the core file itself, because
	 * it may not be restored from the same file handle.
	 */
	error = __elfN(puthdr)(lp, target, sig, WRITE, numsegs, fp);

	/* Write it to the core file. */
	if (error == 0) {
		error = fp_write(fp, target->buf, target->off, &nbytes,
				 UIO_SYSSPACE);
	}
	return (error);
}
1298 | ||
55e44363 | 1299 | static int |
315b8b8b | 1300 | __elfN(puthdr)(struct lwp *lp, elf_buf_t target, int sig, enum putmode mode, |
d913b2fa | 1301 | int numsegs, struct file *fp) |
984263bc | 1302 | { |
7d20a8ff | 1303 | struct proc *p = lp->lwp_proc; |
55e44363 | 1304 | int error = 0; |
984263bc MD |
1305 | size_t phoff; |
1306 | size_t noteoff; | |
1307 | size_t notesz; | |
55e44363 MD |
1308 | Elf_Ehdr *ehdr; |
1309 | Elf_Phdr *phdr; | |
984263bc | 1310 | |
55e44363 MD |
1311 | ehdr = target_reserve(target, sizeof(Elf_Ehdr), &error); |
1312 | ||
1313 | phoff = target->off; | |
1314 | phdr = target_reserve(target, (numsegs + 1) * sizeof(Elf_Phdr), &error); | |
1315 | ||
1316 | noteoff = target->off; | |
d913b2fa NT |
1317 | if (error == 0) |
1318 | elf_putallnotes(lp, target, sig, mode); | |
55e44363 | 1319 | notesz = target->off - noteoff; |
984263bc | 1320 | |
55e44363 MD |
1321 | /* |
1322 | * put extra cruft for dumping process state here | |
1323 | * - we really want it be before all the program | |
1324 | * mappings | |
1325 | * - we just need to update the offset accordingly | |
1326 | * and GDB will be none the wiser. | |
1327 | */ | |
1328 | if (error == 0) | |
1329 | error = elf_puttextvp(p, target); | |
1330 | if (error == 0) | |
7d20a8ff | 1331 | error = elf_putsigs(lp, target); |
55e44363 | 1332 | if (error == 0) |
ff7a3478 | 1333 | error = elf_putfiles(p, target, fp); |
55e44363 MD |
1334 | |
1335 | /* | |
1336 | * Align up to a page boundary for the program segments. The | |
1337 | * actual data will be written to the outptu file, not to elf_buf_t, | |
1338 | * so we do not have to do any further bounds checking. | |
1339 | */ | |
1340 | target->off = round_page(target->off); | |
1341 | if (error == 0 && ehdr != NULL) { | |
984263bc MD |
1342 | /* |
1343 | * Fill in the ELF header. | |
1344 | */ | |
984263bc MD |
1345 | ehdr->e_ident[EI_MAG0] = ELFMAG0; |
1346 | ehdr->e_ident[EI_MAG1] = ELFMAG1; | |
1347 | ehdr->e_ident[EI_MAG2] = ELFMAG2; | |
1348 | ehdr->e_ident[EI_MAG3] = ELFMAG3; | |
1349 | ehdr->e_ident[EI_CLASS] = ELF_CLASS; | |
1350 | ehdr->e_ident[EI_DATA] = ELF_DATA; | |
1351 | ehdr->e_ident[EI_VERSION] = EV_CURRENT; | |
64fce751 | 1352 | ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE; |
984263bc MD |
1353 | ehdr->e_ident[EI_ABIVERSION] = 0; |
1354 | ehdr->e_ident[EI_PAD] = 0; | |
1355 | ehdr->e_type = ET_CORE; | |
1356 | ehdr->e_machine = ELF_ARCH; | |
1357 | ehdr->e_version = EV_CURRENT; | |
1358 | ehdr->e_entry = 0; | |
1359 | ehdr->e_phoff = phoff; | |
1360 | ehdr->e_flags = 0; | |
1361 | ehdr->e_ehsize = sizeof(Elf_Ehdr); | |
1362 | ehdr->e_phentsize = sizeof(Elf_Phdr); | |
1363 | ehdr->e_phnum = numsegs + 1; | |
1364 | ehdr->e_shentsize = sizeof(Elf_Shdr); | |
1365 | ehdr->e_shnum = 0; | |
1366 | ehdr->e_shstrndx = SHN_UNDEF; | |
55e44363 MD |
1367 | } |
1368 | if (error == 0 && phdr != NULL) { | |
984263bc MD |
1369 | /* |
1370 | * Fill in the program header entries. | |
1371 | */ | |
55e44363 | 1372 | struct phdr_closure phc; |
984263bc MD |
1373 | |
1374 | /* The note segement. */ | |
1375 | phdr->p_type = PT_NOTE; | |
1376 | phdr->p_offset = noteoff; | |
1377 | phdr->p_vaddr = 0; | |
1378 | phdr->p_paddr = 0; | |
1379 | phdr->p_filesz = notesz; | |
1380 | phdr->p_memsz = 0; | |
1381 | phdr->p_flags = 0; | |
1382 | phdr->p_align = 0; | |
55e44363 | 1383 | ++phdr; |
984263bc MD |
1384 | |
1385 | /* All the writable segments from the program. */ | |
1386 | phc.phdr = phdr; | |
55e44363 MD |
1387 | phc.phdr_max = phdr + numsegs; |
1388 | phc.offset = target->off; | |
731100e5 | 1389 | each_segment(p, cb_put_phdr, &phc, 1); |
984263bc | 1390 | } |
55e44363 | 1391 | return (error); |
984263bc MD |
1392 | } |
1393 | ||
d913b2fa NT |
1394 | /* |
1395 | * Append core dump notes to target ELF buffer or simply update target size | |
1396 | * if dryrun selected. | |
1397 | */ | |
1398 | static int | |
1399 | elf_putallnotes(struct lwp *corelp, elf_buf_t target, int sig, | |
1400 | enum putmode mode) | |
1401 | { | |
1402 | struct proc *p = corelp->lwp_proc; | |
1403 | int error; | |
1404 | struct { | |
1405 | prstatus_t status; | |
1406 | prfpregset_t fpregs; | |
1407 | prpsinfo_t psinfo; | |
1408 | } *tmpdata; | |
1409 | prstatus_t *status; | |
1410 | prfpregset_t *fpregs; | |
1411 | prpsinfo_t *psinfo; | |
1412 | struct lwp *lp; | |
1413 | ||
1414 | /* | |
1415 | * Allocate temporary storage for notes on heap to avoid stack overflow. | |
1416 | */ | |
1417 | if (mode != DRYRUN) { | |
1418 | tmpdata = kmalloc(sizeof(*tmpdata), M_TEMP, M_ZERO | M_WAITOK); | |
1419 | status = &tmpdata->status; | |
1420 | fpregs = &tmpdata->fpregs; | |
1421 | psinfo = &tmpdata->psinfo; | |
1422 | } else { | |
1423 | tmpdata = NULL; | |
1424 | status = NULL; | |
1425 | fpregs = NULL; | |
1426 | psinfo = NULL; | |
1427 | } | |
1428 | ||
1429 | /* | |
1430 | * Append LWP-agnostic note. | |
1431 | */ | |
1432 | if (mode != DRYRUN) { | |
1433 | psinfo->pr_version = PRPSINFO_VERSION; | |
1434 | psinfo->pr_psinfosz = sizeof(prpsinfo_t); | |
315b8b8b JM |
1435 | strlcpy(psinfo->pr_fname, p->p_comm, |
1436 | sizeof(psinfo->pr_fname)); | |
d913b2fa NT |
1437 | /* |
1438 | * XXX - We don't fill in the command line arguments | |
1439 | * properly yet. | |
1440 | */ | |
315b8b8b JM |
1441 | strlcpy(psinfo->pr_psargs, p->p_comm, |
1442 | sizeof(psinfo->pr_psargs)); | |
d913b2fa NT |
1443 | } |
1444 | error = | |
315b8b8b | 1445 | __elfN(putnote)(target, "CORE", NT_PRPSINFO, psinfo, sizeof *psinfo); |
d913b2fa NT |
1446 | if (error) |
1447 | goto exit; | |
1448 | ||
1449 | /* | |
1450 | * Append first note for LWP that triggered core so that it is | |
1451 | * the selected one when the debugger starts. | |
1452 | */ | |
1453 | if (mode != DRYRUN) { | |
1454 | status->pr_version = PRSTATUS_VERSION; | |
1455 | status->pr_statussz = sizeof(prstatus_t); | |
1456 | status->pr_gregsetsz = sizeof(gregset_t); | |
1457 | status->pr_fpregsetsz = sizeof(fpregset_t); | |
1458 | status->pr_osreldate = osreldate; | |
1459 | status->pr_cursig = sig; | |
64fce751 | 1460 | status->pr_pid = corelp->lwp_tid; |
d913b2fa NT |
1461 | fill_regs(corelp, &status->pr_reg); |
1462 | fill_fpregs(corelp, fpregs); | |
1463 | } | |
1464 | error = | |
315b8b8b | 1465 | __elfN(putnote)(target, "CORE", NT_PRSTATUS, status, sizeof *status); |
d913b2fa NT |
1466 | if (error) |
1467 | goto exit; | |
1468 | error = | |
315b8b8b | 1469 | __elfN(putnote)(target, "CORE", NT_FPREGSET, fpregs, sizeof *fpregs); |
d913b2fa NT |
1470 | if (error) |
1471 | goto exit; | |
1472 | ||
1473 | /* | |
1474 | * Then append notes for other LWPs. | |
1475 | */ | |
1476 | FOREACH_LWP_IN_PROC(lp, p) { | |
1477 | if (lp == corelp) | |
1478 | continue; | |
1479 | /* skip lwps being created */ | |
1480 | if (lp->lwp_thread == NULL) | |
1481 | continue; | |
1482 | if (mode != DRYRUN) { | |
64fce751 | 1483 | status->pr_pid = lp->lwp_tid; |
d913b2fa NT |
1484 | fill_regs(lp, &status->pr_reg); |
1485 | fill_fpregs(lp, fpregs); | |
1486 | } | |
315b8b8b | 1487 | error = __elfN(putnote)(target, "CORE", NT_PRSTATUS, |
d913b2fa NT |
1488 | status, sizeof *status); |
1489 | if (error) | |
1490 | goto exit; | |
315b8b8b | 1491 | error = __elfN(putnote)(target, "CORE", NT_FPREGSET, |
d913b2fa NT |
1492 | fpregs, sizeof *fpregs); |
1493 | if (error) | |
1494 | goto exit; | |
1495 | } | |
1496 | ||
1497 | exit: | |
1498 | if (tmpdata != NULL) | |
1499 | kfree(tmpdata, M_TEMP); | |
1500 | return (error); | |
1501 | } | |
1502 | ||
f606e7b4 MD |
1503 | /* |
1504 | * Generate a note sub-structure. | |
1505 | * | |
1506 | * NOTE: 4-byte alignment. | |
1507 | */ | |
55e44363 | 1508 | static int |
315b8b8b | 1509 | __elfN(putnote)(elf_buf_t target, const char *name, int type, |
55e44363 | 1510 | const void *desc, size_t descsz) |
984263bc | 1511 | { |
55e44363 MD |
1512 | int error = 0; |
1513 | char *dst; | |
984263bc MD |
1514 | Elf_Note note; |
1515 | ||
1516 | note.n_namesz = strlen(name) + 1; | |
1517 | note.n_descsz = descsz; | |
1518 | note.n_type = type; | |
55e44363 | 1519 | dst = target_reserve(target, sizeof(note), &error); |
984263bc | 1520 | if (dst != NULL) |
55e44363 MD |
1521 | bcopy(¬e, dst, sizeof note); |
1522 | dst = target_reserve(target, note.n_namesz, &error); | |
984263bc | 1523 | if (dst != NULL) |
55e44363 | 1524 | bcopy(name, dst, note.n_namesz); |
f606e7b4 | 1525 | target->off = roundup2(target->off, sizeof(Elf_Word)); |
55e44363 | 1526 | dst = target_reserve(target, note.n_descsz, &error); |
984263bc | 1527 | if (dst != NULL) |
55e44363 | 1528 | bcopy(desc, dst, note.n_descsz); |
f606e7b4 | 1529 | target->off = roundup2(target->off, sizeof(Elf_Word)); |
315b8b8b | 1530 | return (error); |
984263bc MD |
1531 | } |
1532 | ||
731100e5 | 1533 | |
55e44363 | 1534 | static int |
7d20a8ff | 1535 | elf_putsigs(struct lwp *lp, elf_buf_t target) |
731100e5 | 1536 | { |
7d20a8ff SS |
1537 | /* XXX lwp handle more than one lwp */ |
1538 | struct proc *p = lp->lwp_proc; | |
55e44363 | 1539 | int error = 0; |
731100e5 | 1540 | struct ckpt_siginfo *csi; |
55e44363 MD |
1541 | |
1542 | csi = target_reserve(target, sizeof(struct ckpt_siginfo), &error); | |
1543 | if (csi) { | |
1544 | csi->csi_ckptpisz = sizeof(struct ckpt_siginfo); | |
b1b4e5a6 | 1545 | bcopy(p->p_sigacts, &csi->csi_sigacts, sizeof(*p->p_sigacts)); |
55e44363 | 1546 | bcopy(&p->p_realtimer, &csi->csi_itimerval, sizeof(struct itimerval)); |
7d20a8ff | 1547 | bcopy(&lp->lwp_sigmask, &csi->csi_sigmask, |
08f2f1bb | 1548 | sizeof(sigset_t)); |
55e44363 | 1549 | csi->csi_sigparent = p->p_sigparent; |
731100e5 | 1550 | } |
315b8b8b | 1551 | return (error); |
731100e5 MD |
1552 | } |
1553 | ||
55e44363 | 1554 | static int |
ff7a3478 | 1555 | elf_putfiles(struct proc *p, elf_buf_t target, struct file *ckfp) |
731100e5 | 1556 | { |
35949930 | 1557 | thread_t td = curthread; |
55e44363 MD |
1558 | int error = 0; |
1559 | int i; | |
be7d8f4f | 1560 | struct ckpt_filehdr *cfh = NULL; |
731100e5 MD |
1561 | struct ckpt_fileinfo *cfi; |
1562 | struct file *fp; | |
1563 | struct vnode *vp; | |
35949930 | 1564 | |
731100e5 MD |
1565 | /* |
1566 | * the duplicated loop is gross, but it was the only way | |
1567 | * to eliminate uninitialized variable warnings | |
1568 | */ | |
55e44363 MD |
1569 | cfh = target_reserve(target, sizeof(struct ckpt_filehdr), &error); |
1570 | if (cfh) { | |
731100e5 | 1571 | cfh->cfh_nfiles = 0; |
be7d8f4f | 1572 | } |
be7d8f4f MD |
1573 | |
1574 | /* | |
4f12bfd3 | 1575 | * ignore STDIN/STDERR/STDOUT. |
be7d8f4f | 1576 | */ |
35949930 | 1577 | KKASSERT(td->td_proc == p); |
55e44363 | 1578 | for (i = 3; error == 0 && i < p->p_fd->fd_nfiles; i++) { |
35949930 | 1579 | fp = holdfp(td, i, -1); |
228b401d | 1580 | if (fp == NULL) |
be7d8f4f | 1581 | continue; |
4f12bfd3 MD |
1582 | /* |
1583 | * XXX Only checkpoint vnodes for now. | |
1584 | */ | |
228b401d MD |
1585 | if (fp->f_type != DTYPE_VNODE) { |
1586 | fdrop(fp); | |
be7d8f4f | 1587 | continue; |
228b401d | 1588 | } |
4f12bfd3 MD |
1589 | cfi = target_reserve(target, sizeof(struct ckpt_fileinfo), |
1590 | &error); | |
228b401d MD |
1591 | if (cfi == NULL) { |
1592 | fdrop(fp); | |
4f12bfd3 | 1593 | continue; |
228b401d | 1594 | } |
4f12bfd3 MD |
1595 | cfi->cfi_index = -1; |
1596 | cfi->cfi_type = fp->f_type; | |
1597 | cfi->cfi_flags = fp->f_flag; | |
1598 | cfi->cfi_offset = fp->f_offset; | |
ff7a3478 MD |
1599 | cfi->cfi_ckflags = 0; |
1600 | ||
1601 | if (fp == ckfp) | |
1602 | cfi->cfi_ckflags |= CKFIF_ISCKPTFD; | |
4f12bfd3 MD |
1603 | /* f_count and f_msgcount should not be saved/restored */ |
1604 | /* XXX save cred info */ | |
1605 | ||
1606 | switch(fp->f_type) { | |
1607 | case DTYPE_VNODE: | |
9ce25870 | 1608 | vp = (struct vnode *)fp->f_data; |
55e44363 MD |
1609 | /* |
1610 | * it looks like a bug in ptrace is marking | |
9ce25870 MD |
1611 | * a non-vnode as a vnode - until we find the |
1612 | * root cause this will at least prevent | |
1613 | * further panics from truss | |
1614 | */ | |
55e44363 | 1615 | if (vp == NULL || vp->v_mount == NULL) |
4f12bfd3 | 1616 | break; |
be7d8f4f | 1617 | cfh->cfh_nfiles++; |
be7d8f4f | 1618 | cfi->cfi_index = i; |
be7d8f4f MD |
1619 | cfi->cfi_fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid; |
1620 | error = VFS_VPTOFH(vp, &cfi->cfi_fh.fh_fid); | |
4f12bfd3 MD |
1621 | break; |
1622 | default: | |
1623 | break; | |
731100e5 | 1624 | } |
228b401d | 1625 | fdrop(fp); |
731100e5 | 1626 | } |
315b8b8b | 1627 | return (error); |
731100e5 MD |
1628 | } |
1629 | ||
55e44363 MD |
1630 | static int |
1631 | elf_puttextvp(struct proc *p, elf_buf_t target) | |
731100e5 | 1632 | { |
55e44363 | 1633 | int error = 0; |
731100e5 | 1634 | int *vn_count; |
5a5c6af6 MD |
1635 | struct fp_closure fpc; |
1636 | struct ckpt_vminfo *vminfo; | |
1637 | ||
55e44363 MD |
1638 | vminfo = target_reserve(target, sizeof(struct ckpt_vminfo), &error); |
1639 | if (vminfo != NULL) { | |
4b566556 MD |
1640 | vminfo->cvm_dsize = btoc(p->p_vmspace->vm_dsize); /* pages */ |
1641 | vminfo->cvm_tsize = btoc(p->p_vmspace->vm_tsize); /* pages */ | |
5a5c6af6 MD |
1642 | vminfo->cvm_daddr = p->p_vmspace->vm_daddr; |
1643 | vminfo->cvm_taddr = p->p_vmspace->vm_taddr; | |
731100e5 | 1644 | } |
5a5c6af6 | 1645 | |
731100e5 | 1646 | fpc.count = 0; |
55e44363 MD |
1647 | vn_count = target_reserve(target, sizeof(int), &error); |
1648 | if (target->buf != NULL) { | |
1649 | fpc.vnh = (struct vn_hdr *)(target->buf + target->off); | |
1650 | fpc.vnh_max = fpc.vnh + | |
1651 | (target->off_max - target->off) / sizeof(struct vn_hdr); | |
1652 | error = each_segment(p, cb_put_fp, &fpc, 0); | |
1653 | if (vn_count) | |
1654 | *vn_count = fpc.count; | |
5a5c6af6 | 1655 | } else { |
55e44363 | 1656 | error = each_segment(p, cb_fpcount_segment, &fpc.count, 0); |
5a5c6af6 | 1657 | } |
55e44363 | 1658 | target->off += fpc.count * sizeof(struct vn_hdr); |
315b8b8b | 1659 | return (error); |
731100e5 MD |
1660 | } |
1661 | ||
315b8b8b JM |
1662 | /* |
1663 | * Try to find the appropriate ABI-note section for checknote, | |
9d35f29f | 1664 | * The entire image is searched if necessary, not only the first page. |
315b8b8b JM |
1665 | */ |
1666 | static boolean_t | |
1667 | __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote, | |
1668 | int32_t *osrel) | |
1669 | { | |
9d35f29f | 1670 | boolean_t valid_note_found; |
315b8b8b JM |
1671 | const Elf_Phdr *phdr, *pnote; |
1672 | const Elf_Ehdr *hdr; | |
315b8b8b JM |
1673 | int i; |
1674 | ||
9d35f29f | 1675 | valid_note_found = FALSE; |
315b8b8b JM |
1676 | hdr = (const Elf_Ehdr *)imgp->image_header; |
1677 | phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff); | |
1678 | ||
1679 | for (i = 0; i < hdr->e_phnum; i++) { | |
1680 | if (phdr[i].p_type == PT_NOTE) { | |
1681 | pnote = &phdr[i]; | |
9d35f29f JM |
1682 | valid_note_found = check_PT_NOTE (imgp, checknote, |
1683 | osrel, pnote); | |
1684 | if (valid_note_found) | |
1685 | break; | |
315b8b8b JM |
1686 | } |
1687 | } | |
9d35f29f JM |
1688 | return valid_note_found; |
1689 | } | |
315b8b8b | 1690 | |
d48ef01c MD |
1691 | /* |
1692 | * Be careful not to create new overflow conditions when checking | |
1693 | * for overflow. | |
1694 | */ | |
1695 | static boolean_t | |
1696 | note_overflow(const Elf_Note *note, size_t maxsize) | |
1697 | { | |
1698 | if (sizeof(*note) > maxsize) | |
1699 | return TRUE; | |
1700 | if (note->n_namesz > maxsize - sizeof(*note)) | |
1701 | return TRUE; | |
1702 | return FALSE; | |
1703 | } | |
1704 | ||
1705 | static boolean_t | |
1706 | hdr_overflow(__ElfN(Off) off_beg, __ElfN(Size) size) | |
1707 | { | |
1708 | __ElfN(Off) off_end; | |
1709 | ||
1710 | off_end = off_beg + size; | |
1711 | if (off_end < off_beg) | |
1712 | return TRUE; | |
1713 | return FALSE; | |
1714 | } | |
1715 | ||
9d35f29f JM |
/*
 * Search one PT_NOTE segment for a note matching 'checknote'.
 *
 * If the whole segment lies within the first page it is read directly
 * from imgp->image_header.  Otherwise the containing page (or, for a
 * note spanning a page boundary, a kmalloc'd copy assembled from two
 * pages) is mapped via exec_map_page().  On a match, optionally
 * translates the osrel value through checknote->trans_osrel.
 *
 * Returns TRUE if a matching note was found, FALSE otherwise.
 */
static boolean_t
check_PT_NOTE(struct image_params *imgp, Elf_Brandnote *checknote,
    int32_t *osrel, const Elf_Phdr * pnote)
{
	boolean_t limited_to_first_page;
	boolean_t found = FALSE;
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	__ElfN(Off) noteloc, firstloc;
	__ElfN(Size) notesz, firstlen, endbyte;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	const char *page;
	char *data = NULL;	/* non-NULL only for the cross-page copy */
	int n;

	/* reject a p_offset/p_filesz pair that wraps */
	if (hdr_overflow(pnote->p_offset, pnote->p_filesz))
		return (FALSE);
	notesz = pnote->p_filesz;
	noteloc = pnote->p_offset;
	endbyte = noteloc + notesz;
	limited_to_first_page = noteloc < PAGE_SIZE && endbyte < PAGE_SIZE;

	if (limited_to_first_page) {
		/* already resident in the image header's first page */
		note = (const Elf_Note *)(imgp->image_header + noteloc);
		note_end = (const Elf_Note *)(imgp->image_header + endbyte);
		note0 = note;
	} else {
		firstloc = noteloc & PAGE_MASK;	/* offset within its page */
		firstlen = PAGE_SIZE - firstloc;
		/* a segment larger than one page is not searched */
		if (notesz < sizeof(Elf_Note) || notesz > PAGE_SIZE)
			return (FALSE);

		lwb = &lwb_cache;
		if (exec_map_page(imgp, noteloc >> PAGE_SHIFT, &lwb, &page))
			return (FALSE);
		if (firstlen < notesz) {	/* crosses page boundary */
			/* assemble a contiguous copy from the two pages */
			data = kmalloc(notesz, M_TEMP, M_WAITOK);
			bcopy(page + firstloc, data, firstlen);

			exec_unmap_page(lwb);
			lwb = &lwb_cache;
			if (exec_map_page(imgp, (noteloc >> PAGE_SHIFT) + 1,
				&lwb, &page)) {
				kfree(data, M_TEMP);
				return (FALSE);
			}
			bcopy(page, data + firstlen, notesz - firstlen);
			note = note0 = (const Elf_Note *)(data);
			note_end = (const Elf_Note *)(data + notesz);
		} else {
			note = note0 = (const Elf_Note *)(page + firstloc);
			note_end = (const Elf_Note *)(page + firstloc +
			    firstlen);
		}
	}

	/*
	 * Walk the notes.  The n < 100 bound guards against a malformed
	 * segment producing an endless/degenerate walk; alignment and
	 * bounds are re-validated on every step.
	 */
	for (n = 0; n < 100 && note >= note0 && note < note_end; n++) {
		if (!aligned(note, Elf32_Addr))
			break;
		if (note_overflow(note, (const char *)note_end -
		    (const char *)note)) {
			break;
		}
		note_name = (const char *)(note + 1);

		if (note->n_namesz == checknote->hdr.n_namesz
		    && note->n_descsz == checknote->hdr.n_descsz
		    && note->n_type == checknote->hdr.n_type
		    && (strncmp(checknote->vendor, note_name,
			checknote->hdr.n_namesz) == 0)) {
			/* Fetch osreldata from ABI.note-tag */
			if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
			    checknote->trans_osrel != NULL)
				checknote->trans_osrel(note, osrel);
			found = TRUE;
			break;
		}
		/* advance past name and desc, each padded to 4 bytes */
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
	}

	/* release the mapping/copy taken in the non-first-page path */
	if (!limited_to_first_page) {
		if (data != NULL)
			kfree(data, M_TEMP);
		exec_unmap_page(lwb);
	}
	return (found);
}
731100e5 | 1806 | |
99aed3e4 JM |
1807 | /* |
1808 | * The interpreter program header may be located beyond the first page, so | |
1809 | * regardless of its location, a copy of the interpreter path is created so | |
1810 | * that it may be safely referenced by the calling function in all case. The | |
1811 | * memory is allocated by calling function, and the copying is done here. | |
1812 | */ | |
/*
 * Copy the PT_INTERP path described by 'pinterpreter' into the
 * caller-supplied buffer 'data' (which must hold p_filesz bytes).
 * Uses imgp->image_header directly when the path lies in the first
 * page; otherwise maps the page(s) via exec_map_page().
 *
 * Returns FALSE (result_success) on success, TRUE (result_failure)
 * on overflow or mapping failure — note the inverted boolean_t
 * convention used here.
 */
static boolean_t
extract_interpreter(struct image_params *imgp, const Elf_Phdr *pinterpreter,
    char *data)
{
	boolean_t limited_to_first_page;
	const boolean_t result_success = FALSE;
	const boolean_t result_failure = TRUE;
	__ElfN(Off) pathloc, firstloc;
	__ElfN(Size) pathsz, firstlen, endbyte;
	struct lwbuf *lwb;
	struct lwbuf lwb_cache;
	const char *page;

	/* reject a p_offset/p_filesz pair that wraps */
	if (hdr_overflow(pinterpreter->p_offset, pinterpreter->p_filesz))
		return (result_failure);
	pathsz = pinterpreter->p_filesz;
	pathloc = pinterpreter->p_offset;
	endbyte = pathloc + pathsz;

	limited_to_first_page = pathloc < PAGE_SIZE && endbyte < PAGE_SIZE;
	if (limited_to_first_page) {
		/* path is resident in the already-mapped first page */
		bcopy(imgp->image_header + pathloc, data, pathsz);
		return (result_success);
	}

	firstloc = pathloc & PAGE_MASK;	/* offset within its page */
	firstlen = PAGE_SIZE - firstloc;

	lwb = &lwb_cache;
	if (exec_map_page(imgp, pathloc >> PAGE_SHIFT, &lwb, &page))
		return (result_failure);

	if (firstlen < pathsz) {	/* crosses page boundary */
		bcopy(page + firstloc, data, firstlen);

		/* remap to the following page for the remainder */
		exec_unmap_page(lwb);
		lwb = &lwb_cache;
		if (exec_map_page(imgp, (pathloc >> PAGE_SHIFT) + 1, &lwb,
		    &page))
			return (result_failure);
		bcopy(page, data + firstlen, pathsz - firstlen);
	} else
		bcopy(page + firstloc, data, pathsz);

	exec_unmap_page(lwb);
	return (result_success);
}
9d35f29f | 1860 | |
f2000797 JM |
1861 | static boolean_t |
1862 | __elfN(bsd_trans_osrel)(const Elf_Note *note, int32_t *osrel) | |
1863 | { | |
1864 | uintptr_t p; | |
1865 | ||
1866 | p = (uintptr_t)(note + 1); | |
1867 | p += roundup2(note->n_namesz, sizeof(Elf32_Addr)); | |
1868 | *osrel = *(const int32_t *)(p); | |
1869 | ||
1870 | return (TRUE); | |
1871 | } | |
1872 | ||
984263bc MD |
1873 | /* |
1874 | * Tell kern_execve.c about it, with a little help from the linker. | |
1875 | */ | |
315b8b8b JM |
1876 | #if defined(__x86_64__) |
1877 | static struct execsw elf_execsw = {exec_elf64_imgact, "ELF64"}; | |
1878 | EXEC_SET_ORDERED(elf64, elf_execsw, SI_ORDER_FIRST); | |
1879 | #else /* i386 assumed */ | |
1880 | static struct execsw elf_execsw = {exec_elf32_imgact, "ELF32"}; | |
1881 | EXEC_SET_ORDERED(elf32, elf_execsw, SI_ORDER_FIRST); | |
1882 | #endif | |
565fefef JM |
1883 | |
1884 | static vm_prot_t | |
1885 | __elfN(trans_prot)(Elf_Word flags) | |
1886 | { | |
1887 | vm_prot_t prot; | |
1888 | ||
1889 | prot = 0; | |
1890 | if (flags & PF_X) | |
1891 | prot |= VM_PROT_EXECUTE; | |
1892 | if (flags & PF_W) | |
1893 | prot |= VM_PROT_WRITE; | |
1894 | if (flags & PF_R) | |
1895 | prot |= VM_PROT_READ; | |
1896 | return (prot); | |
1897 | } | |
1898 | ||
1899 | static Elf_Word | |
1900 | __elfN(untrans_prot)(vm_prot_t prot) | |
1901 | { | |
1902 | Elf_Word flags; | |
1903 | ||
1904 | flags = 0; | |
1905 | if (prot & VM_PROT_EXECUTE) | |
1906 | flags |= PF_X; | |
1907 | if (prot & VM_PROT_READ) | |
1908 | flags |= PF_R; | |
1909 | if (prot & VM_PROT_WRITE) | |
1910 | flags |= PF_W; | |
1911 | return (flags); | |
1912 | } | |
9f95d105 MD |
1913 | |
1914 | static u_long | |
1915 | pie_base_hint(struct proc *p) | |
1916 | { | |
1917 | u_long base; | |
1918 | ||
1919 | if (elf_pie_base_mmap) | |
1920 | base = vm_map_hint(p, 0, VM_PROT_READ | VM_PROT_EXECUTE); | |
1921 | else | |
1922 | base = ET_DYN_LOAD_ADDR; | |
1923 | return base; | |
1924 | } |