/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *);
static int convert_prot(int);   /* Elf flags -> mmap protection */
static int convert_flags(int);  /* Elf flags -> mmap flags */

/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
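/*
 * A minimal usage sketch (hypothetical caller; the real callers in rtld
 * add name caching, reference counting, and richer error handling):
 *
 *      struct stat sb;
 *      int fd = open(path, O_RDONLY);
 *      if (fd != -1 && fstat(fd, &sb) == 0)
 *              obj = map_object(fd, path, &sb);
 */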
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Addr note_start;
    Elf_Addr note_end;
    Elf_Addr relro_page;
    size_t relro_size;
    int stack_flags;

    hdr = get_elf_header(fd, path);
    if (hdr == NULL)
        return NULL;

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
    phsize = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);

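    /*
     * Default the stack to read/write plus the platform's default
     * executability; a PT_GNU_STACK header, if present, overrides
     * this in the scan below.
     */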
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
    while (phdr < phlimit) {
        switch (phdr->p_type) {

        case PT_INTERP:
            phinterp = phdr;
            break;

        case PT_LOAD:
            segs[++nsegs] = phdr;
            if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
                _rtld_error("%s: PT_LOAD segment %d not page-aligned",
                    path, nsegs);
                return NULL;
            }
            break;

        case PT_PHDR:
            phdr_vaddr = phdr->p_vaddr;
            phsize = phdr->p_memsz;
            break;

        case PT_DYNAMIC:
            phdyn = phdr;
            break;

        case PT_TLS:
            phtls = phdr;
            break;

        case PT_GNU_STACK:
            stack_flags = phdr->p_flags;
            break;

        case PT_GNU_RELRO:
            relro_page = phdr->p_vaddr;
            relro_size = phdr->p_memsz;
            break;

        case PT_NOTE:
            if (phdr->p_offset > PAGE_SIZE ||
              phdr->p_offset + phdr->p_filesz > PAGE_SIZE)
                break;
            note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
            note_end = note_start + phdr->p_filesz;
            break;
        }

        ++phdr;
    }

    if (phdyn == NULL) {
        _rtld_error("%s: object is not dynamically-linked", path);
        return NULL;
    }

    if (nsegs < 0) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        return NULL;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = hdr->e_type == ET_EXEC ? (caddr_t) base_vaddr : NULL;

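    /*
     * Worked example of the arithmetic above, assuming PAGE_SIZE is 4096:
     * a first segment at p_vaddr 0x0 and a last segment with p_vaddr
     * 0x5000 and p_memsz 0x1234 give base_vaddr = 0x0 and
     * base_vlimit = round_page(0x6234) = 0x7000, so mapsize is 0x7000.
     */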
    mapbase = mmap(base_addr, mapsize, PROT_NONE, MAP_ANON | MAP_PRIVATE |
      MAP_NOCORE, -1, 0);
    if (mapbase == (caddr_t) -1) {
        _rtld_error("%s: mmap of entire address space failed: %s",
            path, rtld_strerror(errno));
        return NULL;
    }
    if (base_addr != NULL && mapbase != base_addr) {
        _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
            path, base_addr, mapbase);
        munmap(mapbase, mapsize);
        return NULL;
    }

    for (i = 0; i <= nsegs; i++) {
        /* Overlay the segment onto the proper region. */
        data_offset = trunc_page(segs[i]->p_offset);
        data_vaddr = trunc_page(segs[i]->p_vaddr);
        data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
        data_addr = mapbase + (data_vaddr - base_vaddr);
        data_prot = convert_prot(segs[i]->p_flags);
        data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
        if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
          data_flags, fd, data_offset) == (caddr_t) -1) {
            _rtld_error("%s: mmap of data failed: %s", path,
                rtld_strerror(errno));
            return NULL;
        }

        if (segs[i]->p_filesz != segs[i]->p_memsz) {

            /* Clear any BSS in the last page of the segment. */
            clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
            clear_addr = mapbase + (clear_vaddr - base_vaddr);
            clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

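            /*
             * clear_addr is where the zeroing starts; clear_page is the
             * start of the page containing it, which is the granularity
             * that mprotect() and madvise() operate on.
             */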
            if ((nclear = data_vlimit - clear_vaddr) > 0) {
                /* Make sure the end of the segment is writable */
                if ((data_prot & PROT_WRITE) == 0 && -1 ==
                  mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
                    _rtld_error("%s: mprotect failed: %s", path,
                        rtld_strerror(errno));
                    return NULL;
                }

                memset(clear_addr, 0, nclear);

                /*
                 * Reset the data protection back and enable the segment
                 * to be coredumped, since we modified it.
                 */
                if ((data_prot & PROT_WRITE) == 0) {
                    madvise(clear_page, PAGE_SIZE, MADV_CORE);
                    mprotect(clear_page, PAGE_SIZE, data_prot);
                }
            }

            /* Overlay the BSS segment onto the proper region. */
            bss_vaddr = data_vlimit;
            bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
            bss_addr = mapbase + (bss_vaddr - base_vaddr);
            if (bss_vlimit > bss_vaddr) {       /* There is something to do */
                if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                  data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
                    _rtld_error("%s: mmap of bss failed: %s", path,
                        rtld_strerror(errno));
                    return NULL;
                }
            }
        }

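        /*
         * If the file has no PT_PHDR entry, try to locate the program
         * header table within this segment's file-backed mapping, so
         * that obj->phdr can point into the image rather than a copy.
         */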
        if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
          (data_vlimit - data_vaddr + data_offset) >=
          (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
            phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
        }
    }

    obj = obj_new();
    if (sb != NULL) {
        obj->dev = sb->st_dev;
        obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
      base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
        obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
        obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
    } else {
        obj->phdr = malloc(phsize);
        if (obj->phdr == NULL) {
            obj_free(obj);
            _rtld_error("%s: cannot allocate program header", path);
            return NULL;
        }
        memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
        obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
        obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);

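    /*
     * If the object has a TLS segment, record its parameters and assign
     * it the next free index in the thread-local storage table.
     */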
    if (phtls != NULL) {
        tls_dtv_generation++;
        obj->tlsindex = ++tls_max_index;
        obj->tlssize = phtls->p_memsz;
        obj->tlsalign = phtls->p_align;
        obj->tlsinitsize = phtls->p_filesz;
        obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;

    obj->relro_page = obj->relocbase + trunc_page(relro_page);
    obj->relro_size = round_page(relro_size);

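    /*
     * The RELRO region is page-rounded here so that, after relocation
     * processing, it can be mprotect()ed read-only a whole page at a time.
     */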
    if (note_start < note_end)
        digest_notes(obj, note_start, note_end);

    return obj;
}

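/*
 * Read and do initial validation of the ELF header.  Returns a pointer
 * into a static buffer holding the first page of the file, or NULL on
 * error.  The program header table is expected to lie entirely within
 * that first page (see the checks below).
 */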
static Elf_Ehdr *
get_elf_header(int fd, const char *path)
{
    static union {
        Elf_Ehdr hdr;
        char buf[PAGE_SIZE];
    } u;
    ssize_t nbytes;

    if ((nbytes = pread(fd, u.buf, PAGE_SIZE, 0)) == -1) {
        _rtld_error("%s: read error: %s", path, rtld_strerror(errno));
        return NULL;
    }

    /* Make sure the file is valid */
    if (nbytes < (ssize_t)sizeof(Elf_Ehdr) || !IS_ELF(u.hdr)) {
        _rtld_error("%s: invalid file format", path);
        return NULL;
    }

    if (u.hdr.e_ident[EI_CLASS] != ELF_TARG_CLASS
      || u.hdr.e_ident[EI_DATA] != ELF_TARG_DATA) {
        _rtld_error("%s: unsupported file layout", path);
        return NULL;
    }

    if (u.hdr.e_ident[EI_VERSION] != EV_CURRENT
      || u.hdr.e_version != EV_CURRENT) {
        _rtld_error("%s: unsupported file version", path);
        return NULL;
    }

    if (u.hdr.e_type != ET_EXEC && u.hdr.e_type != ET_DYN) {
        _rtld_error("%s: unsupported file type", path);
        return NULL;
    }

    if (u.hdr.e_machine != ELF_TARG_MACH) {
        _rtld_error("%s: unsupported machine", path);
        return NULL;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice.  And, it simplifies things considerably.
     */
    if (u.hdr.e_phentsize != sizeof(Elf_Phdr)) {
        _rtld_error(
          "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
        return NULL;
    }

    if (u.hdr.e_phoff + u.hdr.e_phnum * sizeof(Elf_Phdr) > (size_t)nbytes) {
        _rtld_error("%s: program header too large", path);
        return NULL;
    }

    return (&u.hdr);
}

void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_done)
        free_tls_offset(obj);

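    /*
     * Free the lists hanging off the object: needed entries, alternate
     * names, and the DAG bookkeeping lists.
     */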
    while (obj->needed != NULL) {
        Needed_Entry *needed = obj->needed;
        obj->needed = needed->next;
        free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
        Name_Entry *entry = STAILQ_FIRST(&obj->names);
        STAILQ_REMOVE_HEAD(&obj->names, link);
        free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
        elm = STAILQ_FIRST(&obj->dldags);
        STAILQ_REMOVE_HEAD(&obj->dldags, link);
        free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
        elm = STAILQ_FIRST(&obj->dagmembers);
        STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
        free(elm);
    }
    if (obj->vertab)
        free(obj->vertab);
    if (obj->origin_path)
        free(obj->origin_path);
    if (obj->z_origin)
        free(obj->rpath);
    if (obj->priv)
        free(obj->priv);
    if (obj->path)
        free(obj->path);
    if (obj->phdr_alloc)
        free((void *)obj->phdr);
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    STAILQ_INIT(&obj->names);

    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
static int
convert_prot(int elfflags)
{
    int prot = 0;

    if (elfflags & PF_R)
        prot |= PROT_READ;
    if (elfflags & PF_W)
        prot |= PROT_WRITE;
    if (elfflags & PF_X)
        prot |= PROT_EXEC;
    return prot;
}

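/*
 * For example, a typical text segment with PF_R|PF_X converts to
 * PROT_READ|PROT_EXEC, and a data segment with PF_R|PF_W converts to
 * PROT_READ|PROT_WRITE.
 */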
static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE;    /* All mappings are private */

    /*
     * Readonly mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
        flags |= MAP_NOCORE;
    return flags;
}