2 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
3 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
4 * Copyright 2009, 2010, 2011 Konstantin Belousov <kib@FreeBSD.ORG>.
5 * Copyright 2012 John Marino <draco@marino.st>.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Dynamic linker for ELF.
34 * John Polstra <jdp@polstra.com>.
38 #error "GCC is needed to compile this file"
41 #include <sys/param.h>
42 #include <sys/mount.h>
45 #include <sys/sysctl.h>
47 #include <sys/utsname.h>
48 #include <sys/ktrace.h>
49 #include <sys/resident.h>
52 #include <machine/tls.h>
67 #include "rtld_printf.h"
/*
 * NOTE(review): throughout this chunk the original file's line numbers have
 * been fused into the start of each source line and many interior lines are
 * missing — this text will not compile as-is and needs to be re-synced with
 * the pristine rtld.c before any code change is attempted.
 */
/* Fallback path of the dynamic linker itself, used when the executable
 * does not name an interpreter explicitly. */
70 #define PATH_RTLD "/usr/libexec/ld-elf.so.2"
/* Number of LD_* environment lookups cached in ld_ary[] below. */
71 #define LD_ARY_CACHE 16
/* Generic function pointer used for the exports[] table and for the
 * program entry point returned by _rtld(). */
74 typedef void (*func_ptr_type)();
/* Callback type for path_enumerate(): invoked once per path element. */
75 typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);
78 * Function declarations.
80 static const char *_getenv_ld(const char *id);
81 static void die(void) __dead2;
/* Two-phase DYNAMIC digestion: phase 1 scans the tags, phase 2 resolves
 * the string-table-relative entries (rpath/soname/runpath). */
82 static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
83 const Elf_Dyn **, const Elf_Dyn **);
84 static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
86 static void digest_dynamic(Obj_Entry *, int);
87 static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
88 static Obj_Entry *dlcheck(void *);
89 static Obj_Entry *dlopen_object(const char *name, Obj_Entry *refobj,
90 int lo_flags, int mode);
91 static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
92 static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
93 static bool donelist_check(DoneList *, const Obj_Entry *);
94 static void errmsg_restore(char *);
95 static char *errmsg_save(void);
96 static void *fill_search_info(const char *, size_t, void *);
97 static char *find_library(const char *, const Obj_Entry *);
98 static const char *gethints(const Obj_Entry *);
99 static void init_dag(Obj_Entry *);
100 static void init_rtld(caddr_t, Elf_Auxinfo **);
101 static void initlist_add_neededs(Needed_Entry *, Objlist *);
102 static void initlist_add_objects(Obj_Entry *, Obj_Entry **, Objlist *);
103 static bool is_exported(const Elf_Sym *);
104 static void linkmap_add(Obj_Entry *);
105 static void linkmap_delete(Obj_Entry *);
106 static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
107 static void unload_filtees(Obj_Entry *);
108 static int load_needed_objects(Obj_Entry *, int);
109 static int load_preload_objects(void);
110 static Obj_Entry *load_object(const char *, const Obj_Entry *, int);
111 static void map_stacks_exec(RtldLockState *);
112 static Obj_Entry *obj_from_addr(const void *);
113 static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
114 static void objlist_call_init(Objlist *, RtldLockState *);
115 static void preinitialize_main_object (void);
116 static void objlist_clear(Objlist *);
117 static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
118 static void objlist_init(Objlist *);
119 static void objlist_push_head(Objlist *, Obj_Entry *);
120 static void objlist_push_tail(Objlist *, Obj_Entry *);
121 static void objlist_remove(Objlist *, Obj_Entry *);
122 static void *path_enumerate(const char *, path_enum_proc, void *);
123 static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, RtldLockState *);
124 static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now,
125 RtldLockState *lockstate);
126 static int rtld_dirname(const char *, char *);
127 static int rtld_dirname_abs(const char *, char *);
128 static void rtld_exit(void);
129 static char *search_library_path(const char *, const char *);
130 static const void **get_program_var_addr(const char *, RtldLockState *);
131 static void set_program_var(const char *, const void *);
/* Symbol lookup helpers; the symlook_* family share the SymLook request
 * structure and a DoneList to avoid revisiting objects. */
132 static int symlook_default(SymLook *, const Obj_Entry *refobj);
133 static int symlook_global(SymLook *, DoneList *);
134 static void symlook_init_from_req(SymLook *, const SymLook *);
135 static int symlook_list(SymLook *, const Objlist *, DoneList *);
136 static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
137 static int symlook_obj1(SymLook *, const Obj_Entry *);
138 static int symlook_obj2(SymLook *, const Obj_Entry *);
139 static void trace_loaded_objects(Obj_Entry *);
140 static void unlink_object(Obj_Entry *);
141 static void unload_object(Obj_Entry *);
142 static void unref_dag(Obj_Entry *);
143 static void ref_dag(Obj_Entry *);
/* $ORIGIN / $OSNAME / $OSREL / $PLATFORM substitution in search paths. */
144 static int origin_subst_one(char **, const char *, const char *,
145 const char *, char *);
146 static char *origin_subst(const char *, const char *);
147 static int rtld_verify_versions(const Objlist *);
148 static int rtld_verify_object_versions(Obj_Entry *);
149 static void object_add_name(Obj_Entry *, const char *);
150 static int object_match_name(const Obj_Entry *, const char *);
151 static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
152 static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
153 struct dl_phdr_info *phdr_info);
154 static uint_fast32_t gnu_hash (const char *);
155 static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
156 const unsigned long);
/* Debugger hook: gdb sets a breakpoint here to track load/unload events. */
158 void r_debug_state(struct r_debug *, struct link_map *) __noinline;
163 static char *error_message; /* Message for dlerror(), or NULL */
164 struct r_debug r_debug; /* for GDB; */
165 static bool libmap_disable; /* Disable libmap */
166 static bool ld_loadfltr; /* Immediate filters processing */
167 static char *libmap_override; /* Maps to use in addition to libmap.conf */
168 static bool trust; /* False for setuid and setgid programs */
169 static bool dangerous_ld_env; /* True if environment variables have been
170 used to affect the libraries loaded */
171 static const char *ld_bind_now; /* Environment variable for immediate binding */
172 static const char *ld_debug; /* Environment variable for debugging */
173 static const char *ld_library_path; /* Environment variable for search path */
174 static char *ld_preload; /* Environment variable for libraries to
176 static const char *ld_elf_hints_path; /* Environment variable for alternative hints path */
177 static const char *ld_tracing; /* Called from ldd to print libs */
178 static const char *ld_utrace; /* Use utrace() to log events. */
/* Optional lazy-binding trace hook; a non-zero return from the hook keeps
 * the PLT slot unresolved so the hook fires again (see _rtld_bind). */
179 static int (*rtld_functrace)( /* Optional function call tracing hook */
180 const char *caller_obj,
181 const char *callee_obj,
182 const char *callee_func,
184 static const Obj_Entry *rtld_functrace_obj; /* Object thereof */
185 static Obj_Entry *obj_list; /* Head of linked list of shared objects */
186 static Obj_Entry **obj_tail; /* Link field of last object in list */
187 static Obj_Entry **preload_tail;
188 static Obj_Entry *obj_main; /* The main program shared object */
189 static Obj_Entry obj_rtld; /* The dynamic linker shared object */
190 static unsigned int obj_count; /* Number of objects in obj_list */
/* NOTE(review): comment below duplicates obj_count's; obj_loads presumably
 * counts cumulative load operations — confirm against pristine source. */
191 static unsigned int obj_loads; /* Number of objects in obj_list */
193 static int ld_resident; /* Non-zero if resident */
/* Cache for _getenv_ld() results; see LD_ARY_CACHE. */
194 static const char *ld_ary[LD_ARY_CACHE];
196 static Objlist initlist;
198 static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */
199 STAILQ_HEAD_INITIALIZER(list_global);
200 static Objlist list_main = /* Objects loaded at program startup */
201 STAILQ_HEAD_INITIALIZER(list_main);
202 static Objlist list_fini = /* Objects needing fini() calls */
203 STAILQ_HEAD_INITIALIZER(list_fini);
205 static Elf_Sym sym_zero; /* For resolving undefined weak refs. */
/* Update r_debug state and notify the debugger via the r_debug_state hook. */
207 #define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m);
/* Weak reference: &_DYNAMIC is non-NULL only when rtld itself is dynamic. */
209 extern Elf_Dyn _DYNAMIC;
210 #pragma weak _DYNAMIC
211 #ifndef RTLD_IS_DYNAMIC
212 #define RTLD_IS_DYNAMIC() (&_DYNAMIC != NULL)
215 #ifdef ENABLE_OSRELDATE
/* Default stack protection; may be overridden by AT_STACKPROT in _rtld(). */
219 static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
220 static int max_stack_flags;
223 * These are the functions the dynamic linker exports to application
224 * programs. They are the only symbols the dynamic linker is willing
225 * to export from itself.
227 static func_ptr_type exports[] = {
228 (func_ptr_type) &_rtld_error,
229 (func_ptr_type) &dlclose,
230 (func_ptr_type) &dlerror,
231 (func_ptr_type) &dlopen,
232 (func_ptr_type) &dlfunc,
233 (func_ptr_type) &dlsym,
234 (func_ptr_type) &dlvsym,
235 (func_ptr_type) &dladdr,
236 (func_ptr_type) &dlinfo,
237 (func_ptr_type) &dl_iterate_phdr,
239 (func_ptr_type) &___tls_get_addr,
241 (func_ptr_type) &__tls_get_addr,
242 (func_ptr_type) &__tls_get_addr_tcb,
243 (func_ptr_type) &_rtld_allocate_tls,
244 (func_ptr_type) &_rtld_free_tls,
245 (func_ptr_type) &_rtld_call_init,
246 (func_ptr_type) &_rtld_thread_init,
247 (func_ptr_type) &_rtld_addr_phdr,
248 (func_ptr_type) &_rtld_get_stack_prot,
253 * Global declarations normally provided by crt1. The dynamic linker is
254 * not built with crt1, so we have to provide them ourselves.
260 * Used to pass argc, argv to init functions.
266 * Globals to control TLS allocation.
268 size_t tls_last_offset; /* Static TLS offset of last module */
269 size_t tls_last_size; /* Static TLS size of last module */
270 size_t tls_static_space; /* Static TLS space allocated */
271 int tls_dtv_generation = 1; /* Used to detect when dtv size changes */
272 int tls_max_index = 1; /* Largest module index allocated */
275 * Fill in a DoneList with an allocation large enough to hold all of
276 * the currently-loaded objects. Keep this as a macro since it calls
277 * alloca and we want that to occur within the scope of the caller.
279 #define donelist_init(dlp) \
280 ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \
281 assert((dlp)->objs != NULL), \
282 (dlp)->num_alloc = obj_count, \
/* Event codes recorded through utrace(2) when LD_UTRACE is set. */
285 #define UTRACE_DLOPEN_START 1
286 #define UTRACE_DLOPEN_STOP 2
287 #define UTRACE_DLCLOSE_START 3
288 #define UTRACE_DLCLOSE_STOP 4
289 #define UTRACE_LOAD_OBJECT 5
290 #define UTRACE_UNLOAD_OBJECT 6
291 #define UTRACE_ADD_RUNDEP 7
292 #define UTRACE_PRELOAD_FINISHED 8
293 #define UTRACE_INIT_CALL 9
294 #define UTRACE_FINI_CALL 10
/* Record layout handed to utrace(2); consumers match on the 'RTLD' sig. */
297 char sig[4]; /* 'RTLD' */
300 void *mapbase; /* Used for 'parent' and 'init/fini' */
302 int refcnt; /* Used for 'mode' */
303 char name[MAXPATHLEN];
/* Emit a utrace record only when LD_UTRACE is enabled. */
306 #define LD_UTRACE(e, h, mb, ms, r, n) do { \
307 if (ld_utrace != NULL) \
308 ld_utrace_log(e, h, mb, ms, r, n); \
/*
 * Fill a struct utrace_rtld with the given event data and hand it to
 * utrace(2) so external tools (e.g. ktrace/kdump) can observe rtld events.
 * NOTE(review): the lines populating ut.sig/ut.event/ut.handle/ut.refcnt
 * are missing from this extracted view — confirm against pristine source.
 */
312 ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
313 int refcnt, const char *name)
315 struct utrace_rtld ut;
323 ut.mapbase = mapbase;
324 ut.mapsize = mapsize;
/* Zero the name buffer first so short names don't leak stack bytes. */
326 bzero(ut.name, sizeof(ut.name));
328 strlcpy(ut.name, name, sizeof(ut.name));
329 utrace(&ut, sizeof(ut));
333 * Main entry point for dynamic linking. The first argument is the
334 * stack pointer. The stack is expected to be laid out as described
335 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
336 * Specifically, the stack pointer points to a word containing
337 * ARGC. Following that in the stack is a null-terminated sequence
338 * of pointers to argument strings. Then comes a null-terminated
339 * sequence of pointers to environment strings. Finally, there is a
340 * sequence of "auxiliary vector" entries.
342 * The second argument points to a place to store the dynamic linker's
343 * exit procedure pointer and the third to a place to store the main
346 * The return value is the main program's entry point.
349 _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
351 Elf_Auxinfo *aux_info[AT_COUNT];
359 Objlist_Entry *entry;
362 /* marino: DO NOT MOVE THESE VARIABLES TO _rtld
363 Obj_Entry **preload_tail;
365 from global to here. It will break the DWARF2 unwind scheme.
366 The system compilers were unaffected, but not gcc 4.6
370 * On entry, the dynamic linker itself has not been relocated yet.
371 * Be very careful not to reference any global data until after
372 * init_rtld has returned. It is OK to reference file-scope statics
373 * and string constants, and to call static and global functions.
376 /* Find the auxiliary vector on the stack. */
379 sp += argc + 1; /* Skip over arguments and NULL terminator */
383 * If we aren't already resident we have to dig out some more info.
384 * Note that auxinfo does not exist when we are resident.
386 * I'm not sure about the ld_resident check. It seems to read zero
387 * prior to relocation, which is what we want. When running from a
388 * resident copy everything will be relocated so we are definitely
391 if (ld_resident == 0) {
392 while (*sp++ != 0) /* Skip over environment, and NULL terminator */
394 aux = (Elf_Auxinfo *) sp;
396 /* Digest the auxiliary vector. */
397 for (i = 0; i < AT_COUNT; i++)
399 for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
400 if (auxp->a_type < AT_COUNT)
401 aux_info[auxp->a_type] = auxp;
404 /* Initialize and relocate ourselves. */
405 assert(aux_info[AT_BASE] != NULL);
406 init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);
409 ld_index = 0; /* don't use old env cache in case we are resident */
410 __progname = obj_rtld.path;
411 argv0 = argv[0] != NULL ? argv[0] : "(null)";
/* Setuid/setgid processes are untrusted: LD_* overrides get scrubbed. */
416 trust = !issetugid();
418 ld_bind_now = _getenv_ld("LD_BIND_NOW");
420 * If the process is tainted, then we un-set the dangerous environment
421 * variables. The process will be marked as tainted until setuid(2)
422 * is called. If any child process calls setuid(2) we do not want any
423 * future processes to honor the potentially un-safe variables.
426 if ( unsetenv("LD_DEBUG")
427 || unsetenv("LD_PRELOAD")
428 || unsetenv("LD_LIBRARY_PATH")
429 || unsetenv("LD_ELF_HINTS_PATH")
430 || unsetenv("LD_LIBMAP")
431 || unsetenv("LD_LIBMAP_DISABLE")
432 || unsetenv("LD_LOADFLTR")
434 _rtld_error("environment corrupt; aborting");
/* Cache all LD_* knobs up front (trusted path only reaches here intact). */
438 ld_debug = _getenv_ld("LD_DEBUG");
439 libmap_disable = _getenv_ld("LD_LIBMAP_DISABLE") != NULL;
440 libmap_override = (char *)_getenv_ld("LD_LIBMAP");
441 ld_library_path = _getenv_ld("LD_LIBRARY_PATH");
442 ld_preload = (char *)_getenv_ld("LD_PRELOAD");
443 ld_elf_hints_path = _getenv_ld("LD_ELF_HINTS_PATH");
444 ld_loadfltr = _getenv_ld("LD_LOADFLTR") != NULL;
445 dangerous_ld_env = (ld_library_path != NULL)
446 || (ld_preload != NULL)
447 || (ld_elf_hints_path != NULL)
449 || (libmap_override != NULL)
452 ld_tracing = _getenv_ld("LD_TRACE_LOADED_OBJECTS");
453 ld_utrace = _getenv_ld("LD_UTRACE");
455 if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
456 ld_elf_hints_path = _PATH_ELF_HINTS;
458 if (ld_debug != NULL && *ld_debug != '\0')
460 dbg("%s is initialized, base address = %p", __progname,
461 (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
462 dbg("RTLD dynamic = %p", obj_rtld.dynamic);
463 dbg("RTLD pltgot = %p", obj_rtld.pltgot);
465 dbg("initializing thread locks");
469 * If we are resident we can skip work that we have already done.
470 * Note that the stack is reset and there is no Elf_Auxinfo
471 * when running from a resident image, and the static globals setup
472 * between here and resident_skip will have already been setup.
478 * Load the main program, or process its program header if it is
481 if (aux_info[AT_EXECFD] != NULL) { /* Load the main program. */
482 int fd = aux_info[AT_EXECFD]->a_un.a_val;
483 dbg("loading main program");
484 obj_main = map_object(fd, argv0, NULL);
486 if (obj_main == NULL)
488 max_stack_flags = obj->stack_flags;
489 } else { /* Main program already loaded. */
490 const Elf_Phdr *phdr;
494 dbg("processing main program's program header");
495 assert(aux_info[AT_PHDR] != NULL);
496 phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
497 assert(aux_info[AT_PHNUM] != NULL);
498 phnum = aux_info[AT_PHNUM]->a_un.a_val;
499 assert(aux_info[AT_PHENT] != NULL);
500 assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
501 assert(aux_info[AT_ENTRY] != NULL);
502 entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
503 if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
/* Resolve obj_main->path: prefer the kernel-supplied AT_EXECPATH,
 * fall back to realpath()/cwd heuristics on argv[0]. */
507 char buf[MAXPATHLEN];
508 if (aux_info[AT_EXECPATH] != NULL) {
511 kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
512 dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
513 if (kexecpath[0] == '/')
514 obj_main->path = kexecpath;
515 else if (getcwd(buf, sizeof(buf)) == NULL ||
516 strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
517 strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
518 obj_main->path = xstrdup(argv0);
520 obj_main->path = xstrdup(buf);
522 char resolved[MAXPATHLEN];
523 dbg("No AT_EXECPATH");
524 if (argv0[0] == '/') {
525 if (realpath(argv0, resolved) != NULL)
526 obj_main->path = xstrdup(resolved);
528 obj_main->path = xstrdup(argv0);
530 if (getcwd(buf, sizeof(buf)) != NULL
531 && strlcat(buf, "/", sizeof(buf)) < sizeof(buf)
532 && strlcat(buf, argv0, sizeof (buf)) < sizeof(buf)
533 && access(buf, R_OK) == 0
534 && realpath(buf, resolved) != NULL)
535 obj_main->path = xstrdup(resolved);
537 obj_main->path = xstrdup(argv0);
540 dbg("obj_main path %s", obj_main->path);
541 obj_main->mainprog = true;
/* Kernel-provided stack protection overrides the compiled-in default. */
543 if (aux_info[AT_STACKPROT] != NULL &&
544 aux_info[AT_STACKPROT]->a_un.a_val != 0)
545 stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;
548 * Get the actual dynamic linker pathname from the executable if
549 * possible. (It should always be possible.) That ensures that
550 * gdb will find the right dynamic linker even if a non-standard
553 if (obj_main->interp != NULL &&
554 strcmp(obj_main->interp, obj_rtld.path) != 0) {
556 obj_rtld.path = xstrdup(obj_main->interp);
557 __progname = obj_rtld.path;
560 digest_dynamic(obj_main, 0);
562 linkmap_add(obj_main);
563 linkmap_add(&obj_rtld);
565 /* Link the main program into the list of objects. */
566 *obj_tail = obj_main;
567 obj_tail = &obj_main->next;
571 /* Initialize a fake symbol for resolving undefined weak references. */
572 sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
573 sym_zero.st_shndx = SHN_UNDEF;
/* st_value chosen so relocbase + st_value == 0 for weak-undef refs. */
574 sym_zero.st_value = -(uintptr_t)obj_main->relocbase;
577 libmap_disable = (bool)lm_init(libmap_override);
579 dbg("loading LD_PRELOAD libraries");
580 if (load_preload_objects() == -1)
582 preload_tail = obj_tail;
584 dbg("loading needed objects");
585 if (load_needed_objects(obj_main, 0) == -1)
588 /* Make a list of all objects loaded at startup. */
589 for (obj = obj_list; obj != NULL; obj = obj->next) {
590 objlist_push_tail(&list_main, obj);
594 dbg("checking for required versions");
595 if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
600 if (ld_tracing) { /* We're done */
601 trace_loaded_objects(obj_main);
605 if (ld_resident) /* XXX clean this up! */
608 if (_getenv_ld("LD_DUMP_REL_PRE") != NULL) {
609 dump_relocations(obj_main);
613 /* setup TLS for main thread */
614 dbg("initializing initial thread local storage");
615 STAILQ_FOREACH(entry, &list_main, link) {
617 * Allocate all the initial objects out of the static TLS
618 * block even if they didn't ask for it.
620 allocate_tls_offset(entry->obj);
623 tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
626 * Do not try to allocate the TLS here, let libc do it itself.
627 * (crt1 for the program will call _init_tls())
630 if (relocate_objects(obj_main,
631 ld_bind_now != NULL && *ld_bind_now != '\0', &obj_rtld, NULL) == -1)
634 dbg("doing copy relocations");
635 if (do_copy_relocations(obj_main) == -1)
/* DragonFly resident-binary support (exec_sys_(un)register). */
640 if (_getenv_ld("LD_RESIDENT_UNREGISTER_NOW")) {
641 if (exec_sys_unregister(-1) < 0) {
642 dbg("exec_sys_unregister failed %d\n", errno);
645 dbg("exec_sys_unregister success\n");
649 if (_getenv_ld("LD_DUMP_REL_POST") != NULL) {
650 dump_relocations(obj_main);
654 dbg("initializing key program variables");
655 set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
656 set_program_var("environ", env);
657 set_program_var("__elf_aux_vector", aux);
659 if (_getenv_ld("LD_RESIDENT_REGISTER_NOW")) {
660 extern void resident_start(void);
662 if (exec_sys_register(resident_start) < 0) {
663 dbg("exec_sys_register failed %d\n", errno);
666 dbg("exec_sys_register success\n");
670 /* Make a list of init functions to call. */
671 objlist_init(&initlist);
672 initlist_add_objects(obj_list, preload_tail, &initlist);
674 r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */
676 map_stacks_exec(NULL);
678 dbg("resolving ifuncs");
679 if (resolve_objects_ifunc(obj_main,
680 ld_bind_now != NULL && *ld_bind_now != '\0', NULL) == -1)
684 * Do NOT call the initlist here, give libc a chance to set up
685 * the initial TLS segment. crt1 will then call _rtld_call_init().
688 dbg("transferring control to program entry point = %p", obj_main->entry);
690 /* Return the exit procedure and the program entry point. */
691 *exit_proc = rtld_exit;
693 return (func_ptr_type) obj_main->entry;
697 * Call the initialization list for dynamically loaded libraries.
698 * (called from crt1.c).
701 _rtld_call_init(void)
703 RtldLockState lockstate;
/* Heuristic: a binary lacking the crt PT_NOTE but carrying a GNU hash
 * table is assumed to be built with the new crt (see comment below). */
706 if (!obj_main->note_present && obj_main->valid_hash_gnu) {
708 * The use of a linker script with a PHDRS directive that does not include
709 * PT_NOTE will block the crt_no_init note. In this case we'll look for the
710 * recently added GNU hash dynamic tag which gets built by default. It is
711 * extremely unlikely to find a pre-3.1 binary without a PT_NOTE header and
712 * a gnu hash tag. If gnu hash found, consider binary to use new crt code.
714 obj_main->crt_no_init = true;
715 dbg("Setting crt_no_init without presence of PT_NOTE header");
718 wlock_acquire(rtld_bind_lock, &lockstate);
719 if (obj_main->crt_no_init) {
720 preinitialize_main_object();
724 * Make sure we don't call the main program's init and fini functions
725 * for binaries linked with old crt1 which calls _init itself.
727 obj_main->init = obj_main->fini = (Elf_Addr)NULL;
728 obj_main->init_array = obj_main->fini_array = (Elf_Addr)NULL;
730 objlist_call_init(&initlist, &lockstate);
731 objlist_clear(&initlist);
732 dbg("loading filtees");
/* Skip obj_list itself (the main program); start at its first successor. */
733 for (obj = obj_list->next; obj != NULL; obj = obj->next) {
734 if (ld_loadfltr || obj->z_loadfltr)
735 load_filtees(obj, 0, &lockstate);
737 lock_release(rtld_bind_lock, &lockstate);
/*
 * Resolve a STT_GNU_IFUNC symbol: build a callable pointer to the
 * resolver function and invoke it; the resolver's return value is the
 * actual implementation address to use.
 */
741 rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
746 ptr = (void *)make_function_pointer(def, obj);
747 target = ((Elf_Addr (*)(void))ptr)();
748 return ((void *)target);
/*
 * Lazy-binding entry point, reached via the PLT trampoline on the first
 * call through an unresolved jump slot. Resolves the symbol, optionally
 * defers patching when a functrace hook wants to keep seeing calls, and
 * returns the value the trampoline should jump to.
 */
752 _rtld_bind(Obj_Entry *obj, Elf_Size reloff, void *stack)
756 const Obj_Entry *defobj;
759 RtldLockState lockstate;
761 rlock_acquire(rtld_bind_lock, &lockstate);
/* A signal while holding the read lock lands us back here; upgrade. */
762 if (sigsetjmp(lockstate.env, 0) != 0)
763 lock_upgrade(rtld_bind_lock, &lockstate);
/* Locate the PLT relocation entry (Rel vs Rela is arch-dependent;
 * the selecting preprocessor lines are missing from this view). */
765 rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
767 rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);
769 where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
770 def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL,
/* ifunc symbols resolve through their resolver function. */
774 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
775 target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
777 target = (Elf_Addr)(defobj->relocbase + def->st_value);
779 dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
780 defobj->strtab + def->st_name, basename(obj->path),
781 (void *)target, basename(defobj->path));
784 * If we have a function call tracing hook, and the
785 * hook would like to keep tracing this one function,
786 * prevent the relocation so we will wind up here
787 * the next time again.
789 * We don't want to functrace calls from the functracer
790 * to avoid recursive loops.
792 if (rtld_functrace != NULL && obj != rtld_functrace_obj) {
793 if (rtld_functrace(obj->path,
795 defobj->strtab + def->st_name,
797 lock_release(rtld_bind_lock, &lockstate);
802 * Write the new contents for the jmpslot. Note that depending on
803 * architecture, the value which we need to return back to the
804 * lazy binding trampoline may or may not be the target
805 * address. The value returned from reloc_jmpslot() is the value
806 * that the trampoline needs.
808 target = reloc_jmpslot(where, target, defobj, obj, rel);
809 lock_release(rtld_bind_lock, &lockstate);
814 * Error reporting function. Use it like printf. If formats the message
815 * into a buffer, and sets things up so that the next call to dlerror()
816 * will return the message.
819 _rtld_error(const char *fmt, ...)
/* Static buffer: the formatted message must outlive this call so
 * dlerror() can return it later; not reentrant by design. */
821 static char buf[512];
825 rtld_vsnprintf(buf, sizeof buf, fmt, ap);
831 * Return a dynamically-allocated copy of the current error message, if any.
/* Caller owns the returned copy (freed by errmsg_restore). */
836 return error_message == NULL ? NULL : xstrdup(error_message);
840 * Restore the current error message from a copy which was previously saved
841 * by errmsg_save(). The copy is freed.
844 errmsg_restore(char *saved_msg)
846 if (saved_msg == NULL)
847 error_message = NULL;
/* Re-install via _rtld_error so error_message points at its buffer;
 * the free(saved_msg) line is missing from this extracted view. */
849 _rtld_error("%s", saved_msg);
/* Return the final path component: text after the last '/', or the whole
 * string when it contains no '/'. Does not modify or copy its argument. */
855 basename(const char *name)
857 const char *p = strrchr(name, '/');
858 return p != NULL ? p + 1 : name;
/* Cached uname(3) results for $OSNAME/$OSREL/$PLATFORM substitution;
 * filled lazily by origin_subst(). */
861 static struct utsname uts;
/*
 * Replace every occurrence of keyword 'kw' (e.g. "$ORIGIN") in 'real'
 * with 'subst', storing a freshly allocated string in *res. 'may_free',
 * when non-NULL, is a previous intermediate result to release.
 * NOTE(review): several lines (loop heads, else branches, return values)
 * are missing from this extracted view — confirm control flow against
 * the pristine source before changing anything here.
 */
864 origin_subst_one(char **res, const char *real, const char *kw, const char *subst,
874 subst_len = kw_len = 0;
/* Compute substitution length lazily, only on the first match. */
878 if (subst_len == 0) {
879 subst_len = strlen(subst);
883 *res = xmalloc(PATH_MAX);
/* Guard the fixed PATH_MAX output buffer before each copy. */
886 if ((res1 - *res) + subst_len + (p1 - p) >= PATH_MAX) {
887 _rtld_error("Substitution of %s in %s cannot be performed",
889 if (may_free != NULL)
894 memcpy(res1, p, p1 - p);
896 memcpy(res1, subst, subst_len);
/* No keyword found: hand back a plain copy of the input. */
901 if (may_free != NULL)
904 *res = xstrdup(real);
908 if (may_free != NULL)
/* Append the tail following the last match. */
910 if (strlcat(res1, p, PATH_MAX - (res1 - *res)) >= PATH_MAX) {
/*
 * Expand all dynamic string tokens in 'real': $ORIGIN (object's own
 * directory), then $OSNAME, $OSREL and $PLATFORM from uname(3), chaining
 * origin_subst_one so each pass frees the previous intermediate result.
 */
920 origin_subst(const char *real, const char *origin_path)
922 char *res1, *res2, *res3, *res4;
/* Populate the uts cache once, on first use. */
924 if (uts.sysname[0] == '\0') {
925 if (uname(&uts) != 0) {
926 _rtld_error("utsname failed: %d", errno);
930 if (!origin_subst_one(&res1, real, "$ORIGIN", origin_path, NULL) ||
931 !origin_subst_one(&res2, res1, "$OSNAME", uts.sysname, res1) ||
932 !origin_subst_one(&res3, res2, "$OSREL", uts.release, res2) ||
933 !origin_subst_one(&res4, res3, "$PLATFORM", uts.machine, res3))
/* die(): print the pending dlerror() message to stderr and abort.
 * (Function header lines are missing from this extracted view.) */
941 const char *msg = dlerror();
945 rtld_fdputstr(STDERR_FILENO, msg);
946 rtld_fdputchar(STDERR_FILENO, '\n');
951 * Process a shared object's DYNAMIC section, and save the important
952 * information in its Obj_Entry structure.
/*
 * Phase 1: walk every Elf_Dyn entry up to DT_NULL and record its payload
 * in obj. String-table-relative entries (DT_RPATH/DT_SONAME/DT_RUNPATH)
 * are only remembered via the dyn_* out-parameters for digest_dynamic2,
 * because DT_STRTAB may not have been seen yet.
 * NOTE(review): most 'case DT_*:' labels are missing from this extracted
 * view; the tag each assignment belongs to is inferred from the field
 * names — verify against the pristine source.
 */
955 digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
956 const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
959 Needed_Entry **needed_tail = &obj->needed;
960 Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
961 Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
962 int plttype = DT_REL;
968 obj->bind_now = false;
969 for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
970 switch (dynp->d_tag) {
973 obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
977 obj->relsize = dynp->d_un.d_val;
981 assert(dynp->d_un.d_val == sizeof(Elf_Rel));
985 obj->pltrel = (const Elf_Rel *)
986 (obj->relocbase + dynp->d_un.d_ptr);
990 obj->pltrelsize = dynp->d_un.d_val;
994 obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
998 obj->relasize = dynp->d_un.d_val;
1002 assert(dynp->d_un.d_val == sizeof(Elf_Rela));
/* DT_PLTREL: whether PLT relocs are Rel or Rela; fixed up after loop. */
1006 plttype = dynp->d_un.d_val;
1007 assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
1011 obj->symtab = (const Elf_Sym *)
1012 (obj->relocbase + dynp->d_un.d_ptr);
1016 assert(dynp->d_un.d_val == sizeof(Elf_Sym));
1020 obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
1024 obj->strsize = dynp->d_un.d_val;
1028 obj->verneed = (const Elf_Verneed *) (obj->relocbase +
1033 obj->verneednum = dynp->d_un.d_val;
1037 obj->verdef = (const Elf_Verdef *) (obj->relocbase +
1042 obj->verdefnum = dynp->d_un.d_val;
1046 obj->versyms = (const Elf_Versym *)(obj->relocbase +
/* DT_HASH: classic SysV hash — header is [nbuckets, nchains]. */
1052 const Elf_Hashelt *hashtab = (const Elf_Hashelt *)
1053 (obj->relocbase + dynp->d_un.d_ptr);
1054 obj->nbuckets = hashtab[0];
1055 obj->nchains = hashtab[1];
1056 obj->buckets = hashtab + 2;
1057 obj->chains = obj->buckets + obj->nbuckets;
1058 obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
1059 obj->buckets != NULL;
/* DT_GNU_HASH: header is [nbuckets, symndx, maskwords, shift2],
 * followed by the Bloom filter, buckets, and chain values. */
1065 const Elf_Hashelt *hashtab = (const Elf_Hashelt *)
1066 (obj->relocbase + dynp->d_un.d_ptr);
1067 obj->nbuckets_gnu = hashtab[0];
1068 obj->symndx_gnu = hashtab[1];
1069 const Elf32_Word nmaskwords = hashtab[2];
1070 const int bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
1071 /* Number of bitmask words is required to be power of 2 */
1072 const bool nmw_power2 = ((nmaskwords & (nmaskwords - 1)) == 0);
1073 obj->maskwords_bm_gnu = nmaskwords - 1;
1074 obj->shift2_gnu = hashtab[3];
1075 obj->bloom_gnu = (Elf_Addr *) (hashtab + 4);
1076 obj->buckets_gnu = hashtab + 4 + bloom_size32;
1077 obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
1079 obj->valid_hash_gnu = nmw_power2 && obj->nbuckets_gnu > 0 &&
1080 obj->buckets_gnu != NULL;
/* DT_NEEDED: append to the needed list; names are strtab offsets,
 * resolved later once DT_STRTAB is known. */
1086 Needed_Entry *nep = NEW(Needed_Entry);
1087 nep->name = dynp->d_un.d_val;
1092 needed_tail = &nep->next;
/* DT_FILTER: standard filtees, same deferred-name scheme. */
1098 Needed_Entry *nep = NEW(Needed_Entry);
1099 nep->name = dynp->d_un.d_val;
1103 *needed_filtees_tail = nep;
1104 needed_filtees_tail = &nep->next;
/* DT_AUXILIARY: auxiliary filtees. */
1110 Needed_Entry *nep = NEW(Needed_Entry);
1111 nep->name = dynp->d_un.d_val;
1115 *needed_aux_filtees_tail = nep;
1116 needed_aux_filtees_tail = &nep->next;
1121 obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
1125 obj->textrel = true;
1129 obj->symbolic = true;
1134 * We have to wait until later to process this, because we
1135 * might not have gotten the address of the string table yet.
1145 *dyn_runpath = dynp;
1149 obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1153 obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1156 case DT_PREINIT_ARRAY:
1157 obj->preinit_array = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1161 obj->init_array = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1165 obj->fini_array = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1168 case DT_PREINIT_ARRAYSZ:
1169 obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1172 case DT_INIT_ARRAYSZ:
1173 obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1176 case DT_FINI_ARRAYSZ:
1177 obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1181 /* XXX - not implemented yet */
/* DT_DEBUG: publish &r_debug to the debugger through the dynamic
 * section (cast drops const to write the in-image entry). */
1183 dbg("Filling in DT_DEBUG entry");
1184 ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
/* DT_FLAGS bitfield. */
1188 if ((dynp->d_un.d_val & DF_ORIGIN) && trust)
1189 obj->z_origin = true;
1190 if (dynp->d_un.d_val & DF_SYMBOLIC)
1191 obj->symbolic = true;
1192 if (dynp->d_un.d_val & DF_TEXTREL)
1193 obj->textrel = true;
1194 if (dynp->d_un.d_val & DF_BIND_NOW)
1195 obj->bind_now = true;
1196 /*if (dynp->d_un.d_val & DF_STATIC_TLS)
/* DT_FLAGS_1 bitfield. */
1201 if (dynp->d_un.d_val & DF_1_NOOPEN)
1202 obj->z_noopen = true;
1203 if ((dynp->d_un.d_val & DF_1_ORIGIN) && trust)
1204 obj->z_origin = true;
1205 /*if (dynp->d_un.d_val & DF_1_GLOBAL)
1207 if (dynp->d_un.d_val & DF_1_BIND_NOW)
1208 obj->bind_now = true;
1209 if (dynp->d_un.d_val & DF_1_NODELETE)
1210 obj->z_nodelete = true;
1211 if (dynp->d_un.d_val & DF_1_LOADFLTR)
1212 obj->z_loadfltr = true;
1213 if (dynp->d_un.d_val & DF_1_NODEFLIB)
1214 obj->z_nodeflib = true;
1219 dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
1226 obj->traced = false;
/* DT_PLTREL said Rela: move the Rel-typed fields over to the
 * Rela-typed ones and clear the Rel bookkeeping. */
1228 if (plttype == DT_RELA) {
1229 obj->pltrela = (const Elf_Rela *) obj->pltrel;
1231 obj->pltrelasize = obj->pltrelsize;
1232 obj->pltrelsize = 0;
1235 /* Determine size of dynsym table (equal to nchains of sysv hash) */
1236 if (obj->valid_hash_sysv)
1237 obj->dynsymcount = obj->nchains;
1238 else if (obj->valid_hash_gnu) {
/* GNU hash has no explicit count: walk every chain to its end
 * (low bit set marks the last entry) and count symbols. */
1239 obj->dynsymcount = 0;
1240 for (Elf32_Word bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
1241 if (obj->buckets_gnu[bkt] == 0)
1243 const Elf32_Word *hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
1246 while ((*hashval++ & 1u) == 0);
1248 obj->dynsymcount += obj->symndx_gnu;
/*
 * Second pass over the dynamic section: now that the string table is
 * known, resolve the DT_RPATH/DT_RUNPATH/DT_SONAME entries stashed by
 * digest_dynamic1.  DT_RUNPATH takes precedence over DT_RPATH (rpath is
 * only consulted when runpath is absent).
 * NOTE(review): this extract is line-sampled; bodies are partial.
 */
1253 digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
1254     const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
/* Lazily build the directory used for $ORIGIN substitution. */
1257     if (obj->z_origin && obj->origin_path == NULL) {
1258 obj->origin_path = xmalloc(PATH_MAX);
1259 if (rtld_dirname_abs(obj->path, obj->origin_path) == -1)
1263     if (dyn_runpath != NULL) {
1264 obj->runpath = (char *)obj->strtab + dyn_runpath->d_un.d_val;
/* Expand any $ORIGIN tokens in the runpath. */
1266 obj->runpath = origin_subst(obj->runpath, obj->origin_path);
1268     else if (dyn_rpath != NULL) {
1269 obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val;
1271 obj->rpath = origin_subst(obj->rpath, obj->origin_path);
/* Register the SONAME as an alias for this object. */
1274     if (dyn_soname != NULL)
1275 object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
/*
 * Digest an object's PT_DYNAMIC segment in two phases: phase 1 records
 * raw entries, phase 2 resolves the string-table-dependent ones.
 */
1279 digest_dynamic(Obj_Entry *obj, int early)
1281     const Elf_Dyn *dyn_rpath;
1282     const Elf_Dyn *dyn_soname;
1283     const Elf_Dyn *dyn_runpath;
1285     digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
1286     digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath);
1290  * Process a shared object's program header.  This is used only for the
1291  * main program, when the kernel has already loaded the main program
1292  * into memory before calling the dynamic linker.  It creates and
1293  * returns an Obj_Entry structure.
1296 digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
1299     const Elf_Phdr *phlimit = phdr + phnum;
1301     Elf_Addr note_start, note_end;
/* First pass: locate PT_PHDR to compute the load bias (relocbase). */
1305     for (ph = phdr; ph < phlimit; ph++) {
1306 if (ph->p_type != PT_PHDR)
1310 obj->phsize = ph->p_memsz;
/* relocbase = actual phdr address minus its link-time vaddr. */
1311 obj->relocbase = (caddr_t)phdr - ph->p_vaddr;
/* Default stack permissions unless PT_GNU_STACK overrides below. */
1315     obj->stack_flags = PF_X | PF_R | PF_W;
/* Second pass: digest each segment type. */
1317     for (ph = phdr; ph < phlimit; ph++) {
1318 switch (ph->p_type) {
1321 obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
1325 if (nsegs == 0) {	/* First load segment */
1326 obj->vaddrbase = trunc_page(ph->p_vaddr);
1327 obj->mapbase = obj->vaddrbase + obj->relocbase;
1328 obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
1330 } else {		/* Last load segment */
1331 obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
1338 obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
/* PT_TLS: record the thread-local storage template. */
1343 obj->tlssize = ph->p_memsz;
1344 obj->tlsalign = ph->p_align;
1345 obj->tlsinitsize = ph->p_filesz;
1346 obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
1350 obj->stack_flags = ph->p_flags;
/* PT_GNU_RELRO: region to re-protect read-only after relocation. */
1354 obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
1355 obj->relro_size = round_page(ph->p_memsz);
1359 obj->note_present = true;
1360 note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
1361 note_end = note_start + ph->p_filesz;
1362 digest_notes(obj, note_start, note_end);
1367 _rtld_error("%s: too few PT_LOAD segments", path);
/*
 * Walk the ELF note records in [note_start, note_end) looking for the
 * vendor's ABI-osrel and crt-no-init notes; record what is found on the
 * object.  Note name/desc fields are 4-byte aligned per the ELF spec.
 */
1376 digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
1378     const Elf_Note *note;
1379     const char *note_name;
1382     for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
1383 note = (const Elf_Note *)((const char *)(note + 1) +
1384 roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1385 roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
/* Only vendor notes with a single int32 payload are of interest. */
1386 if (note->n_namesz != sizeof(NOTE_VENDOR) ||
1387     note->n_descsz != sizeof(int32_t))
1389 if (note->n_type != ABI_NOTETYPE && note->n_type != CRT_NOINIT_NOTETYPE)
/* The name string immediately follows the fixed-size header. */
1391 note_name = (const char *)(note + 1);
1392 if (strncmp(NOTE_VENDOR, note_name, sizeof(NOTE_VENDOR)) != 0)
1394 switch (note->n_type) {
1396     /* DragonFly osrel note */
1397     p = (uintptr_t)(note + 1);
1398     p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
1399     obj->osrel = *(const int32_t *)(p);
1400     dbg("note osrel %d", obj->osrel);
1402 case CRT_NOINIT_NOTETYPE:
1403     /* DragonFly 'crt does not call init' note */
1404     obj->crt_no_init = true;
1405     dbg("note crt_no_init");
/*
 * Validate a dlopen() handle: it must be a live object on the global
 * list with non-zero reference counts.  Sets an rtld error and
 * (presumably) returns NULL on failure — tail of function elided here.
 */
1412 dlcheck(void *handle)
1416     for (obj = obj_list; obj != NULL; obj = obj->next)
1417 if (obj == (Obj_Entry *) handle)
1420     if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
1421 _rtld_error("Invalid shared object handle %p", handle);
1428  * If the given object is already in the donelist, return true.  Otherwise
1429  * add the object to the list and return false.
1432 donelist_check(DoneList *dlp, const Obj_Entry *obj)
/* Linear membership scan; donelists are small (bounded by object count). */
1436     for (i = 0; i < dlp->num_used; i++)
1437 if (dlp->objs[i] == obj)
1440      * Our donelist allocation should always be sufficient.  But if
1441      * our threads locking isn't working properly, more shared objects
1442      * could have been loaded since we allocated the list.  That should
1443      * never happen, but we'll handle it properly just in case it does.
/* Bounds check guards against the race described above. */
1445     if (dlp->num_used < dlp->num_alloc)
1446 dlp->objs[dlp->num_used++] = obj;
1451  * Hash function for symbol table lookup.  Don't even think about changing
1452  * this.  It is specified by the System V ABI.
1455 elf_hash(const char *name)
1457     const unsigned char *p = (const unsigned char *) name;
1458     unsigned long h = 0;
/* Classic SysV ELF hash: shift-accumulate, fold high nibble back in. */
1461     while (*p != '\0') {
1462 h = (h << 4) + *p++;
1463 if ((g = h & 0xf0000000) != 0)
1471  * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
1472  * unsigned in case it's implemented with a wider type.
1474 static uint_fast32_t
1475 gnu_hash (const char *s)
/* DJB hash, seed 5381; update step (h*33+c) elided in this extract. */
1477     uint_fast32_t h = 5381;
1478     for (unsigned char c = *s; c != '\0'; c = *++s)
/* Mask to 32 bits since uint_fast32_t may be wider. */
1480     return h & 0xffffffff;
1484  * Find the library with the given name, and return its full pathname.
1485  * The returned string is dynamically allocated.  Generates an error
1486  * message and returns NULL if the library cannot be found.
1488  * If the second argument is non-NULL, then it refers to an already-
1489  * loaded shared object, whose library search path will be searched.
1491  * The search order is:
1492  *   DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
1493  *   DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
1495  *   DT_RUNPATH in the referencing file
1496  *   ldconfig hints (if -z nodefaultlib, filter out /usr/lib from list)
1497  *   /usr/lib _unless_ the referencing file is linked with -z nodefaultlib
1499  * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
1502 find_library(const char *xname, const Obj_Entry *refobj)
1506     bool objgiven = (refobj != NULL);
1508     if (strchr(xname, '/') != NULL) {	/* Hard coded pathname */
/* Untrusted (setuid) processes may not load via relative paths. */
1509 if (xname[0] != '/' && !trust) {
1510     _rtld_error("Absolute pathname required for shared object \"%s\"",
1514 if (objgiven && refobj->z_origin)
1515     return origin_subst(xname, refobj->origin_path);
1517     return xstrdup(xname);
/* Apply libmap substitution unless disabled or no referencing object. */
1520     if (libmap_disable || !objgiven ||
1521 (name = lm_find(refobj->path, xname)) == NULL)
1522 name = (char *)xname;
1524     dbg(" Searching for \"%s\"", name);
/* Try each search location in the documented order; first hit wins. */
1527       (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
1528     (objgiven && (refobj->runpath == NULL) && (refobj != obj_main) &&
1529       (pathname = search_library_path(name, obj_main->rpath)) != NULL) ||
1530     (pathname = search_library_path(name, ld_library_path)) != NULL ||
1532       (pathname = search_library_path(name, refobj->runpath)) != NULL) ||
1533     (pathname = search_library_path(name, gethints(refobj))) != NULL ||
1534     (objgiven && !refobj->z_nodeflib &&
1535       (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL))
1538     if(objgiven && refobj->path != NULL) {
1539 _rtld_error("Shared object \"%s\" not found, required by \"%s\"",
1540   name, basename(refobj->path));
1542 _rtld_error("Shared object \"%s\" not found", name);
1548  * Given a symbol number in a referencing object, find the corresponding
1549  * definition of the symbol.  Returns a pointer to the symbol, or NULL if
1550  * no definition was found.  Returns a pointer to the Obj_Entry of the
1551  * defining object via the reference parameter DEFOBJ_OUT.
1554 find_symdef(unsigned long symnum, const Obj_Entry *refobj,
1555     const Obj_Entry **defobj_out, int flags, SymCache *cache,
1556     RtldLockState *lockstate)
1560     const Obj_Entry *defobj;
1566      * If we have already found this symbol, get the information from
/* Reject out-of-range symbol indices up front. */
1569     if (symnum >= refobj->dynsymcount)
1570 return NULL;	/* Bad object */
/* Per-object cache: hit means no hash lookup is needed. */
1571     if (cache != NULL && cache[symnum].sym != NULL) {
1572 *defobj_out = cache[symnum].obj;
1573 return cache[symnum].sym;
1576     ref = refobj->symtab + symnum;
1577     name = refobj->strtab + ref->st_name;
1582      * We don't have to do a full scale lookup if the symbol is local.
1583      * We know it will bind to the instance in this load module; to
1584      * which we already have a pointer (ie ref). By not doing a lookup,
1585      * we not only improve performance, but it also avoids unresolvable
1586      * symbols when local symbols are not in the hash table.
1588      * This might occur for TLS module relocations, which simply use
1591     if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
/* STT_SECTION entries should never be referenced by relocations here. */
1592 if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
1593     _rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
1596 symlook_init(&req, name);
1598 req.ventry = fetch_ventry(refobj, symnum);
1599 req.lockstate = lockstate;
1600 res = symlook_default(&req, refobj);
1603     defobj = req.defobj_out;
1611      * If we found no definition and the reference is weak, treat the
1612      * symbol as having the value zero.
1614     if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
1620 *defobj_out = defobj;
1621 /* Record the information in the cache to avoid subsequent lookups. */
1622 if (cache != NULL) {
1623     cache[symnum].sym = def;
1624     cache[symnum].obj = defobj;
/* rtld's own unresolved symbols are reported elsewhere; stay quiet. */
1627 if (refobj != &obj_rtld)
1628     _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
1634  * Return the search path from the ldconfig hints file, reading it if
1635  * necessary.  Returns NULL if there are problems with the hints file,
1636  * or if the search path there is empty.
1637  * If DF_1_NODEFLIB flag set, omit STANDARD_LIBRARY_PATH directories
1640 gethints(const Obj_Entry *obj)
/* The parsed hints path is cached in the static 'hints' after first read. */
1644     if (hints == NULL) {
1646 struct elfhints_hdr hdr;
1649 /* Keep from trying again in case the hints file is bad. */
1652 if ((fd = open(ld_elf_hints_path, O_RDONLY)) == -1)
/* Validate header magic before trusting any offsets in the file. */
1654 if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
1655     hdr.magic != ELFHINTS_MAGIC ||
1660 p = xmalloc(hdr.dirlistlen + 1);
1661 if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
1662     read(fd, p, hdr.dirlistlen + 1) != (ssize_t)hdr.dirlistlen + 1) {
1667     /* skip stdlib if compiled with -z nodeflib */
1668     if ((obj != NULL) && obj->z_nodeflib) {
1669 struct fill_search_info_args sargs, hargs;
1670 struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
1671 struct dl_serpath *SLPpath, *hintpath;
1672 unsigned int SLPndx, hintndx, fndx, fcount;
1673 char *filtered_path;
/* First pass: size the serinfo buffers for both path lists. */
1677 smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
1679 hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
1682 sargs.request = RTLD_DI_SERINFOSIZE;
1683 sargs.serinfo = &smeta;
1684 hargs.request = RTLD_DI_SERINFOSIZE;
1685 hargs.serinfo = &hmeta;
1687 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &sargs);
1688 path_enumerate(p, fill_search_info, &hargs);
/* NOTE(review): plain malloc here, unlike xmalloc elsewhere — results
 * look unchecked in this extract; confirm against the full source. */
1690 SLPinfo = malloc(smeta.dls_size);
1691 hintinfo = malloc(hmeta.dls_size);
/* Second pass: actually fill in both search-path tables. */
1693 sargs.request = RTLD_DI_SERINFO;
1694 sargs.serinfo = SLPinfo;
1695 sargs.serpath = &SLPinfo->dls_serpath[0];
1696 sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];
1698 hargs.request = RTLD_DI_SERINFO;
1699 hargs.serinfo = hintinfo;
1700 hargs.serpath = &hintinfo->dls_serpath[0];
1701 hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];
1703 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &sargs);
1704 path_enumerate(p, fill_search_info, &hargs);
/* Rebuild the hints list, dropping entries also in the standard path. */
1708 filtered_path = xmalloc(hdr.dirlistlen + 1);
1709 hintpath = &hintinfo->dls_serpath[0];
1710 for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++) {
1712     SLPpath = &SLPinfo->dls_serpath[0];
1713     for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++) {
1714 if (strcmp(hintpath->dls_name, SLPpath->dls_name) == 0)
1720 filtered_path[fndx] = ':';
1724     flen = strlen(hintpath->dls_name);
1725     strncpy((filtered_path + fndx), hintpath->dls_name, flen);
/* NUL-terminate the rebuilt colon-separated list. */
1730 filtered_path[fndx] = '\0';
1735 hints = filtered_path;
/* An empty hints string is treated the same as no hints at all. */
1741     return hints[0] != '\0' ? hints : NULL;
/*
 * Build the dependency DAG rooted at 'root': populate root->dagmembers
 * (breadth-first) and each member's dldags back-reference.  Idempotent
 * via the dag_inited flag.
 */
1745 init_dag(Obj_Entry *root)
1747     const Needed_Entry *needed;
1748     const Objlist_Entry *elm;
1751     if (root->dag_inited)
1753     donelist_init(&donelist);
1755     /* Root object belongs to own DAG. */
1756     objlist_push_tail(&root->dldags, root);
1757     objlist_push_tail(&root->dagmembers, root);
1758     donelist_check(&donelist, root);
1761      * Add dependencies of root object to DAG in breadth order
1762      * by exploiting the fact that each new object get added
1763      * to the tail of the dagmembers list.
1765     STAILQ_FOREACH(elm, &root->dagmembers, link) {
1766 for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
/* Skip unloaded deps and anything already visited (donelist dedup). */
1767     if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
1769     objlist_push_tail(&needed->obj->dldags, root);
1770     objlist_push_tail(&root->dagmembers, needed->obj);
1773     root->dag_inited = true;
1777  * Initialize the dynamic linker.  The argument is the address at which
1778  * the dynamic linker has been mapped into memory.  The primary task of
1779  * this function is to relocate the dynamic linker.
1782 init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
1784     Obj_Entry objtmp;	/* Temporary rtld object */
1785     const Elf_Dyn *dyn_rpath;
1786     const Elf_Dyn *dyn_soname;
1787     const Elf_Dyn *dyn_runpath;
1790      * Conjure up an Obj_Entry structure for the dynamic linker.
1792      * The "path" member can't be initialized yet because string constants
1793      * cannot yet be accessed. Below we will set it correctly.
1795     memset(&objtmp, 0, sizeof(objtmp));
1798     objtmp.mapbase = mapbase;
1800     objtmp.relocbase = mapbase;
1802     if (RTLD_IS_DYNAMIC()) {
1803 objtmp.dynamic = rtld_dynamic(&objtmp);
/* early=1: rtld can't touch globals until it has relocated itself. */
1804 digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath);
1805 assert(objtmp.needed == NULL);
1806 assert(!objtmp.textrel);
1809  * Temporarily put the dynamic linker entry into the object list, so
1810  * that symbols can be found.
/* Self-relocation: after this, normal (global-touching) code is safe. */
1813 relocate_objects(&objtmp, true, &objtmp, NULL);
1816     /* Initialize the object list. */
1817     obj_tail = &obj_list;
1819     /* Now that non-local variables can be accesses, copy out obj_rtld. */
1820     memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));
1822 #ifdef ENABLE_OSRELDATE
1823     if (aux_info[AT_OSRELDATE] != NULL)
1824     osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;
1827     digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath);
1829     /* Replace the path with a dynamically allocated copy. */
1830     obj_rtld.path = xstrdup(PATH_RTLD);
/* Seed the debugger rendezvous structure for GDB. */
1832     r_debug.r_brk = r_debug_state;
1833     r_debug.r_state = RT_CONSISTENT;
1837  * Add the init functions from a needed object list (and its recursive
1838  * needed objects) to "list".  This is not used directly; it is a helper
1839  * function for initlist_add_objects().  The write lock must be held
1840  * when this function is called.
1843 initlist_add_neededs(Needed_Entry *needed, Objlist *list)
1845     /* Recursively process the successor needed objects. */
/* Recursing on ->next first reverses the list order, so dependencies
 * later in the needed list are queued before earlier ones. */
1846     if (needed->next != NULL)
1847 initlist_add_neededs(needed->next, list);
1849     /* Process the current needed object. */
1850     if (needed->obj != NULL)
1851 initlist_add_objects(needed->obj, &needed->obj->next, list);
1855  * Scan all of the DAGs rooted in the range of objects from "obj" to
1856  * "tail" and add their init functions to "list".  This recurses over
1857  * the DAGs and ensure the proper init ordering such that each object's
1858  * needed libraries are initialized before the object itself.  At the
1859  * same time, this function adds the objects to the global finalization
1860  * list "list_fini" in the opposite order.  The write lock must be
1861  * held when this function is called.
1864 initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list)
/* init_scanned breaks cycles; init_done skips already-initialized objs. */
1866     if (obj->init_scanned || obj->init_done)
1868     obj->init_scanned = true;
1870     /* Recursively process the successor objects. */
1871     if (&obj->next != tail)
1872 initlist_add_objects(obj->next, tail, list);
1874     /* Recursively process the needed objects. */
1875     if (obj->needed != NULL)
1876 initlist_add_neededs(obj->needed, list);
1878     /* Add the object to the init list. */
1879     if (obj->preinit_array != (Elf_Addr)NULL || obj->init != (Elf_Addr)NULL ||
1880 obj->init_array != (Elf_Addr)NULL)
1881 objlist_push_tail(list, obj);
1883     /* Add the object to the global fini list in the reverse order. */
1884     if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL)
1885     && !obj->on_fini_list) {
1886 objlist_push_head(&list_fini, obj);
1887 obj->on_fini_list = true;
/* Resolve a function pointer to its code address (identity on this arch). */
1892 #define FPTR_TARGET(f)	((Elf_Addr) (f))
/*
 * Return whether the given rtld symbol is on the table of functions rtld
 * deliberately exports to applications.
 */
1896 is_exported(const Elf_Sym *def)
1899     const func_ptr_type *p;
1901     value = (Elf_Addr)(obj_rtld.relocbase + def->st_value);
/* 'exports' is a NULL-terminated table of exported function pointers. */
1902     for (p = exports;  *p != NULL;  p++)
1903 if (FPTR_TARGET(*p) == value)
/*
 * Drop a filtee Needed_Entry chain: dlclose() each loaded filtee first,
 * then free the list nodes in a second pass (two passes because dlclose
 * may re-enter rtld structures while nodes must stay intact).
 */
1909 free_needed_filtees(Needed_Entry *n)
1911     Needed_Entry *needed, *needed1;
1913     for (needed = n; needed != NULL; needed = needed->next) {
1914 if (needed->obj != NULL) {
1915     dlclose(needed->obj);
1919     for (needed = n; needed != NULL; needed = needed1) {
/* Save the successor before the node is freed (free elided here). */
1920 needed1 = needed->next;
/* Release both the regular and auxiliary filtee lists of an object. */
1926 unload_filtees(Obj_Entry *obj)
1929     free_needed_filtees(obj->needed_filtees);
1930     obj->needed_filtees = NULL;
1931     free_needed_filtees(obj->needed_aux_filtees);
1932     obj->needed_aux_filtees = NULL;
/* Allow load_filtees() to repopulate on next lookup. */
1933     obj->filtees_loaded = false;
/*
 * dlopen() every filtee on the given needed-list.  DF_1_LOADFLTR (or the
 * LD_LOADFLTR env override) forces immediate (RTLD_NOW) binding.
 */
1937 load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags)
1940     for (; needed != NULL; needed = needed->next) {
1941 needed->obj = dlopen_object(obj->strtab + needed->name, obj,
1942   flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) |
/*
 * Lazily load an object's filtees on first symbol lookup.  Requires the
 * write lock; restarts the caller's lock acquisition to upgrade.
 */
1948 load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate)
1951     lock_restart_for_upgrade(lockstate);
1952     if (!obj->filtees_loaded) {
1953 load_filtee1(obj, obj->needed_filtees, flags);
1954 load_filtee1(obj, obj->needed_aux_filtees, flags);
1955 obj->filtees_loaded = true;
/*
 * Load every object on a DT_NEEDED chain.  A load failure is fatal
 * unless we are only tracing (ldd) or loading filtees (best-effort).
 */
1960 process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags)
1964     for (; needed != NULL; needed = needed->next) {
1965 obj1 = needed->obj = load_object(obj->strtab + needed->name, obj,
1966   flags & ~RTLD_LO_NOLOAD);
1967 if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0)
/* DF_1_NODELETE objects get a permanent extra reference. */
1969 if (obj1 != NULL && obj1->z_nodelete && !obj1->ref_nodel) {
1970     dbg("obj %s nodelete", obj1->path);
1973     obj1->ref_nodel = true;
1980  * Given a shared object, traverse its list of needed objects, and load
1981  * each of them.  Returns 0 on success.  Generates an error message and
1982  * returns -1 on failure.
1985 load_needed_objects(Obj_Entry *first, int flags)
/* Walk 'first' and everything appended after it (transitive closure,
 * since load_object appends new deps to the tail of the list). */
1989     for (obj = first;  obj != NULL;  obj = obj->next) {
1990 if (process_needed(obj, obj->needed, flags) == -1)
/*
 * Load each object named in LD_PRELOAD (names separated by spaces,
 * tabs, ':' or ';'), then look for the magic RTLD_FUNCTRACE hook in the
 * preloaded set.
 */
1997 load_preload_objects(void)
1999     char *p = ld_preload;
2000     static const char delim[] = " \t:;";
2005     p += strspn(p, delim);
2006     while (*p != '\0') {
2007 size_t len = strcspn(p, delim);
2015 obj = load_object(p, NULL, 0);
2017     return -1;	/* XXX - cleanup */
2020 p += strspn(p, delim);
2022 /* Check for the magic tracing function */
2023 symlook_init(&req, RTLD_FUNCTRACE);
2024 res = symlook_obj(&req, obj);
/* If found, record the resolved hook address and its defining object. */
2026     rtld_functrace = (void *)(req.defobj_out->relocbase +
2027       req.sym_out->st_value);
2028     rtld_functrace_obj = req.defobj_out;
2031     LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
2036  * Load a shared object into memory, if it is not already loaded.
2038  * Returns a pointer to the Obj_Entry for the object.  Returns NULL
2042 load_object(const char *name, const Obj_Entry *refobj, int flags)
/* Fast path: an object with a matching recorded name is already loaded. */
2049     for (obj = obj_list->next; obj != NULL; obj = obj->next)
2050 if (object_match_name(obj, name))
2053     path = find_library(name, refobj);
2058      * If we didn't find a match by pathname, open the file and check
2059      * again by device and inode.  This avoids false mismatches caused
2060      * by multiple links or ".." in pathnames.
2062      * To avoid a race, we open the file and use fstat() rather than
2065     if ((fd = open(path, O_RDONLY)) == -1) {
2066 _rtld_error("Cannot open \"%s\"", path);
2070     if (fstat(fd, &sb) == -1) {
2071 _rtld_error("Cannot fstat \"%s\"", path);
/* Second dedup pass: compare by (dev, ino) rather than by name. */
2076     for (obj = obj_list->next;  obj != NULL;  obj = obj->next)
2077 if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
2080 object_add_name(obj, name);
/* RTLD_LO_NOLOAD: caller only wanted an existing object, never a load. */
2085     if (flags & RTLD_LO_NOLOAD) {
2091     /* First use of this object, so we must map it in */
2092     obj = do_load_object(fd, name, path, &sb, flags);
/*
 * Map a not-yet-loaded object from an open fd, digest its dynamic
 * section, and append it to the global object list.  Takes ownership of
 * 'path'.
 */
2101 do_load_object(int fd, const char *name, char *path, struct stat *sbp,
2108      * but first, make sure that environment variables haven't been
2109      * used to circumvent the noexec flag on a filesystem.
/* With dangerous LD_* env vars set, refuse objects on noexec mounts. */
2111     if (dangerous_ld_env) {
2112 if (fstatfs(fd, &fs) != 0) {
2113     _rtld_error("Cannot fstatfs \"%s\"", path);
2116 if (fs.f_flags & MNT_NOEXEC) {
2117     _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname);
2121     dbg("loading \"%s\"", path);
2122     obj = map_object(fd, path, sbp);
2126     object_add_name(obj, name);
2128     digest_dynamic(obj, 0);
/* DF_1_NOOPEN objects may not be dlopen()ed, only traced. */
2129     if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) ==
2131 dbg("refusing to load non-loadable \"%s\"", obj->path);
2132 _rtld_error("Cannot dlopen non-loadable %s", obj->path);
2133 munmap(obj->mapbase, obj->mapsize);
/* Append to the global object list and advance the tail pointer. */
2139     obj_tail = &obj->next;
2142     linkmap_add(obj);	/* for GDB & dlinfo() */
2143     max_stack_flags |= obj->stack_flags;
2145     dbg("  %p .. %p: %s", obj->mapbase,
2146  obj->mapbase + obj->mapsize - 1, obj->path);
2148 dbg("  WARNING: %s has impure text", obj->path);
2149     LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
/*
 * Find the loaded object whose mapping contains 'addr'.  Relies on the
 * object list being sorted by increasing mapbase.
 */
2156 obj_from_addr(const void *addr)
2160     for (obj = obj_list;  obj != NULL;  obj = obj->next) {
2161 if (addr < (void *) obj->mapbase)
2163 if (addr < (void *) (obj->mapbase + obj->mapsize))
2170  * Call the finalization functions for each of the objects in "list"
2171  * belonging to the DAG of "root" and referenced once.  If NULL "root"
2172  * is specified, every finalization function will be called regardless
2173  * of the reference count and the list elements won't be freed.  All of
2174  * the objects are expected to have non-NULL fini functions.
2177 objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate)
2181     Elf_Addr *fini_addr;
2184     assert(root == NULL || root->refcount == 1);
2187      * Preserve the current error message since a fini function might
2188      * call into the dynamic linker and overwrite it.
2190     saved_msg = errmsg_save();
2192 STAILQ_FOREACH(elm, list, link) {
/* With a root given, only finalize members of its DAG at refcount 1. */
2193     if (root != NULL && (elm->obj->refcount != 1 ||
2194       objlist_find(&root->dagmembers, elm->obj) == NULL))
2197     /* Remove object from fini list to prevent recursive invocation. */
2198     STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
2200      * XXX: If a dlopen() call references an object while the
2201      * fini function is in progress, we might end up trying to
2202      * unload the referenced object in dlclose() or the object
2203      * won't be unloaded although its fini function has been
/* Drop the bind lock while running user code (fini may call rtld). */
2206     lock_release(rtld_bind_lock, lockstate);
2209      * It is legal to have both DT_FINI and DT_FINI_ARRAY defined.  When this
2210      * happens, DT_FINI_ARRAY is processed first, and it is also processed
2211      * backwards.  It is possible to encounter DT_FINI_ARRAY elements with
2212      * values of 0 or 1, but they need to be ignored.
2214     fini_addr = (Elf_Addr *)elm->obj->fini_array;
2215     if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
2216 for (index = elm->obj->fini_array_num - 1; index >= 0; index--) {
2217     if (fini_addr[index] != 0 && fini_addr[index] != 1) {
2218 dbg("calling fini array function for %s at %p",
2219     elm->obj->path, (void *)fini_addr[index]);
2220 LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
2221     (void *)fini_addr[index], 0, 0, elm->obj->path);
2222 call_initfini_pointer(elm->obj, fini_addr[index]);
/* Then the legacy single DT_FINI entry, if any. */
2226     if (elm->obj->fini != (Elf_Addr)NULL) {
2227 dbg("calling fini function for %s at %p", elm->obj->path,
2228     (void *)elm->obj->fini);
2229 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
2230     0, 0, elm->obj->path);
2231 call_initfini_pointer(elm->obj, elm->obj->fini);
2233     wlock_acquire(rtld_bind_lock, lockstate);
2234     /* No need to free anything if process is going down. */
2238      * We must restart the list traversal after every fini call
2239      * because a dlclose() call from the fini function or from
2240      * another thread might have modified the reference counts.
2244     } while (elm != NULL);
2245     errmsg_restore(saved_msg);
2249  * If the main program is defined with a .preinit_array section, call
2250  * each function in order.  This must occur before the initialization
2251  * of any shared object or the main program.
2254 preinitialize_main_object (void)
2256     Elf_Addr *preinit_addr;
2259     preinit_addr = (Elf_Addr *)obj_main->preinit_array;
2260     if (preinit_addr == NULL)
2263     for (index = 0; index < obj_main->preinit_array_num; index++) {
/* Entries of 0 or 1 are placeholders and must be skipped. */
2264 if (preinit_addr[index] != 0 && preinit_addr[index] != 1) {
2265     dbg("calling preinit function for %s at %p", obj_main->path,
2266 (void *)preinit_addr[index]);
2267     LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index],
2268 0, 0, obj_main->path);
2269     call_init_pointer(obj_main, preinit_addr[index]);
2275  * Call the initialization functions for each of the objects in
2276  * "list".  All of the objects are expected to have non-NULL init
2280 objlist_call_init(Objlist *list, RtldLockState *lockstate)
2285     Elf_Addr *init_addr;
2289      * Clean init_scanned flag so that objects can be rechecked and
2290      * possibly initialized earlier if any of vectors called below
2291      * cause the change by using dlopen.
2293     for (obj = obj_list;  obj != NULL;  obj = obj->next)
2294 obj->init_scanned = false;
2297      * Preserve the current error message since an init function might
2298      * call into the dynamic linker and overwrite it.
2300     saved_msg = errmsg_save();
2301     STAILQ_FOREACH(elm, list, link) {
2302 if (elm->obj->init_done) /* Initialized early. */
2306  * Race: other thread might try to use this object before current
2307  * one completes the initilization. Not much can be done here
2308  * without better locking.
/* Mark done *before* calling out to avoid recursive re-init. */
2310 elm->obj->init_done = true;
2311 lock_release(rtld_bind_lock, lockstate);
2314      * It is legal to have both DT_INIT and DT_INIT_ARRAY defined.  When
2315      * this happens, DT_INIT is processed first.  It is possible to
2316      * encounter DT_INIT_ARRAY elements with values of 0 or 1, but they
2317      * need to be ignored.
2319 if (elm->obj->init != (Elf_Addr)NULL) {
2320     dbg("calling init function for %s at %p", elm->obj->path,
2321 (void *)elm->obj->init);
2322     LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init,
2323 0, 0, elm->obj->path);
2324     call_initfini_pointer(elm->obj, elm->obj->init);
/* Then DT_INIT_ARRAY, in forward order (unlike fini_array). */
2326 init_addr = (Elf_Addr *)elm->obj->init_array;
2327 if (init_addr != NULL) {
2328     for (index = 0; index < elm->obj->init_array_num; index++) {
2329 if (init_addr[index] != 0 && init_addr[index] != 1) {
2330     dbg("calling init array function for %s at %p", elm->obj->path,
2331 (void *)init_addr[index]);
2332     LD_UTRACE(UTRACE_INIT_CALL, elm->obj,
2333 (void *)init_addr[index], 0, 0, elm->obj->path);
2334     call_init_pointer(elm->obj, init_addr[index]);
2338 wlock_acquire(rtld_bind_lock, lockstate);
2340     errmsg_restore(saved_msg);
/* Empty an object list, freeing each entry node (frees elided here). */
2344 objlist_clear(Objlist *list)
2348     while (!STAILQ_EMPTY(list)) {
2349 elm = STAILQ_FIRST(list);
2350 STAILQ_REMOVE_HEAD(list, link);
/* Return the list entry wrapping 'obj', or NULL if not on the list. */
2355 static Objlist_Entry *
2356 objlist_find(Objlist *list, const Obj_Entry *obj)
2360     STAILQ_FOREACH(elm, list, link)
2361 if (elm->obj == obj)
/* Initialize an empty object list (body elided in this extract). */
2367 objlist_init(Objlist *list)
/* Allocate a new entry for 'obj' and insert it at the head of 'list'. */
2373 objlist_push_head(Objlist *list, Obj_Entry *obj)
2377     elm = NEW(Objlist_Entry);
2379     STAILQ_INSERT_HEAD(list, elm, link);
/* Allocate a new entry for 'obj' and append it to the tail of 'list'. */
2383 objlist_push_tail(Objlist *list, Obj_Entry *obj)
2387     elm = NEW(Objlist_Entry);
2389     STAILQ_INSERT_TAIL(list, elm, link);
/* Remove (and free, elided) the entry for 'obj' if present on 'list'. */
2393 objlist_remove(Objlist *list, Obj_Entry *obj)
2397     if ((elm = objlist_find(list, obj)) != NULL) {
2398 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
2404  * Relocate newly-loaded shared objects.  The argument is a pointer to
2405  * the Obj_Entry for the first such object.  All objects from the first
2406  * to the end of the list of objects are relocated.  Returns 0 on success,
2410 relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
2411     RtldLockState *lockstate)
2415     for (obj = first;  obj != NULL;  obj = obj->next) {
2417     dbg("relocating \"%s\"", obj->path);
/* A usable object must have symbols, strings, and at least one hash. */
2418 if (obj->symtab == NULL || obj->strtab == NULL ||
2419     !(obj->valid_hash_sysv || obj->valid_hash_gnu)) {
2420     _rtld_error("%s: Shared object has no run-time symbol table",
2426     /* There are relocations to the write-protected text segment. */
2427     if (mprotect(obj->mapbase, obj->textsize,
2428       PROT_READ|PROT_WRITE|PROT_EXEC) == -1) {
2429 _rtld_error("%s: Cannot write-enable text segment: %s",
2430   obj->path, rtld_strerror(errno));
2435 /* Process the non-PLT relocations. */
2436 if (reloc_non_plt(obj, rtldobj, lockstate))
2440  * Reprotect the text segment.  Make sure it is included in the
2441  * core dump since we modified it.  This unfortunately causes the
2442  * entire text segment to core-out but we don't have much of a
2443  * choice.  We could try to only reenable core dumps on pages
2444  * in which relocations occured but that is likely most of the text
2445  * pages anyway, and even that would not work because the rest of
2446  * the text pages would wind up as a read-only OBJT_DEFAULT object
2447  * (created due to our modifications) backed by the original OBJT_VNODE
2448  * object, and the ELF coredump code is currently only able to dump
2449  * vnode records for pure vnode-backed mappings, not vnode backings
2450  * to memory objects.
2453     madvise(obj->mapbase, obj->textsize, MADV_CORE);
2454     if (mprotect(obj->mapbase, obj->textsize,
2455       PROT_READ|PROT_EXEC) == -1) {
2456 _rtld_error("%s: Cannot write-protect text segment: %s",
2457   obj->path, rtld_strerror(errno));
2463 /* Set the special PLT or GOT entries. */
2466 /* Process the PLT relocations. */
2467 if (reloc_plt(obj) == -1)
2469 /* Relocate the jump slots if we are doing immediate binding. */
2470 if (obj->bind_now || bind_now)
2471     if (reloc_jmpslots(obj, lockstate) == -1)
2475  * Set up the magic number and version in the Obj_Entry.  These
2476  * were checked in the crt1.o from the original ElfKit, so we
2477  * set them for backward compatibility.
2479 obj->magic = RTLD_MAGIC;
2480 obj->version = RTLD_VERSION;
2483  * Set relocated data to read-only status if protection specified
/* PT_GNU_RELRO enforcement: re-protect after relocations are written. */
2486 if (obj->relro_size) {
2487     if (mprotect(obj->relro_page, obj->relro_size, PROT_READ) == -1) {
2488 _rtld_error("%s: Cannot enforce relro relocation: %s",
2489   obj->path, rtld_strerror(errno));
2499  * The handling of R_MACHINE_IRELATIVE relocations and jumpslots
2500  * referencing STT_GNU_IFUNC symbols is postponed till the other
2501  * relocations are done.  The indirect functions specified as
2502  * ifunc are allowed to call other symbols, so we need to have
2503  * objects relocated before asking for resolution from indirects.
2505  * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion,
2506  * instead of the usual lazy handling of PLT slots.  It is
2507  * consistent with how GNU does it.
2510 resolve_object_ifunc(Obj_Entry *obj, bool bind_now, RtldLockState *lockstate)
2512     if (obj->irelative && reloc_iresolve(obj, lockstate) == -1)
/* GNU ifunc jump slots are only resolved eagerly under BIND_NOW. */
2514     if ((obj->bind_now || bind_now) && obj->gnu_ifunc &&
2515       reloc_gnu_ifunc(obj, lockstate) == -1)
/* Run ifunc resolution over every object from 'first' to list end. */
2521 resolve_objects_ifunc(Obj_Entry *first, bool bind_now, RtldLockState *lockstate)
2525     for (obj = first; obj != NULL; obj = obj->next) {
2526 if (resolve_object_ifunc(obj, bind_now, lockstate) == -1)
/* Run ifunc resolution over each object on an init list. */
2533 initlist_objects_ifunc(Objlist *list, bool bind_now, RtldLockState *lockstate)
2537     STAILQ_FOREACH(elm, list, link) {
2538 if (resolve_object_ifunc(elm->obj, bind_now, lockstate) == -1)
2545 * Cleanup procedure. It will be called (by the atexit mechanism) just
2546 * before the process exits.
/*
 * Body of the atexit cleanup handler (signature line not visible in
 * this chunk): takes the bind lock exclusively, runs every registered
 * fini function, then releases the lock.  The !libmap_disable branch
 * body is not visible here — presumably libmap teardown; verify
 * against the full source.
 */
2551 RtldLockState lockstate;
2553 wlock_acquire(rtld_bind_lock, &lockstate);
2555 objlist_call_fini(&list_fini, NULL, &lockstate);
2556 /* No need to remove the items from the list, since we are exiting. */
2557 if (!libmap_disable)
2559 lock_release(rtld_bind_lock, &lockstate);
/*
 * Walk a ':' or ';' separated search-path string, invoking 'callback'
 * with each non-empty component (pointer + length, NOT nul-terminated).
 * Empty components are skipped via strspn; iteration details between
 * the visible lines (early-return on non-NULL callback result) follow
 * the usual rtld convention — confirm against full source.
 */
2563 path_enumerate(const char *path, path_enum_proc callback, void *arg)
2568 path += strspn(path, ":;");
2569 while (*path != '\0') {
2573 len = strcspn(path, ":;");
2574 res = callback(path, len, arg);
2580 path += strspn(path, ":;");
2586 struct try_library_args {
/*
 * path_enumerate() callback for search_library_path(): build
 * "<dir>/<name>" into the caller-provided scratch buffer and probe it
 * with access(F_OK).  Relative directories are only honored when the
 * process is trusted ('trust').  On a hit, returns a freshly
 * xmalloc'd copy of the full pathname (ownership passes to caller).
 */
2594 try_library_path(const char *dir, size_t dirlen, void *param)
2596 struct try_library_args *arg;
2599 if (*dir == '/' || trust) {
     /* Reject components that would overflow the scratch buffer. */
2602 if (dirlen + 1 + arg->namelen + 1 > arg->buflen)
2605 pathname = arg->buffer;
2606 strncpy(pathname, dir, dirlen);
2607 pathname[dirlen] = '/';
2608 strcpy(pathname + dirlen + 1, arg->name);
2610 dbg(" Trying \"%s\"", pathname);
2611 if (access(pathname, F_OK) == 0) { /* We found it */
2612 pathname = xmalloc(dirlen + 1 + arg->namelen + 1);
2613 strcpy(pathname, arg->buffer);
/*
 * Search every directory in 'path' for a library called 'name' using
 * try_library_path().  Returns the xmalloc'd full pathname of the
 * first match, or NULL.  The PATH_MAX scratch buffer is freed on the
 * (not visible) exit path — verify against full source.
 */
2621 search_library_path(const char *name, const char *path)
2624 struct try_library_args arg;
2630 arg.namelen = strlen(name);
2631 arg.buffer = xmalloc(PATH_MAX);
2632 arg.buflen = PATH_MAX;
2634 p = path_enumerate(path, try_library_path, &arg);
/*
 * Public dlclose(3).  Validates the handle, drops one dlopen
 * reference, and when the DAG root's total refcount falls to its last
 * reference, runs fini functions and unloads the whole DAG.  All work
 * happens under the exclusive bind lock.
 */
2642 dlclose(void *handle)
2645 RtldLockState lockstate;
2647 wlock_acquire(rtld_bind_lock, &lockstate);
2648 root = dlcheck(handle);
2650 lock_release(rtld_bind_lock, &lockstate);
2653 LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount,
2656 /* Unreference the object and its dependencies. */
2657 root->dl_refcount--;
2659 if (root->refcount == 1) {
2661 * The object will be no longer referenced, so we must unload it.
2662 * First, call the fini functions.
2664 objlist_call_fini(&list_fini, root, &lockstate);
2668 /* Finish cleaning up the newly-unreferenced objects. */
2669 GDB_STATE(RT_DELETE,&root->linkmap);
2670 unload_object(root);
2671 GDB_STATE(RT_CONSISTENT,NULL);
2675 LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL);
2676 lock_release(rtld_bind_lock, &lockstate);
/*
 * Fragment of dlerror(3) (signature and return not visible here):
 * fetch the pending error message and clear it, so a second call
 * returns NULL per the dlerror contract.
 */
2683 char *msg = error_message;
2684 error_message = NULL;
/*
 * Public dlopen(3).  Translates the RTLD_* mode bits into internal
 * RTLD_LO_* load flags and delegates to dlopen_object().  When
 * RTLD_TRACE is requested, first refreshes our cached 'environ' from
 * the main program (under the bind lock) so tracing sees the caller's
 * environment.
 */
2689 dlopen(const char *name, int mode)
2691 RtldLockState lockstate;
2694 LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name);
2695 ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
2696 if (ld_tracing != NULL) {
2697 rlock_acquire(rtld_bind_lock, &lockstate);
     /* Upgrade to the write lock if a signal interrupted the read lock. */
2698 if (sigsetjmp(lockstate.env, 0) != 0)
2699 lock_upgrade(rtld_bind_lock, &lockstate);
2700 environ = (char **)*get_program_var_addr("environ", &lockstate);
2701 lock_release(rtld_bind_lock, &lockstate);
2703 lo_flags = RTLD_LO_DLOPEN;
2704 if (mode & RTLD_NODELETE)
2705 lo_flags |= RTLD_LO_NODELETE;
2706 if (mode & RTLD_NOLOAD)
2707 lo_flags |= RTLD_LO_NOLOAD;
2708 if (ld_tracing != NULL)
2709 lo_flags |= RTLD_LO_TRACE;
2711 return (dlopen_object(name, obj_main, lo_flags,
2712 mode & (RTLD_MODEMASK | RTLD_GLOBAL)));
/*
 * Error-path helper for dlopen_object(): drop the references taken on
 * 'obj' and unload it once its refcount reaches zero (interior lines
 * not visible in this chunk).
 */
2716 dlopen_cleanup(Obj_Entry *obj)
2721 if (obj->refcount == 0)
/*
 * Core of dlopen(): load 'name' (relative to 'refobj'), pull in its
 * needed objects, verify symbol versions, relocate, resolve ifuncs,
 * and run init functions — all under the exclusive bind lock, with
 * GDB_STATE bracketing so the debugger sees a consistent link map.
 * Returns the object handle, or NULL with an error recorded (error
 * returns are on lines not visible in this chunk).
 */
2726 dlopen_object(const char *name, Obj_Entry *refobj, int lo_flags, int mode)
2728 Obj_Entry **old_obj_tail;
2731 RtldLockState lockstate;
2734 objlist_init(&initlist);
2736 wlock_acquire(rtld_bind_lock, &lockstate);
2737 GDB_STATE(RT_ADD,NULL);
     /* Remember the list tail so we can tell if load_object added anything. */
2739 old_obj_tail = obj_tail;
2745 obj = load_object(name, refobj, lo_flags);
2750 if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
2751 objlist_push_tail(&list_global, obj);
2752 if (*old_obj_tail != NULL) { /* We loaded something new. */
2753 assert(*old_obj_tail == obj);
2754 result = load_needed_objects(obj, lo_flags & RTLD_LO_DLOPEN);
2758 result = rtld_verify_versions(&obj->dagmembers);
2759 if (result != -1 && ld_tracing)
2761 if (result == -1 || (relocate_objects(obj, (mode & RTLD_MODEMASK)
2762 == RTLD_NOW, &obj_rtld, &lockstate)) == -1) {
2763 dlopen_cleanup(obj);
2766 /* Make list of init functions to call. */
2767 initlist_add_objects(obj, &obj->next, &initlist);
2772 * Bump the reference counts for objects on this DAG. If
2773 * this is the first dlopen() call for the object that was
2774 * already loaded as a dependency, initialize the dag
2780 if ((lo_flags & RTLD_LO_TRACE) != 0)
     /* Pin the object in memory when RTLD_NODELETE or DF_1_NODELETE applies. */
2783 if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 ||
2784 obj->z_nodelete) && !obj->ref_nodel) {
2785 dbg("obj %s nodelete", obj->path);
2787 obj->z_nodelete = obj->ref_nodel = true;
2791 LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0,
2793 GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL);
2795 map_stacks_exec(&lockstate);
     /* Greedily resolve ifuncs before init functions may call them. */
2797 if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW,
2798 &lockstate) == -1) {
2799 objlist_clear(&initlist);
2800 dlopen_cleanup(obj);
2801 lock_release(rtld_bind_lock, &lockstate);
2805 /* Call the init functions. */
2806 objlist_call_init(&initlist, &lockstate);
2807 objlist_clear(&initlist);
2808 lock_release(rtld_bind_lock, &lockstate);
2811 trace_loaded_objects(obj);
2812 lock_release(rtld_bind_lock, &lockstate);
/*
 * Shared implementation behind dlsym()/dlfunc()/dlvsym().  'retaddr'
 * is the caller's return address, used to find the calling shared
 * object for the NULL/RTLD_NEXT/RTLD_SELF/RTLD_DEFAULT pseudo-handles.
 * 've' is the optional version entry (dlvsym) and 'flags' the symbol
 * lookup flags.  Returns the resolved address (function pointer,
 * ifunc result, TLS address, or plain relocated address) or NULL with
 * an error recorded.
 */
2817 do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
2821 const Obj_Entry *obj, *defobj;
2824 RtldLockState lockstate;
2829 symlook_init(&req, name);
2831 req.flags = flags | SYMLOOK_IN_PLT;
2832 req.lockstate = &lockstate;
2834 rlock_acquire(rtld_bind_lock, &lockstate);
     /* Upgrade to the write lock if a signal interrupted the read lock. */
2835 if (sigsetjmp(lockstate.env, 0) != 0)
2836 lock_upgrade(rtld_bind_lock, &lockstate);
2837 if (handle == NULL || handle == RTLD_NEXT ||
2838 handle == RTLD_DEFAULT || handle == RTLD_SELF) {
2840 if ((obj = obj_from_addr(retaddr)) == NULL) {
2841 _rtld_error("Cannot determine caller's shared object");
2842 lock_release(rtld_bind_lock, &lockstate);
2845 if (handle == NULL) { /* Just the caller's shared object. */
2846 res = symlook_obj(&req, obj);
2849 defobj = req.defobj_out;
2851 } else if (handle == RTLD_NEXT || /* Objects after caller's */
2852 handle == RTLD_SELF) { /* ... caller included */
2853 if (handle == RTLD_NEXT)
     /* Scan forward through the object list; a strong (non-weak)
      * definition wins immediately, a weak one is remembered. */
2855 for (; obj != NULL; obj = obj->next) {
2856 res = symlook_obj(&req, obj);
2859 ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) {
2861 defobj = req.defobj_out;
2862 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
2868 * Search the dynamic linker itself, and possibly resolve the
2869 * symbol from there. This is how the application links to
2870 * dynamic linker services such as dlopen.
2872 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
2873 res = symlook_obj(&req, &obj_rtld);
2874 if (res == 0 && is_exported(req.sym_out)) {
2876 defobj = req.defobj_out;
2880 assert(handle == RTLD_DEFAULT);
2881 res = symlook_default(&req, obj);
2883 defobj = req.defobj_out;
2888 if ((obj = dlcheck(handle)) == NULL) {
2889 lock_release(rtld_bind_lock, &lockstate);
2893 donelist_init(&donelist);
2894 if (obj->mainprog) {
2895 /* Handle obtained by dlopen(NULL, ...) implies global scope. */
2896 res = symlook_global(&req, &donelist);
2899 defobj = req.defobj_out;
2902 * Search the dynamic linker itself, and possibly resolve the
2903 * symbol from there. This is how the application links to
2904 * dynamic linker services such as dlopen.
2906 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
2907 res = symlook_obj(&req, &obj_rtld);
2910 defobj = req.defobj_out;
2915 /* Search the whole DAG rooted at the given object. */
2916 res = symlook_list(&req, &obj->dagmembers, &donelist);
2919 defobj = req.defobj_out;
2925 lock_release(rtld_bind_lock, &lockstate);
2928 * The value required by the caller is derived from the value
2929 * of the symbol. For the ia64 architecture, we need to
2930 * construct a function descriptor which the caller can use to
2931 * call the function with the right 'gp' value. For other
2932 * architectures and for non-functions, the value is simply
2933 * the relocated value of the symbol.
2935 if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
2936 return (make_function_pointer(def, defobj));
2937 else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
2938 return (rtld_resolve_ifunc(defobj, def));
2939 else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
2941 ti.ti_module = defobj->tlsindex;
2942 ti.ti_offset = def->st_value;
2943 return (__tls_get_addr(&ti));
2945 return (defobj->relocbase + def->st_value);
2948 _rtld_error("Undefined symbol \"%s\"", name);
2949 lock_release(rtld_bind_lock, &lockstate);
/* Public dlsym(3): thin wrapper around do_dlsym() with no version entry. */
2954 dlsym(void *handle, const char *name)
2956 return do_dlsym(handle, name, __builtin_return_address(0), NULL,
/*
 * Public dlfunc(3): like dlsym() but returns the result through a
 * union so a data pointer can be converted to a function pointer
 * without a direct (UB-prone) cast.
 */
2961 dlfunc(void *handle, const char *name)
2968 rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
/*
 * Public dlvsym(3): dlsym() restricted to a specific symbol version.
 * Builds a Ver_Entry (name + SysV ELF hash of the version string) and
 * passes it to do_dlsym().
 */
2974 dlvsym(void *handle, const char *name, const char *version)
2978 ventry.name = version;
2980 ventry.hash = elf_hash(version);
2982 return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
/*
 * Fill 'phdr_info' for the object mapping 'addr'.  Takes the bind
 * lock shared; sets an rtld error and (on a line not visible here)
 * fails when no loaded object contains the address.
 */
2987 _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
2989 const Obj_Entry *obj;
2990 RtldLockState lockstate;
2992 rlock_acquire(rtld_bind_lock, &lockstate);
2993 obj = obj_from_addr(addr);
2995 _rtld_error("No shared object contains address");
2996 lock_release(rtld_bind_lock, &lockstate);
2999 rtld_fill_dl_phdr_info(obj, phdr_info);
3000 lock_release(rtld_bind_lock, &lockstate);
/*
 * Public dladdr(3): identify the shared object containing 'addr' and
 * the nearest dynamic symbol at or below it.  Scans the whole dynamic
 * symbol table linearly (O(dynsymcount)); dli_saddr/dli_sname stay
 * NULL when no symbol precedes the address.
 */
3005 dladdr(const void *addr, Dl_info *info)
3007 const Obj_Entry *obj;
3010 unsigned long symoffset;
3011 RtldLockState lockstate;
3013 rlock_acquire(rtld_bind_lock, &lockstate);
3014 obj = obj_from_addr(addr);
3016 _rtld_error("No shared object contains address");
3017 lock_release(rtld_bind_lock, &lockstate);
3020 info->dli_fname = obj->path;
3021 info->dli_fbase = obj->mapbase;
3022 info->dli_saddr = NULL;
3023 info->dli_sname = NULL;
3026 * Walk the symbol list looking for the symbol whose address is
3027 * closest to the address sent in.
3029 for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) {
3030 def = obj->symtab + symoffset;
     /* Skip the symbol if st_shndx is SHN_UNDEF or SHN_COMMON. */
3036 if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
3040 * If the symbol is greater than the specified address, or if it
3041 * is further away from addr than the current nearest symbol,
3044 symbol_addr = obj->relocbase + def->st_value;
3045 if (symbol_addr > addr || symbol_addr < info->dli_saddr)
3048 /* Update our idea of the nearest symbol. */
3049 info->dli_sname = obj->strtab + def->st_name;
3050 info->dli_saddr = symbol_addr;
     /* Exact hit: no closer symbol is possible, stop early. */
3053 if (info->dli_saddr == addr)
3056 lock_release(rtld_bind_lock, &lockstate);
/*
 * Public dlinfo(3): answer a query about 'handle' (or, for
 * NULL/RTLD_SELF, about the calling object found via the return
 * address).  Supported requests: link map pointer, origin directory,
 * and library search-path info via do_search_info().
 */
3061 dlinfo(void *handle, int request, void *p)
3063 const Obj_Entry *obj;
3064 RtldLockState lockstate;
3067 rlock_acquire(rtld_bind_lock, &lockstate);
3069 if (handle == NULL || handle == RTLD_SELF) {
3072 retaddr = __builtin_return_address(0); /* __GNUC__ only */
3073 if ((obj = obj_from_addr(retaddr)) == NULL)
3074 _rtld_error("Cannot determine caller's shared object");
3076 obj = dlcheck(handle);
3079 lock_release(rtld_bind_lock, &lockstate);
3085 case RTLD_DI_LINKMAP:
3086 *((struct link_map const **)p) = &obj->linkmap;
3088 case RTLD_DI_ORIGIN:
3089 error = rtld_dirname(obj->path, p);
3092 case RTLD_DI_SERINFOSIZE:
3093 case RTLD_DI_SERINFO:
3094 error = do_search_info(obj, request, (struct dl_serinfo *)p);
3098 _rtld_error("Invalid request %d passed to dlinfo()", request);
3102 lock_release(rtld_bind_lock, &lockstate);
/*
 * Populate a dl_phdr_info from an Obj_Entry.  dlpi_adds/dlpi_subs are
 * derived from the global load counters so dl_iterate_phdr callers
 * can detect loads/unloads between iterations.
 */
3108 rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info)
3111 phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase;
     /* Prefer the first recorded SONAME-style name; fall back to the path. */
3112 phdr_info->dlpi_name = STAILQ_FIRST(&obj->names) ?
3113 STAILQ_FIRST(&obj->names)->name : obj->path;
3114 phdr_info->dlpi_phdr = obj->phdr;
3115 phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]);
3116 phdr_info->dlpi_tls_modid = obj->tlsindex;
3117 phdr_info->dlpi_tls_data = obj->tlsinit;
3118 phdr_info->dlpi_adds = obj_loads;
3119 phdr_info->dlpi_subs = obj_loads - obj_count;
/*
 * Public dl_iterate_phdr(3): call 'callback' once per loaded object.
 * Holds the phdr lock exclusively and the bind lock shared for the
 * whole walk; stops early when the callback returns non-zero and
 * returns that value.
 */
3123 dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param)
3125 struct dl_phdr_info phdr_info;
3126 const Obj_Entry *obj;
3127 RtldLockState bind_lockstate, phdr_lockstate;
3130 wlock_acquire(rtld_phdr_lock, &phdr_lockstate);
3131 rlock_acquire(rtld_bind_lock, &bind_lockstate);
3135 for (obj = obj_list; obj != NULL; obj = obj->next) {
3136 rtld_fill_dl_phdr_info(obj, &phdr_info);
3137 if ((error = callback(&phdr_info, sizeof phdr_info, param)) != 0)
3141 lock_release(rtld_bind_lock, &bind_lockstate);
3142 lock_release(rtld_phdr_lock, &phdr_lockstate);
/*
 * path_enumerate() callback used by do_search_info().  In the
 * RTLD_DI_SERINFOSIZE pass it only accumulates counts and byte sizes;
 * in the fill pass it appends a dl_serpath entry and copies the
 * directory name into the caller-managed string space.
 */
3148 fill_search_info(const char *dir, size_t dirlen, void *param)
3150 struct fill_search_info_args *arg;
3154 if (arg->request == RTLD_DI_SERINFOSIZE) {
3155 arg->serinfo->dls_cnt ++;
3156 arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1;
3158 struct dl_serpath *s_entry;
3160 s_entry = arg->serpath;
3161 s_entry->dls_name = arg->strspace;
3162 s_entry->dls_flags = arg->flags;
     /* 'dir' is not nul-terminated; bounded copy then terminate by hand. */
3164 strncpy(arg->strspace, dir, dirlen);
3165 arg->strspace[dirlen] = '\0';
3167 arg->strspace += dirlen + 1;
/*
 * Implement RTLD_DI_SERINFOSIZE / RTLD_DI_SERINFO for dlinfo().
 * First computes sizes into a local dl_serinfo by enumerating every
 * search-path source (rpath, LD_LIBRARY_PATH, runpath, hints, default
 * path).  For SERINFOSIZE that result is copied out; for SERINFO the
 * caller's sizes are cross-checked, then the paths are enumerated a
 * second time to fill the entries with their LA_SER_* origin flags.
 */
3175 do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info)
3177 struct dl_serinfo _info;
3178 struct fill_search_info_args args;
3180 args.request = RTLD_DI_SERINFOSIZE;
3181 args.serinfo = &_info;
3183 _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
3186 path_enumerate(obj->rpath, fill_search_info, &args);
3187 path_enumerate(ld_library_path, fill_search_info, &args);
3188 path_enumerate(obj->runpath, fill_search_info, &args);
3189 path_enumerate(gethints(obj), fill_search_info, &args);
3190 if (!obj->z_nodeflib)
3191 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args);
3194 if (request == RTLD_DI_SERINFOSIZE) {
3195 info->dls_size = _info.dls_size;
3196 info->dls_cnt = _info.dls_cnt;
     /* Fill pass: caller must have pre-sized the struct via SERINFOSIZE. */
3200 if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) {
3201 _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()");
3205 args.request = RTLD_DI_SERINFO;
3206 args.serinfo = info;
3207 args.serpath = &info->dls_serpath[0];
     /* Strings live immediately after the dl_serpath array. */
3208 args.strspace = (char *)&info->dls_serpath[_info.dls_cnt];
3210 args.flags = LA_SER_RUNPATH;
3211 if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL)
3214 args.flags = LA_SER_LIBPATH;
3215 if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL)
3218 args.flags = LA_SER_RUNPATH;
3219 if (path_enumerate(obj->runpath, fill_search_info, &args) != NULL)
3222 args.flags = LA_SER_CONFIG;
3223 if (path_enumerate(gethints(obj), fill_search_info, &args) != NULL)
3226 args.flags = LA_SER_DEFAULT;
3227 if (!obj->z_nodeflib &&
3228 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args) != NULL)
/*
 * dirname(3)-style helper: copy the directory part of 'path' into
 * 'bname' (caller supplies at least PATH_MAX bytes).  NULL/empty
 * input and slash-less paths yield "."; an all-slash prefix yields
 * "/".  Fails with an rtld error when the result would exceed
 * PATH_MAX.
 */
3234 rtld_dirname(const char *path, char *bname)
3238 /* Empty or NULL string gets treated as "." */
3239 if (path == NULL || *path == '\0') {
3245 /* Strip trailing slashes */
3246 endp = path + strlen(path) - 1;
3247 while (endp > path && *endp == '/')
3250 /* Find the start of the dir */
3251 while (endp > path && *endp != '/')
3254 /* Either the dir is "/" or there are no slashes */
3256 bname[0] = *endp == '/' ? '/' : '.';
3262 } while (endp > path && *endp == '/');
3265 if (endp - path + 2 > PATH_MAX)
3267 _rtld_error("Filename is too long: %s", path);
3271 strncpy(bname, path, endp - path + 1);
3272 bname[endp - path + 1] = '\0';
/*
 * Like rtld_dirname(), but make the result absolute by prefixing the
 * current working directory ("<cwd>/<dir>").  All strlcat results are
 * checked against PATH_MAX to reject truncation; the visible failure
 * return lines are not in this chunk.
 */
3277 rtld_dirname_abs(const char *path, char *base)
3279 char base_rel[PATH_MAX];
3281 if (rtld_dirname(path, base) == -1)
3285 if (getcwd(base_rel, sizeof(base_rel)) == NULL ||
3286 strlcat(base_rel, "/", sizeof(base_rel)) >= sizeof(base_rel) ||
3287 strlcat(base_rel, base, sizeof(base_rel)) >= sizeof(base_rel))
3289 strcpy(base, base_rel);
/*
 * Insert the object's link_map into the debugger-visible r_debug
 * list.  New entries go at the tail, except that the dynamic linker's
 * own entry is kept last so the scan stops just before it.
 */
3294 linkmap_add(Obj_Entry *obj)
3296 struct link_map *l = &obj->linkmap;
3297 struct link_map *prev;
3299 obj->linkmap.l_name = obj->path;
3300 obj->linkmap.l_addr = obj->mapbase;
3301 obj->linkmap.l_ld = obj->dynamic;
3303 /* GDB needs load offset on MIPS to use the symbols */
3304 obj->linkmap.l_offs = obj->relocbase;
3307 if (r_debug.r_map == NULL) {
3313 * Scan to the end of the list, but not past the entry for the
3314 * dynamic linker, which we want to keep at the very end.
3316 for (prev = r_debug.r_map;
3317 prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
3318 prev = prev->l_next)
3321 /* Link in the new entry. */
3323 l->l_next = prev->l_next;
3324 if (l->l_next != NULL)
3325 l->l_next->l_prev = l;
/*
 * Unlink the object's link_map from the r_debug doubly linked list,
 * updating r_debug.r_map when the object was at the head.
 */
3330 linkmap_delete(Obj_Entry *obj)
3332 struct link_map *l = &obj->linkmap;
3334 if (l->l_prev == NULL) {
3335 if ((r_debug.r_map = l->l_next) != NULL)
3336 l->l_next->l_prev = NULL;
3340 if ((l->l_prev->l_next = l->l_next) != NULL)
3341 l->l_next->l_prev = l->l_prev;
3345 * Function for the debugger to set a breakpoint on to gain control.
3347 * The two parameters allow the debugger to easily find and determine
3348 * what the runtime loader is doing and to whom it is doing it.
3350 * When the loadhook trap is hit (r_debug_state, set at program
3351 * initialization), the arguments can be found on the stack:
3353 * +8 struct link_map *m
3354 * +4 struct r_debug *rd
/*
 * Debugger hook: gdb sets a breakpoint here to learn about link-map
 * changes.  The body is deliberately a compiler barrier only.
 */
3358 r_debug_state(struct r_debug* rd, struct link_map *m)
3361 * The following is a hack to force the compiler to emit calls to
3362 * this function, even when optimizing. If the function is empty,
3363 * the compiler is not obliged to emit any code for calls to it,
3364 * even when marked __noinline. However, gdb depends on those
3367 __asm __volatile("" : : : "memory");
3371 * Get address of the pointer variable in the main program.
3372 * Prefer non-weak symbol over the weak one.
/*
 * Find the address of a global pointer variable (e.g. "environ") via
 * a global-scope symbol lookup, honoring STT_FUNC / STT_GNU_IFUNC
 * indirection.  Returns NULL (on a line not visible here) when the
 * lookup fails.
 */
3374 static const void **
3375 get_program_var_addr(const char *name, RtldLockState *lockstate)
3380 symlook_init(&req, name);
3381 req.lockstate = lockstate;
3382 donelist_init(&donelist);
3383 if (symlook_global(&req, &donelist) != 0)
3385 if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
3386 return ((const void **)make_function_pointer(req.sym_out,
3388 else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
3389 return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
3391 return ((const void **)(req.defobj_out->relocbase + req.sym_out->st_value));
3395 * Set a pointer variable in the main program to the given value. This
3396 * is used to set key variables such as "environ" before any of the
3397 * init functions are called.
/* Store 'value' into the named program variable if it can be found. */
3400 set_program_var(const char *name, const void *value)
3404 if ((addr = get_program_var_addr(name, NULL)) != NULL) {
3405 dbg("\"%s\": *%p <-- %p", name, addr, value);
3411 * Search the global objects, including dependencies and main object,
3412 * for the given symbol.
/*
 * Global-scope symbol search: first the objects loaded at startup
 * (list_main), then every DAG rooted at an RTLD_GLOBAL object.  A
 * previously found weak definition may be superseded by a strong one;
 * once a strong (non-STB_WEAK) result is held, the remaining DAGs are
 * skipped.  Returns 0 on success, ESRCH when nothing was found.
 */
3415 symlook_global(SymLook *req, DoneList *donelist)
3418 const Objlist_Entry *elm;
3421 symlook_init_from_req(&req1, req);
3423 /* Search all objects loaded at program start up. */
3424 if (req->defobj_out == NULL ||
3425 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
3426 res = symlook_list(&req1, &list_main, donelist);
3427 if (res == 0 && (req->defobj_out == NULL ||
3428 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3429 req->sym_out = req1.sym_out;
3430 req->defobj_out = req1.defobj_out;
3431 assert(req->defobj_out != NULL);
3435 /* Search all DAGs whose roots are RTLD_GLOBAL objects. */
3436 STAILQ_FOREACH(elm, &list_global, link) {
3437 if (req->defobj_out != NULL &&
3438 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
3440 res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
3441 if (res == 0 && (req->defobj_out == NULL ||
3442 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3443 req->sym_out = req1.sym_out;
3444 req->defobj_out = req1.defobj_out;
3445 assert(req->defobj_out != NULL);
3449 return (req->sym_out != NULL ? 0 : ESRCH);
3453 * This is a special version of getenv which is far more efficient
3454 * at finding LD_ environment vars.
/*
 * getenv() specialized for "LD_..." variables: on first use, cache up
 * to LD_ARY_CACHE pointers to environ entries starting with "LD_",
 * then answer lookups from that small array.  Returns the value part
 * (after '='), or NULL (return lines not visible in this chunk).
 */
3458 _getenv_ld(const char *id)
3462 int idlen = strlen(id);
     /* Cache overflowed at build time: fall back (handling not visible). */
3464 if (ld_index == LD_ARY_CACHE)
3466 if (ld_index == 0) {
3467 for (i = j = 0; (envp = environ[i]) != NULL && j < LD_ARY_CACHE; ++i) {
3468 if (envp[0] == 'L' && envp[1] == 'D' && envp[2] == '_')
3475 for (i = ld_index - 1; i >= 0; --i) {
3476 if (strncmp(ld_ary[i], id, idlen) == 0 && ld_ary[i][idlen] == '=')
3477 return(ld_ary[i] + idlen + 1);
3483 * Given a symbol name in a referencing object, find the corresponding
3484 * definition of the symbol. Returns a pointer to the symbol, or NULL if
3485 * no definition was found. Returns a pointer to the Obj_Entry of the
3486 * defining object via the reference parameter DEFOBJ_OUT.
/*
 * Default symbol resolution order for a reference from 'refobj':
 * 1) refobj itself if DF_SYMBOLIC; 2) global scope; 3) every dlopened
 * DAG containing refobj; 4) the dynamic linker's own exported
 * symbols.  A weak result can be replaced by a later strong one.
 * Returns 0 with sym_out/defobj_out set, or ESRCH.
 */
3489 symlook_default(SymLook *req, const Obj_Entry *refobj)
3492 const Objlist_Entry *elm;
3496 donelist_init(&donelist);
3497 symlook_init_from_req(&req1, req);
3499 /* Look first in the referencing object if linked symbolically. */
3500 if (refobj->symbolic && !donelist_check(&donelist, refobj)) {
3501 res = symlook_obj(&req1, refobj);
3503 req->sym_out = req1.sym_out;
3504 req->defobj_out = req1.defobj_out;
3505 assert(req->defobj_out != NULL);
3509 symlook_global(req, &donelist);
3511 /* Search all dlopened DAGs containing the referencing object. */
3512 STAILQ_FOREACH(elm, &refobj->dldags, link) {
3513 if (req->sym_out != NULL &&
3514 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
3516 res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
3517 if (res == 0 && (req->sym_out == NULL ||
3518 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3519 req->sym_out = req1.sym_out;
3520 req->defobj_out = req1.defobj_out;
3521 assert(req->defobj_out != NULL);
3526 * Search the dynamic linker itself, and possibly resolve the
3527 * symbol from there. This is how the application links to
3528 * dynamic linker services such as dlopen. Only the values listed
3529 * in the "exports" array can be resolved from the dynamic linker.
3531 if (req->sym_out == NULL ||
3532 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
3533 res = symlook_obj(&req1, &obj_rtld);
3534 if (res == 0 && is_exported(req1.sym_out)) {
3535 req->sym_out = req1.sym_out;
3536 req->defobj_out = req1.defobj_out;
3537 assert(req->defobj_out != NULL);
3541 return (req->sym_out != NULL ? 0 : ESRCH);
/*
 * Search every object on 'objlist' for the requested symbol, skipping
 * objects already recorded in the DoneList.  A strong definition ends
 * the scan immediately; otherwise the first weak one found is kept.
 */
3545 symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
3548 const Obj_Entry *defobj;
3549 const Objlist_Entry *elm;
3555 STAILQ_FOREACH(elm, objlist, link) {
3556 if (donelist_check(dlp, elm->obj))
3558 symlook_init_from_req(&req1, req);
3559 if ((res = symlook_obj(&req1, elm->obj)) == 0) {
3560 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
3562 defobj = req1.defobj_out;
3563 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3570 req->defobj_out = defobj;
3577 * Search the chain of DAGS pointed to by the given Needed_Entry
3578 * for a symbol of the given name. Each DAG is scanned completely
3579 * before advancing to the next one. Returns a pointer to the symbol,
3580 * or NULL if no definition was found.
/*
 * Search the DAGs of a Needed_Entry chain (used for filtee lookup):
 * each needed object's full dagmembers list is scanned via
 * symlook_list() before moving to the next entry; strong beats weak
 * as elsewhere.
 */
3583 symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
3586 const Needed_Entry *n;
3587 const Obj_Entry *defobj;
3593 symlook_init_from_req(&req1, req);
3594 for (n = needed; n != NULL; n = n->next) {
3595 if (n->obj == NULL ||
3596 (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
3598 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
3600 defobj = req1.defobj_out;
3601 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3607 req->defobj_out = defobj;
3614 * Search the symbol table of a single shared object for a symbol of
3615 * the given name and version, if requested. Returns a pointer to the
3616 * symbol, or NULL if no definition was found. If the object is
3617 * filter, return filtered symbol from filtee.
3619 * The symbol's hash value is passed in for efficiency reasons; that
3620 * eliminates many recomputations of the hash value.
/*
 * Per-object symbol lookup dispatcher: prefer the GNU hash table when
 * the object has a valid one, otherwise fall back to SysV hash.  If
 * the object is a filter (DT_FILTER / DT_AUXILIARY), the lookup is
 * redirected into the loaded filtees via symlook_needed().
 */
3623 symlook_obj(SymLook *req, const Obj_Entry *obj)
3630 * There is at least one valid hash at this point, and we prefer to use
3631 * the faster GNU version if available.
3633 if (obj->valid_hash_gnu)
3634 mres = symlook_obj2(req, obj);
3636 mres = symlook_obj1(req, obj);
3639 if (obj->needed_filtees != NULL) {
3640 load_filtees(__DECONST(Obj_Entry *, obj), 0, req->lockstate);
3641 donelist_init(&donelist);
3642 symlook_init_from_req(&req1, req);
3643 res = symlook_needed(&req1, obj->needed_filtees, &donelist);
3645 req->sym_out = req1.sym_out;
3646 req->defobj_out = req1.defobj_out;
3650 if (obj->needed_aux_filtees != NULL) {
3651 load_filtees(__DECONST(Obj_Entry *, obj), 0, req->lockstate);
3652 donelist_init(&donelist);
3653 symlook_init_from_req(&req1, req);
3654 res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist);
3656 req->sym_out = req1.sym_out;
3657 req->defobj_out = req1.defobj_out;
3665 /* Symbol match routine common to both hash functions */
/*
 * Decide whether symbol table entry 'symnum' of 'obj' satisfies the
 * lookup request, including symbol-versioning rules.  Fills 'result'
 * (definite match in sym_out, or a candidate versioned symbol in
 * vsymp/vcount).  NOTE(review): the error messages below index the
 * string table with 'symnum' rather than 'symp->st_name' — looks
 * wrong, but matches the historical upstream text; verify before
 * changing.
 */
3667 matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
3668 const unsigned long symnum)
3671 const Elf_Sym *symp = obj->symtab + symnum;
3672 const char *strp = obj->strtab + symp->st_name;
3674 switch (ELF_ST_TYPE(symp->st_info)) {
3680 if (symp->st_value == 0)
3684 if (symp->st_shndx != SHN_UNDEF)
3686 else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
3687 (ELF_ST_TYPE(symp->st_info) == STT_FUNC))
     /* Name must match before any version checks are attempted. */
3693 if (strcmp(req->name, strp) != 0)
3696 if (req->ventry == NULL) {
3697 if (obj->versyms != NULL) {
3698 verndx = VER_NDX(obj->versyms[symnum]);
3699 if (verndx > obj->vernum) {
3700 _rtld_error("%s: symbol %s references wrong version %d",
3701 obj->path, obj->strtab + symnum, verndx);
3705 * If we are not called from dlsym (i.e. this is a normal relocation
3706 * from unversioned binary), accept the symbol immediately if it happens
3707 * to have first version after this shared object became versioned.
3708 * Otherwise, if symbol is versioned and not hidden, remember it. If it
3709 * is the only symbol with this name exported by the shared object, it
3710 * will be returned as a match by the calling function. If symbol is
3711 * global (verndx < 2) accept it unconditionally.
3713 if ((req->flags & SYMLOOK_DLSYM) == 0 && verndx == VER_NDX_GIVEN) {
3714 result->sym_out = symp;
3717 else if (verndx >= VER_NDX_GIVEN) {
3718 if ((obj->versyms[symnum] & VER_NDX_HIDDEN) == 0) {
3719 if (result->vsymp == NULL)
3720 result->vsymp = symp;
3726 result->sym_out = symp;
     /* Versioned lookup (dlvsym or versioned reference). */
3729 if (obj->versyms == NULL) {
3730 if (object_match_name(obj, req->ventry->name)) {
3731 _rtld_error("%s: object %s should provide version %s for "
3732 "symbol %s", obj_rtld.path, obj->path,
3733 req->ventry->name, obj->strtab + symnum);
3737 verndx = VER_NDX(obj->versyms[symnum]);
3738 if (verndx > obj->vernum) {
3739 _rtld_error("%s: symbol %s references wrong version %d",
3740 obj->path, obj->strtab + symnum, verndx);
3743 if (obj->vertab[verndx].hash != req->ventry->hash ||
3744 strcmp(obj->vertab[verndx].name, req->ventry->name)) {
3746 * Version does not match. Look if this is a global symbol and if it is
3747 * not hidden. If global symbol (verndx < 2) is available, use it. Do not
3748 * return symbol if we are called by dlvsym, because dlvsym looks for a
3749 * specific version and default one is not what dlvsym wants.
3751 if ((req->flags & SYMLOOK_DLSYM) || (verndx >= VER_NDX_GIVEN) ||
3752 (obj->versyms[symnum] & VER_NDX_HIDDEN))
3756 result->sym_out = symp;
3761 * Search for symbol using SysV hash function.
3762 * obj->buckets is known not to be NULL at this point; the test for this was
3763 * performed with the obj->valid_hash_sysv assignment.
/*
 * SysV-hash lookup: walk the bucket's chain calling matched_symbol()
 * until a definite match.  If the scan ends with exactly one
 * versioned candidate, that candidate is accepted.  A chain index
 * beyond nchains marks a corrupt object (ESRCH).
 */
3766 symlook_obj1(SymLook *req, const Obj_Entry *obj)
3768 unsigned long symnum;
3769 Sym_Match_Result matchres;
3771 matchres.sym_out = NULL;
3772 matchres.vsymp = NULL;
3773 matchres.vcount = 0;
3775 for (symnum = obj->buckets[req->hash % obj->nbuckets];
3776 symnum != STN_UNDEF;
3777 symnum = obj->chains[symnum]) {
3779 if (symnum >= obj->nchains)
3780 return (ESRCH); /* Bad object */
3782 if (matched_symbol(req, obj, &matchres, symnum)) {
3783 req->sym_out = matchres.sym_out;
3784 req->defobj_out = obj;
3788 if (matchres.vcount == 1) {
3789 req->sym_out = matchres.vsymp;
3790 req->defobj_out = obj;
3796 /* Search for symbol using GNU hash function */
/*
 * GNU-hash lookup: consult the Bloom filter first to reject most
 * misses cheaply, then walk the hash chain.  Chain entries store the
 * hash with bit 0 repurposed as the end-of-chain flag, hence the
 * ">> 1" comparison and the "& 1u" loop terminator.
 */
3798 symlook_obj2(SymLook *req, const Obj_Entry *obj)
3800 Elf_Addr bloom_word;
3802 unsigned int h1, h2;
3803 unsigned long symnum;
3804 const int c = __ELF_WORD_SIZE;
3805 Sym_Match_Result matchres;
3807 matchres.sym_out = NULL;
3808 matchres.vsymp = NULL;
3809 matchres.vcount = 0;
3811 /* pick right bitmask word from Bloom filter array*/
3812 bloom_word = obj->bloom_gnu[(req->hash_gnu / c) & obj->maskwords_bm_gnu];
3814 /* calculate modulus 32 (64 for x86_64) of gnu hash and its derivative */
3815 h1 = req->hash_gnu & (c - 1);
3816 h2 = ((req->hash_gnu >> obj->shift2_gnu) & (c - 1));
3818 /* Filter out the "definitely not in set" queries */
3819 if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
3822 /* Locate hash chain and corresponding value element*/
3823 bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
3826 const Elf32_Word *hashval = &obj->chain_zero_gnu[bucket];
3828 if (((*hashval ^ req->hash_gnu) >> 1) == 0)
3830 symnum = hashval - obj->chain_zero_gnu;
3831 if (matched_symbol(req, obj, &matchres, symnum)) {
3832 req->sym_out = matchres.sym_out;
3833 req->defobj_out = obj;
3837 while ((*hashval++ & 1u) == 0);
3838 if (matchres.vcount == 1) {
3839 req->sym_out = matchres.vsymp;
3840 req->defobj_out = obj;
/*
 * ldd-style tracing (LD_TRACE_LOADED_OBJECTS): for each object's
 * needed libraries, print a line formatted by the
 * LD_TRACE_LOADED_OBJECTS_FMT1/FMT2 templates (%o, %p, %x, ...
 * directives; the directive switch body is largely not visible in
 * this chunk).  FMT1 is used for "lib*"-named dependencies, FMT2
 * otherwise.
 */
3847 trace_loaded_objects(Obj_Entry *obj)
3849 const char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
3852 if ((main_local = _getenv_ld("LD_TRACE_LOADED_OBJECTS_PROGNAME")) == NULL)
3855 if ((fmt1 = _getenv_ld("LD_TRACE_LOADED_OBJECTS_FMT1")) == NULL)
3856 fmt1 = "\t%o => %p (%x)\n";
3858 if ((fmt2 = _getenv_ld("LD_TRACE_LOADED_OBJECTS_FMT2")) == NULL)
3859 fmt2 = "\t%o (%x)\n";
3861 list_containers = _getenv_ld("LD_TRACE_LOADED_OBJECTS_ALL");
3863 for (; obj; obj = obj->next) {
3864 Needed_Entry *needed;
3868 if (list_containers && obj->needed != NULL)
3869 rtld_printf("%s:\n", obj->path);
3870 for (needed = obj->needed; needed; needed = needed->next) {
     /* Print each dependency once unless listing all containers. */
3871 if (needed->obj != NULL) {
3872 if (needed->obj->traced && !list_containers)
3874 needed->obj->traced = true;
3875 path = needed->obj->path;
3879 name = (char *)obj->strtab + needed->name;
3880 is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */
3882 fmt = is_lib ? fmt1 : fmt2;
3883 while ((c = *fmt++) != '\0') {
3909 rtld_putstr(main_local);
3912 rtld_putstr(obj_main->path);
3921 rtld_printf("%p", needed->obj ? needed->obj->mapbase :
3934 * Unload a dlopened object and its dependencies from memory and from
3935 * our data structures. It is assumed that the DAG rooted in the
3936 * object has already been unreferenced, and that the object has a
3937 * reference count of 0.
/*
 * Unload the DAG rooted at 'root' (refcount already 0): unlink it
 * from the global/DAG lists, then sweep the object list unmapping and
 * freeing every object whose refcount dropped to zero.  The sweep
 * starts at obj_list->next, deliberately skipping the main program.
 * NOTE(review): unload_filtees() is passed 'root' on every iteration
 * rather than the current 'obj' — matches the era's upstream code,
 * but confirm intent before touching.
 */
3940 unload_object(Obj_Entry *root)
3945 assert(root->refcount == 0);
3948 * Pass over the DAG removing unreferenced objects from
3949 * appropriate lists.
3951 unlink_object(root);
3953 /* Unmap all objects that are no longer referenced. */
3954 linkp = &obj_list->next;
3955 while ((obj = *linkp) != NULL) {
3956 if (obj->refcount == 0) {
3957 LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
3959 dbg("unloading \"%s\"", obj->path);
3960 unload_filtees(root);
3961 munmap(obj->mapbase, obj->mapsize);
3962 linkmap_delete(obj);
/*
 * Recursively detach an unreferenced object from the RTLD_GLOBAL list
 * and from the dldags lists of every DAG member, recursing into
 * members that also became unreferenced.
 */
3973 unlink_object(Obj_Entry *root)
3977 if (root->refcount == 0) {
3978 /* Remove the object from the RTLD_GLOBAL list. */
3979 objlist_remove(&list_global, root);
3981 /* Remove the object from all objects' DAG lists. */
3982 STAILQ_FOREACH(elm, &root->dagmembers, link) {
3983 objlist_remove(&elm->obj->dldags, root);
3984 if (elm->obj != root)
3985 unlink_object(elm->obj);
/* Increment the refcount of every member of an initialized DAG. */
3991 ref_dag(Obj_Entry *root)
3995 assert(root->dag_inited);
3996 STAILQ_FOREACH(elm, &root->dagmembers, link)
3997 elm->obj->refcount++;
/* Decrement the refcount of every member of an initialized DAG. */
4001 unref_dag(Obj_Entry *root)
4005 assert(root->dag_inited);
4006 STAILQ_FOREACH(elm, &root->dagmembers, link)
4007 elm->obj->refcount--;
4011 * Common code for MD __tls_get_addr().
/*
 * Slow path shared by the MD __tls_get_addr() stubs.  dtv layout:
 * [0] generation, [1] max module index, [2..] per-module TLS blocks.
 * If the generation is stale, grow the DTV under the write lock; if
 * the module's block is not yet allocated, allocate it lazily.
 */
4014 tls_get_addr_common(Elf_Addr** dtvp, int index, size_t offset)
4016 Elf_Addr* dtv = *dtvp;
4017 RtldLockState lockstate;
4019 /* Check dtv generation in case new modules have arrived */
4020 if (dtv[0] != tls_dtv_generation) {
4024 wlock_acquire(rtld_bind_lock, &lockstate);
4025 newdtv = calloc(1, (tls_max_index + 2) * sizeof(Elf_Addr));
4027 if (to_copy > tls_max_index)
4028 to_copy = tls_max_index;
4029 memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
4030 newdtv[0] = tls_dtv_generation;
4031 newdtv[1] = tls_max_index;
4033 lock_release(rtld_bind_lock, &lockstate);
4034 dtv = *dtvp = newdtv;
4037 /* Dynamically allocate module TLS if necessary */
4038 if (!dtv[index + 1]) {
4039 /* Signal safe, wlock will block out signals. */
4040 wlock_acquire(rtld_bind_lock, &lockstate);
     /* Re-check under the lock: another thread may have allocated it. */
4041 if (!dtv[index + 1])
4042 dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
4043 lock_release(rtld_bind_lock, &lockstate);
4045 return (void*) (dtv[index + 1] + offset);
4048 #if defined(RTLD_STATIC_TLS_VARIANT_II)
4051 * Allocate the static TLS area. Return a pointer to the TCB. The
4052 * static area is based on negative offsets relative to the tcb.
4054 * The TCB contains an errno pointer for the system call layer, but because
4055 * we are the RTLD we really have no idea how the caller was compiled so
4056 * the information has to be passed in. errno can either be:
4058 * type 0 errno is a simple non-TLS global pointer.
4059 * (special case for e.g. libc_rtld)
4060 * type 1 errno accessed by GOT entry (dynamically linked programs)
4061 * type 2 errno accessed by %gs:OFFSET (statically linked programs)
/*
 * Build the Variant II static TLS area for a new thread: the static
 * blocks sit at negative offsets immediately below the TCB, and a
 * freshly zeroed DTV records the generation, the max module index and
 * the address of each module's block.  Per the comment above, the
 * function returns a pointer to the TCB (return not visible here).
 */
4064 allocate_tls(Obj_Entry *objs)
4069 struct tls_tcb *tcb;
4074 * Allocate the new TCB. static TLS storage is placed just before the
4075 * TCB to support the %gs:OFFSET (negative offset) model.
4077 data_size = (tls_static_space + RTLD_STATIC_TLS_ALIGN_MASK) &
4078 ~RTLD_STATIC_TLS_ALIGN_MASK;
4079 tcb = malloc(data_size + sizeof(*tcb));
4080 tcb = (void *)((char *)tcb + data_size); /* actual tcb location */
4082 dtv_size = (tls_max_index + 2) * sizeof(Elf_Addr);
4083 dtv = malloc(dtv_size);
4084 bzero(dtv, dtv_size);
4086 #ifdef RTLD_TCB_HAS_SELF_POINTER
4087 tcb->tcb_self = tcb;
4090 tcb->tcb_pthread = NULL;
4092 dtv[0] = tls_dtv_generation;
4093 dtv[1] = tls_max_index;
/*
 * Copy each object's TLS initialization image into its static slot
 * and zero the uninitialized tail, recording the address in the DTV.
 */
4095 for (obj = objs; obj; obj = obj->next) {
4096 if (obj->tlsoffset) {
4097 addr = (Elf_Addr)tcb - obj->tlsoffset;
4098 memset((void *)(addr + obj->tlsinitsize),
4099 0, obj->tlssize - obj->tlsinitsize);
4101 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
4102 dtv[obj->tlsindex + 1] = addr;
/*
 * Release a thread's TLS area: free every dynamically allocated module
 * block recorded in the DTV (any entry falling outside the static
 * range [tls_start, tls_end] below the TCB), then free the static
 * block itself.  NOTE(review): the dtv and dtv_size assignments are
 * on lines not visible in this extraction.
 */
4109 free_tls(struct tls_tcb *tcb)
4113 Elf_Addr tls_start, tls_end;
4116 data_size = (tls_static_space + RTLD_STATIC_TLS_ALIGN_MASK) &
4117 ~RTLD_STATIC_TLS_ALIGN_MASK;
4121 tls_end = (Elf_Addr)tcb;
4122 tls_start = (Elf_Addr)tcb - data_size;
4123 for (i = 0; i < dtv_size; i++) {
4124 if (dtv[i+2] != 0 && (dtv[i+2] < tls_start || dtv[i+2] > tls_end)) {
4125 free((void *)dtv[i+2]);
/* The static area starts data_size bytes below the TCB; free it whole. */
4129 free((void*) tls_start);
4133 #error "Unsupported TLS layout"
4137 * Allocate TLS block for module with given index.
/*
 * Allocate and initialize the dynamic TLS block for the module with
 * the given TLS index: locate the object on obj_list, malloc its
 * block, copy the initialization image and zero the remainder.
 * Records an rtld error when the index is unknown or malloc fails.
 */
4140 allocate_module_tls(int index)
4145 for (obj = obj_list; obj; obj = obj->next) {
4146 if (obj->tlsindex == index)
4150 _rtld_error("Can't find module with TLS index %d", index);
4154 p = malloc(obj->tlssize);
4156 _rtld_error("Cannot allocate TLS block for index %d", index);
4159 memcpy(p, obj->tlsinit, obj->tlsinitsize);
4160 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
/*
 * Reserve a static-TLS offset for "obj" using a simple bump allocator:
 * TLS index 1 (the first module) gets the initial slot, later modules
 * are packed after the previous allocation.  Once the static TLS size
 * has been fixed, the request must fit within it (the over-limit exit
 * path is on lines not visible here).
 */
4166 allocate_tls_offset(Obj_Entry *obj)
/* Objects without TLS need no offset; mark them done immediately. */
4173 if (obj->tlssize == 0) {
4174 obj->tls_done = true;
4178 if (obj->tlsindex == 1)
4179 off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign);
4181 off = calculate_tls_offset(tls_last_offset, tls_last_size,
4182 obj->tlssize, obj->tlsalign);
4185 * If we have already fixed the size of the static TLS block, we
4186 * must stay within that size. When allocating the static TLS, we
4187 * leave a small amount of space spare to be used for dynamically
4188 * loading modules which use static TLS.
4190 if (tls_static_space) {
4191 if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
4195 tls_last_offset = obj->tlsoffset = off;
4196 tls_last_size = obj->tlssize;
4197 obj->tls_done = true;
/*
 * Return obj's static-TLS reservation to the bump allocator, but only
 * when it was the most recent allocation (see comment below).
 */
4203 free_tls_offset(Obj_Entry *obj)
4205 #ifdef RTLD_STATIC_TLS_VARIANT_II
4207 * If we were the last thing to allocate out of the static TLS
4208 * block, we give our space back to the 'allocator'. This is a
4209 * simplistic workaround to allow libGL.so.1 to be loaded and
4210 * unloaded multiple times. We only handle the Variant II
4211 * mechanism for now - this really needs a proper allocator.
4213 if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
4214 == calculate_tls_end(tls_last_offset, tls_last_size)) {
4215 tls_last_offset -= obj->tlssize;
/*
 * Thread-library entry point: allocate a TLS area (TCB + static
 * blocks + DTV) for the current object list, under the bind lock.
 */
4222 _rtld_allocate_tls(void)
4224 struct tls_tcb *new_tcb;
4225 RtldLockState lockstate;
4227 wlock_acquire(rtld_bind_lock, &lockstate);
4228 new_tcb = allocate_tls(obj_list);
4229 lock_release(rtld_bind_lock, &lockstate);
/*
 * Thread-library entry point: free a thread's TLS area under the bind
 * lock (the free_tls() call itself is on a line not visible here).
 */
4234 _rtld_free_tls(struct tls_tcb *tcb)
4236 RtldLockState lockstate;
4238 wlock_acquire(rtld_bind_lock, &lockstate);
4240 lock_release(rtld_bind_lock, &lockstate);
/*
 * Record an additional name (alias) under which "obj" can be located.
 * Best-effort: a failed malloc is silently ignored.  NOTE(review):
 * "len" is assigned on a line not visible here; presumably it covers
 * the name plus its NUL terminator.
 */
4244 object_add_name(Obj_Entry *obj, const char *name)
4250 entry = malloc(sizeof(Name_Entry) + len);
4252 if (entry != NULL) {
4253 strcpy(entry->name, name);
4254 STAILQ_INSERT_TAIL(&obj->names, entry, link);
/*
 * Check whether "name" matches one of the names recorded for "obj"
 * via object_add_name().
 */
4259 object_match_name(const Obj_Entry *obj, const char *name)
4263 STAILQ_FOREACH(entry, &obj->names, link) {
4264 if (strcmp(name, entry->name) == 0)
/*
 * Find the Obj_Entry for a dependency of "obj" identified by name:
 * first search the main object list by recorded names, then obj's
 * DT_NEEDED entries.  May legitimately return NULL when the needed
 * object has not been loaded yet; records an rtld error when the name
 * is not a dependency at all.
 */
4271 locate_dependency(const Obj_Entry *obj, const char *name)
4273 const Objlist_Entry *entry;
4274 const Needed_Entry *needed;
4276 STAILQ_FOREACH(entry, &list_main, link) {
4277 if (object_match_name(entry->obj, name))
4281 for (needed = obj->needed; needed != NULL; needed = needed->next) {
4282 if (strcmp(obj->strtab + needed->name, name) == 0 ||
4283 (needed->obj != NULL && object_match_name(needed->obj, name))) {
4285 * If there is DT_NEEDED for the name we are looking for,
4286 * we are all set. Note that object might not be found if
4287 * dependency was not loaded yet, so the function can
4288 * return NULL here. This is expected and handled
4289 * properly by the caller.
4291 return (needed->obj);
4294 _rtld_error("%s: Unexpected inconsistency: dependency %s not found",
/*
 * Verify that "depobj" defines the version named by the Elf_Vernaux
 * record "vna" of "refobj": walk depobj's Verdef chain comparing the
 * version hash and name.  A missing version marked VER_FLG_WEAK is
 * tolerated; otherwise an rtld error is recorded.  (The return
 * statements are on lines not visible in this extraction.)
 */
4300 check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
4301 const Elf_Vernaux *vna)
4303 const Elf_Verdef *vd;
4304 const char *vername;
4306 vername = refobj->strtab + vna->vna_name;
4307 vd = depobj->verdef;
/* No version definitions at all in the dependency: hard failure. */
4309 _rtld_error("%s: version %s required by %s not defined",
4310 depobj->path, vername, refobj->path);
4314 if (vd->vd_version != VER_DEF_CURRENT) {
4315 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
4316 depobj->path, vd->vd_version);
/* Hash match is a fast pre-filter; confirm with a string compare. */
4319 if (vna->vna_hash == vd->vd_hash) {
4320 const Elf_Verdaux *aux = (const Elf_Verdaux *)
4321 ((char *)vd + vd->vd_aux);
4322 if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
4325 if (vd->vd_next == 0)
4327 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
4329 if (vna->vna_flags & VER_FLG_WEAK)
4331 _rtld_error("%s: version %s required by %s not found",
4332 depobj->path, vername, refobj->path);
/*
 * Validate and cache the version information of "obj".  Two passes:
 * first scan the Verneed and Verdef chains to find the largest version
 * index in use (with basic format sanity checks), then allocate
 * obj->vertab indexed by version number and populate it from the
 * object's own definitions and from its requirements, checking each
 * requirement against the dependency that should provide it.
 */
4337 rtld_verify_object_versions(Obj_Entry *obj)
4339 const Elf_Verneed *vn;
4340 const Elf_Verdef *vd;
4341 const Elf_Verdaux *vda;
4342 const Elf_Vernaux *vna;
4343 const Obj_Entry *depobj;
4344 int maxvernum, vernum;
4348 * Walk over defined and required version records and figure out
4349 * max index used by any of them. Do very basic sanity checking
/* Pass 1a: required versions (Verneed chain, with nested Vernaux). */
4353 while (vn != NULL) {
4354 if (vn->vn_version != VER_NEED_CURRENT) {
4355 _rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
4356 obj->path, vn->vn_version);
4359 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
4361 vernum = VER_NEED_IDX(vna->vna_other);
4362 if (vernum > maxvernum)
4364 if (vna->vna_next == 0)
4366 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
4368 if (vn->vn_next == 0)
4370 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
/* Pass 1b: defined versions (Verdef chain). */
4374 while (vd != NULL) {
4375 if (vd->vd_version != VER_DEF_CURRENT) {
4376 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
4377 obj->path, vd->vd_version);
4380 vernum = VER_DEF_IDX(vd->vd_ndx);
4381 if (vernum > maxvernum)
4383 if (vd->vd_next == 0)
4385 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
4392 * Store version information in array indexable by version index.
4393 * Verify that object version requirements are satisfied along the
4396 obj->vernum = maxvernum + 1;
4397 obj->vertab = calloc(obj->vernum, sizeof(Ver_Entry));
/* Pass 2a: record this object's own definitions (skip the base version). */
4400 while (vd != NULL) {
4401 if ((vd->vd_flags & VER_FLG_BASE) == 0) {
4402 vernum = VER_DEF_IDX(vd->vd_ndx);
4403 assert(vernum <= maxvernum);
4404 vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux);
4405 obj->vertab[vernum].hash = vd->vd_hash;
4406 obj->vertab[vernum].name = obj->strtab + vda->vda_name;
4407 obj->vertab[vernum].file = NULL;
4408 obj->vertab[vernum].flags = 0;
4410 if (vd->vd_next == 0)
4412 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
/* Pass 2b: record requirements, verifying each against its provider. */
4416 while (vn != NULL) {
4417 depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
4420 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
4422 if (check_object_provided_version(obj, depobj, vna))
4424 vernum = VER_NEED_IDX(vna->vna_other);
4425 assert(vernum <= maxvernum);
4426 obj->vertab[vernum].hash = vna->vna_hash;
4427 obj->vertab[vernum].name = obj->strtab + vna->vna_name;
4428 obj->vertab[vernum].file = obj->strtab + vn->vn_file;
4429 obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
4430 VER_INFO_HIDDEN : 0;
4431 if (vna->vna_next == 0)
4433 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
4435 if (vn->vn_next == 0)
4437 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
/*
 * Verify version requirements for every object on "objlist" and for
 * rtld itself, skipping dummy objects and objects already verified
 * (vertab != NULL).  When tracing (ld_tracing set), failures do not
 * stop the walk so all problems get reported.
 */
4443 rtld_verify_versions(const Objlist *objlist)
4445 Objlist_Entry *entry;
4449 STAILQ_FOREACH(entry, objlist, link) {
4451 * Skip dummy objects or objects that have their version requirements
4454 if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
4456 if (rtld_verify_object_versions(entry->obj) == -1) {
4458 if (ld_tracing == NULL)
4462 if (rc == 0 || ld_tracing != NULL)
4463 rc = rtld_verify_object_versions(&obj_rtld);
/*
 * Look up the Ver_Entry for symbol "symnum" via the object's versym
 * table; returns NULL (on fall-through, not visible here) when the
 * symbol is unversioned or the version index is out of range.
 * NOTE(review): the error message passes obj->strtab + symnum, but
 * symnum is a symbol-table index, not a string-table offset — the
 * printed "symbol" name is likely garbage; verify against upstream.
 */
4468 fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
4473 vernum = VER_NDX(obj->versyms[symnum]);
4474 if (vernum >= obj->vernum) {
4475 _rtld_error("%s: symbol %s has wrong verneed value %d",
4476 obj->path, obj->strtab + symnum, vernum);
4477 } else if (obj->vertab[vernum].hash != 0) {
4478 return &obj->vertab[vernum];
/* Return the currently configured stack protection flags. */
4485 _rtld_get_stack_prot(void)
4488 return (stack_prot);
/*
 * Intentionally a stub on this platform: per the comment below,
 * making stacks executable for PT_GNU_STACK requires kernel support
 * that is not present; the FreeBSD implementation is kept here as
 * reference for when that support lands.
 */
4492 map_stacks_exec(RtldLockState *lockstate)
4496 * Stack protection must be implemented in the kernel before the dynamic
4497 * linker can handle PT_GNU_STACK sections.
4498 * The following is the FreeBSD implementation of map_stacks_exec()
4499 * void (*thr_map_stacks_exec)(void);
4501 * if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
4503 * thr_map_stacks_exec = (void (*)(void))(uintptr_t)
4504 * get_program_var_addr("__pthread_map_stacks_exec", lockstate);
4505 * if (thr_map_stacks_exec != NULL) {
4506 * stack_prot |= PROT_EXEC;
4507 * thr_map_stacks_exec();
/*
 * Initialize a symbol-lookup request: zero the structure and
 * precompute both the SysV ELF hash and the GNU hash of "name".
 */
4513 symlook_init(SymLook *dst, const char *name)
4516 bzero(dst, sizeof(*dst));
4518 dst->hash = elf_hash(name);
4519 dst->hash_gnu = gnu_hash(name);
/*
 * Derive a new lookup request from an existing one: copy the query
 * fields (name, hashes, version, flags, lock state) and reset the
 * output fields so the new request starts unresolved.
 */
4523 symlook_init_from_req(SymLook *dst, const SymLook *src)
4526 dst->name = src->name;
4527 dst->hash = src->hash;
4528 dst->hash_gnu = src->hash_gnu;
4529 dst->ventry = src->ventry;
4530 dst->flags = src->flags;
4531 dst->defobj_out = NULL;
4532 dst->sym_out = NULL;
4533 dst->lockstate = src->lockstate;
4536 #ifdef ENABLE_OSRELDATE
4538 * Overrides for libc_pic-provided functions.
/*
 * Override of the libc_pic-provided function (see comment above):
 * query the kernel's release date via sysctl(KERN_OSRELDATE),
 * validating that the call succeeded and returned a sane value.
 */
4542 __getosreldate(void)
4552 oid[1] = KERN_OSRELDATE;
4554 len = sizeof(osrel);
4555 error = sysctl(oid, 2, &osrel, &len, NULL, 0);
4556 if (error == 0 && osrel > 0 && len == sizeof(osrel))
4563 * No unresolved symbols for rtld.
/*
 * Empty stub so rtld itself has no unresolved __pthread_cxa_finalize
 * symbol (see the comment above).
 */
4566 __pthread_cxa_finalize(struct dl_phdr_info *a)
4571 rtld_strerror(int errnum)
4574 if (errnum < 0 || errnum >= sys_nerr)
4575 return ("Unknown error");
4576 return (sys_errlist[errnum]);