/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libpthread/thread/thr_stack.c,v 1.9 2004/10/06 08:11:07 davidxu Exp $
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <machine/tls.h>
#include <machine/vmparam.h>

#include <stdlib.h>
#include <pthread.h>

#include "thr_private.h"
/*
 * Spare thread stack.  This record is stored inside the top of the
 * cached stack region itself (see _thr_stack_free()), so caching a
 * stack requires no extra allocation.
 */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack) dstackq = LIST_HEAD_INITIALIZER(dstackq);
/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack) mstackq = LIST_HEAD_INITIALIZER(mstackq);
/*
 * Thread stack base for mmap() hint, starts
 * at _usrstack - kern.maxssiz - kern.maxthrssiz.
 * Computed lazily on first allocation (see _thr_stack_alloc()).
 */
static char *base_stack = NULL;
72 * Round size up to the nearest multiple of
78 if (size % _thr_page_size != 0)
79 size = ((size / _thr_page_size) + 1) *
85 _thr_stack_alloc(struct pthread_attr *attr)
87 struct pthread *curthread = tls_get_curthread();
88 struct stack *spare_stack;
94 * Round up stack size to nearest multiple of _thr_page_size so
95 * that mmap() * will work. If the stack size is not an even
96 * multiple, we end up initializing things such that there is
97 * unused space above the beginning of the stack, so the stack
98 * sits snugly against its guard.
100 stacksize = round_up(attr->stacksize_attr);
101 guardsize = round_up(attr->guardsize_attr);
103 attr->stackaddr_attr = NULL;
104 attr->flags &= ~THR_STACK_USER;
107 * Use the garbage collector lock for synchronization of the
108 * spare stack lists and allocations from usrstack.
110 THREAD_LIST_LOCK(curthread);
112 * If the stack and guard sizes are default, try to allocate a stack
113 * from the default-size stack cache:
115 if ((stacksize == THR_STACK_DEFAULT) &&
116 (guardsize == _thr_guard_default)) {
117 if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
118 /* Use the spare stack. */
119 LIST_REMOVE(spare_stack, qe);
120 attr->stackaddr_attr = spare_stack->stackaddr;
124 * The user specified a non-default stack and/or guard size, so try to
125 * allocate a stack from the non-default size stack cache, using the
126 * rounded up stack size (stack_size) in the search:
129 LIST_FOREACH(spare_stack, &mstackq, qe) {
130 if (spare_stack->stacksize == stacksize &&
131 spare_stack->guardsize == guardsize) {
132 LIST_REMOVE(spare_stack, qe);
133 attr->stackaddr_attr = spare_stack->stackaddr;
138 if (attr->stackaddr_attr != NULL) {
139 /* A cached stack was found. Release the lock. */
140 THREAD_LIST_UNLOCK(curthread);
143 * Calculate base_stack on first use (race ok).
146 if (base_stack == NULL) {
152 if (getrlimit(RLIMIT_STACK, &rl) == 0)
153 maxssiz = rl.rlim_max;
156 len = sizeof(maxssiz);
157 sysctlbyname("kern.maxssiz", &maxssiz, &len, NULL, 0);
158 len = sizeof(maxthrssiz);
159 if (sysctlbyname("kern.maxthrssiz",
160 &maxthrssiz, &len, NULL, 0) < 0) {
161 maxthrssiz = MAXTHRSSIZ;
163 base_stack = _usrstack - maxssiz - maxthrssiz;
166 /* Release the lock before mmap'ing it. */
167 THREAD_LIST_UNLOCK(curthread);
170 * Map the stack and guard page together then split the
171 * guard page from allocated space.
173 * We no longer use MAP_STACK and we define an area far
174 * away from the default user stack (even though this will
175 * cost us another few 4K page-table pages). DFly no longer
176 * allows new MAP_STACK mappings to be made inside ungrown
177 * portions of existing mappings.
179 stackaddr = mmap(base_stack, stacksize + guardsize,
180 PROT_READ | PROT_WRITE,
181 MAP_ANON | MAP_PRIVATE, -1, 0);
182 if (stackaddr != MAP_FAILED && guardsize) {
183 if (mmap(stackaddr, guardsize, 0,
184 MAP_ANON | MAP_FIXED, -1, 0) == MAP_FAILED) {
185 munmap(stackaddr, stacksize + guardsize);
186 stackaddr = MAP_FAILED;
188 stackaddr += guardsize;
191 if (stackaddr == MAP_FAILED)
193 attr->stackaddr_attr = stackaddr;
195 if (attr->stackaddr_attr != NULL)
201 /* This function must be called with _thread_list_lock held. */
203 _thr_stack_free(struct pthread_attr *attr)
205 struct stack *spare_stack;
207 if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
208 && (attr->stackaddr_attr != NULL)) {
209 spare_stack = (struct stack *)((char *)attr->stackaddr_attr +
210 attr->stacksize_attr - sizeof(struct stack));
211 spare_stack->stacksize = round_up(attr->stacksize_attr);
212 spare_stack->guardsize = round_up(attr->guardsize_attr);
213 spare_stack->stackaddr = attr->stackaddr_attr;
215 if (spare_stack->stacksize == THR_STACK_DEFAULT &&
216 spare_stack->guardsize == _thr_guard_default) {
217 /* Default stack/guard size. */
218 LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
220 /* Non-default stack/guard size. */
221 LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
223 attr->stackaddr_attr = NULL;
228 _thr_stack_cleanup(void)
232 while ((spare = LIST_FIRST(&dstackq)) != NULL) {
233 LIST_REMOVE(spare, qe);
234 munmap(spare->stackaddr,
235 spare->stacksize + spare->guardsize);