/*
 * Copyright (c) 2005-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
37 #include <sys/kernel.h>
38 #include <sys/libkern.h>
39 #include <sys/malloc.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/spinlock2.h>
43 #include <sys/limits.h>
45 #define IDR_DEFAULT_SIZE 32
47 MALLOC_DEFINE(M_IDR, "idr", "Integer ID management");
49 static void idr_grow(struct idr *idp, int want);
50 static void idr_reserve(struct idr *idp, int id, int incr);
51 int idr_alloc(struct idr *idp, int want, int lim, int *result);
52 void idr_set(struct idr *idp, int id, void *ptr);
53 int idr_find_free(struct idr *idp, int want, int lim);
54 int idr_pre_get1(struct idr *idp, int want, int lim);
/*
 * Number of nodes in right subtree, including the root.
 *
 * The node array forms an implicit in-order binary heap (the same layout
 * used by the fd allocator in kern_descrip.c); this bit trick yields the
 * size of the right subtree rooted at index n.
 */
static __inline int
right_subtree_size(int n)
{
        return (n ^ (n | (n + 1)));
}
/*
 * Bigger ancestor: the parent index reached by following right links.
 * (Reconstructed: this helper is required by idr_find_free() below but
 * was lost from the damaged source.)
 */
static __inline int
right_ancestor(int n)
{
        return (n | (n + 1));
}

/*
 * Smaller ancestor: the parent index reached by following left links.
 */
static __inline int
left_ancestor(int n)
{
        return ((n & (n + 1)) - 1);
}
85 idrfixup(struct idr *idp, int id)
87 if (id < idp->idr_freeindex) {
88 idp->idr_freeindex = id;
90 while (idp->idr_lastindex >= 0 &&
91 idp->idr_nodes[idp->idr_lastindex].data == NULL &&
92 idp->idr_nodes[idp->idr_lastindex].reserved == 0
100 idr_get_node(struct idr *idp, int id)
102 struct idr_node *idrnp;
103 if (id >= idp->idr_count)
105 idrnp = &idp->idr_nodes[id];
106 if (idrnp->allocated == 0)
112 idr_reserve(struct idr *idp, int id, int incr)
115 idp->idr_nodes[id].allocated += incr;
116 KKASSERT(idp->idr_nodes[id].allocated >= 0);
117 id = left_ancestor(id);
122 idr_find_free(struct idr *idp, int want, int lim)
124 int id, rsum, rsize, node;
126 * Search for a free descriptor starting at the higher
127 * of want or fd_freefile. If that fails, consider
128 * expanding the ofile array.
130 * NOTE! the 'allocated' field is a cumulative recursive allocation
131 * count. If we happen to see a value of 0 then we can shortcut
132 * our search. Otherwise we run through through the tree going
133 * down branches we know have free descriptor(s) until we hit a
134 * leaf node. The leaf node will be free but will not necessarily
135 * have an allocated field of 0.
138 /* move up the tree looking for a subtree with a free node */
139 for (id = max(want, idp->idr_freeindex); id < min(idp->idr_count, lim);
140 id = right_ancestor(id)) {
141 if (idp->idr_nodes[id].allocated == 0)
144 rsize = right_subtree_size(id);
145 if (idp->idr_nodes[id].allocated == rsize)
146 continue; /* right subtree full */
149 * Free fd is in the right subtree of the tree rooted at fd.
150 * Call that subtree R. Look for the smallest (leftmost)
151 * subtree of R with an unallocated fd: continue moving
152 * down the left branch until encountering a full left
153 * subtree, then move to the right.
155 for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
157 rsum += idp->idr_nodes[node].allocated;
158 if (idp->idr_nodes[id].allocated == rsum + rsize) {
159 id = node; /* move to the right */
160 if (idp->idr_nodes[node].allocated == 0)
171 idr_pre_get1(struct idr *idp, int want, int lim)
175 spin_lock(&idp->idr_spin);
176 if (want >= idp->idr_count)
180 id = idr_find_free(idp, want, lim);
185 * No space in current array. Expand?
187 if (idp->idr_count >= lim) {
188 spin_unlock(&idp->idr_spin);
195 spin_unlock(&idp->idr_spin);
200 idr_pre_get(struct idr *idp)
202 int error = idr_pre_get1(idp, idp->idr_maxwant, INT_MAX);
207 idr_get_new(struct idr *idp, void *ptr, int *id)
214 resid = idr_find_free(idp, 0, INT_MAX);
218 if (resid > idp->idr_lastindex)
219 idp->idr_lastindex = resid;
220 idp->idr_freeindex = resid;
222 KKASSERT(idp->idr_nodes[resid].reserved == 0);
223 idp->idr_nodes[resid].reserved = 1;
224 idr_reserve(idp, resid, 1);
225 idr_set(idp, resid, ptr);
230 idr_get_new_above(struct idr *idp, void *ptr, int sid, int *id)
233 if (sid >= idp->idr_count) {
234 idp->idr_maxwant = max(idp->idr_maxwant, sid);
241 resid = idr_find_free(idp, sid, INT_MAX);
245 if (resid > idp->idr_lastindex)
246 idp->idr_lastindex = resid;
247 if (sid <= idp->idr_freeindex)
248 idp->idr_freeindex = resid;
250 KKASSERT(idp->idr_nodes[resid].reserved == 0);
251 idp->idr_nodes[resid].reserved = 1;
252 idr_reserve(idp, resid, 1);
253 idr_set(idp, resid, ptr);
258 * Grow the file table so it can hold through descriptor (want).
260 * The fdp's spinlock must be held exclusively on entry and may be held
261 * exclusively on return. The spinlock may be cycled by the routine.
266 idr_grow(struct idr *idp, int want)
268 struct idr_node *newnodes;
269 struct idr_node *oldnodes;
274 /* nf has to be of the form 2^n - 1 */
276 } while (nf <= want);
278 spin_unlock(&idp->idr_spin);
279 newnodes = kmalloc(nf * sizeof(struct idr_node), M_IDR, M_WAITOK);
280 spin_lock(&idp->idr_spin);
283 * We could have raced another extend while we were not holding
286 if (idp->idr_count >= nf) {
287 spin_unlock(&idp->idr_spin);
288 kfree(newnodes, M_IDR);
289 spin_lock(&idp->idr_spin);
293 * Copy the existing ofile and ofileflags arrays
294 * and zero the new portion of each array.
296 extra = nf - idp->idr_count;
297 if (idp->idr_nodes != NULL)
298 bcopy(idp->idr_nodes, newnodes, idp->idr_count * sizeof(struct idr_node));
299 bzero(&newnodes[idp->idr_count], extra * sizeof(struct idr_node));
301 oldnodes = idp->idr_nodes;
302 idp->idr_nodes = newnodes;
305 if (oldnodes != NULL) {
306 spin_unlock(&idp->idr_spin);
307 kfree(oldnodes, M_IDR);
308 spin_lock(&idp->idr_spin);
315 idr_remove(struct idr *idp, int id)
319 if (id >= idp->idr_count)
321 if ((ptr = idp->idr_nodes[id].data) == NULL)
323 idp->idr_nodes[id].data = NULL;
325 idr_reserve(idp, id, -1);
330 idr_remove_all(struct idr *idp)
332 kfree(idp->idr_nodes, M_IDR);
333 idp->idr_nodes = kmalloc(idp->idr_count * sizeof *idp, M_IDR, M_WAITOK | M_ZERO);
334 idp->idr_lastindex = -1;
335 idp->idr_freeindex = 0;
336 idp->idr_nexpands = 0;
337 idp->idr_maxwant = 0;
338 spin_init(&idp->idr_spin);
342 idr_destroy(struct idr *idp)
344 kfree(idp->idr_nodes, M_IDR);
345 memset(idp, 0, sizeof(struct idr));
349 idr_find(struct idr *idp, int id)
351 if (id > idp->idr_count) {
353 } else if (idp->idr_nodes[id].allocated == 0) {
356 KKASSERT(idp->idr_nodes[id].data != NULL);
357 return idp->idr_nodes[id].data;
361 idr_set(struct idr *idp, int id, void *ptr)
363 KKASSERT(id < idp->idr_count);
364 KKASSERT(idp->idr_nodes[id].reserved != 0);
366 idp->idr_nodes[id].data = ptr;
367 idp->idr_nodes[id].reserved = 0;
369 idp->idr_nodes[id].reserved = 0;
370 idr_reserve(idp, id, -1);
376 idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data)
379 struct idr_node *nodes = idp->idr_nodes;
380 for (i = 0; i < idp->idr_count; i++) {
381 if (nodes[i].data != NULL && nodes[i].allocated > 0) {
382 error = fn(i, nodes[i].data, data);
391 idr_replace(struct idr *idp, void *ptr, int id)
393 struct idr_node *idrnp;
396 idrnp = idr_get_node(idp, id);
398 if (idrnp == NULL || ptr == NULL)
408 idr_init1(struct idr *idp, int size)
410 memset(idp, 0, sizeof(struct idr));
411 idp->idr_nodes = kmalloc(size * sizeof *idp, M_IDR, M_WAITOK | M_ZERO);
412 idp->idr_count = size;
413 idp->idr_lastindex = -1;
414 idp->idr_maxwant = 0;
415 spin_init(&idp->idr_spin);
419 idr_init(struct idr *idp)
421 idr_init1(idp, IDR_DEFAULT_SIZE);