2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
39 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/kernel.h>
46 #include <sys/malloc.h>
48 #include <sys/resourcevar.h>
49 #include <sys/sysctl.h>
51 #include <sys/vnode.h>
52 #include <sys/thread2.h>
53 #include <machine/limits.h>
55 #include <cpu/lwbuf.h>
58 #include <vm/vm_page.h>
59 #include <vm/vm_map.h>
/*
 * Export UIO_MAXIOV read-only as the sysctl kern.iov_max; userland's
 * sysconf(_SC_IOV_MAX) reads this to learn the per-call iovec limit.
 */
61 SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
62 "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
65 * UIO_READ: copy the kernelspace cp to the user or kernelspace UIO
66 * UIO_WRITE: copy the user or kernelspace UIO to the kernelspace cp
68 * For userspace UIO's, uio_td must be the current thread.
70 * The syscall interface is responsible for limiting the length to
71 * ssize_t for things like read() or write() which return the bytes
72 * read or written as ssize_t. These functions work with unsigned
/*
 * uiomove() - move up to n bytes between the kernel buffer cp and the
 * I/O vector described by *uio, advancing iov_base, uio_resid and
 * uio_offset as data is transferred.
 *
 * UIO_READ copies from cp out to the uio; UIO_WRITE copies from the
 * uio into cp.  Returns 0 on success or an errno from copyin()/copyout().
 *
 * NOTE(review): this listing is elided -- local declarations, case
 * labels, loop bookkeeping and the return statement are not visible;
 * confirm details against the complete source file.
 */
76 uiomove(caddr_t cp, size_t n, struct uio *uio)
78 thread_t td = curthread;
/*
 * Direction must be valid, and a userspace uio must belong to the
 * current thread, since copyin/copyout act on curthread's address space.
 */
84 KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
86 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
/*
 * Remember the prior TDF_DEADLKTREAT state in 'save' and set the flag
 * for the duration of the copy; it is restored at the bottom.
 */
90 save = td->td_flags & TDF_DEADLKTREAT;
91 td->td_flags |= TDF_DEADLKTREAT;
/* Transfer one iovec segment per iteration until n or uio_resid runs out. */
94 while (n > 0 && uio->uio_resid) {
105 switch (uio->uio_segflg) {
/* Userspace segment: copyout/copyin may fault and return an errno. */
109 if (uio->uio_rw == UIO_READ)
110 error = copyout(cp, iov->iov_base, cnt);
112 error = copyin(iov->iov_base, cp, cnt);
/* Kernel-space segment: plain bcopy, cannot fail. */
118 if (uio->uio_rw == UIO_READ)
119 bcopy(cp, iov->iov_base, cnt);
121 bcopy(iov->iov_base, cp, cnt);
/* Advance the current iovec and the uio totals by the bytes moved. */
126 iov->iov_base = (char *)iov->iov_base + cnt;
128 uio->uio_resid -= cnt;
129 uio->uio_offset += cnt;
/* Restore the caller's original TDF_DEADLKTREAT setting. */
134 td->td_flags = (td->td_flags & ~TDF_DEADLKTREAT) | save;
140 * Like uiomove() but copies zero-fill. Only allowed for UIO_READ,
141 * for obvious reasons.
/*
 * uiomovez() - like uiomove() but supplies zero-fill instead of copying
 * from a kernel buffer.  Only UIO_READ makes sense (zeros flow outward
 * to the uio), which the first KASSERT enforces.
 *
 * NOTE(review): elided listing -- declarations, case labels and the
 * return are not visible; confirm against the full source.
 */
144 uiomovez(size_t n, struct uio *uio)
150 KASSERT(uio->uio_rw == UIO_READ, ("uiomovez: mode"));
151 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
/* Fill one iovec segment per iteration until n or uio_resid is exhausted. */
154 while (n > 0 && uio->uio_resid) {
165 switch (uio->uio_segflg) {
/* Userspace: copy from the shared ZeroPage; may fault -> errno. */
167 error = copyout(ZeroPage, iov->iov_base, cnt);
/* Kernel space: zero the destination directly. */
172 bzero(iov->iov_base, cnt);
/* Advance the iovec and uio bookkeeping by the bytes zeroed. */
177 iov->iov_base = (char *)iov->iov_base + cnt;
179 uio->uio_resid -= cnt;
180 uio->uio_offset += cnt;
187 * Wrapper for uiomove() that validates the arguments against a known-good
188 * kernel buffer. This function automatically indexes the buffer by
189 * uio_offset and handles all range checking.
/*
 * uiomove_frombuf() - bounds-checked uiomove() from a known-good kernel
 * buffer, indexing buf by uio_offset and clamping the transfer length
 * to the bytes remaining in the buffer.
 */
192 uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
/*
 * The round-trip cast detects uio_offset values that do not survive
 * conversion to size_t (negative or out of range); those are rejected.
 */
196 offset = (size_t)uio->uio_offset;
197 if ((off_t)offset != uio->uio_offset)
/*
 * Nothing to move at or beyond the end of the buffer.
 * NOTE(review): the elided branch bodies presumably return an error or
 * a zero-length success -- confirm in the full source.
 */
199 if (buflen == 0 || offset >= buflen)
201 return (uiomove((char *)buf + offset, buflen - offset, uio));
205 * Give next character to user as result of read.
/*
 * ureadc() - deliver the single character c to the uio as the result of
 * a read, consuming one byte of the current iovec.
 *
 * NOTE(review): elided listing -- error returns, the empty-iovec
 * advance, and the kernel-space store are not fully visible; confirm
 * against the full source.
 */
208 ureadc(int c, struct uio *uio)
/* Nothing left to fill: no iovecs remain or the residual count is zero. */
214 if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
/* Skip over an exhausted leading iovec before storing. */
217 if (iov->iov_len == 0) {
222 switch (uio->uio_segflg) {
/* Userspace destination: subyte() stores one byte, returns < 0 on fault. */
225 if (subyte(iov->iov_base, c) < 0)
/* Kernel-space destination: store through a local copy of iov_base. */
230 iov_base = iov->iov_base;
232 iov->iov_base = iov_base;
/* Consume one byte of the current iovec. */
238 iov->iov_base = (char *)iov->iov_base + 1;
246 * General routine to allocate a hash table. Make the hash table size a
247 * power of 2 greater or equal to the number of elements requested, and
248 * store the masking value in *hashmask.
/*
 * hashinit() - allocate a LIST_HEAD hash table whose size is the power
 * of two >= elements.  Each bucket is initialized empty and the index
 * mask (size - 1) is returned through *hashmask.
 */
251 hashinit(int elements, struct malloc_type *type, u_long *hashmask)
254 LIST_HEAD(generic, generic) *hashtbl;
/* Nonsensical element counts are fatal (guard condition elided). */
258 panic("hashinit: bad elements");
/* Round up to the next power of two so (hash & *hashmask) can index. */
259 for (hashsize = 2; hashsize < elements; hashsize <<= 1)
261 hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
262 for (i = 0; i < hashsize; i++)
263 LIST_INIT(&hashtbl[i]);
/* Power-of-two size makes (size - 1) a valid bit mask. */
264 *hashmask = hashsize - 1;
/*
 * hashdestroy() - free a table allocated by hashinit(), asserting that
 * the caller has already emptied every bucket.
 */
269 hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
271 LIST_HEAD(generic, generic) *hashtbl, *hp;
/* hashmask is size-1, so &hashtbl[hashmask] is the last bucket. */
274 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
275 KASSERT(LIST_EMPTY(hp), ("%s: hash not empty", __func__));
276 kfree(hashtbl, type);
280 * This is a newer version which allocates a hash table of structures.
282 * The returned array will be zero'd. The caller is responsible for
283 * initializing the structures.
/*
 * hashinit_ext() - like hashinit() but for caller-defined structures of
 * 'size' bytes per bucket.  The array comes back zeroed (M_ZERO); the
 * caller is responsible for any per-bucket initialization.
 */
286 hashinit_ext(int elements, size_t size, struct malloc_type *type,
293 panic("hashinit: bad elements");
/* Round up to the next power of two, as in hashinit(). */
294 for (hashsize = 2; hashsize < elements; hashsize <<= 1)
296 hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
297 *hashmask = hashsize - 1;
/*
 * Table of primes used by phashinit()/phashinit_ext() to choose a
 * prime table size; the selection loops start scanning at primes[1].
 */
301 static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
302 2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
303 7159, 7673, 8191, 12281, 16381, 24571, 32749 };
/* Number of entries in primes[]. */
304 #define NPRIMES NELEM(primes)
307 * General routine to allocate a prime number sized hash table.
/*
 * phashinit() - allocate a LIST_HEAD hash table whose size is a prime
 * drawn from primes[]; the chosen size is returned through *nentries.
 */
310 phashinit(int elements, struct malloc_type *type, u_long *nentries)
313 LIST_HEAD(generic, generic) *hashtbl;
317 panic("phashinit: bad elements");
/*
 * Walk primes[] until one exceeds 'elements', then step back one entry
 * so the table size is a prime not larger than the request.
 * NOTE(review): the loop body is elided -- confirm the NPRIMES bound
 * handling in the full source.
 */
318 for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
322 hashsize = primes[i];
324 hashsize = primes[i - 1];
325 hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
326 for (i = 0; i < hashsize; i++)
327 LIST_INIT(&hashtbl[i]);
328 *nentries = hashsize;
333 * This is a newer version which allocates a hash table of structures
334 * in a prime-number size.
336 * The returned array will be zero'd. The caller is responsible for
337 * initializing the structures.
/*
 * phashinit_ext() - prime-sized variant of hashinit_ext(): a zeroed
 * (M_ZERO) array of 'size'-byte structures with a prime element count,
 * returned through *nentries.  The caller initializes the structures.
 */
340 phashinit_ext(int elements, size_t size, struct malloc_type *type,
348 panic("phashinit: bad elements");
/* Same prime-selection walk as phashinit() (loop body elided). */
349 for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
353 hashsize = primes[i];
355 hashsize = primes[i - 1];
356 hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
357 *nentries = hashsize;
362 * Copyin an iovec. If the iovec array fits, use the preallocated small
363 * iovec structure. If it is too big, dynamically allocate an iovec array
364 * of sufficient size.
/*
 * iovec_copyin() - copy a userland iovec array into the kernel.  Small
 * arrays (<= UIO_SMALLIOV) use the caller-supplied siov; larger ones
 * (up to UIO_MAXIOV) get a kmalloc'd array returned via *kiov, which
 * the caller must release with iovec_free().  The summed byte length
 * is returned through *iov_len.
 *
 * NOTE(review): elided listing -- the error returns and parts of the
 * validation loop are not visible; confirm against the full source.
 */
369 iovec_copyin(struct iovec *uiov, struct iovec **kiov, struct iovec *siov,
370 size_t iov_cnt, size_t *iov_len)
/* Hard upper bound on the number of iovec elements. */
376 if (iov_cnt > UIO_MAXIOV)
/* Too big for the preallocated small array: allocate dynamically. */
378 if (iov_cnt > UIO_SMALLIOV) {
379 *kiov = kmalloc(sizeof(struct iovec) * iov_cnt, M_IOV,
384 error = copyin(uiov, *kiov, iov_cnt * sizeof(struct iovec));
/* Validate each element while accumulating the total length. */
387 for (i = 0, iovp = *kiov; i < iov_cnt; i++, iovp++) {
389 * Check for both *iov_len overflows and out of
390 * range iovp->iov_len's. We limit to the
391 * capabilities of signed integers.
393 * GCC4 - overflow check opt requires assign/test.
395 len = *iov_len + iovp->iov_len;
403 * From userland disallow iovec's which exceed the sized size
404 * limit as the system calls return ssize_t.
406 * NOTE: Internal kernel interfaces can handle the unsigned
/* A total that reads negative as ssize_t exceeds the syscall limit. */
409 if (error == 0 && (ssize_t)*iov_len < 0)
/* On failure release whatever was allocated before returning. */
413 iovec_free(kiov, siov);
419 * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
420 * Copyright (c) 1982, 1986, 1991, 1993
421 * The Regents of the University of California. All rights reserved.
422 * (c) UNIX System Laboratories, Inc.
423 * All or some portions of this file are derived from material licensed
424 * to the University of California by American Telephone and Telegraph
425 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
426 * the permission of UNIX System Laboratories, Inc.
428 * Redistribution and use in source and binary forms, with or without
429 * modification, are permitted provided that the following conditions
431 * 1. Redistributions of source code must retain the above copyright
432 * notice, this list of conditions and the following disclaimer.
433 * 2. Redistributions in binary form must reproduce the above copyright
434 * notice, this list of conditions and the following disclaimer in the
435 * documentation and/or other materials provided with the distribution.
436 * 4. Neither the name of the University nor the names of its contributors
437 * may be used to endorse or promote products derived from this software
438 * without specific prior written permission.
440 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
441 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
442 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
443 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
444 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
445 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
446 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
447 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
448 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
449 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
452 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
453 * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
457 * Implement uiomove(9) from physical memory using lwbuf's to reduce
458 * the creation and destruction of ephemeral mappings.
/*
 * uiomove_fromphys() - uiomove(9) over an array of physical pages,
 * using lwbuf's for cheap ephemeral kernel mappings.  'offset' indexes
 * into the page run; at most one page is mapped and copied per loop
 * iteration.
 *
 * NOTE(review): elided listing -- declarations, case labels, the lwbuf
 * release, and the return are not visible; confirm against the full
 * source.
 */
461 uiomove_fromphys(vm_page_t *ma, vm_offset_t offset, size_t n, struct uio *uio)
463 struct lwbuf lwb_cache;
465 struct thread *td = curthread;
468 vm_offset_t page_offset;
/* Direction must be valid; a userspace uio must be the current thread's. */
474 KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
475 ("uiomove_fromphys: mode"))<
476 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
477 ("uiomove_fromphys proc"));
/* Set deadlock treatment for the copy; the prior state is kept in 'save'. */
480 save = td->td_flags & TDF_DEADLKTREAT;
481 td->td_flags |= TDF_DEADLKTREAT;
484 while (n > 0 && uio->uio_resid) {
/*
 * Clamp the transfer to the remainder of the current page, map that
 * page with an lwbuf, and compute the kernel address to copy at.
 */
494 page_offset = offset & PAGE_MASK;
495 cnt = min(cnt, PAGE_SIZE - page_offset);
496 m = ma[offset >> PAGE_SHIFT];
497 lwb = lwbuf_alloc(m, &lwb_cache);
498 cp = (char *)lwbuf_kva(lwb) + page_offset;
499 switch (uio->uio_segflg) {
502 * note: removed uioyield (it was the wrong place to
/* Userspace segment: copyout/copyin may fault, yielding an errno. */
505 if (uio->uio_rw == UIO_READ)
506 error = copyout(cp, iov->iov_base, cnt);
508 error = copyin(iov->iov_base, cp, cnt);
/* Kernel-space segment: plain bcopy. */
515 if (uio->uio_rw == UIO_READ)
516 bcopy(cp, iov->iov_base, cnt);
518 bcopy(iov->iov_base, cp, cnt);
/* Advance the iovec and uio bookkeeping by the bytes moved. */
524 iov->iov_base = (char *)iov->iov_base + cnt;
526 uio->uio_resid -= cnt;
527 uio->uio_offset += cnt;
/*
 * NOTE(review): unlike uiomove(), this clears TDF_DEADLKTREAT
 * unconditionally instead of restoring 'save' -- verify this is
 * intentional in the full source.
 */
534 td->td_flags &= ~TDF_DEADLKTREAT;