kernel - Adjust UFS and HAMMER to use uiomovebp()
[dragonfly.git] / sys / kern / kern_subr.c
CommitLineData
984263bc
MD
1/*
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
39 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
40 */
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/kernel.h>
45#include <sys/proc.h>
46#include <sys/malloc.h>
47#include <sys/lock.h>
48#include <sys/resourcevar.h>
cbd0496d
JS
49#include <sys/sysctl.h>
50#include <sys/uio.h>
984263bc 51#include <sys/vnode.h>
a358fd45 52#include <sys/thread2.h>
25e80b06 53#include <machine/limits.h>
984263bc 54
5c5185ae
SG
55#include <cpu/lwbuf.h>
56
984263bc
MD
57#include <vm/vm.h>
58#include <vm/vm_page.h>
59#include <vm/vm_map.h>
60
cbd0496d
JS
61SYSCTL_INT(_kern, KERN_IOV_MAX, iov_max, CTLFLAG_RD, NULL, UIO_MAXIOV,
62 "Maximum number of elements in an I/O vector; sysconf(_SC_IOV_MAX)");
63
1301b4d8
MD
64/*
65 * UIO_READ: copy the kernelspace cp to the user or kernelspace UIO
8ddc6004 66 * UIO_WRITE: copy the user or kernelspace UIO to the kernelspace cp
1301b4d8
MD
67 *
68 * For userspace UIO's, uio_td must be the current thread.
e54488bb
MD
69 *
70 * The syscall interface is responsible for limiting the length to
71 * ssize_t for things like read() or write() which return the bytes
72 * read or written as ssize_t. These functions work with unsigned
73 * lengths.
1301b4d8 74 */
984263bc 75int
e54488bb 76uiomove(caddr_t cp, size_t n, struct uio *uio)
984263bc 77{
616516c8 78 thread_t td = curthread;
1fd87d54 79 struct iovec *iov;
e54488bb 80 size_t cnt;
984263bc
MD
81 int error = 0;
82 int save = 0;
83
84 KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
85 ("uiomove: mode"));
616516c8 86 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
984263bc
MD
87 ("uiomove proc"));
88
616516c8
MD
89 crit_enter();
90 save = td->td_flags & TDF_DEADLKTREAT;
91 td->td_flags |= TDF_DEADLKTREAT;
92 crit_exit();
984263bc
MD
93
94 while (n > 0 && uio->uio_resid) {
95 iov = uio->uio_iov;
96 cnt = iov->iov_len;
97 if (cnt == 0) {
98 uio->uio_iov++;
99 uio->uio_iovcnt--;
100 continue;
101 }
102 if (cnt > n)
103 cnt = n;
104
105 switch (uio->uio_segflg) {
106
107 case UIO_USERSPACE:
f9235b6d 108 lwkt_user_yield();
984263bc
MD
109 if (uio->uio_rw == UIO_READ)
110 error = copyout(cp, iov->iov_base, cnt);
111 else
112 error = copyin(iov->iov_base, cp, cnt);
113 if (error)
114 break;
115 break;
116
117 case UIO_SYSSPACE:
118 if (uio->uio_rw == UIO_READ)
51295aee 119 bcopy(cp, iov->iov_base, cnt);
984263bc 120 else
51295aee 121 bcopy(iov->iov_base, cp, cnt);
984263bc
MD
122 break;
123 case UIO_NOCOPY:
124 break;
125 }
656849c6 126 iov->iov_base = (char *)iov->iov_base + cnt;
984263bc
MD
127 iov->iov_len -= cnt;
128 uio->uio_resid -= cnt;
129 uio->uio_offset += cnt;
130 cp += cnt;
131 n -= cnt;
132 }
616516c8
MD
133 crit_enter();
134 td->td_flags = (td->td_flags & ~TDF_DEADLKTREAT) | save;
135 crit_exit();
984263bc
MD
136 return (error);
137}
c7841cbe
MD
138
139/*
44480e31
MD
140 * This is the same as uiomove() except (cp, n) is within the bounds of
141 * the passed, locked buffer. Under certain circumstances a VM fault
142 * occuring with a locked buffer held can result in a deadlock or an
143 * attempt to recursively lock the buffer.
144 *
145 * This procedure deals with these cases.
146 *
147 * If the buffer represents a regular file, is B_CACHE, but the last VM page
148 * is not fully valid we fix-up the last VM page. This should handle the
149 * recursive lock issue.
150 *
151 * Deadlocks are another issue. We are holding the vp and the bp locked
152 * and could deadlock against a different vp and/or bp if another thread is
153 * trying to access us while we accessing it. The only solution here is
154 * to release the bp and vnode lock and do the uio to/from a system buffer,
155 * then regain the locks and copyback (if applicable). XXX TODO.
156 */
157int
158uiomovebp(struct buf *bp, caddr_t cp, size_t n, struct uio *uio)
159{
160 int count;
161 vm_page_t m;
162
163 if (bp->b_vp && bp->b_vp->v_type == VREG &&
164 (bp->b_flags & B_CACHE) &&
165 (count = bp->b_xio.xio_npages) != 0 &&
166 (m = bp->b_xio.xio_pages[count-1])->valid != VM_PAGE_BITS_ALL) {
167 vm_page_zero_invalid(m, TRUE);
168 }
169 return (uiomove(cp, n, uio));
170}
171
172/*
c7841cbe
MD
173 * Like uiomove() but copies zero-fill. Only allowed for UIO_READ,
174 * for obvious reasons.
175 */
176int
177uiomovez(size_t n, struct uio *uio)
178{
179 struct iovec *iov;
180 size_t cnt;
181 int error = 0;
182
183 KASSERT(uio->uio_rw == UIO_READ, ("uiomovez: mode"));
184 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
185 ("uiomove proc"));
186
187 while (n > 0 && uio->uio_resid) {
188 iov = uio->uio_iov;
189 cnt = iov->iov_len;
190 if (cnt == 0) {
191 uio->uio_iov++;
192 uio->uio_iovcnt--;
193 continue;
194 }
195 if (cnt > n)
196 cnt = n;
197
198 switch (uio->uio_segflg) {
199 case UIO_USERSPACE:
200 error = copyout(ZeroPage, iov->iov_base, cnt);
201 if (error)
202 break;
203 break;
204 case UIO_SYSSPACE:
205 bzero(iov->iov_base, cnt);
206 break;
207 case UIO_NOCOPY:
208 break;
209 }
210 iov->iov_base = (char *)iov->iov_base + cnt;
211 iov->iov_len -= cnt;
212 uio->uio_resid -= cnt;
213 uio->uio_offset += cnt;
214 n -= cnt;
215 }
216 return (error);
217}
218
25e80b06
DR
219/*
220 * Wrapper for uiomove() that validates the arguments against a known-good
47538602
MD
221 * kernel buffer. This function automatically indexes the buffer by
222 * uio_offset and handles all range checking.
25e80b06
DR
223 */
224int
e54488bb 225uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
25e80b06 226{
e54488bb 227 size_t offset;
25e80b06 228
e54488bb
MD
229 offset = (size_t)uio->uio_offset;
230 if ((off_t)offset != uio->uio_offset)
25e80b06 231 return (EINVAL);
e54488bb 232 if (buflen == 0 || offset >= buflen)
25e80b06 233 return (0);
e54488bb 234 return (uiomove((char *)buf + offset, buflen - offset, uio));
984263bc
MD
235}
236
984263bc
MD
237/*
238 * Give next character to user as result of read.
239 */
240int
c972a82f 241ureadc(int c, struct uio *uio)
984263bc 242{
1fd87d54 243 struct iovec *iov;
656849c6 244 char *iov_base;
984263bc
MD
245
246again:
247 if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
248 panic("ureadc");
249 iov = uio->uio_iov;
250 if (iov->iov_len == 0) {
251 uio->uio_iovcnt--;
252 uio->uio_iov++;
253 goto again;
254 }
255 switch (uio->uio_segflg) {
256
257 case UIO_USERSPACE:
258 if (subyte(iov->iov_base, c) < 0)
259 return (EFAULT);
260 break;
261
262 case UIO_SYSSPACE:
656849c6
SW
263 iov_base = iov->iov_base;
264 *iov_base = c;
265 iov->iov_base = iov_base;
984263bc
MD
266 break;
267
984263bc
MD
268 case UIO_NOCOPY:
269 break;
270 }
656849c6 271 iov->iov_base = (char *)iov->iov_base + 1;
984263bc
MD
272 iov->iov_len--;
273 uio->uio_resid--;
274 uio->uio_offset++;
275 return (0);
276}
277
984263bc 278/*
1c86b25b
MD
279 * General routine to allocate a hash table. Make the hash table size a
280 * power of 2 greater or equal to the number of elements requested, and
281 * store the masking value in *hashmask.
984263bc
MD
282 */
283void *
c972a82f 284hashinit(int elements, struct malloc_type *type, u_long *hashmask)
984263bc
MD
285{
286 long hashsize;
287 LIST_HEAD(generic, generic) *hashtbl;
288 int i;
289
290 if (elements <= 0)
291 panic("hashinit: bad elements");
8f17cbc6 292 for (hashsize = 2; hashsize < elements; hashsize <<= 1)
984263bc 293 continue;
efda3bd0 294 hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
984263bc
MD
295 for (i = 0; i < hashsize; i++)
296 LIST_INIT(&hashtbl[i]);
297 *hashmask = hashsize - 1;
298 return (hashtbl);
299}
300
add23450
SW
301void
302hashdestroy(void *vhashtbl, struct malloc_type *type, u_long hashmask)
303{
304 LIST_HEAD(generic, generic) *hashtbl, *hp;
305
306 hashtbl = vhashtbl;
307 for (hp = hashtbl; hp <= &hashtbl[hashmask]; hp++)
308 KASSERT(LIST_EMPTY(hp), ("%s: hash not empty", __func__));
309 kfree(hashtbl, type);
310}
311
c02197ac
MD
312/*
313 * This is a newer version which allocates a hash table of structures.
314 *
315 * The returned array will be zero'd. The caller is responsible for
316 * initializing the structures.
317 */
318void *
319hashinit_ext(int elements, size_t size, struct malloc_type *type,
320 u_long *hashmask)
321{
322 long hashsize;
323 void *hashtbl;
324
325 if (elements <= 0)
326 panic("hashinit: bad elements");
327 for (hashsize = 2; hashsize < elements; hashsize <<= 1)
328 continue;
329 hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
330 *hashmask = hashsize - 1;
331 return (hashtbl);
332}
333
984263bc
MD
/*
 * Prime table used by phashinit() and phashinit_ext() to size their
 * hash tables.
 */
static int primes[] = {
	1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
	2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
	7159, 7673, 8191, 12281, 16381, 24571, 32749
};
#define NPRIMES NELEM(primes)
984263bc
MD
338
339/*
340 * General routine to allocate a prime number sized hash table.
341 */
342void *
c972a82f 343phashinit(int elements, struct malloc_type *type, u_long *nentries)
984263bc
MD
344{
345 long hashsize;
346 LIST_HEAD(generic, generic) *hashtbl;
347 int i;
348
349 if (elements <= 0)
350 panic("phashinit: bad elements");
351 for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
352 i++;
353 if (i == NPRIMES)
354 break;
355 hashsize = primes[i];
356 }
357 hashsize = primes[i - 1];
efda3bd0 358 hashtbl = kmalloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
984263bc
MD
359 for (i = 0; i < hashsize; i++)
360 LIST_INIT(&hashtbl[i]);
361 *nentries = hashsize;
362 return (hashtbl);
363}
75a872f8
DRJ
364
365/*
c02197ac
MD
366 * This is a newer version which allocates a hash table of structures
367 * in a prime-number size.
368 *
369 * The returned array will be zero'd. The caller is responsible for
370 * initializing the structures.
371 */
372void *
373phashinit_ext(int elements, size_t size, struct malloc_type *type,
374 u_long *nentries)
375{
376 long hashsize;
377 void *hashtbl;
378 int i;
379
380 if (elements <= 0)
381 panic("phashinit: bad elements");
382 for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
383 i++;
384 if (i == NPRIMES)
385 break;
386 hashsize = primes[i];
387 }
388 hashsize = primes[i - 1];
389 hashtbl = kmalloc((size_t)hashsize * size, type, M_WAITOK | M_ZERO);
390 *nentries = hashsize;
391 return (hashtbl);
392}
393
394/*
75a872f8
DRJ
395 * Copyin an iovec. If the iovec array fits, use the preallocated small
396 * iovec structure. If it is too big, dynamically allocate an iovec array
397 * of sufficient size.
f832287e
MD
398 *
399 * MPSAFE
75a872f8
DRJ
400 */
401int
402iovec_copyin(struct iovec *uiov, struct iovec **kiov, struct iovec *siov,
e54488bb 403 size_t iov_cnt, size_t *iov_len)
75a872f8
DRJ
404{
405 struct iovec *iovp;
406 int error, i;
e54488bb 407 size_t len;
75a872f8 408
fe57a30b 409 if (iov_cnt > UIO_MAXIOV)
75a872f8 410 return EMSGSIZE;
fe57a30b 411 if (iov_cnt > UIO_SMALLIOV) {
884717e1
SW
412 *kiov = kmalloc(sizeof(struct iovec) * iov_cnt, M_IOV,
413 M_WAITOK);
75a872f8
DRJ
414 } else {
415 *kiov = siov;
416 }
417 error = copyin(uiov, *kiov, iov_cnt * sizeof(struct iovec));
ef5c76d7
MD
418 if (error == 0) {
419 *iov_len = 0;
420 for (i = 0, iovp = *kiov; i < iov_cnt; i++, iovp++) {
421 /*
422 * Check for both *iov_len overflows and out of
423 * range iovp->iov_len's. We limit to the
424 * capabilities of signed integers.
e54488bb
MD
425 *
426 * GCC4 - overflow check opt requires assign/test.
ef5c76d7 427 */
e54488bb
MD
428 len = *iov_len + iovp->iov_len;
429 if (len < *iov_len)
ef5c76d7 430 error = EINVAL;
e54488bb 431 *iov_len = len;
ef5c76d7
MD
432 }
433 }
e54488bb
MD
434
435 /*
436 * From userland disallow iovec's which exceed the sized size
437 * limit as the system calls return ssize_t.
438 *
439 * NOTE: Internal kernel interfaces can handle the unsigned
440 * limit.
441 */
442 if (error == 0 && (ssize_t)*iov_len < 0)
443 error = EINVAL;
444
75a872f8
DRJ
445 if (error)
446 iovec_free(kiov, siov);
447 return (error);
448}
a358fd45
MD
449
450
451/*
452 * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
453 * Copyright (c) 1982, 1986, 1991, 1993
454 * The Regents of the University of California. All rights reserved.
455 * (c) UNIX System Laboratories, Inc.
456 * All or some portions of this file are derived from material licensed
457 * to the University of California by American Telephone and Telegraph
458 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
459 * the permission of UNIX System Laboratories, Inc.
460 *
461 * Redistribution and use in source and binary forms, with or without
462 * modification, are permitted provided that the following conditions
463 * are met:
464 * 1. Redistributions of source code must retain the above copyright
465 * notice, this list of conditions and the following disclaimer.
466 * 2. Redistributions in binary form must reproduce the above copyright
467 * notice, this list of conditions and the following disclaimer in the
468 * documentation and/or other materials provided with the distribution.
469 * 4. Neither the name of the University nor the names of its contributors
470 * may be used to endorse or promote products derived from this software
471 * without specific prior written permission.
472 *
473 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
474 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
475 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
476 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
477 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
478 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
479 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
480 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
481 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
482 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
483 * SUCH DAMAGE.
484 *
485 * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
486 * $FreeBSD: src/sys/i386/i386/uio_machdep.c,v 1.1 2004/03/21 20:28:36 alc Exp $
487 */
488
489/*
5c5185ae 490 * Implement uiomove(9) from physical memory using lwbuf's to reduce
a358fd45
MD
491 * the creation and destruction of ephemeral mappings.
492 */
493int
e54488bb 494uiomove_fromphys(vm_page_t *ma, vm_offset_t offset, size_t n, struct uio *uio)
a358fd45 495{
7a683a24 496 struct lwbuf lwb_cache;
5c5185ae 497 struct lwbuf *lwb;
a358fd45
MD
498 struct thread *td = curthread;
499 struct iovec *iov;
500 void *cp;
501 vm_offset_t page_offset;
502 vm_page_t m;
503 size_t cnt;
504 int error = 0;
505 int save = 0;
506
507 KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
508 ("uiomove_fromphys: mode"));
509 KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
510 ("uiomove_fromphys proc"));
511
512 crit_enter();
513 save = td->td_flags & TDF_DEADLKTREAT;
514 td->td_flags |= TDF_DEADLKTREAT;
515 crit_exit();
516
517 while (n > 0 && uio->uio_resid) {
518 iov = uio->uio_iov;
519 cnt = iov->iov_len;
520 if (cnt == 0) {
521 uio->uio_iov++;
522 uio->uio_iovcnt--;
523 continue;
524 }
525 if (cnt > n)
526 cnt = n;
527 page_offset = offset & PAGE_MASK;
528 cnt = min(cnt, PAGE_SIZE - page_offset);
529 m = ma[offset >> PAGE_SHIFT];
7a683a24 530 lwb = lwbuf_alloc(m, &lwb_cache);
5c5185ae 531 cp = (char *)lwbuf_kva(lwb) + page_offset;
a358fd45
MD
532 switch (uio->uio_segflg) {
533 case UIO_USERSPACE:
534 /*
535 * note: removed uioyield (it was the wrong place to
536 * put it).
537 */
538 if (uio->uio_rw == UIO_READ)
539 error = copyout(cp, iov->iov_base, cnt);
540 else
541 error = copyin(iov->iov_base, cp, cnt);
542 if (error) {
5c5185ae 543 lwbuf_free(lwb);
a358fd45
MD
544 goto out;
545 }
546 break;
547 case UIO_SYSSPACE:
548 if (uio->uio_rw == UIO_READ)
549 bcopy(cp, iov->iov_base, cnt);
550 else
551 bcopy(iov->iov_base, cp, cnt);
552 break;
553 case UIO_NOCOPY:
554 break;
555 }
5c5185ae 556 lwbuf_free(lwb);
a358fd45
MD
557 iov->iov_base = (char *)iov->iov_base + cnt;
558 iov->iov_len -= cnt;
559 uio->uio_resid -= cnt;
560 uio->uio_offset += cnt;
561 offset += cnt;
562 n -= cnt;
563 }
564out:
565 if (save == 0) {
566 crit_enter();
567 td->td_flags &= ~TDF_DEADLKTREAT;
568 crit_exit();
569 }
570 return (error);
571}
572