/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_subr.c	8.3 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/kern_subr.c,v 1.31.2.2 2002/04/21 08:09:37 bde Exp $
 * $DragonFly: src/sys/kern/kern_subr.c,v 1.5 2003/06/25 03:55:57 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/resourcevar.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

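/*
 * Move 'n' bytes between the kernel buffer 'cp' and the scatter/gather
 * list described by 'uio', copying into or out of user space as directed
 * by uio->uio_rw and uio->uio_segflg, and advancing the iovec pointers,
 * uio_resid and uio_offset as data is moved.  Yields the CPU periodically
 * on long user-space transfers.  Returns 0 on success or an errno from
 * copyin()/copyout().
 */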
int
uiomove(cp, n, uio)
	register caddr_t cp;
	register int n;
	register struct uio *uio;
{
	register struct iovec *iov;
	u_int cnt;
	int error = 0;
	int save = 0;
	int baseticks = ticks;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove proc"));

	if (curproc) {
		save = curproc->p_flag & P_DEADLKTREAT;
		curproc->p_flag |= P_DEADLKTREAT;
	}

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - baseticks >= hogticks) {
				uio_yield();
				baseticks = ticks;
			}
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error)
				break;
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	if (curproc)
		curproc->p_flag = (curproc->p_flag & ~P_DEADLKTREAT) | save;
	return (error);
}

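/*
 * Like uiomove(), but also takes the backing VM object so that, in kernels
 * built with ENABLE_VFS_IOOPT, page-aligned reads can be satisfied by
 * vm_uiomove() (mapping pages into the user address space) rather than by
 * copyout().
 */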
int
uiomoveco(cp, n, uio, obj)
	caddr_t cp;
	int n;
	struct uio *uio;
	struct vm_object *obj;
{
	struct iovec *iov;
	u_int cnt;
	int error;
	int baseticks = ticks;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomoveco: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomoveco proc"));

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE:
		case UIO_USERISPACE:
			if (ticks - baseticks >= hogticks) {
				uio_yield();
				baseticks = ticks;
			}
			if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
				if (vfs_ioopt && ((cnt & PAGE_MASK) == 0) &&
				    ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
				    ((uio->uio_offset & PAGE_MASK) == 0) &&
				    ((((intptr_t) cp) & PAGE_MASK) == 0)) {
					error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
					    uio->uio_offset, cnt,
					    (vm_offset_t) iov->iov_base, NULL);
				} else
#endif
				{
					error = copyout(cp, iov->iov_base, cnt);
				}
			} else {
				error = copyin(iov->iov_base, cp, cnt);
			}
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy((caddr_t)cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, (caddr_t)cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		iov->iov_base += cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		cp += cnt;
		n -= cnt;
	}
	return (0);
}

#ifdef ENABLE_VFS_IOOPT

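/*
 * Page-flipping read path, compiled only with ENABLE_VFS_IOOPT and active
 * only when vfs_ioopt >= 2.  Page-aligned portions of the request are
 * moved into the user address space with vm_uiomove(); the number of
 * bytes transferred is returned in *nread, and any unaligned remainder is
 * left for the caller to copy conventionally.
 */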
int
uioread(n, uio, obj, nread)
	int n;
	struct uio *uio;
	struct vm_object *obj;
	int *nread;
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;

	*nread = 0;
	if (vfs_ioopt < 2)
		return 0;

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
		    ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
		    ((uio->uio_offset & PAGE_MASK) == 0) ) {

			if (cnt < PAGE_SIZE)
				break;

			cnt &= ~PAGE_MASK;

			if (ticks - mycpu->gd_switchticks >= hogticks)
				uio_yield();
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
			    uio->uio_offset, cnt,
			    (vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base += cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return error;
}

#endif

/*
 * Give next character to user as result of read.
 */
int
ureadc(c, uio)
	register int c;
	register struct uio *uio;
{
	register struct iovec *iov;

again:
	if (uio->uio_iovcnt == 0 || uio->uio_resid == 0)
		panic("ureadc");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iovcnt--;
		uio->uio_iov++;
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		if (subyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;

	case UIO_SYSSPACE:
		*iov->iov_base = c;
		break;

	case UIO_USERISPACE:
		if (suibyte(iov->iov_base, c) < 0)
			return (EFAULT);
		break;
	case UIO_NOCOPY:
		break;
	}
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (0);
}

#ifdef vax	/* unused except by ct.c, other oddities XXX */
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio)
	struct uio *uio;
{
	register struct iovec *iov;
	register int c;

	if (uio->uio_resid <= 0)
		return (-1);
again:
	if (uio->uio_iovcnt <= 0)
		panic("uwritec");
	iov = uio->uio_iov;
	if (iov->iov_len == 0) {
		uio->uio_iov++;
		if (--uio->uio_iovcnt == 0)
			return (-1);
		goto again;
	}
	switch (uio->uio_segflg) {

	case UIO_USERSPACE:
		c = fubyte(iov->iov_base);
		break;

	case UIO_SYSSPACE:
		c = *(u_char *) iov->iov_base;
		break;

	case UIO_USERISPACE:
		c = fuibyte(iov->iov_base);
		break;
	}
	if (c < 0)
		return (-1);
	iov->iov_base++;
	iov->iov_len--;
	uio->uio_resid--;
	uio->uio_offset++;
	return (c);
}
#endif /* vax */

/*
 * General routine to allocate a hash table.
 */
void *
hashinit(elements, type, hashmask)
	int elements;
	struct malloc_type *type;
	u_long *hashmask;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("hashinit: bad elements");
	for (hashsize = 1; hashsize <= elements; hashsize <<= 1)
		continue;
	hashsize >>= 1;
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*hashmask = hashsize - 1;
	return (hashtbl);
}

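/*
 * hashinit() sizes the table to the largest power of two that does not
 * exceed 'elements', so callers index it by masking with *hashmask, e.g.
 * (illustrative only, not a caller from this file):
 *
 *	hashtbl = hashinit(nelements, M_TEMP, &hashmask);
 *	bucket = &hashtbl[hashval & hashmask];
 */

/* Prime table sizes used by phashinit(), in ascending order. */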
static int primes[] = { 1, 13, 31, 61, 127, 251, 509, 761, 1021, 1531, 2039,
			2557, 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653,
			7159, 7673, 8191, 12281, 16381, 24571, 32749 };
#define NPRIMES (sizeof(primes) / sizeof(primes[0]))

/*
 * General routine to allocate a prime number sized hash table.
 */
void *
phashinit(elements, type, nentries)
	int elements;
	struct malloc_type *type;
	u_long *nentries;
{
	long hashsize;
	LIST_HEAD(generic, generic) *hashtbl;
	int i;

	if (elements <= 0)
		panic("phashinit: bad elements");
	for (i = 1, hashsize = primes[1]; hashsize <= elements;) {
		i++;
		if (i == NPRIMES)
			break;
		hashsize = primes[i];
	}
	hashsize = primes[i - 1];
	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK);
	for (i = 0; i < hashsize; i++)
		LIST_INIT(&hashtbl[i]);
	*nentries = hashsize;
	return (hashtbl);
}

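/*
 * Voluntarily give up the CPU: put the current process back on the run
 * queue, charge it an involuntary context switch, and call mi_switch().
 * The uiomove routines above call this to avoid hogging the CPU during
 * long transfers.
 */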
void
uio_yield()
{
	struct proc *p;
	int s;

	p = curproc;
	s = splhigh();
	p->p_priority = p->p_usrpri;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch();
	splx(s);
}