kernel - Fix vm.max_proc_mmap
[dragonfly.git] sys/vm/vm_pager.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_pager.c    8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 */

/*
 *      Paging space routine stubs.  Emulates a matchmaker-like interface
 *      for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/dsched.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;
static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
static void dead_pager_dealloc (vm_object_t);

/*
 * No requirements.
 */
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
        return VM_PAGER_FAIL;
}

/*
 * No requirements.
 */
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
                    int *rtvals)
{
        int i;

        for (i = 0; i < count; i++) {
                rtvals[i] = VM_PAGER_AGAIN;
        }
}

/*
 * No requirements.
 */
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
        return FALSE;
}

/*
 * No requirements.
 */
static void
dead_pager_dealloc(vm_object_t object)
{
        KKASSERT(object->swblock_count == 0);
        return;
}

static struct pagerops deadpagerops = {
        dead_pager_dealloc,
        dead_pager_getpage,
        dead_pager_putpages,
        dead_pager_haspage
};

struct pagerops *pagertab[] = {
        &defaultpagerops,       /* OBJT_DEFAULT */
        &swappagerops,          /* OBJT_SWAP */
        &vnodepagerops,         /* OBJT_VNODE */
        &devicepagerops,        /* OBJT_DEVICE */
        &devicepagerops,        /* OBJT_MGTDEVICE */
        &physpagerops,          /* OBJT_PHYS */
        &deadpagerops           /* OBJT_DEAD */
};

int npagers = NELEM(pagertab);
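
/*
 * NOTE: pagertab[] is indexed by object type (see the dispatch in
 * vm_pager_deallocate() below), so its entries must remain in OBJT_*
 * order.  npagers is the table size.
 */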

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE  (8 * 1024 * 1024)
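
/*
 * For scale: NPENDINGIO (64) requests at MAXPHYS (64KB) apiece map
 * 64 * 64KB = 4MB, so the 8MB reservation above gives roughly 2:1
 * headroom over that worst case.
 */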

#define BSWHSIZE        16
#define BSWHMASK        (BSWHSIZE - 1)
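
/*
 * The pbuf freelists and spinlocks below are hashed into BSWHSIZE
 * buckets; allocators start at (mycpuid & BSWHMASK), so different
 * cpus tend to contend on different spinlocks.
 */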

TAILQ_HEAD(swqueue, buf);

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;

static vm_offset_t swapbkva_mem;        /* kva for pbufs with memory */
static vm_offset_t swapbkva_kva;        /* kva for pbufs without memory */
static struct swqueue bswlist_mem[BSWHSIZE];    /* with preallocated memory */
static struct swqueue bswlist_kva[BSWHSIZE];    /* with kva */
static struct swqueue bswlist_raw[BSWHSIZE];    /* without kva */
static struct spinlock bswspin_mem[BSWHSIZE];
static struct spinlock bswspin_kva[BSWHSIZE];
static struct spinlock bswspin_raw[BSWHSIZE];
static int pbuf_raw_count;
static int pbuf_kva_count;
static int pbuf_mem_count;

SYSCTL_INT(_vfs, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0,
    "Kernel pbuf raw reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0,
    "Kernel pbuf kva reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_mem_count, CTLFLAG_RD, &pbuf_mem_count, 0,
    "Kernel pbuf mem reservations");

/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
        int i;

        for (i = 0; i < BSWHSIZE; ++i) {
                TAILQ_INIT(&bswlist_mem[i]);
                TAILQ_INIT(&bswlist_kva[i]);
                TAILQ_INIT(&bswlist_raw[i]);
                spin_init(&bswspin_mem[i], "bswmem");
                spin_init(&bswspin_kva[i], "bswkva");
                spin_init(&bswspin_raw[i], "bswraw");
        }
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL);

/*
 * Called from the low level boot code only.
 */
static
void
vm_pager_bufferinit(void *dummy __unused)
{
        struct buf *bp;
        long i;

        /*
         * Reserve KVM space for pbuf data.
         */
        swapbkva_mem = kmem_alloc_pageable(&pager_map, nswbuf_mem * MAXPHYS,
                                           VM_SUBSYS_BUFDATA);
        if (!swapbkva_mem)
                panic("Not enough pager_map VM space for physical buffers");
        swapbkva_kva = kmem_alloc_pageable(&pager_map, nswbuf_kva * MAXPHYS,
                                           VM_SUBSYS_BUFDATA);
        if (!swapbkva_kva)
                panic("Not enough pager_map VM space for physical buffers");

        /*
         * Initial pbuf setup.
         *
         * mem - These pbufs have permanently allocated memory
         * kva - These pbufs have unallocated kva reservations
         * raw - These pbufs have no kva reservations
         */

        /*
         * Buffers with pre-allocated kernel memory can be convenient for
         * copyin/copyout because no SMP page invalidation or other pmap
         * operations are needed.
         */
        bp = swbuf_mem;
        for (i = 0; i < nswbuf_mem; ++i, ++bp) {
                vm_page_t m;
                vm_pindex_t pg;
                int j;

                bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva_mem;
                bp->b_kvasize = MAXPHYS;
                bp->b_swindex = i & BSWHMASK;
                bp->b_cpumask = smp_active_mask;
                BUF_LOCKINIT(bp);
                buf_dep_init(bp);
                TAILQ_INSERT_HEAD(&bswlist_mem[i & BSWHMASK], bp, b_freelist);
                atomic_add_int(&pbuf_mem_count, 1);
                bp->b_data = bp->b_kvabase;
                bp->b_bcount = MAXPHYS;
                bp->b_xio.xio_pages = bp->b_xio.xio_internal_pages;

                pg = (vm_offset_t)bp->b_kvabase >> PAGE_SHIFT;
                vm_object_hold(&kernel_object);
                for (j = 0; j < MAXPHYS / PAGE_SIZE; ++j) {
                        m = vm_page_alloc(&kernel_object, pg, VM_ALLOC_NORMAL |
                                                              VM_ALLOC_SYSTEM);
                        KKASSERT(m != NULL);
                        bp->b_xio.xio_internal_pages[j] = m;
                        vm_page_wire(m);
                        /* early boot, no other cpus running yet */
                        pmap_kenter_noinval(pg * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
                        cpu_invlpg((void *)(pg * PAGE_SIZE));
                        vm_page_wakeup(m);
                        ++pg;
                }
                vm_object_drop(&kernel_object);
                bp->b_xio.xio_npages = j;
        }

        /*
         * Buffers with pre-assigned KVA bases.  The KVA has no memory pages
         * assigned to it.  Saves the caller from having to reserve KVA for
         * the page map.
         */
        bp = swbuf_kva;
        for (i = 0; i < nswbuf_kva; ++i, ++bp) {
                bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva_kva;
                bp->b_kvasize = MAXPHYS;
                bp->b_swindex = i & BSWHMASK;
                BUF_LOCKINIT(bp);
                buf_dep_init(bp);
                TAILQ_INSERT_HEAD(&bswlist_kva[i & BSWHMASK], bp, b_freelist);
                atomic_add_int(&pbuf_kva_count, 1);
        }

        /*
         * RAW buffers with no KVA mappings.
         *
         * NOTE: We use KM_NOTLBSYNC here to reduce unnecessary IPIs
         *       during startup, which can really slow down emulated
         *       systems.
         */
        nswbuf_raw = nbuf * 2;
        swbuf_raw = (void *)kmem_alloc3(&kernel_map,
                                round_page(nswbuf_raw * sizeof(struct buf)),
                                VM_SUBSYS_BUFDATA,
                                KM_NOTLBSYNC);
        smp_invltlb();
        bp = swbuf_raw;
        for (i = 0; i < nswbuf_raw; ++i, ++bp) {
                bp->b_swindex = i & BSWHMASK;
                BUF_LOCKINIT(bp);
                buf_dep_init(bp);
                TAILQ_INSERT_HEAD(&bswlist_raw[i & BSWHMASK], bp, b_freelist);
                atomic_add_int(&pbuf_raw_count, 1);
        }
}

SYSINIT(do_vmpg, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, vm_pager_bufferinit, NULL);

/*
 * No requirements.
 */
void
vm_pager_deallocate(vm_object_t object)
{
        (*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */
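
/*
 * Like vm_pager_deallocate() above, those inlines dispatch through
 * pagertab[] on object->type.  A minimal sketch of the shape, assuming
 * the hook follows the pgo_dealloc/pgo_getpage naming convention (the
 * authoritative definitions live in vm/vm_pager.h):
 */
#if 0
static __inline int
vm_pager_get_page_sketch(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
        return ((*pagertab[object->type]->pgo_getpage)(object, mpp,
                                                       seqaccess));
}
#endif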

/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 * Objects already undergoing termination (OBJ_DEAD) are skipped.
 *
 * The pager object list must be locked.
 */
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
        vm_object_t object;

        TAILQ_FOREACH(object, pg_list, pager_object_list) {
                if (object->handle == handle) {
                        VM_OBJECT_LOCK(object);
                        if ((object->flags & OBJ_DEAD) == 0) {
                                vm_object_reference_locked(object);
                                VM_OBJECT_UNLOCK(object);
                                break;
                        }
                        VM_OBJECT_UNLOCK(object);
                }
        }
        return (object);
}
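
/*
 * Usage sketch (hypothetical caller): find an existing pager object
 * for a handle, falling back to creating one.  The list argument and
 * the elided allocation are illustrative, not from this file.
 */
#if 0
static vm_object_t
example_lookup_or_alloc(struct pagerlst *list, void *handle)
{
        vm_object_t object;

        /* per the comment above, the list must be held locked */
        object = vm_pager_object_lookup(list, handle);
        if (object == NULL) {
                /* ... allocate and insert a fresh object for handle ... */
        }
        return (object);
}
#endif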

/*
 * Initialize a physical buffer.
 *
 * No requirements.
 */
static void
initpbuf(struct buf *bp)
{
        bp->b_qindex = 0;               /* BQUEUE_NONE */
        bp->b_data = bp->b_kvabase;     /* NULL if pbuf sans kva */
        bp->b_flags = B_PAGING;
        bp->b_cmd = BUF_CMD_DONE;
        bp->b_error = 0;
        bp->b_bcount = 0;
        bp->b_bufsize = MAXPHYS;
        initbufbio(bp);
        xio_init(&bp->b_xio);
        BUF_LOCK(bp, LK_EXCLUSIVE);
}
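
/*
 * NOTE: initpbuf() returns with the pbuf exclusively locked via
 * BUF_LOCK(); the matching BUF_UNLOCK() is performed by relpbuf().
 */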

/*
 * Allocate a physical buffer
 *
 * If (pfreecnt != NULL) then *pfreecnt will be decremented on return and
 * the function will block while it is <= 0.
 *
 * Physical buffers can be allocated with or without KVA space reserved.
 * There are severe limitations on the ones with KVA reserved, and fewer
 * limitations on the ones without.  getpbuf() gets one without,
 * getpbuf_kva() gets one with.
 *
 * No requirements.
 */
struct buf *
getpbuf(int *pfreecnt)
{
        struct buf *bp;
        int iter;
        int loops;

        for (;;) {
                while (pfreecnt && *pfreecnt <= 0) {
                        tsleep_interlock(pfreecnt, 0);
                        if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
                                tsleep(pfreecnt, PINTERLOCKED, "wswbuf0", 0);
                }
                if (pbuf_raw_count <= 0) {
                        tsleep_interlock(&pbuf_raw_count, 0);
                        if ((int)atomic_fetchadd_int(&pbuf_raw_count, 0) <= 0)
                                tsleep(&pbuf_raw_count, PINTERLOCKED,
                                       "wswbuf1", 0);
                        continue;
                }
                iter = mycpuid & BSWHMASK;
                for (loops = BSWHSIZE; loops; --loops) {
                        if (TAILQ_FIRST(&bswlist_raw[iter]) == NULL) {
                                iter = (iter + 1) & BSWHMASK;
                                continue;
                        }
                        spin_lock(&bswspin_raw[iter]);
                        if ((bp = TAILQ_FIRST(&bswlist_raw[iter])) == NULL) {
                                spin_unlock(&bswspin_raw[iter]);
                                iter = (iter + 1) & BSWHMASK;
                                continue;
                        }
                        TAILQ_REMOVE(&bswlist_raw[iter], bp, b_freelist);
                        atomic_add_int(&pbuf_raw_count, -1);
                        if (pfreecnt)
                                atomic_add_int(pfreecnt, -1);
                        spin_unlock(&bswspin_raw[iter]);
                        initpbuf(bp);

                        return bp;
                }
        }
        /* not reached */
}
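
/*
 * Usage sketch (hypothetical, not from this file): a subsystem takes
 * a raw pbuf, describes the I/O in it, and returns the pbuf when done.
 * Only getpbuf()/relpbuf() and the buf fields set up by initpbuf()
 * above are taken from this file; the quota counter and the elided
 * submission step are illustrative.
 */
#if 0
static int example_pbuf_quota = 64;     /* hypothetical per-subsystem quota */

static void
example_pbuf_io(void)
{
        struct buf *bp;

        bp = getpbuf(&example_pbuf_quota);      /* blocks while quota <= 0 */
        bp->b_cmd = BUF_CMD_WRITE;
        /* ... point b_xio/b_data at the pages and submit the I/O ... */
        relpbuf(bp, &example_pbuf_quota);       /* unlock, requeue, wakeup */
}
#endif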

struct buf *
getpbuf_kva(int *pfreecnt)
{
        struct buf *bp;
        int iter;
        int loops;

        for (;;) {
                while (pfreecnt && *pfreecnt <= 0) {
                        tsleep_interlock(pfreecnt, 0);
                        if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
                                tsleep(pfreecnt, PINTERLOCKED, "wswbuf2", 0);
                }
                if (pbuf_kva_count <= 0) {
                        tsleep_interlock(&pbuf_kva_count, 0);
                        if ((int)atomic_fetchadd_int(&pbuf_kva_count, 0) <= 0)
                                tsleep(&pbuf_kva_count, PINTERLOCKED,
                                       "wswbuf3", 0);
                        continue;
                }
                iter = mycpuid & BSWHMASK;
                for (loops = BSWHSIZE; loops; --loops) {
                        if (TAILQ_FIRST(&bswlist_kva[iter]) == NULL) {
                                iter = (iter + 1) & BSWHMASK;
                                continue;
                        }
                        spin_lock(&bswspin_kva[iter]);
                        if ((bp = TAILQ_FIRST(&bswlist_kva[iter])) == NULL) {
                                spin_unlock(&bswspin_kva[iter]);
                                iter = (iter + 1) & BSWHMASK;
                                continue;
                        }
                        TAILQ_REMOVE(&bswlist_kva[iter], bp, b_freelist);
                        atomic_add_int(&pbuf_kva_count, -1);
                        if (pfreecnt)
                                atomic_add_int(pfreecnt, -1);
                        spin_unlock(&bswspin_kva[iter]);
                        initpbuf(bp);

                        return bp;
                }
        }
        /* not reached */
}

/*
 * Allocate a pbuf with kernel memory already preallocated.  Caller must
 * not change the mapping.
 */
struct buf *
getpbuf_mem(int *pfreecnt)
{
        struct buf *bp;
        int iter;
        int loops;

        for (;;) {
                while (pfreecnt && *pfreecnt <= 0) {
                        tsleep_interlock(pfreecnt, 0);
                        if ((int)atomic_fetchadd_int(pfreecnt, 0) <= 0)
                                tsleep(pfreecnt, PINTERLOCKED, "wswbuf4", 0);
                }
                if (pbuf_mem_count <= 0) {
                        tsleep_interlock(&pbuf_mem_count, 0);
                        if ((int)atomic_fetchadd_int(&pbuf_mem_count, 0) <= 0)
                                tsleep(&pbuf_mem_count, PINTERLOCKED,
                                       "wswbuf5", 0);
                        continue;
                }
                iter = mycpuid & BSWHMASK;
                for (loops = BSWHSIZE; loops; --loops) {
                        if (TAILQ_FIRST(&bswlist_mem[iter]) == NULL) {
                                iter = (iter + 1) & BSWHMASK;
                                continue;
                        }
                        spin_lock(&bswspin_mem[iter]);
                        if ((bp = TAILQ_FIRST(&bswlist_mem[iter])) == NULL) {
                                spin_unlock(&bswspin_mem[iter]);
                                iter = (iter + 1) & BSWHMASK;
                                continue;
                        }
                        TAILQ_REMOVE(&bswlist_mem[iter], bp, b_freelist);
                        atomic_add_int(&pbuf_mem_count, -1);
                        if (pfreecnt)
                                atomic_add_int(pfreecnt, -1);
                        spin_unlock(&bswspin_mem[iter]);
                        initpbuf(bp);

                        return bp;
                }
        }
        /* not reached */
}
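
/*
 * NOTE: _mem pbufs come with wired kernel pages permanently mapped at
 * b_kvabase (see vm_pager_bufferinit() above), so callers can move
 * data through bp->b_data with no pmap or SMP invalidation work.
 */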

/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call are required to use a non-NULL pfreecnt.
 *
 * No requirements.
 */
struct buf *
trypbuf(int *pfreecnt)
{
        struct buf *bp;
        int iter = mycpuid & BSWHMASK;
        int loops;

        for (loops = BSWHSIZE; loops; --loops) {
                if (*pfreecnt <= 0 || TAILQ_FIRST(&bswlist_raw[iter]) == NULL) {
                        iter = (iter + 1) & BSWHMASK;
                        continue;
                }
                spin_lock(&bswspin_raw[iter]);
                if (*pfreecnt <= 0 ||
                    (bp = TAILQ_FIRST(&bswlist_raw[iter])) == NULL) {
                        spin_unlock(&bswspin_raw[iter]);
                        iter = (iter + 1) & BSWHMASK;
                        continue;
                }
                TAILQ_REMOVE(&bswlist_raw[iter], bp, b_freelist);
                atomic_add_int(&pbuf_raw_count, -1);
                atomic_add_int(pfreecnt, -1);

                spin_unlock(&bswspin_raw[iter]);

                initpbuf(bp);

                return bp;
        }
        return NULL;
}
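
/*
 * Usage sketch (hypothetical): trypbuf() never sleeps, so callers that
 * cannot block simply defer their work when it returns NULL.
 */
#if 0
static int example_pbuf_quota2 = 32;    /* hypothetical quota */

static struct buf *
example_try_io(void)
{
        struct buf *bp;

        if ((bp = trypbuf(&example_pbuf_quota2)) == NULL) {
                /* no pbuf right now; requeue the request and retry later */
        }
        return (bp);
}
#endif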

struct buf *
trypbuf_kva(int *pfreecnt)
{
        struct buf *bp;
        int iter = mycpuid & BSWHMASK;
        int loops;

        for (loops = BSWHSIZE; loops; --loops) {
                if (*pfreecnt <= 0 || TAILQ_FIRST(&bswlist_kva[iter]) == NULL) {
                        iter = (iter + 1) & BSWHMASK;
                        continue;
                }
                spin_lock(&bswspin_kva[iter]);
                if (*pfreecnt <= 0 ||
                    (bp = TAILQ_FIRST(&bswlist_kva[iter])) == NULL) {
                        spin_unlock(&bswspin_kva[iter]);
                        iter = (iter + 1) & BSWHMASK;
                        continue;
                }
                TAILQ_REMOVE(&bswlist_kva[iter], bp, b_freelist);
                atomic_add_int(&pbuf_kva_count, -1);
                atomic_add_int(pfreecnt, -1);

                spin_unlock(&bswspin_kva[iter]);

                initpbuf(bp);

                return bp;
        }
        return NULL;
}

/*
 * Release a physical buffer
 *
 *      NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *      relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
        int wake = 0;
        int wake_free = 0;
        int iter = bp->b_swindex;

        KKASSERT(bp->b_flags & B_PAGING);
        dsched_buf_exit(bp);

        BUF_UNLOCK(bp);

        if (bp >= swbuf_mem && bp < &swbuf_mem[nswbuf_mem]) {
                KKASSERT(bp->b_kvabase);
                spin_lock(&bswspin_mem[iter]);
                TAILQ_INSERT_HEAD(&bswlist_mem[iter], bp, b_freelist);
                if (atomic_fetchadd_int(&pbuf_mem_count, 1) == nswbuf_mem / 4)
                        wake = 1;
                if (pfreecnt) {
                        if (atomic_fetchadd_int(pfreecnt, 1) == 1)
                                wake_free = 1;
                }
                spin_unlock(&bswspin_mem[iter]);
                if (wake)
                        wakeup(&pbuf_mem_count);
        } else if (bp >= swbuf_kva && bp < &swbuf_kva[nswbuf_kva]) {
                KKASSERT(bp->b_kvabase);
                CPUMASK_ASSZERO(bp->b_cpumask);
                spin_lock(&bswspin_kva[iter]);
                TAILQ_INSERT_HEAD(&bswlist_kva[iter], bp, b_freelist);
                if (atomic_fetchadd_int(&pbuf_kva_count, 1) == nswbuf_kva / 4)
                        wake = 1;
                if (pfreecnt) {
                        if (atomic_fetchadd_int(pfreecnt, 1) == 1)
                                wake_free = 1;
                }
                spin_unlock(&bswspin_kva[iter]);
                if (wake)
                        wakeup(&pbuf_kva_count);
        } else {
                KKASSERT(bp->b_kvabase == NULL);
                KKASSERT(bp >= swbuf_raw && bp < &swbuf_raw[nswbuf_raw]);
                CPUMASK_ASSZERO(bp->b_cpumask);
                spin_lock(&bswspin_raw[iter]);
                TAILQ_INSERT_HEAD(&bswlist_raw[iter], bp, b_freelist);
                if (atomic_fetchadd_int(&pbuf_raw_count, 1) == nswbuf_raw / 4)
                        wake = 1;
                if (pfreecnt) {
                        if (atomic_fetchadd_int(pfreecnt, 1) == 1)
                                wake_free = 1;
                }
                spin_unlock(&bswspin_raw[iter]);
                if (wake)
                        wakeup(&pbuf_raw_count);
        }
        if (wake_free)
                wakeup(pfreecnt);
}

void
pbuf_adjcount(int *pfreecnt, int n)
{
        if (n) {
                atomic_add_int(pfreecnt, n);
                wakeup(pfreecnt);
        }
}
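
/*
 * Usage sketch (hypothetical): a subsystem typically publishes its own
 * pbuf quota once at init time via pbuf_adjcount() and then passes the
 * counter as pfreecnt to the allocators above.  The name and the
 * nswbuf_kva / 2 split are illustrative.
 */
#if 0
static int example_subsys_pbufs;

static void
example_subsys_init(void)
{
        pbuf_adjcount(&example_subsys_pbufs, nswbuf_kva / 2);
}
#endif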