/*
 * (MPSAFE)
 *
 * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *                              New Swap System
 *                              Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *      - The new swapper uses the new radix bitmap code.  This should scale
 *        to arbitrarily small or arbitrarily large swap spaces and an almost
 *        arbitrary degree of fragmentation.
 *
 * Features:
 *
 *      - on the fly reallocation of swap during putpages.  The new system
 *        does not try to keep previously allocated swap blocks for dirty
 *        pages.
 *
 *      - on the fly deallocation of swap
 *
 *      - No more garbage collection required.  Unnecessarily allocated swap
 *        blocks only exist for dirty vm_page_t's now and these are already
 *        cycled (in a high-load system) by the pager.  We also do on-the-fly
 *        removal of invalidated swap blocks when a page is destroyed
 *        or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 * @(#)swap_pager.c     8.9 (Berkeley) 3/21/94
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/kcollect.h>

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/thread2.h>
#include <sys/buf2.h>
#include <vm/vm_page2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER     SWB_NPAGES
#endif

#define SWM_FREE        0x02    /* free, period                 */
#define SWM_POP         0x04    /* pop out                      */

#define SWBIO_READ      0x01
#define SWBIO_WRITE     0x02
#define SWBIO_SYNC      0x04
#define SWBIO_TTC       0x08    /* for VM_PAGER_TRY_TO_CACHE */

struct swfreeinfo {
        vm_object_t     object;
        vm_pindex_t     basei;
        vm_pindex_t     begi;
        vm_pindex_t     endi;   /* inclusive */
};

struct swswapoffinfo {
        vm_object_t     object;
        int             devidx;
        int             shared;
};

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

int swap_pager_full;            /* swap space exhaustion (task killing) */
int swap_fail_ticks;            /* when we became exhausted */
int swap_pager_almost_full;     /* swap space exhaustion (w/ hysteresis)*/
swblk_t vm_swap_cache_use;
swblk_t vm_swap_anon_use;
static int vm_report_swap_allocs;

static int nsw_rcount;          /* free read buffers                    */
static int nsw_wcount_sync;     /* limit write buffers / synchronous    */
static int nsw_wcount_async;    /* limit write buffers / asynchronous   */
static int nsw_wcount_async_max;/* assigned maximum                     */
static int nsw_cluster_max;     /* maximum VOP I/O allowed              */

struct blist *swapblist;
static int swap_async_max = 4;  /* maximum in-progress async I/O's      */
static int swap_burst_read = 0; /* allow burst reading */
static swblk_t swapiterator;    /* linearize allocations */
int swap_user_async = 0;        /* user swap pager operation can be async */

static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin");

/* from vm_swap.c */
extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
extern int nswdev;

#define BLK2DEVIDX(blk) (nswdev > 1 ? (blk) / SWB_DMMAX % nswdev : 0)
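/*
 * Example (illustrative numbers only): with nswdev == 2 and a stripe
 * size of SWB_DMMAX == 32, swap blocks 0-31 map to device 0, blocks
 * 32-63 to device 1, blocks 64-95 back to device 0, and so on,
 * interleaving the swap load across the configured devices.
 */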

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
        CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
        CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
SYSCTL_INT(_vm, OID_AUTO, swap_user_async,
        CTLFLAG_RW, &swap_user_async, 0, "Allow async user swap write I/O");

#if SWBLK_BITS == 64
SYSCTL_LONG(_vm, OID_AUTO, swap_cache_use,
        CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_anon_use,
        CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_LONG(_vm, OID_AUTO, swap_size,
        CTLFLAG_RD, &vm_swap_size, 0, "");
#else
SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
        CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
        CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_size,
        CTLFLAG_RD, &vm_swap_size, 0, "");
#endif
SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs,
        CTLFLAG_RW, &vm_report_swap_allocs, 0, "");

vm_zone_t               swap_zone;

/*
 * Red-Black tree for swblock entries
 *
 * The caller must hold vm_token
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
             vm_pindex_t, swb_index);

int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
        if (swb1->swb_index < swb2->swb_index)
                return(-1);
        if (swb1->swb_index > swb2->swb_index)
                return(1);
        return(0);
}
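
/*
 * Compiled-out sketch (not part of the pager): how the generated
 * RB_LOOKUP is used to find the swblock covering a page index.  A
 * swblock spans SWAP_META_PAGES consecutive pages, so the index is
 * first rounded down to its group base.  The function name is
 * hypothetical, for illustration only.
 */
#if 0
static struct swblock *
swp_sample_lookup(vm_object_t object, vm_pindex_t index)
{
        index &= ~(vm_pindex_t)SWAP_META_MASK;  /* swblock base index */
        return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}
#endif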

static
int
rb_swblock_scancmp(struct swblock *swb, void *data)
{
        struct swfreeinfo *info = data;

        if (swb->swb_index < info->basei)
                return(-1);
        if (swb->swb_index > info->endi)
                return(1);
        return(0);
}

static
int
rb_swblock_condcmp(struct swblock *swb, void *data)
{
        struct swfreeinfo *info = data;

        if (swb->swb_index < info->basei)
                return(-1);
        return(0);
}

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static void     swap_pager_dealloc (vm_object_t object);
static int      swap_pager_getpage (vm_object_t, vm_page_t *, int);
static void     swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
        swap_pager_dealloc,     /* deallocate an OBJT_SWAP object       */
        swap_pager_getpage,     /* pagein                               */
        swap_pager_putpages,    /* pageout                              */
        swap_pager_haspage      /* get backing store status for page    */
};

/*
 * SWB_DMMAX is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  SWB_DMMAX is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int nswap_lowat = 128;          /* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;          /* in pages, swap_pager_almost_full warn */

static __inline void    swp_sizecheck (void);
static void     swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void    swp_pager_freeswapspace(vm_object_t object,
                                                swblk_t blk, int npages);
static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_convert(vm_object_t);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -    update swap_pager_full indication
 *
 *      update the swap_pager_almost_full indication and warn when we are
 *      about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *      Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 * No restrictions on call
 * This routine may not block.
 * SMP races are ok.
 */
static __inline void
swp_sizecheck(void)
{
        if (vm_swap_size < nswap_lowat) {
                if (swap_pager_almost_full == 0) {
                        kprintf("swap_pager: out of swap space\n");
                        swap_pager_almost_full = 1;
                        swap_fail_ticks = ticks;
                }
        } else {
                swap_pager_full = 0;
                if (vm_swap_size > nswap_hiwat)
                        swap_pager_almost_full = 0;
        }
}
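
/*
 * Example: with the default nswap_lowat/nswap_hiwat of 128/512 pages
 * the warning latches as soon as fewer than 128 page-sized blocks
 * remain and does not clear until more than 512 are free again, so
 * the indication cannot flap while free swap hovers near a single
 * threshold.
 */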

/*
 * Long-term data collection on 10-second interval.  Return the value
 * for KCOLLECT_SWAPPCT and set the values for SWAPANO and SWAPCAC.
 *
 * Return total swap in the scale field.  This can change if swap is
 * regularly added or removed and may cause some historical confusion
 * in that case, but SWAPPCT will always be historically accurate.
 */

#define PTOB(value)     ((uint64_t)(value) << PAGE_SHIFT)

static uint64_t
collect_swap_callback(int n)
{
        uint64_t total = vm_swap_max;
        uint64_t anon = vm_swap_anon_use;
        uint64_t cache = vm_swap_cache_use;

        if (total == 0)         /* avoid divide by zero */
                total = 1;
        kcollect_setvalue(KCOLLECT_SWAPANO, PTOB(anon));
        kcollect_setvalue(KCOLLECT_SWAPCAC, PTOB(cache));
        kcollect_setscale(KCOLLECT_SWAPANO,
                          KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, PTOB(total)));
        kcollect_setscale(KCOLLECT_SWAPCAC,
                          KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, PTOB(total)));
        return (((anon + cache) * 10000 + (total >> 1)) / total);
}
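
/*
 * The value returned above is the fraction of swap in use expressed
 * in 1/100ths of a percent, with the (total >> 1) bias providing
 * round-to-nearest.  For example, anon + cache = 1000 pages out of a
 * total of 16384 yields (1000 * 10000 + 8192) / 16384 = 610, i.e.
 * ~6.10% in use.
 */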

/*
 * SWAP_PAGER_INIT() -  initialize the swap pager!
 *
 *      Expected to be started from system init.  NOTE:  This code is run
 *      before much else so be careful what you depend on.  Most of the VM
 *      system has yet to be initialized at this point.
 *
 * Called from the low level boot code only.
 */
static void
swap_pager_init(void *arg __unused)
{
        kcollect_register(KCOLLECT_SWAPPCT, "swapuse", collect_swap_callback,
                          KCOLLECT_SCALE(KCOLLECT_SWAPPCT_FORMAT, 0));
        kcollect_register(KCOLLECT_SWAPANO, "swapano", NULL,
                          KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, 0));
        kcollect_register(KCOLLECT_SWAPCAC, "swapcac", NULL,
                          KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, 0));
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL);

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *      Expected to be started from pageout process once, prior to entering
 *      its main loop.
 *
 * Called from the pageout daemon only.
 */
void
swap_pager_swap_init(void)
{
        int n, n2;

        /*
         * Number of in-transit swap bp operations.  Don't
         * exhaust the pbufs completely.  Make sure we
         * initialize workable values (0 will work for hysteresis
         * but it isn't very efficient).
         *
         * The nsw_cluster_max is constrained by the number of pages an XIO
         * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
         * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
         * constrained by the swap device interleave stripe size.
         *
         * Currently we hardwire nsw_wcount_async to 4.  This limit is
         * designed to prevent other I/O from having high latencies due to
         * our pageout I/O.  The value 4 works well for one or two active swap
         * devices but is probably a little low if you have more.  Even so,
         * a higher value would probably generate only a limited improvement
         * with three or four active swap devices since the system does not
         * typically have to pageout at extreme bandwidths.   We will want
         * at least 2 per swap device, and 4 is a pretty good value if you
         * have one NFS swap device due to the command/ack latency over NFS.
         * So it all works out pretty well.
         */

        nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

        nsw_rcount = (nswbuf_kva + 1) / 2;
        nsw_wcount_sync = (nswbuf_kva + 3) / 4;
        nsw_wcount_async = 4;
        nsw_wcount_async_max = nsw_wcount_async;

        /*
         * The zone is dynamically allocated so generally size it to
         * maxswzone (32MB to 256GB of KVM).  Set a minimum size based
         * on physical memory of around 8x (each swblock can hold 16 pages).
         *
         * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
         * has increased dramatically.
         */
        n = vmstats.v_page_count / 2;
        if (maxswzone && n < maxswzone / sizeof(struct swblock))
                n = maxswzone / sizeof(struct swblock);
        n2 = n;

        do {
                swap_zone = zinit(
                        "SWAPMETA",
                        sizeof(struct swblock),
                        n,
                        ZONE_INTERRUPT);
                if (swap_zone != NULL)
                        break;
                /*
                 * if the allocation failed, try a zone two thirds the
                 * size of the previous attempt.
                 */
                n -= ((n + 2) / 3);
        } while (n > 0);

        if (swap_zone == NULL)
                panic("swap_pager_swap_init: swap_zone == NULL");
        if (n2 != n)
                kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
}
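
/*
 * Sizing example (illustrative numbers): with 1M physical pages the
 * initial request above is 512K swblock structures; each swblock
 * describes SWAP_META_PAGES (16) pages of swap, so the zone can cover
 * roughly 8M swap pages, the ~8x ratio mentioned in the comment.
 */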

/*
 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
 *                      its metadata structures.
 *
 *      This routine is called from the mmap and fork code to create a new
 *      OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *      and then converting it with swp_pager_meta_convert().
 *
 *      We only support unnamed objects.
 *
 * No restrictions.
 */
vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
        vm_object_t object;

        KKASSERT(handle == NULL);
        object = vm_object_allocate_hold(OBJT_DEFAULT,
                                         OFF_TO_IDX(offset + PAGE_MASK + size));
        swp_pager_meta_convert(object);
        vm_object_drop(object);

        return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -       remove swap metadata from object
 *
 *      The swap backing for the object is destroyed.  The code is
 *      designed such that we can reinstantiate it later, but this
 *      routine is typically called only when the entire object is
 *      about to be destroyed.
 *
 * The object must be locked or unreferenceable.
 * No other requirements.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
        vm_object_hold(object);
        vm_object_pip_wait(object, "swpdea");

        /*
         * Free all remaining metadata.  We only bother to free it from
         * the swap meta data.  We do not attempt to free swapblk's still
         * associated with vm_page_t's for this object.  We do not care
         * if paging is still in progress on some objects.
         */
        swp_pager_meta_free_all(object);
        vm_object_drop(object);
}

/************************************************************************
 *                      SWAP PAGER BITMAP ROUTINES                      *
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -   allocate raw swap space
 *
 *      Allocate swap for the requested number of pages.  The starting
 *      swap block number (a page index) is returned or SWAPBLK_NONE
 *      if the allocation failed.
 *
 *      Also has the side effect of advising that somebody made a mistake
 *      when they configured swap and didn't configure enough.
 *
 * The caller must hold the object.
 * This routine may not block.
 */
static __inline swblk_t
swp_pager_getswapspace(vm_object_t object, int npages)
{
        swblk_t blk;

        lwkt_gettoken(&vm_token);
        blk = blist_allocat(swapblist, npages, swapiterator);
        if (blk == SWAPBLK_NONE)
                blk = blist_allocat(swapblist, npages, 0);
        if (blk == SWAPBLK_NONE) {
                if (swap_pager_full != 2) {
                        if (vm_swap_max == 0)
                                kprintf("Warning: The system would like to "
                                        "page to swap but no swap space "
                                        "is configured!\n");
                        else
                                kprintf("swap_pager_getswapspace: "
                                        "swap full allocating %d pages\n",
                                        npages);
                        swap_pager_full = 2;
                        if (swap_pager_almost_full == 0)
                                swap_fail_ticks = ticks;
                        swap_pager_almost_full = 1;
                }
        } else {
                /* swapiterator = blk; disable for now, doesn't work well */
                swapacctspace(blk, -npages);
                if (object->type == OBJT_SWAP)
                        vm_swap_anon_use += npages;
                else
                        vm_swap_cache_use += npages;
                swp_sizecheck();
        }
        lwkt_reltoken(&vm_token);
        return(blk);
}
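
/*
 * Compiled-out sketch of the underlying radix bitmap ("blist") calls
 * used by the routines above and below.  blist_allocat() finds npages
 * contiguous free blocks at or after the hint and returns the first
 * block number, or SWAPBLK_NONE on failure; blist_free() returns a
 * run to the bitmap.  Illustrative only; like the real routines it
 * assumes the caller serializes on vm_token, and the function name
 * is hypothetical.
 */
#if 0
static void
swp_sample_blist(void)
{
        swblk_t blk;

        lwkt_gettoken(&vm_token);
        blk = blist_allocat(swapblist, 4, 0);   /* 4 contiguous pages */
        if (blk != SWAPBLK_NONE)
                blist_free(swapblist, blk, 4);  /* give them back */
        lwkt_reltoken(&vm_token);
}
#endif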

/*
 * SWP_PAGER_FREESWAPSPACE() -  free raw swap space
 *
 *      This routine returns the specified swap blocks back to the bitmap.
 *
 *      Note:  This routine may not block (it could in the old swap code),
 *      and through the use of the new blist routines it does not block.
 *
 * This routine may not block.
 */

static __inline void
swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
{
        struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];

        lwkt_gettoken(&vm_token);
        sp->sw_nused -= npages;
        if (object->type == OBJT_SWAP)
                vm_swap_anon_use -= npages;
        else
                vm_swap_cache_use -= npages;

        if (sp->sw_flags & SW_CLOSING) {
                lwkt_reltoken(&vm_token);
                return;
        }

        blist_free(swapblist, blk, npages);
        vm_swap_size += npages;
        swp_sizecheck();
        lwkt_reltoken(&vm_token);
}

/*
 * SWAP_PAGER_FREESPACE() -     frees swap blocks associated with a page
 *                              range within an object.
 *
 *      This is a globally accessible routine.
 *
 *      This routine removes swapblk assignments from swap metadata.
 *
 *      The external callers of this routine typically have already destroyed
 *      or renamed vm_page_t's associated with this range in the object so
 *      we should be ok.
 *
 * No requirements.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
{
        vm_object_hold(object);
        swp_pager_meta_free(object, start, size);
        vm_object_drop(object);
}

/*
 * No requirements.
 */
void
swap_pager_freespace_all(vm_object_t object)
{
        vm_object_hold(object);
        swp_pager_meta_free_all(object);
        vm_object_drop(object);
}

/*
 * This function conditionally frees swap cache swap starting at
 * (*basei) in the object.  (count) swap blocks will be nominally freed.
 * The actual number of blocks freed can be more or less than the
 * requested number.
 *
 * This function nominally returns the number of blocks freed.  However,
 * the actual number of blocks freed may be less than the returned value.
 * If the function is unable to exhaust the object or if it is able to
 * free (approximately) the requested number of blocks it returns
 * a value n > count.
 *
 * If we exhaust the object we will return a value n <= count.
 *
 * The caller must hold the object.
 *
 * WARNING!  If count == 0 then -1 can be returned as a degenerate case,
 *           callers should always pass a count value > 0.
 */
static int swap_pager_condfree_callback(struct swblock *swap, void *data);

int
swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
{
        struct swfreeinfo info;
        int n;
        int t;

        ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

        info.object = object;
        info.basei = *basei;    /* skip up to this page index */
        info.begi = count;      /* max swap pages to destroy */
        info.endi = count * 8;  /* max swblocks to scan */

        swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
                                swap_pager_condfree_callback, &info);
        *basei = info.basei;

        /*
         * Take the higher difference swblocks vs pages
         */
        n = count - (int)info.begi;
        t = count * 8 - (int)info.endi;
        if (n < t)
                n = t;
        if (n < 1)
                n = 1;
        return(n);
}
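
/*
 * Compiled-out usage sketch: a cleaning loop (the swapcache scanner is
 * the real consumer) advances *basei across the object and charges the
 * return value, which is always >= 1 for count > 0, against its
 * budget.  Hypothetical code for illustration only; the object must
 * be held across the call as asserted above.
 */
#if 0
static void
swp_sample_condfree(vm_object_t object)
{
        vm_pindex_t basei = 0;
        int budget = 1024;

        vm_object_hold(object);
        while (budget > 0)
                budget -= swap_pager_condfree(object, &basei, 16);
        vm_object_drop(object);
}
#endif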

/*
 * The idea is to free whole meta-blocks to avoid fragmenting
 * the swap space or disk I/O.  We only do this if NO VM pages
 * are present.
 *
 * We do not have to deal with clearing PG_SWAPPED in related VM
 * pages because there are no related VM pages.
 *
 * The caller must hold the object.
 */
static int
swap_pager_condfree_callback(struct swblock *swap, void *data)
{
        struct swfreeinfo *info = data;
        vm_object_t object = info->object;
        int i;

        for (i = 0; i < SWAP_META_PAGES; ++i) {
                if (vm_page_lookup(object, swap->swb_index + i))
                        break;
        }
        info->basei = swap->swb_index + SWAP_META_PAGES;
        if (i == SWAP_META_PAGES) {
                info->begi -= swap->swb_count;
                swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
        }
        --info->endi;
        if ((int)info->begi < 0 || (int)info->endi < 0)
                return(-1);
        lwkt_yield();
        return(0);
}

/*
 * Called by vm_page_alloc() when a new VM page is inserted
 * into a VM object.  Checks whether swap has been assigned to
 * the page and sets PG_SWAPPED as necessary.
 *
 * (m) must be busied by caller and remains busied on return.
 */
void
swap_pager_page_inserted(vm_page_t m)
{
        if (m->object->swblock_count) {
                vm_object_hold(m->object);
                if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
                        vm_page_flag_set(m, PG_SWAPPED);
                vm_object_drop(m->object);
        }
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *      Assigns swap blocks to the specified range within the object.  The
 *      swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *      Returns 0 on success, -1 on failure.
 *
 * The caller is responsible for avoiding races in the specified range.
 * No other requirements.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
        int n = 0;
        swblk_t blk = SWAPBLK_NONE;
        vm_pindex_t beg = start;        /* save start index */

        vm_object_hold(object);

        while (size) {
                if (n == 0) {
                        n = BLIST_MAX_ALLOC;
                        while ((blk = swp_pager_getswapspace(object, n)) ==
                               SWAPBLK_NONE)
                        {
                                n >>= 1;
                                if (n == 0) {
                                        swp_pager_meta_free(object, beg,
                                                            start - beg);
                                        vm_object_drop(object);
                                        return(-1);
                                }
                        }
                }
                swp_pager_meta_build(object, start, blk);
                --size;
                ++start;
                ++blk;
                --n;
        }
        swp_pager_meta_free(object, start, n);
        vm_object_drop(object);
        return(0);
}
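
/*
 * Compiled-out usage sketch: pre-assigning backing store so that a
 * later pageout of the range cannot fail for lack of swap.
 * Hypothetical code for illustration only; the caller is responsible
 * for avoiding races in the range as noted above.
 */
#if 0
static int
swp_sample_reserve(vm_object_t object)
{
        /* reserve swap for the first 16 pages of the object */
        if (swap_pager_reserve(object, 0, 16) != 0)
                return (ENOMEM);        /* swap is exhausted */
        return (0);
}
#endif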

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *                      and destroy the source.
 *
 *      Copy any valid swapblks from the source to the destination.  In
 *      cases where both the source and destination have a valid swapblk,
 *      we keep the destination's.
 *
 *      This routine is allowed to block.  It may block allocating metadata
 *      indirectly through swp_pager_meta_build() or if paging is still in
 *      progress on the source.
 *
 *      XXX vm_page_collapse() kinda expects us not to block because we
 *      supposedly do not need to allocate memory, but for the moment we
 *      *may* have to get a little memory from the zone allocator, but
 *      it is taken from the interrupt memory.  We should be ok.
 *
 *      The source object contains no vm_page_t's (which is just as well).
 *      The source object is of type OBJT_SWAP.
 *
 *      The source and destination objects must be held by the caller.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
                vm_pindex_t base_index, int destroysource)
{
        vm_pindex_t i;

        ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
        ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));

        /*
         * transfer source to destination.
         */
        for (i = 0; i < dstobject->size; ++i) {
                swblk_t dstaddr;

                /*
                 * Locate (without changing) the swapblk on the destination,
                 * unless it is invalid in which case free it silently, or
                 * if the destination is a resident page, in which case the
                 * source is thrown away.
                 */
                dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

                if (dstaddr == SWAPBLK_NONE) {
                        /*
                         * Destination has no swapblk and is not resident,
                         * copy source.
                         */
                        swblk_t srcaddr;

                        srcaddr = swp_pager_meta_ctl(srcobject,
                                                     base_index + i, SWM_POP);

                        if (srcaddr != SWAPBLK_NONE)
                                swp_pager_meta_build(dstobject, i, srcaddr);
                } else {
                        /*
                         * Destination has valid swapblk or it is represented
                         * by a resident page.  We destroy the sourceblock.
                         */
                        swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
                }
        }

        /*
         * Free left over swap blocks in source.
         *
         * We have to revert the type to OBJT_DEFAULT so we do not accidentally
         * double-remove the object from the swap queues.
         */
        if (destroysource) {
                /*
                 * Reverting the type is not necessary, the caller is going
                 * to destroy srcobject directly, but I'm doing it here
                 * for consistency since we've removed the object from its
                 * queues.
                 */
                swp_pager_meta_free_all(srcobject);
                if (srcobject->type == OBJT_SWAP)
                        srcobject->type = OBJT_DEFAULT;
        }
}

/*
 * SWAP_PAGER_HASPAGE() -       determine if we have good backing store for
 *                              the requested page.
 *
 *      We determine whether good backing store exists for the requested
 *      page and return TRUE if it does, FALSE if it doesn't.
 *
 *      If TRUE, we also try to determine how much valid, contiguous backing
 *      store exists before and after the requested page within a reasonable
 *      distance.  We do not try to restrict it to the swap device stripe
 *      (that is handled in getpages/putpages).  It probably isn't worth
 *      doing here.
 *
 * No requirements.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
        swblk_t blk0;

        /*
         * do we have good backing store at the requested index ?
         */
        vm_object_hold(object);
        blk0 = swp_pager_meta_ctl(object, pindex, 0);

        if (blk0 == SWAPBLK_NONE) {
                vm_object_drop(object);
                return (FALSE);
        }
        vm_object_drop(object);
        return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 * This removes any associated swap backing store, whether valid or
 * not, from the page.  This operates on any VM object, not just OBJT_SWAP
 * objects.
 *
 * This routine is typically called when a page is made dirty, at
 * which point any associated swap can be freed.  MADV_FREE also
 * calls us in a special-case situation.
 *
 * NOTE!!!  If the page is clean and the swap was valid, the caller
 *          should make the page dirty before calling this routine.
 *          This routine does NOT change the m->dirty status of the page.
 *          Also: MADV_FREE depends on it.
 *
 * The page must be busied.
 * The caller can hold the object to avoid blocking, else we might block.
 * No other requirements.
 */
void
swap_pager_unswapped(vm_page_t m)
{
        if (m->flags & PG_SWAPPED) {
                vm_object_hold(m->object);
                KKASSERT(m->flags & PG_SWAPPED);
                swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
                vm_page_flag_clear(m, PG_SWAPPED);
                vm_object_drop(m->object);
        }
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 * This implements a VM OBJECT strategy function using swap backing store.
 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
 * types.
 *
 * This is intended to be a cacheless interface (i.e. caching occurs at
 * higher levels), and is also used as a swap-based SSD cache for vnode
 * and device objects.
 *
 * All I/O goes directly to and from the swap device.
 *
 * We currently attempt to run I/O synchronously or asynchronously as
 * the caller requests.  This isn't perfect because we lose error
 * sequencing when we run multiple ops in parallel to satisfy a request.
 * But this is swap, so we let it all hang out.
 *
 * No requirements.
 */
void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
        struct buf *bp = bio->bio_buf;
        struct bio *nbio;
        vm_pindex_t start;
        vm_pindex_t biox_blkno = 0;
        int count;
        char *data;
        struct bio *biox;
        struct buf *bufx;
#if 0
        struct bio_track *track;
#endif

#if 0
        /*
         * tracking for swapdev vnode I/Os
         */
        if (bp->b_cmd == BUF_CMD_READ)
                track = &swapdev_vp->v_track_read;
        else
                track = &swapdev_vp->v_track_write;
#endif

        if (bp->b_bcount & PAGE_MASK) {
                bp->b_error = EINVAL;
                bp->b_flags |= B_ERROR | B_INVAL;
                biodone(bio);
                kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
                        "not page bounded\n",
                        bp, (long long)bio->bio_offset, (int)bp->b_bcount);
                return;
        }

        /*
         * Clear error indication, initialize page index, count, data pointer.
         */
        bp->b_error = 0;
        bp->b_flags &= ~B_ERROR;
        bp->b_resid = bp->b_bcount;

        start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
        count = howmany(bp->b_bcount, PAGE_SIZE);
        data = bp->b_data;

        /*
         * Deal with BUF_CMD_FREEBLKS
         */
        if (bp->b_cmd == BUF_CMD_FREEBLKS) {
                /*
                 * FREE PAGE(s) - destroy underlying swap that is no longer
                 *                needed.
                 */
                vm_object_hold(object);
                swp_pager_meta_free(object, start, count);
                vm_object_drop(object);
                bp->b_resid = 0;
                biodone(bio);
                return;
        }

        /*
         * We need to be able to create a new cluster of I/O's.  We cannot
         * use the caller fields of the passed bio so push a new one.
         *
         * Because nbio is just a placeholder for the cluster links,
         * we can biodone() the original bio instead of nbio to make
         * things a bit more efficient.
         */
        nbio = push_bio(bio);
        nbio->bio_offset = bio->bio_offset;
        nbio->bio_caller_info1.cluster_head = NULL;
        nbio->bio_caller_info2.cluster_tail = NULL;

        biox = NULL;
        bufx = NULL;

        /*
         * Execute read or write
         */
        vm_object_hold(object);

        while (count > 0) {
                swblk_t blk;

                /*
                 * Obtain block.  If block not found and writing, allocate a
                 * new block and build it into the object.
                 */
                blk = swp_pager_meta_ctl(object, start, 0);
                if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
                        blk = swp_pager_getswapspace(object, 1);
                        if (blk == SWAPBLK_NONE) {
                                bp->b_error = ENOMEM;
                                bp->b_flags |= B_ERROR;
                                break;
                        }
                        swp_pager_meta_build(object, start, blk);
                }

                /*
                 * Do we have to flush our current collection?  Yes if:
                 *
                 *      - no swap block at this index
                 *      - swap block is not contiguous
                 *      - we cross a physical disk boundary in the
                 *        stripe.
                 */
                if (
                    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
                     ((biox_blkno ^ blk) & ~SWB_DMMASK)
                    )
                ) {
                        if (bp->b_cmd == BUF_CMD_READ) {
                                ++mycpu->gd_cnt.v_swapin;
                                mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
                        } else {
                                ++mycpu->gd_cnt.v_swapout;
                                mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
                                bufx->b_dirtyend = bufx->b_bcount;
                        }

                        /*
                         * Finished with this buf.
                         */
                        KKASSERT(bufx->b_bcount != 0);
                        if (bufx->b_cmd != BUF_CMD_READ)
                                bufx->b_dirtyend = bufx->b_bcount;
                        biox = NULL;
                        bufx = NULL;
                }

                /*
                 * Add new swapblk to biox, instantiating biox if necessary.
                 * Zero-fill reads are able to take a shortcut.
                 */
                if (blk == SWAPBLK_NONE) {
                        /*
                         * We can only get here if we are reading.
                         */
                        bzero(data, PAGE_SIZE);
                        bp->b_resid -= PAGE_SIZE;
                } else {
                        if (biox == NULL) {
                                /* XXX chain count > 4, wait to <= 4 */

                                bufx = getpbuf(NULL);
                                biox = &bufx->b_bio1;
                                cluster_append(nbio, bufx);
                                bufx->b_cmd = bp->b_cmd;
                                biox->bio_done = swap_chain_iodone;
                                biox->bio_offset = (off_t)blk << PAGE_SHIFT;
                                biox->bio_caller_info1.cluster_parent = nbio;
                                biox_blkno = blk;
                                bufx->b_bcount = 0;
                                bufx->b_data = data;
                        }
                        bufx->b_bcount += PAGE_SIZE;
                }
                --count;
                ++start;
                data += PAGE_SIZE;
        }

        vm_object_drop(object);

        /*
         * Flush out last buffer
         */
        if (biox) {
                if (bufx->b_cmd == BUF_CMD_READ) {
                        ++mycpu->gd_cnt.v_swapin;
                        mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
                } else {
                        ++mycpu->gd_cnt.v_swapout;
                        mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
                        bufx->b_dirtyend = bufx->b_bcount;
                }
                KKASSERT(bufx->b_bcount);
                if (bufx->b_cmd != BUF_CMD_READ)
                        bufx->b_dirtyend = bufx->b_bcount;
                /* biox, bufx = NULL */
        }

        /*
         * Now initiate all the I/O.  Be careful looping on our chain as
         * I/O's may complete while we are still initiating them.
         *
         * If the request is a 100% sparse read no bios will be present
         * and we just biodone() the buffer.
         */
        nbio->bio_caller_info2.cluster_tail = NULL;
        bufx = nbio->bio_caller_info1.cluster_head;

        if (bufx) {
                while (bufx) {
                        biox = &bufx->b_bio1;
                        BUF_KERNPROC(bufx);
                        bufx = bufx->b_cluster_next;
                        vn_strategy(swapdev_vp, biox);
                }
        } else {
                biodone(bio);
        }

        /*
         * Completion of the cluster will also call biodone_chain(nbio).
         * We never call biodone(nbio) so we don't have to worry about
         * setting up a bio_done callback.  It's handled in the sub-IO.
         */
}

/*
 * biodone callback
 *
 * No requirements.
 */
static void
swap_chain_iodone(struct bio *biox)
{
        struct buf **nextp;
        struct buf *bufx;       /* chained sub-buffer */
        struct bio *nbio;       /* parent nbio with chain glue */
        struct buf *bp;         /* original bp associated with nbio */
        int chain_empty;

        bufx = biox->bio_buf;
        nbio = biox->bio_caller_info1.cluster_parent;
        bp = nbio->bio_buf;

        /*
         * Update the original buffer
         */
        KKASSERT(bp != NULL);
        if (bufx->b_flags & B_ERROR) {
                atomic_set_int(&bp->b_flags, B_ERROR);
                bp->b_error = bufx->b_error;    /* race ok */
        } else if (bufx->b_resid != 0) {
                atomic_set_int(&bp->b_flags, B_ERROR);
                bp->b_error = EINVAL;           /* race ok */
        } else {
                atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
        }

        /*
         * Remove us from the chain.
         */
        spin_lock(&swapbp_spin);
        nextp = &nbio->bio_caller_info1.cluster_head;
        while (*nextp != bufx) {
                KKASSERT(*nextp != NULL);
                nextp = &(*nextp)->b_cluster_next;
        }
        *nextp = bufx->b_cluster_next;
        chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
        spin_unlock(&swapbp_spin);

        /*
         * Clean up bufx.  If the chain is now empty we finish out
         * the parent.  Note that we may be racing other completions
         * so we must use the chain_empty status from above.
         */
        if (chain_empty) {
                if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
                        atomic_set_int(&bp->b_flags, B_ERROR);
                        bp->b_error = EINVAL;
                }
                biodone_chain(nbio);
        }
        relpbuf(bufx, NULL);
}

/*
 * SWAP_PAGER_GETPAGES() - bring page in from swap
 *
 * The requested page may have to be brought in from swap.  Calculate the
 * swap block and bring in additional pages if possible.  All pages must
 * have contiguous swap block assignments and reside in the same object.
 *
 * The caller has a single vm_object_pip_add() reference prior to
 * calling us and we should return with the same.
 *
 * The caller has BUSY'd the page.  We should return with (*mpp) left busy,
 * and any additional pages unbusied.
 *
 * If the caller encounters a PG_RAM page it will pass it to us even though
 * it may be valid and dirty.  We cannot overwrite the page in this case!
 * The case is used to allow us to issue pure read-aheads.
 *
 * NOTE! XXX This code does not entirely pipeline yet due to the fact that
 *       the PG_RAM page is validated at the same time as mreq.  What we
 *       really need to do is issue a separate read-ahead pbuf.
 *
 * No requirements.
 */
1248 static int
1249 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
1250 {
1251         struct buf *bp;
1252         struct bio *bio;
1253         vm_page_t mreq;
1254         vm_page_t m;
1255         vm_offset_t kva;
1256         swblk_t blk;
1257         int i;
1258         int j;
1259         int raonly;
1260         int error;
1261         u_int32_t flags;
1262         vm_page_t marray[XIO_INTERNAL_PAGES];
1263
1264         mreq = *mpp;
1265
1266         vm_object_hold(object);
1267         if (mreq->object != object) {
1268                 panic("swap_pager_getpages: object mismatch %p/%p", 
1269                     object, 
1270                     mreq->object
1271                 );
1272         }
1273
1274         /*
1275          * We don't want to overwrite a fully valid page as it might be
1276          * dirty.  This case can occur when e.g. vm_fault hits a perfectly
1277          * valid page with PG_RAM set.
1278          *
1279          * In this case we see if the next page is a suitable page-in
1280          * candidate and if it is we issue read-ahead.  PG_RAM will be
1281          * set on the last page of the read-ahead to continue the pipeline.
1282          */
1283         if (mreq->valid == VM_PAGE_BITS_ALL) {
1284                 if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
1285                         vm_object_drop(object);
1286                         return(VM_PAGER_OK);
1287                 }
1288                 blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
1289                 if (blk == SWAPBLK_NONE) {
1290                         vm_object_drop(object);
1291                         return(VM_PAGER_OK);
1292                 }
1293                 m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
1294                                             TRUE, &error);
1295                 if (error) {
1296                         vm_object_drop(object);
1297                         return(VM_PAGER_OK);
1298                 } else if (m == NULL) {
1299                         /*
1300                          * Use VM_ALLOC_QUICK to avoid blocking on cache
1301                          * page reuse.
1302                          */
1303                         m = vm_page_alloc(object, mreq->pindex + 1,
1304                                           VM_ALLOC_QUICK);
1305                         if (m == NULL) {
1306                                 vm_object_drop(object);
1307                                 return(VM_PAGER_OK);
1308                         }
1309                 } else {
1310                         if (m->valid) {
1311                                 vm_page_wakeup(m);
1312                                 vm_object_drop(object);
1313                                 return(VM_PAGER_OK);
1314                         }
1315                         vm_page_unqueue_nowakeup(m);
1316                 }
1317                 /* page is busy */
1318                 mreq = m;
1319                 raonly = 1;
1320         } else {
1321                 raonly = 0;
1322         }
1323
1324         /*
1325          * Try to block-read contiguous pages from swap if sequential,
1326          * otherwise just read one page.  Contiguous pages from swap must
1327          * reside within a single device stripe because the I/O cannot be
1328          * broken up across multiple stripes.
1329          *
1330          * Note that blk and iblk can be SWAPBLK_NONE but the loop is
1331          * set up such that the case(s) are handled implicitly.
1332          */
1333         blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1334         marray[0] = mreq;
1335
1336         for (i = 1; i <= swap_burst_read &&
1337                     i < XIO_INTERNAL_PAGES &&
1338                     mreq->pindex + i < object->size; ++i) {
1339                 swblk_t iblk;
1340
1341                 iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
1342                 if (iblk != blk + i)
1343                         break;
1344                 if ((blk ^ iblk) & ~SWB_DMMASK)
1345                         break;
1346                 m = vm_page_lookup_busy_try(object, mreq->pindex + i,
1347                                             TRUE, &error);
1348                 if (error) {
1349                         break;
1350                 } else if (m == NULL) {
1351                         /*
1352                          * Use VM_ALLOC_QUICK to avoid blocking on cache
1353                          * page reuse.
1354                          */
1355                         m = vm_page_alloc(object, mreq->pindex + i,
1356                                           VM_ALLOC_QUICK);
1357                         if (m == NULL)
1358                                 break;
1359                 } else {
1360                         if (m->valid) {
1361                                 vm_page_wakeup(m);
1362                                 break;
1363                         }
1364                         vm_page_unqueue_nowakeup(m);
1365                 }
1366                 /* page is busy */
1367                 marray[i] = m;
1368         }
1369         if (i > 1)
1370                 vm_page_flag_set(marray[i - 1], PG_RAM);
1371
1372         /*
1373          * If mreq is the requested page and we have nothing to do return
1374          * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
1375          * page and must be cleaned up.
1376          */
1377         if (blk == SWAPBLK_NONE) {
1378                 KKASSERT(i == 1);
1379                 if (raonly) {
1380                         vnode_pager_freepage(mreq);
1381                         vm_object_drop(object);
1382                         return(VM_PAGER_OK);
1383                 } else {
1384                         vm_object_drop(object);
1385                         return(VM_PAGER_FAIL);
1386                 }
1387         }
1388
1389         /*
1390          * map our page(s) into kva for input
1391          */
1392         bp = getpbuf_kva(&nsw_rcount);
1393         bio = &bp->b_bio1;
1394         kva = (vm_offset_t) bp->b_kvabase;
1395         bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
1396         pmap_qenter(kva, bp->b_xio.xio_pages, i);
1397
1398         bp->b_data = (caddr_t)kva;
1399         bp->b_bcount = PAGE_SIZE * i;
1400         bp->b_xio.xio_npages = i;
1401         bio->bio_done = swp_pager_async_iodone;
1402         bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1403         bio->bio_caller_info1.index = SWBIO_READ;
1404
1405         /*
1406          * Set index.  If raonly set the index beyond the array so all
1407          * the pages are treated the same, otherwise the original mreq is
1408          * at index 0.
1409          */
1410         if (raonly)
1411                 bio->bio_driver_info = (void *)(intptr_t)i;
1412         else
1413                 bio->bio_driver_info = (void *)(intptr_t)0;
1414
1415         for (j = 0; j < i; ++j)
1416                 vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG);
1417
1418         mycpu->gd_cnt.v_swapin++;
1419         mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
1420
1421         /*
1422          * We still hold the lock on mreq, and our automatic completion routine
1423          * does not remove it.
1424          */
1425         vm_object_pip_add(object, bp->b_xio.xio_npages);
1426
1427         /*
1428          * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1429          * this point because we automatically release it on completion.
1430          * Instead, we look at the one page we are interested in which we
1431          * still hold a lock on even through the I/O completion.
1432          *
1433          * The other pages in our m[] array are also released on completion,
1434          * so we cannot assume they are valid anymore either.
1435          */
1436         bp->b_cmd = BUF_CMD_READ;
1437         BUF_KERNPROC(bp);
1438         vn_strategy(swapdev_vp, bio);
1439
1440         /*
1441          * Wait for the page we want to complete.  PG_SWAPINPROG is always
1442          * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1443          * is set in the meta-data.
1444          *
1445          * If this is a read-ahead only we return immediately without
1446          * waiting for I/O.
1447          */
1448         if (raonly) {
1449                 vm_object_drop(object);
1450                 return(VM_PAGER_OK);
1451         }
1452
1453         /*
1454          * The read-ahead case also covers the originally requested page.
1455          */
1456         for (;;) {
1457                 flags = mreq->flags;
1458                 cpu_ccfence();
1459                 if ((flags & PG_SWAPINPROG) == 0)
1460                         break;
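                /*
                 * Interlocked sleep: if another cpu changes m->flags
                 * between the snapshot above and the cmpset below we
                 * retry the PG_SWAPINPROG test rather than sleeping on
                 * stale state.
                 */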
1461                 tsleep_interlock(mreq, 0);
1462                 if (!atomic_cmpset_int(&mreq->flags, flags,
1463                                        flags | PG_WANTED | PG_REFERENCED)) {
1464                         continue;
1465                 }
1466                 mycpu->gd_cnt.v_intrans++;
1467                 if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) {
1468                         kprintf(
1469                             "swap_pager: indefinite wait buffer: "
1470                         "bp %p offset: %lld, size: %ld\n",
1471                             bp,
1472                             (long long)bio->bio_offset,
1473                             (long)bp->b_bcount
1474                         );
1475                 }
1476         }
1477
1478         /*
1479          * Disallow speculative reads prior to the PG_SWAPINPROG test.
1480          */
1481         cpu_lfence();
1482
1483         /*
1484          * mreq is left busied after completion, but all the other pages
1485          * are freed.  If we had an unrecoverable read error the page will
1486          * not be valid.
1487          */
1488         vm_object_drop(object);
1489         if (mreq->valid != VM_PAGE_BITS_ALL)
1490                 return(VM_PAGER_ERROR);
1491         else
1492                 return(VM_PAGER_OK);
1493
1494         /*
1495          * A final note: in a low swap situation, we cannot deallocate swap
1496          * and mark a page dirty here because the caller is likely to mark
1497          * the page clean when we return, causing the page to possibly revert 
1498          * to all-zeros later.
1499          */
1500 }
1501
1502 /*
1503  *      swap_pager_putpages: 
1504  *
1505  *      Assign swap (if necessary) and initiate I/O on the specified pages.
1506  *
1507  *      We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1508  *      are automatically converted to SWAP objects.
1509  *
1510  *      In a low memory situation we may block in vn_strategy(), but the new 
1511  *      vm_page reservation system coupled with properly written VFS devices 
1512  *      should ensure that no low-memory deadlock occurs.  This is an area
1513  *      which needs work.
1514  *
1515  *      The parent has N vm_object_pip_add() references prior to
1516  *      calling us and will remove references for rtvals[] that are
1517  *      not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1518  *      completion.
1519  *
1520  *      The parent has soft-busy'd the pages it passes us and will unbusy
1521  *      those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1522  *      We need to unbusy the rest on I/O completion.
1523  *
1524  * No requirements.
1525  */
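/*
 * Hypothetical caller sketch (not taken from this file) illustrating the
 * rtvals[] contract described above; the exact pip/unbusy primitives are
 * the caller's business:
 *
 *      swap_pager_putpages(object, m, count, 0, rtvals);
 *      for (i = 0; i < count; ++i) {
 *              if (rtvals[i] != VM_PAGER_PEND) {
 *                      vm_object_pip_wakeup(object);   (drop one pip ref)
 *                      ...unbusy m[i]...               (drop the soft-busy)
 *              }
 *      }
 */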
1526 void
1527 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1528                     int flags, int *rtvals)
1529 {
1530         int i;
1531         int n = 0;
1532
1533         vm_object_hold(object);
1534
1535         if (count && m[0]->object != object) {
1536                 panic("swap_pager_putpages: object mismatch %p/%p",
1537                     object, 
1538                     m[0]->object
1539                 );
1540         }
1541
1542         /*
1543          * Step 1
1544          *
1545          * Turn object into OBJT_SWAP
1546          * Check for bogus sysops
1547          *
1548          * Force sync if not the pageout process; we don't want any single
1549          * non-pageout process to be able to hog the I/O subsystem!  This
1550          * can be overridden by setting the swap_user_async sysctl.
1551          */
1552         if (object->type == OBJT_DEFAULT)
1553                 swp_pager_meta_convert(object);
1556
1557         /*
1558          * Normally we force synchronous swap I/O if this is not the
1559          * pageout daemon to prevent any single user process limited
1560          * via RLIMIT_RSS from hogging swap write bandwidth.
1561          */
1562         if (curthread != pagethread &&
1563             curthread != emergpager &&
1564             swap_user_async == 0) {
1565                 flags |= VM_PAGER_PUT_SYNC;
1566         }
1567
1568         /*
1569          * Step 2
1570          *
1571          * Update nsw parameters from swap_async_max sysctl values.  
1572          * Do not let the sysop crash the machine with bogus numbers.
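         *
         * For example, with nswbuf_kva == 256 a requested swap_async_max
         * of 1000 is clamped to 128, and a requested 0 is raised to 1.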
1573          */
1574         if (swap_async_max != nsw_wcount_async_max) {
1575                 int n;
1576
1577                 /*
1578                  * limit range
1579                  */
1580                 if ((n = swap_async_max) > nswbuf_kva / 2)
1581                         n = nswbuf_kva / 2;
1582                 if (n < 1)
1583                         n = 1;
1584                 swap_async_max = n;
1585
1586                 /*
1587                  * Adjust difference ( if possible ).  If the current async
1588                  * count is too low, we may not be able to make the adjustment
1589                  * at this time.
1590                  *
1591                  * vm_token needed for nsw_wcount sleep interlock
1592                  */
1593                 lwkt_gettoken(&vm_token);
1594                 n -= nsw_wcount_async_max;
1595                 if (nsw_wcount_async + n >= 0) {
1596                         nsw_wcount_async_max += n;
1597                         pbuf_adjcount(&nsw_wcount_async, n);
1598                 }
1599                 lwkt_reltoken(&vm_token);
1600         }
1601
1602         /*
1603          * Step 3
1604          *
1605          * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1606          * The page is left dirty until the pageout operation completes
1607          * successfully.
1608          */
1609
1610         for (i = 0; i < count; i += n) {
1611                 struct buf *bp;
1612                 struct bio *bio;
1613                 swblk_t blk;
1614                 int j;
1615
1616                 /*
1617                  * Maximum I/O size is limited by a number of factors.
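                 * (the remaining page count, BLIST_MAX_ALLOC, and the
                 * pbuf cluster limit nsw_cluster_max)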
1618                  */
1619
1620                 n = min(BLIST_MAX_ALLOC, count - i);
1621                 n = min(n, nsw_cluster_max);
1622
1623                 lwkt_gettoken(&vm_token);
1624
1625                 /*
1626                  * Get biggest block of swap we can.  If we fail, fall
1627                  * back and try to allocate a smaller block.  Don't go
1628                  * overboard trying to allocate space if it would overly
1629                  * fragment swap.
1630                  */
1631                 while (
1632                     (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE &&
1633                     n > 4
1634                 ) {
1635                         n >>= 1;
1636                 }
1637                 if (blk == SWAPBLK_NONE) {
1638                         for (j = 0; j < n; ++j)
1639                                 rtvals[i+j] = VM_PAGER_FAIL;
1640                         lwkt_reltoken(&vm_token);
1641                         continue;
1642                 }
1643                 if (vm_report_swap_allocs > 0) {
1644                         kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n);
1645                         --vm_report_swap_allocs;
1646                 }
1647
1648                 /*
1649                  * The I/O we are constructing cannot cross a physical
1650                  * disk boundary in the swap stripe.
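                 *
                 * Illustrative example (assuming SWB_DMMAX == 32): with
                 * blk == 60 and n == 8 the range crosses a boundary, so
                 * j becomes 4, blocks 64..67 are freed back, and the I/O
                 * is trimmed to cover blocks 60..63 only.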
1651                  */
1652                 if ((blk ^ (blk + n)) & ~SWB_DMMASK) {
1653                         j = ((blk + SWB_DMMAX) & ~SWB_DMMASK) - blk;
1654                         swp_pager_freeswapspace(object, blk + j, n - j);
1655                         n = j;
1656                 }
1657
1658                 /*
1659                  * All I/O parameters have been satisfied, build the I/O
1660                  * request and assign the swap space.
1661                  */
1662                 if ((flags & VM_PAGER_PUT_SYNC))
1663                         bp = getpbuf_kva(&nsw_wcount_sync);
1664                 else
1665                         bp = getpbuf_kva(&nsw_wcount_async);
1666                 bio = &bp->b_bio1;
1667
1668                 lwkt_reltoken(&vm_token);
1669
1670                 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1671
1672                 bp->b_bcount = PAGE_SIZE * n;
1673                 bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1674
1675                 for (j = 0; j < n; ++j) {
1676                         vm_page_t mreq = m[i+j];
1677
1678                         swp_pager_meta_build(mreq->object, mreq->pindex,
1679                                              blk + j);
1680                         if (object->type == OBJT_SWAP)
1681                                 vm_page_dirty(mreq);
1682                         rtvals[i+j] = VM_PAGER_OK;
1683
1684                         vm_page_flag_set(mreq, PG_SWAPINPROG);
1685                         bp->b_xio.xio_pages[j] = mreq;
1686                 }
1687                 bp->b_xio.xio_npages = n;
1688
1689                 mycpu->gd_cnt.v_swapout++;
1690                 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
1691
1692                 bp->b_dirtyoff = 0;             /* req'd for NFS */
1693                 bp->b_dirtyend = bp->b_bcount;  /* req'd for NFS */
1694                 bp->b_cmd = BUF_CMD_WRITE;
1695                 bio->bio_caller_info1.index = SWBIO_WRITE;
1696
1697 #if 0
1698                 /* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
1699                 bio->bio_crc = iscsi_crc32(bp->b_data, bp->b_bcount);
1700                 {
1701                     uint32_t crc = 0;
1702                     for (j = 0; j < n; ++j) {
1703                             vm_page_t mm = bp->b_xio.xio_pages[j];
1704                             char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mm));
1705                             crc = iscsi_crc32_ext(p, PAGE_SIZE, crc);
1706                     }
1707                     if (bio->bio_crc != crc) {
1708                             kprintf("PREWRITE MISMATCH-A "
1709                                     "bdata=%08x dmap=%08x bdata=%08x (%d)\n",
1710                                     bio->bio_crc,
1711                                     crc,
1712                                     iscsi_crc32(bp->b_data, bp->b_bcount),
1713                                     bp->b_bcount);
1714 #ifdef _KERNEL_VIRTUAL
1715                             madvise(bp->b_data, bp->b_bcount, MADV_INVAL);
1716 #endif
1717                             crc = 0;
1718                             for (j = 0; j < n; ++j) {
1719                                     vm_page_t mm = bp->b_xio.xio_pages[j];
1720                                     char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mm));
1721                                     crc = iscsi_crc32_ext(p, PAGE_SIZE, crc);
1722                             }
1723                             kprintf("PREWRITE MISMATCH-B "
1724                                     "bdata=%08x dmap=%08x\n",
1725                                     iscsi_crc32(bp->b_data, bp->b_bcount),
1726                                     crc);
1727                     }
1728                 }
1729 #endif
1730
1731                 /*
1732                  * asynchronous
1733                  */
1734                 if ((flags & VM_PAGER_PUT_SYNC) == 0) {
1735                         bio->bio_done = swp_pager_async_iodone;
1736                         BUF_KERNPROC(bp);
1737                         vn_strategy(swapdev_vp, bio);
1738
1739                         for (j = 0; j < n; ++j)
1740                                 rtvals[i+j] = VM_PAGER_PEND;
1741                         continue;
1742                 }
1743
1744                 /*
1745                  * Issue synchronously.
1746                  *
1747                  * Wait for the sync I/O to complete, then update rtvals.
1748                  * We just set the rtvals[] to VM_PAGER_PEND so we can call
1749                  * our async completion routine at the end, thus avoiding a
1750                  * double-free.
1751                  */
1752                 bio->bio_caller_info1.index |= SWBIO_SYNC;
1753                 if (flags & VM_PAGER_TRY_TO_CACHE)
1754                         bio->bio_caller_info1.index |= SWBIO_TTC;
1755                 bio->bio_done = biodone_sync;
1756                 bio->bio_flags |= BIO_SYNC;
1757                 vn_strategy(swapdev_vp, bio);
1758                 biowait(bio, "swwrt");
1759
1760                 for (j = 0; j < n; ++j)
1761                         rtvals[i+j] = VM_PAGER_PEND;
1762
1763                 /*
1764                  * Now that we are through with the bp, we can call the
1765                  * normal async completion, which frees everything up.
1766                  */
1767                 swp_pager_async_iodone(bio);
1768         }
1769         vm_object_drop(object);
1770 }
1771
1772 /*
1773  * No requirements.
1774  *
1775  * Recalculate the low and high-water marks.
1776  */
1777 void
1778 swap_pager_newswap(void)
1779 {
1780         /*
1781          * NOTE: vm_swap_max cannot exceed 1 billion blocks, which is the
1782          *       limitation imposed by the blist code.  Remember that this
1783          *       will be divided by NSWAP_MAX (4), so each swap device is
1784          *       limited to around a terabyte.
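         *
         *       Example: with vm_swap_max == 1000000 blocks the marks
         *       become 40000 (4%) and 60000 (6%).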
1785          */
1786         if (vm_swap_max) {
1787                 nswap_lowat = (int64_t)vm_swap_max * 4 / 100;   /* 4% left */
1788                 nswap_hiwat = (int64_t)vm_swap_max * 6 / 100;   /* 6% left */
1789                 kprintf("swap low/high-water marks set to %d/%d\n",
1790                         nswap_lowat, nswap_hiwat);
1791         } else {
1792                 nswap_lowat = 128;
1793                 nswap_hiwat = 512;
1794         }
1795         swp_sizecheck();
1796 }
1797
1798 /*
1799  *      swp_pager_async_iodone:
1800  *
1801  *      Completion routine for asynchronous reads and writes from/to swap.
1802  *      Also called manually by synchronous code to finish up a bp.
1803  *
1804  *      For READ operations the pages are PG_BUSY'd (hard-busied); for
1805  *      WRITE operations they are vm_page_t->busy'd (soft-busied).  On
1806  *      READ completion we unbusy all pages except the 'main' request
1807  *      page.  On WRITE completion we unbusy all pages ( we can do this
1808  *      because we marked them all VM_PAGER_PEND on return from putpages ).
1809  *
1810  *      This routine may not block.
1811  *
1812  * No requirements.
1813  */
1814 static void
1815 swp_pager_async_iodone(struct bio *bio)
1816 {
1817         struct buf *bp = bio->bio_buf;
1818         vm_object_t object = NULL;
1819         int i;
1820         int *nswptr;
1821
1822         /*
1823          * report error
1824          */
1825         if (bp->b_flags & B_ERROR) {
1826                 kprintf(
1827                     "swap_pager: I/O error - %s failed; offset %lld, "
1828                         "size %ld, error %d\n",
1829                     ((bio->bio_caller_info1.index & SWBIO_READ) ?
1830                         "pagein" : "pageout"),
1831                     (long long)bio->bio_offset,
1832                     (long)bp->b_bcount,
1833                     bp->b_error
1834                 );
1835         }
1836
1837         /*
1838          * set object.
1839          */
1840         if (bp->b_xio.xio_npages)
1841                 object = bp->b_xio.xio_pages[0]->object;
1842
1843 #if 0
1844         /* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
1845         if (bio->bio_caller_info1.index & SWBIO_WRITE) {
1846                 if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) {
1847                         kprintf("SWAPOUT: BADCRC %08x %08x\n",
1848                                 bio->bio_crc,
1849                                 iscsi_crc32(bp->b_data, bp->b_bcount));
1850                         for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1851                                 vm_page_t m = bp->b_xio.xio_pages[i];
1852                                 if (m->flags & PG_WRITEABLE)
1853                                         kprintf("SWAPOUT: "
1854                                                 "%d/%d %p writable\n",
1855                                                 i, bp->b_xio.xio_npages, m);
1856                         }
1857                 }
1858         }
1859 #endif
1860
1861         /*
1862          * remove the mapping for kernel virtual
1863          */
1864         pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
1865
1866         /*
1867          * cleanup pages.  If an error occurs writing to swap, we are in
1868          * very serious trouble.  If it happens to be a disk error, though,
1869          * we may be able to recover by reassigning the swap later on.  In
1870          * that case we remove the swap assignment for the page and redirty
1871          * it; ideally the erroneous block(s) would be retired so they are
1872          * never reallocated as swap, but see the interrupt caveat below.
1873          */
1874         for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1875                 vm_page_t m = bp->b_xio.xio_pages[i];
1876
1877                 if (bp->b_flags & B_ERROR) {
1878                         /*
1879                          * If an error occurs I'd love to throw the swapblk
1880                          * away without freeing it back to swapspace, so it
1881                          * can never be used again.  But I can't from an 
1882                          * interrupt.
1883                          */
1884
1885                         if (bio->bio_caller_info1.index & SWBIO_READ) {
1886                                 /*
1887                                  * When reading, reqpage needs to stay
1888                                  * locked for the parent, but all other
1889                                  * pages can be freed.  We still want to
1890                                  * wakeup the parent waiting on the page,
1891                                  * though.  ( also: the requested index can
1892                                  * lie beyond the array and match nothing ).
1893                                  *
1894                                  * We have to wake specifically requested pages
1895                                  * up too because we cleared PG_SWAPINPROG and
1896                                  * someone may be waiting for that.
1897                                  *
1898                                  * NOTE: For reads, m->dirty will probably
1899                                  *       be overridden by the original caller
1900                                  *       of getpages so don't play cute tricks
1901                                  *       here.
1902                                  *
1903                                  * NOTE: We can't actually free the page from
1904                                  *       here, because this is an interrupt.
1905                                  *       It is not legal to mess with
1906                                  *       object->memq from an interrupt.
1907                                  *       Deactivate the page instead.
1908                                  *
1909                                  * WARNING! The instant PG_SWAPINPROG is
1910                                  *          cleared another cpu may start
1911                                  *          using the mreq page (it will
1912                                  *          check m->valid immediately).
1913                                  */
1914
1915                                 m->valid = 0;
1916                                 vm_page_flag_clear(m, PG_SWAPINPROG);
1917
1918                                 /*
1919                                  * bio_driver_info holds the requested page
1920                                  * index.
1921                                  */
1922                                 if (i != (int)(intptr_t)bio->bio_driver_info) {
1923                                         vm_page_deactivate(m);
1924                                         vm_page_wakeup(m);
1925                                 } else {
1926                                         vm_page_flash(m);
1927                                 }
1928                                 /*
1929                                  * If i matches the requested index, do not wake
1930                                  * the page up.  The caller needs to.
1931                                  */
1932                         } else {
1933                                 /*
1934                                  * If a write error occurs remove the swap
1935                                  * assignment (note that PG_SWAPPED may or
1936                                  * may not be set depending on prior activity).
1937                                  *
1938                                  * Re-dirty OBJT_SWAP pages as there is no
1939                                  * other backing store, we can't throw the
1940                                  * page away.
1941                                  *
1942                                  * Non-OBJT_SWAP pages (aka swapcache) must
1943                                  * not be dirtied since they may not have
1944                                  * been dirty in the first place, and they
1945                                  * do have backing store (the vnode).
1946                                  */
1947                                 vm_page_busy_wait(m, FALSE, "swadpg");
1948                                 vm_object_hold(m->object);
1949                                 swp_pager_meta_ctl(m->object, m->pindex,
1950                                                    SWM_FREE);
1951                                 vm_page_flag_clear(m, PG_SWAPPED);
1952                                 vm_object_drop(m->object);
1953                                 if (m->object->type == OBJT_SWAP) {
1954                                         vm_page_dirty(m);
1955                                         vm_page_activate(m);
1956                                 }
1957                                 vm_page_io_finish(m);
1958                                 vm_page_flag_clear(m, PG_SWAPINPROG);
1959                                 vm_page_wakeup(m);
1960                         }
1961                 } else if (bio->bio_caller_info1.index & SWBIO_READ) {
1962                         /*
1963                          * NOTE: for reads, m->dirty will probably be 
1964                          * overridden by the original caller of getpages so
1965                          * we cannot set them in order to free the underlying
1966                          * swap in a low-swap situation.  I don't think we'd
1967                          * want to do that anyway, but it was an optimization
1968                          * that existed in the old swapper for a time before
1969                          * it got ripped out due to precisely this problem.
1970                          *
1971                          * If not the requested page then deactivate it.
1972                          *
1973                          * Note that the requested page, reqpage, is left
1974                          * busied, but we still have to wake it up.  The
1975                          * other pages are released (unbusied) by 
1976                          * vm_page_wakeup().  We do not set reqpage's
1977                          * valid bits here, it is up to the caller.
1978                          */
1979
1980                         /* 
1981                          * NOTE: Can't call pmap_clear_modify(m) from an
1982                          *       interrupt thread, the pmap code may have to
1983                          *       map non-kernel pmaps and currently asserts
1984                          *       the case.
1985                          *
1986                          * WARNING! The instant PG_SWAPINPROG is
1987                          *          cleared another cpu may start
1988                          *          using the mreq page (it will
1989                          *          check m->valid immediately).
1990                          */
1991                         /*pmap_clear_modify(m);*/
1992                         m->valid = VM_PAGE_BITS_ALL;
1993                         vm_page_undirty(m);
1994                         vm_page_flag_set(m, PG_SWAPPED);
1995                         vm_page_flag_clear(m, PG_SWAPINPROG);
1996
1997                         /*
1998                          * We have to wake specifically requested pages
1999                          * up too because we cleared PG_SWAPINPROG and
2000                          * could be waiting for it in getpages.  However,
2001                          * be sure to not unbusy getpages specifically
2002                          * requested page - getpages expects it to be 
2003                          * left busy.
2004                          *
2005                          * bio_driver_info holds the requested page
2006                          */
2007                         if (i != (int)(intptr_t)bio->bio_driver_info) {
2008                                 vm_page_deactivate(m);
2009                                 vm_page_wakeup(m);
2010                         } else {
2011                                 vm_page_flash(m);
2012                         }
2013                 } else {
2014                         /*
2015                          * Mark the page clean but do not mess with the
2016                          * pmap-layer's modified state.  That state should
2017                          * also be clear since the caller protected the
2018                          * page with VM_PROT_READ, but allow the case.
2019                          *
2020                          * We are in an interrupt, avoid pmap operations.
2021                          *
2022                          * If we have a severe page deficit, deactivate the
2023                          * page.  Do not try to cache it (which would also
2024                          * involve a pmap op), because the page might still
2025                          * be read-heavy.
2026                          *
2027                          * When using the swap to cache clean vnode pages
2028                          * we do not mess with the page dirty bits.
2029                          *
2030                          * NOTE! Nobody is waiting for the key mreq page
2031                          *       on write completion.
2032                          */
2033                         vm_page_busy_wait(m, FALSE, "swadpg");
2034                         if (m->object->type == OBJT_SWAP)
2035                                 vm_page_undirty(m);
2036                         vm_page_flag_set(m, PG_SWAPPED);
2037                         vm_page_flag_clear(m, PG_SWAPINPROG);
2038                         if (vm_page_count_severe())
2039                                 vm_page_deactivate(m);
2040                         vm_page_io_finish(m);
2041                         if (bio->bio_caller_info1.index & SWBIO_TTC)
2042                                 vm_page_try_to_cache(m);
2043                         else
2044                                 vm_page_wakeup(m);
2045                 }
2046         }
2047
2048         /*
2049          * adjust pip.  NOTE: the original parent may still have its own
2050          * pip refs on the object.
2051          */
2052
2053         if (object)
2054                 vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);
2055
2056         /*
2057          * Release the physical I/O buffer.
2058          *
2059          * NOTE: Due to synchronous operations in the write case b_cmd may
2060          *       already be set to BUF_CMD_DONE and BIO_SYNC may have already
2061          *       been cleared.
2062          *
2063          * Use vm_token to interlock nsw_rcount/wcount wakeup?
2064          */
2065         lwkt_gettoken(&vm_token);
2066         if (bio->bio_caller_info1.index & SWBIO_READ)
2067                 nswptr = &nsw_rcount;
2068         else if (bio->bio_caller_info1.index & SWBIO_SYNC)
2069                 nswptr = &nsw_wcount_sync;
2070         else
2071                 nswptr = &nsw_wcount_async;
2072         bp->b_cmd = BUF_CMD_DONE;
2073         relpbuf(bp, nswptr);
2074         lwkt_reltoken(&vm_token);
2075 }
2076
2077 /*
2078  * Fault-in a potentially swapped page and remove the swap reference.
2079  * (used by swapoff code)
2080  *
2081  * object must be held.
2082  */
2083 static __inline void
2084 swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex)
2085 {
2086         struct vnode *vp;
2087         vm_page_t m;
2088         int error;
2089
2090         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2091
2092         if (object->type == OBJT_VNODE) {
2093                 /*
2094                  * Any swap related to a vnode is due to swapcache.  We must
2095                  * vget() the vnode in case it is not active (otherwise
2096                  * vref() will panic).  Calling vm_object_page_remove() will
2097                  * ensure that any swap ref is removed interlocked with the
2098                  * page.  clean_only is set to TRUE so we don't throw away
2099                  * dirty pages.
2100                  */
2101                 vp = object->handle;
2102                 error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
2103                 if (error == 0) {
2104                         vm_object_page_remove(object, pindex, pindex + 1, TRUE);
2105                         vput(vp);
2106                 }
2107         } else {
2108                 /*
2109                  * Otherwise it is a normal OBJT_SWAP object and we can
2110                  * fault the page in and remove the swap.
2111                  */
2112                 m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
2113                                          VM_PROT_NONE,
2114                                          VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
2115                                          sharedp, &error);
2116                 if (m)
2117                         vm_page_unhold(m);
2118         }
2119 }
2120
2121 /*
2122  * This removes all swap blocks related to a particular device.  We have
2123  * to be careful of ripups during the scan.
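 *
 * A marker object (OBJT_MARKER) is threaded through each hash list so the
 * scan can resume where it left off if the list is ripped up while we
 * block; after an object is processed the marker is re-inserted just
 * past it.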
2124  */
2125 static int swp_pager_swapoff_callback(struct swblock *swap, void *data);
2126
2127 int
2128 swap_pager_swapoff(int devidx)
2129 {
2130         struct vm_object_hash *hash;
2131         struct swswapoffinfo info;
2132         struct vm_object marker;
2133         vm_object_t object;
2134         int n;
2135
2136         bzero(&marker, sizeof(marker));
2137         marker.type = OBJT_MARKER;
2138
2139         for (n = 0; n < VMOBJ_HSIZE; ++n) {
2140                 hash = &vm_object_hash[n];
2141
2142                 lwkt_gettoken(&hash->token);
2143                 TAILQ_INSERT_HEAD(&hash->list, &marker, object_list);
2144
2145                 while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
2146                         if (object->type == OBJT_MARKER)
2147                                 goto skip;
2148                         if (object->type != OBJT_SWAP &&
2149                             object->type != OBJT_VNODE)
2150                                 goto skip;
2151                         vm_object_hold(object);
2152                         if (object->type != OBJT_SWAP &&
2153                             object->type != OBJT_VNODE) {
2154                                 vm_object_drop(object);
2155                                 goto skip;
2156                         }
2157
2158                         /*
2159                          * Object is special in that we can't just pagein
2160                          * into vm_page's in it (tmpfs, vn).
2161                          */
2162                         if ((object->flags & OBJ_NOPAGEIN) &&
2163                             RB_ROOT(&object->swblock_root)) {
2164                                 vm_object_drop(object);
2165                                 goto skip;
2166                         }
2167
2168                         info.object = object;
2169                         info.shared = 0;
2170                         info.devidx = devidx;
2171                         swblock_rb_tree_RB_SCAN(&object->swblock_root,
2172                                             NULL, swp_pager_swapoff_callback,
2173                                             &info);
2174                         vm_object_drop(object);
2175 skip:
2176                         if (object == TAILQ_NEXT(&marker, object_list)) {
2177                                 TAILQ_REMOVE(&hash->list, &marker, object_list);
2178                                 TAILQ_INSERT_AFTER(&hash->list, object,
2179                                                    &marker, object_list);
2180                         }
2181                 }
2182                 TAILQ_REMOVE(&hash->list, &marker, object_list);
2183                 lwkt_reltoken(&hash->token);
2184         }
2185
2186         /*
2187          * If we fail to locate all swblocks we just fail gracefully and
2188          * do not bother to restore paging on the swap device.  The
2189          * user can simply retry the swapoff.
2190          */
2191         if (swdevt[devidx].sw_nused)
2192                 return (1);
2193         else
2194                 return (0);
2195 }
2196
2197 static
2198 int
2199 swp_pager_swapoff_callback(struct swblock *swap, void *data)
2200 {
2201         struct swswapoffinfo *info = data;
2202         vm_object_t object = info->object;
2203         vm_pindex_t index;
2204         swblk_t v;
2205         int i;
2206
2207         index = swap->swb_index;
2208         for (i = 0; i < SWAP_META_PAGES; ++i) {
2209                 /*
2210                  * Make sure we don't race a dying object.  This will
2211                  * kill the scan of the object's swap blocks entirely.
2212                  */
2213                 if (object->flags & OBJ_DEAD)
2214                         return(-1);
2215
2216                 /*
2217                  * Fault the page, which can obviously block.  If the swap
2218                  * structure disappears break out.
2219                  */
2220                 v = swap->swb_pages[i];
2221                 if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
2222                         swp_pager_fault_page(object, &info->shared,
2223                                              swap->swb_index + i);
2224                         /* swap ptr might go away */
2225                         if (RB_LOOKUP(swblock_rb_tree,
2226                                       &object->swblock_root, index) != swap) {
2227                                 break;
2228                         }
2229                 }
2230         }
2231         return(0);
2232 }
2233
2234 /************************************************************************
2235  *                              SWAP META DATA                          *
2236  ************************************************************************
2237  *
2238  *      These routines manipulate the swap metadata stored in the 
2239  *      OBJT_SWAP object.
2240  *
2241  *      Swap metadata is kept in a per-object red-black tree of swblock
2242  *      structures (object->swblock_root) rather than being linked into
2243  *      the vm_page; the object also keeps a tracking counter.
2244  */
2245
2246 /*
2247  * Lookup the swblock containing the specified swap block index.
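 *
 * For example, assuming SWAP_META_PAGES == 16, an index of 37 is masked
 * down to a base of 32 and resolves to that swblock.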
2248  *
2249  * The caller must hold the object.
2250  */
2251 static __inline
2252 struct swblock *
2253 swp_pager_lookup(vm_object_t object, vm_pindex_t index)
2254 {
2255         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2256         index &= ~(vm_pindex_t)SWAP_META_MASK;
2257         return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
2258 }
2259
2260 /*
2261  * Remove a swblock from the RB tree.
2262  *
2263  * The caller must hold the object.
2264  */
2265 static __inline
2266 void
2267 swp_pager_remove(vm_object_t object, struct swblock *swap)
2268 {
2269         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2270         RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
2271 }
2272
2273 /*
2274  * Convert default object to swap object if necessary
2275  *
2276  * The caller must hold the object.
2277  */
2278 static void
2279 swp_pager_meta_convert(vm_object_t object)
2280 {
2281         if (object->type == OBJT_DEFAULT) {
2282                 object->type = OBJT_SWAP;
2283                 KKASSERT(object->swblock_count == 0);
2284         }
2285 }
2286
2287 /*
2288  * SWP_PAGER_META_BUILD() -     add swap block to swap meta data for object
2289  *
2290  *      We first convert the object to a swap object if it is a default
2291  *      object.  Vnode objects do not need to be converted.
2292  *
2293  *      The specified swapblk is added to the object's swap metadata; the
2294  *      caller may not pass SWAPBLK_NONE.  Any previously assigned swapblk
2295  *      for the index is freed first.
2296  *
2297  * The caller must hold the object.
2298  */
2299 static void
2300 swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
2301 {
2302         struct swblock *swap;
2303         struct swblock *oswap;
2304         vm_pindex_t v;
2305
2306         KKASSERT(swapblk != SWAPBLK_NONE);
2307         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2308
2309         /*
2310          * Convert object if necessary
2311          */
2312         if (object->type == OBJT_DEFAULT)
2313                 swp_pager_meta_convert(object);
2314         
2315         /*
2316          * Locate swblock.  If not found create, but if we aren't adding
2317          * anything just return.  If we run out of space in the map we wait
2318          * and, since the hash table may have changed, retry.
2319          */
2320 retry:
2321         swap = swp_pager_lookup(object, index);
2322
2323         if (swap == NULL) {
2324                 int i;
2325
2326                 swap = zalloc(swap_zone);
2327                 if (swap == NULL) {
2328                         vm_wait(0);
2329                         goto retry;
2330                 }
2331                 swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
2332                 swap->swb_count = 0;
2333
2334                 ++object->swblock_count;
2335
2336                 for (i = 0; i < SWAP_META_PAGES; ++i)
2337                         swap->swb_pages[i] = SWAPBLK_NONE;
2338                 oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
2339                 KKASSERT(oswap == NULL);
2340         }
2341
2342         /*
2343          * Delete prior contents of metadata.
2344          *
2345          * NOTE: Decrement swb_count after the freeing operation (which
2346          *       might block) to prevent racing destruction of the swblock.
2347          */
2348         index &= SWAP_META_MASK;
2349
2350         while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
2351                 swap->swb_pages[index] = SWAPBLK_NONE;
2352                 /* can block */
2353                 swp_pager_freeswapspace(object, v, 1);
2354                 --swap->swb_count;
2355                 --mycpu->gd_vmtotal.t_vm;
2356         }
2357
2358         /*
2359          * Enter block into metadata
2360          */
2361         swap->swb_pages[index] = swapblk;
2362         if (swapblk != SWAPBLK_NONE) {
2363                 ++swap->swb_count;
2364                 ++mycpu->gd_vmtotal.t_vm;
2365         }
2366 }
2367
2368 /*
2369  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2370  *
2371  *      The requested range of blocks is freed, with any associated swap 
2372  *      returned to the swap bitmap.
2373  *
2374  *      This routine will free swap metadata structures as they are cleaned 
2375  *      out.  This routine does *NOT* operate on swap metadata associated
2376  *      with resident pages.
2377  *
2378  * The caller must hold the object.
2379  */
2380 static int swp_pager_meta_free_callback(struct swblock *swb, void *data);
2381
2382 static void
2383 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
2384 {
2385         struct swfreeinfo info;
2386
2387         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2388
2389         /*
2390          * Nothing to do
2391          */
2392         if (object->swblock_count == 0) {
2393                 KKASSERT(RB_EMPTY(&object->swblock_root));
2394                 return;
2395         }
2396         if (count == 0)
2397                 return;
2398
2399         /*
2400          * Setup for RB tree scan.  Note that the pindex range can be huge
2401          * due to the 64 bit page index space so we cannot safely iterate.
2402          */
2403         info.object = object;
2404         info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
2405         info.begi = index;
2406         info.endi = index + count - 1;
2407         swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
2408                                 swp_pager_meta_free_callback, &info);
2409 }
2410
2411 /*
2412  * The caller must hold the object.
2413  */
2414 static
2415 int
2416 swp_pager_meta_free_callback(struct swblock *swap, void *data)
2417 {
2418         struct swfreeinfo *info = data;
2419         vm_object_t object = info->object;
2420         int index;
2421         int eindex;
2422
2423         /*
2424          * Figure out the range within the swblock.  The wider scan may
2425          * return edge-case swap blocks when the start and/or end points
2426          * are in the middle of a block.
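         *
         * For example (assuming SWAP_META_PAGES == 16): a swblock based
         * at index 32 with begi == 37 starts scanning at slot 5, and if
         * endi extends past the swblock the scan runs through slot 15.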
2427          */
2428         if (swap->swb_index < info->begi)
2429                 index = (int)info->begi & SWAP_META_MASK;
2430         else
2431                 index = 0;
2432
2433         if (swap->swb_index + SWAP_META_PAGES > info->endi)
2434                 eindex = (int)info->endi & SWAP_META_MASK;
2435         else
2436                 eindex = SWAP_META_MASK;
2437
2438         /*
2439          * Scan and free the blocks.  The loop terminates early
2440          * if (swap) runs out of assigned blocks and is freed.
2441          *
2442          * NOTE: Decrement swb_count after swp_pager_freeswapspace()
2443          *       to deal with a zfree race.
2444          */
2445         while (index <= eindex) {
2446                 swblk_t v = swap->swb_pages[index];
2447
2448                 if (v != SWAPBLK_NONE) {
2449                         swap->swb_pages[index] = SWAPBLK_NONE;
2450                         /* can block */
2451                         swp_pager_freeswapspace(object, v, 1);
2452                         --mycpu->gd_vmtotal.t_vm;
2453                         if (--swap->swb_count == 0) {
2454                                 swp_pager_remove(object, swap);
2455                                 zfree(swap_zone, swap);
2456                                 --object->swblock_count;
2457                                 break;
2458                         }
2459                 }
2460                 ++index;
2461         }
2462
2463         /* swap may be invalid here due to zfree above */
2464         lwkt_yield();
2465
2466         return(0);
2467 }
2468
2469 /*
2470  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2471  *
2472  *      This routine locates and destroys all swap metadata associated with
2473  *      an object.
2474  *
2475  * NOTE: Decrement swb_count after the freeing operation (which
2476  *       might block) to prevent racing destruction of the swblock.
2477  *
2478  * The caller must hold the object.
2479  */
2480 static void
2481 swp_pager_meta_free_all(vm_object_t object)
2482 {
2483         struct swblock *swap;
2484         int i;
2485
2486         ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2487
2488         while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
2489                 swp_pager_remove(object, swap);
2490                 for (i = 0; i < SWAP_META_PAGES; ++i) {
2491                         swblk_t v = swap->swb_pages[i];
2492                         if (v != SWAPBLK_NONE) {
2493                                 /* can block */
2494                                 swp_pager_freeswapspace(object, v, 1);
2495                                 --swap->swb_count;
2496                                 --mycpu->gd_vmtotal.t_vm;
2497                         }
2498                 }
2499                 if (swap->swb_count != 0)
2500                         panic("swap_pager_meta_free_all: swb_count != 0");
2501                 zfree(swap_zone, swap);
2502                 --object->swblock_count;
2503                 lwkt_yield();
2504         }
2505         KKASSERT(object->swblock_count == 0);
2506 }
2507
2508 /*
2509  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
2510  *
2511  *      This routine is capable of looking up, popping, or freeing
2512  *      swapblk assignments in the swap meta data or in the vm_page_t.
2513  *      The routine typically returns the swapblk being looked-up or popped,
2514  *      or SWAPBLK_NONE if the block was freed or was never assigned.  This
2515  *      routine will automatically free any invalid meta-data swapblks.
2517  *
2518  *      It is not possible to store invalid swapblks in the swap meta data
2519  *      (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
2520  *
2521  *      When acting on a busy resident page and paging is in progress, we 
2522  *      have to wait until paging is complete but otherwise can act on the 
2523  *      busy page.
2524  *
2525  *      SWM_FREE        remove and free swap block from metadata
2526  *      SWM_POP         remove from meta data but do not free.. pop it out
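 *
 *      For example, swp_pager_meta_ctl(object, index, SWM_POP) returns
 *      the assigned swapblk (or SWAPBLK_NONE) and removes the assignment
 *      without returning the space to the swap bitmap.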
2527  *
2528  * The caller must hold the object.
2529  */
2530 static swblk_t
2531 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
2532 {
2533         struct swblock *swap;
2534         swblk_t r1;
2535
2536         if (object->swblock_count == 0)
2537                 return(SWAPBLK_NONE);
2538
2539         r1 = SWAPBLK_NONE;
2540         swap = swp_pager_lookup(object, index);
2541
2542         if (swap != NULL) {
2543                 index &= SWAP_META_MASK;
2544                 r1 = swap->swb_pages[index];
2545
2546                 if (r1 != SWAPBLK_NONE) {
2547                         if (flags & (SWM_FREE|SWM_POP)) {
2548                                 swap->swb_pages[index] = SWAPBLK_NONE;
2549                                 --mycpu->gd_vmtotal.t_vm;
2550                                 if (--swap->swb_count == 0) {
2551                                         swp_pager_remove(object, swap);
2552                                         zfree(swap_zone, swap);
2553                                         --object->swblock_count;
2554                                 }
2555                         } 
2556                         /* swap ptr may be invalid */
2557                         if (flags & SWM_FREE) {
2558                                 swp_pager_freeswapspace(object, r1, 1);
2559                                 r1 = SWAPBLK_NONE;
2560                         }
2561                 }
2562                 /* swap ptr may be invalid */
2563         }
2564         return(r1);
2565 }