kernel - Refactor swapoff scan code
[dragonfly.git] / sys / vm / swap_pager.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * New Swap System
 * Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#define SWM_FREE	0x02	/* free, period */
#define SWM_POP		0x04	/* pop out */

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04

struct swfreeinfo {
	vm_object_t	object;
	vm_pindex_t	basei;
	vm_pindex_t	begi;
	vm_pindex_t	endi;	/* inclusive */
};

struct swswapoffinfo {
	vm_object_t	object;
	int		devidx;
};

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

int swap_pager_full;		/* swap space exhaustion (task killing) */
int vm_swap_cache_use;
int vm_swap_anon_use;

static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers */
static int nsw_wcount_sync;	/* limit write buffers / synchronous */
static int nsw_wcount_async;	/* limit write buffers / asynchronous */
static int nsw_wcount_async_max; /* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */

struct blist *swapblist;
static int swap_async_max = 4;	/* maximum in-progress async I/O's */
static int swap_burst_read = 0;	/* allow burst reading */
static swblk_t swapiterator;	/* linearize allocations */

/* from vm_swap.c */
extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
extern int nswdev;

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");

SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_size, 0, "");

vm_zone_t swap_zone;

/*
 * Red-Black tree for swblock entries
 *
 * The caller must hold vm_token
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	     vm_pindex_t, swb_index);

int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
	if (swb1->swb_index < swb2->swb_index)
		return(-1);
	if (swb1->swb_index > swb2->swb_index)
		return(1);
	return(0);
}

static
int
rb_swblock_scancmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	if (swb->swb_index > info->endi)
		return(1);
	return(0);
}

static
int
rb_swblock_condcmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	return(0);
}
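
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * The three-way convention used by rb_swblock_scancmp() above is what
 * bounds a tree scan to the pindex range [basei, endi]: entries below
 * the range compare -1, entries past it compare +1, and only entries
 * that compare 0 are visited.  The disabled, standalone C below models
 * that convention with a plain sorted array standing in for the
 * red-black tree; the names and values are made up.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>

struct range { unsigned long basei, endi; };

/* same convention as rb_swblock_scancmp(): <0 = before range, >0 = past it */
static int
range_cmp(unsigned long index, const struct range *r)
{
	if (index < r->basei)
		return(-1);
	if (index > r->endi)
		return(1);
	return(0);
}

int
main(void)
{
	unsigned long indexes[] = { 0, 16, 32, 48, 64 };
	struct range r = { 16, 48 };
	int i;

	for (i = 0; i < 5; ++i) {
		if (range_cmp(indexes[i], &r) == 0)
			printf("index %lu visited by the scan\n", indexes[i]);
	}
	return(0);
}
#endif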
237
984263bc
MD
238/*
239 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
240 * calls hooked from other parts of the VM system and do not appear here.
241 * (see vm/swap_pager.h).
242 */
243
1388df65 244static void swap_pager_dealloc (vm_object_t object);
1b9d3514 245static int swap_pager_getpage (vm_object_t, vm_page_t *, int);
81b5c339 246static void swap_chain_iodone(struct bio *biox);
984263bc
MD
247
248struct pagerops swappagerops = {
984263bc 249 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
1b9d3514 250 swap_pager_getpage, /* pagein */
984263bc 251 swap_pager_putpages, /* pageout */
107e9bcc 252 swap_pager_haspage /* get backing store status for page */
984263bc
MD
253};
254
255/*
256 * dmmax is in page-sized chunks with the new swap system. It was
257 * dev-bsized chunks in the old. dmmax is always a power of 2.
258 *
259 * swap_*() routines are externally accessible. swp_*() routines are
260 * internal.
261 */
262
263int dmmax;
264static int dmmax_mask;
265int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */
266int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */
267
1388df65 268static __inline void swp_sizecheck (void);
81b5c339 269static void swp_pager_async_iodone (struct bio *bio);
984263bc
MD
270
271/*
272 * Swap bitmap functions
273 */
274
8e7c4729 275static __inline void swp_pager_freeswapspace(vm_object_t object,
651d8e75
MD
276 swblk_t blk, int npages);
277static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages);
984263bc
MD
278
279/*
280 * Metadata functions
281 */
282
8e7c4729 283static void swp_pager_meta_convert(vm_object_t);
651d8e75 284static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
8e7c4729
MD
285static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
286static void swp_pager_meta_free_all(vm_object_t);
651d8e75 287static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
984263bc
MD
288
289/*
290 * SWP_SIZECHECK() - update swap_pager_full indication
291 *
292 * update the swap_pager_almost_full indication and warn when we are
293 * about to run out of swap space, using lowat/hiwat hysteresis.
294 *
295 * Clear swap_pager_full ( task killing ) indication when lowat is met.
296 *
8e7c4729
MD
297 * No restrictions on call
298 * This routine may not block.
299 * SMP races are ok.
984263bc 300 */
984263bc 301static __inline void
57e43348 302swp_sizecheck(void)
984263bc
MD
303{
304 if (vm_swap_size < nswap_lowat) {
305 if (swap_pager_almost_full == 0) {
086c1d7e 306 kprintf("swap_pager: out of swap space\n");
984263bc
MD
307 swap_pager_almost_full = 1;
308 }
309 } else {
310 swap_pager_full = 0;
311 if (vm_swap_size > nswap_hiwat)
312 swap_pager_almost_full = 0;
313 }
314}
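
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * Standalone model of the lowat/hiwat latch in swp_sizecheck() above:
 * the "almost full" warning fires once when available swap drops below
 * the low watermark and stays latched until swap rises above the high
 * watermark.  The 128/512 thresholds mirror the nswap_lowat/nswap_hiwat
 * defaults declared earlier; the sample values are made up.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>

static int almost_full;

static void
sizecheck(int avail, int lowat, int hiwat)
{
	if (avail < lowat) {
		if (almost_full == 0) {
			printf("out of swap space (avail=%d)\n", avail);
			almost_full = 1;
		}
	} else if (avail > hiwat) {
		almost_full = 0;
	}
}

int
main(void)
{
	int samples[] = { 600, 100, 120, 400, 600, 90 };
	int i;

	/* warning fires at 100, stays latched until we rise above 512 */
	for (i = 0; i < 6; ++i)
		sizecheck(samples[i], 128, 512);
	return(0);
}
#endif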
315
316/*
317 * SWAP_PAGER_INIT() - initialize the swap pager!
318 *
319 * Expected to be started from system init. NOTE: This code is run
320 * before much else so be careful what you depend on. Most of the VM
321 * system has yet to be initialized at this point.
8e7c4729
MD
322 *
323 * Called from the low level boot code only.
984263bc 324 */
984263bc 325static void
107e9bcc 326swap_pager_init(void *arg __unused)
984263bc 327{
984263bc
MD
328 /*
329 * Device Stripe, in PAGE_SIZE'd blocks
330 */
984263bc
MD
331 dmmax = SWB_NPAGES * 2;
332 dmmax_mask = ~(dmmax - 1);
333}
107e9bcc 334SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL)
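
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * With the defaults above, dmmax = SWB_NPAGES * 2 = 32 pages, so swap is
 * interleaved across the configured devices in 32-page stripes.  The
 * disabled sketch below shows how the BLK2DEVIDX() arithmetic maps a
 * swap block number to a device index when nswdev > 1; the block
 * numbers and device count are made up.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>

int
main(void)
{
	int dmmax = 32;		/* SWB_NPAGES * 2 with the defaults above */
	int nswdev = 2;		/* assume two swap devices */
	long blk;

	/* each dmmax-page stripe alternates between the devices */
	for (blk = 0; blk < 128; blk += 32)
		printf("blk %3ld -> device %ld\n", blk, blk / dmmax % nswdev);
	return(0);
}
#endif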
984263bc
MD
335
336/*
337 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
338 *
339 * Expected to be started from pageout process once, prior to entering
340 * its main loop.
8e7c4729
MD
341 *
342 * Called from the low level boot code only.
984263bc 343 */
984263bc 344void
57e43348 345swap_pager_swap_init(void)
984263bc
MD
346{
347 int n, n2;
348
349 /*
350 * Number of in-transit swap bp operations. Don't
351 * exhaust the pbufs completely. Make sure we
352 * initialize workable values (0 will work for hysteresis
353 * but it isn't very efficient).
354 *
54f51aeb
HP
355 * The nsw_cluster_max is constrained by the number of pages an XIO
356 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
984263bc
MD
357 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
358 * constrained by the swap device interleave stripe size.
359 *
360 * Currently we hardwire nsw_wcount_async to 4. This limit is
361 * designed to prevent other I/O from having high latencies due to
362 * our pageout I/O. The value 4 works well for one or two active swap
363 * devices but is probably a little low if you have more. Even so,
364 * a higher value would probably generate only a limited improvement
365 * with three or four active swap devices since the system does not
366 * typically have to pageout at extreme bandwidths. We will want
367 * at least 2 per swap device, and 4 is a pretty good value if you
368 * have one NFS swap device due to the command/ack latency over NFS.
369 * So it all works out pretty well.
370 */
371
372 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
373
374 nsw_rcount = (nswbuf + 1) / 2;
375 nsw_wcount_sync = (nswbuf + 3) / 4;
376 nsw_wcount_async = 4;
377 nsw_wcount_async_max = nsw_wcount_async;
378
379 /*
79634a66
MD
380 * The zone is dynamically allocated so generally size it to
381 * maxswzone (32MB to 512MB of KVM). Set a minimum size based
382 * on physical memory of around 8x (each swblock can hold 16 pages).
383 *
384 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
385 * has increased dramatically.
984263bc 386 */
12e4aaff 387 n = vmstats.v_page_count / 2;
79634a66 388 if (maxswzone && n < maxswzone / sizeof(struct swblock))
984263bc
MD
389 n = maxswzone / sizeof(struct swblock);
390 n2 = n;
391
392 do {
393 swap_zone = zinit(
394 "SWAPMETA",
395 sizeof(struct swblock),
396 n,
397 ZONE_INTERRUPT,
398 1);
399 if (swap_zone != NULL)
400 break;
401 /*
402 * if the allocation failed, try a zone two thirds the
403 * size of the previous attempt.
404 */
405 n -= ((n + 2) / 3);
406 } while (n > 0);
407
408 if (swap_zone == NULL)
409 panic("swap_pager_swap_init: swap_zone == NULL");
410 if (n2 != n)
086c1d7e 411 kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
984263bc
MD
412}
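
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * Standalone model of the shrink-and-retry loop used above when the
 * SWAPMETA zone cannot be created at full size: each failed attempt
 * retries with roughly two thirds of the previous entry count.  The
 * try_alloc() helper is a purely hypothetical stand-in for zinit()
 * failing above some size; the numbers are made up.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>

static int
try_alloc(int n)
{
	return (n <= 40000);	/* pretend larger zones cannot be allocated */
}

int
main(void)
{
	int n = 100000, n2 = 100000;

	do {
		if (try_alloc(n))
			break;
		n -= ((n + 2) / 3);	/* retry with roughly 2/3 the entries */
	} while (n > 0);

	if (n2 != n)
		printf("entries reduced from %d to %d\n", n2, n);
	return(0);
}
#endif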
413
414/*
415 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
416 * its metadata structures.
417 *
418 * This routine is called from the mmap and fork code to create a new
419 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object
96adc753 420 * and then converting it with swp_pager_meta_convert().
984263bc 421 *
8e7c4729
MD
422 * We only support unnamed objects.
423 *
424 * No restrictions.
984263bc 425 */
5a648714 426vm_object_t
57f7b636 427swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
984263bc
MD
428{
429 vm_object_t object;
430
d28e1355 431 KKASSERT(handle == NULL);
a2ee730d
MD
432 object = vm_object_allocate_hold(OBJT_DEFAULT,
433 OFF_TO_IDX(offset + PAGE_MASK + size));
d28e1355 434 swp_pager_meta_convert(object);
b12defdc 435 vm_object_drop(object);
984263bc
MD
436
437 return (object);
438}
439
440/*
441 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
442 *
443 * The swap backing for the object is destroyed. The code is
444 * designed such that we can reinstantiate it later, but this
445 * routine is typically called only when the entire object is
446 * about to be destroyed.
447 *
8e7c4729
MD
448 * The object must be locked or unreferenceable.
449 * No other requirements.
984263bc 450 */
984263bc 451static void
57e43348 452swap_pager_dealloc(vm_object_t object)
984263bc 453{
b12defdc 454 vm_object_hold(object);
984263bc
MD
455 vm_object_pip_wait(object, "swpdea");
456
457 /*
458 * Free all remaining metadata. We only bother to free it from
459 * the swap meta data. We do not attempt to free swapblk's still
460 * associated with vm_page_t's for this object. We do not care
461 * if paging is still in progress on some objects.
462 */
984263bc 463 swp_pager_meta_free_all(object);
b12defdc 464 vm_object_drop(object);
984263bc
MD
465}
466
467/************************************************************************
468 * SWAP PAGER BITMAP ROUTINES *
469 ************************************************************************/
470
471/*
472 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
473 *
474 * Allocate swap for the requested number of pages. The starting
475 * swap block number (a page index) is returned or SWAPBLK_NONE
476 * if the allocation failed.
477 *
478 * Also has the side effect of advising that somebody made a mistake
479 * when they configured swap and didn't configure enough.
480 *
b12defdc 481 * The caller must hold the object.
8e7c4729 482 * This routine may not block.
984263bc 483 */
651d8e75 484static __inline swblk_t
096e95c0 485swp_pager_getswapspace(vm_object_t object, int npages)
984263bc 486{
651d8e75 487 swblk_t blk;
984263bc 488
b12defdc 489 lwkt_gettoken(&vm_token);
7fa8d3ba
MD
490 blk = blist_allocat(swapblist, npages, swapiterator);
491 if (blk == SWAPBLK_NONE)
492 blk = blist_allocat(swapblist, npages, 0);
493 if (blk == SWAPBLK_NONE) {
984263bc 494 if (swap_pager_full != 2) {
b5e19bf6
MD
495 kprintf("swap_pager_getswapspace: failed alloc=%d\n",
496 npages);
984263bc
MD
497 swap_pager_full = 2;
498 swap_pager_almost_full = 1;
499 }
500 } else {
7fa8d3ba 501 swapiterator = blk;
099f3e5e 502 swapacctspace(blk, -npages);
096e95c0
MD
503 if (object->type == OBJT_SWAP)
504 vm_swap_anon_use += npages;
505 else
506 vm_swap_cache_use += npages;
984263bc
MD
507 swp_sizecheck();
508 }
b12defdc 509 lwkt_reltoken(&vm_token);
984263bc
MD
510 return(blk);
511}
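
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * swp_pager_getswapspace() above allocates from the blist starting at
 * the swapiterator hint and only retries from block 0 if that fails,
 * which keeps allocations roughly linear on disk.  The disabled sketch
 * models that allocate-from-hint-then-wrap pattern with a toy first-fit
 * allocator standing in for blist_allocat(); all names and sizes here
 * are hypothetical.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>
#include <string.h>

#define NBLOCKS	64
#define NONE	(-1)

static char used[NBLOCKS];

/* toy stand-in for blist_allocat(): first fit at or after 'hint' */
static int
toy_allocat(int npages, int hint)
{
	int i, j;

	for (i = hint; i + npages <= NBLOCKS; ++i) {
		for (j = 0; j < npages; ++j) {
			if (used[i + j])
				break;
		}
		if (j == npages) {
			memset(&used[i], 1, npages);
			return(i);
		}
	}
	return(NONE);
}

int
main(void)
{
	int iterator = 48;	/* pretend the previous allocation ended here */
	int blk;

	memset(used, 1, 60);	/* most of the low blocks already in use */
	memset(&used[8], 0, 8);	/* ...but a hole remains near the front */

	/* try from the iterator first, then wrap to the beginning */
	blk = toy_allocat(8, iterator);
	if (blk == NONE)
		blk = toy_allocat(8, 0);
	printf("allocated at %d\n", blk);	/* falls back to the hole at 8 */
	return(0);
}
#endif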
512
513/*
514 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
515 *
516 * This routine returns the specified swap blocks back to the bitmap.
517 *
518 * Note: This routine may not block (it could in the old swap code),
519 * and through the use of the new blist routines it does not block.
520 *
521 * We must be called at splvm() to avoid races with bitmap frees from
522 * vm_page_remove() aka swap_pager_page_removed().
523 *
8e7c4729 524 * This routine may not block.
984263bc
MD
525 */
526
527static __inline void
651d8e75 528swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
984263bc 529{
9f3543c6
MD
530 struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];
531
b12defdc 532 lwkt_gettoken(&vm_token);
9f3543c6 533 sp->sw_nused -= npages;
096e95c0
MD
534 if (object->type == OBJT_SWAP)
535 vm_swap_anon_use -= npages;
536 else
537 vm_swap_cache_use -= npages;
9f3543c6 538
b12defdc
MD
539 if (sp->sw_flags & SW_CLOSING) {
540 lwkt_reltoken(&vm_token);
9f3543c6 541 return;
b12defdc 542 }
9f3543c6
MD
543
544 blist_free(swapblist, blk, npages);
545 vm_swap_size += npages;
984263bc 546 swp_sizecheck();
b12defdc 547 lwkt_reltoken(&vm_token);
984263bc
MD
548}
549
550/*
551 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
552 * range within an object.
553 *
554 * This is a globally accessible routine.
555 *
556 * This routine removes swapblk assignments from swap metadata.
557 *
558 * The external callers of this routine typically have already destroyed
559 * or renamed vm_page_t's associated with this range in the object so
560 * we should be ok.
561 *
8e7c4729 562 * No requirements.
984263bc 563 */
984263bc 564void
8d292090 565swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
984263bc 566{
b12defdc 567 vm_object_hold(object);
984263bc 568 swp_pager_meta_free(object, start, size);
b12defdc 569 vm_object_drop(object);
984263bc
MD
570}
571
8e7c4729
MD
572/*
573 * No requirements.
574 */
8d292090
MD
575void
576swap_pager_freespace_all(vm_object_t object)
577{
b12defdc 578 vm_object_hold(object);
8d292090 579 swp_pager_meta_free_all(object);
b12defdc 580 vm_object_drop(object);
8d292090
MD
581}
582
00a3fdca
MD
583/*
584 * This function conditionally frees swap cache swap starting at
585 * (*basei) in the object. (count) swap blocks will be nominally freed.
586 * The actual number of blocks freed can be more or less than the
587 * requested number.
588 *
589 * This function nominally returns the number of blocks freed. However,
590 * the actual number of blocks freed may be less than the returned value.
591 * If the function is unable to exhaust the object or if it is able to
592 * free (approximately) the requested number of blocks it returns
593 * a value n > count.
594 *
595 * If we exhaust the object we will return a value n <= count.
135b4b20 596 *
b12defdc 597 * The caller must hold the object.
34542daf
MD
598 *
599 * WARNING! If count == 0 then -1 can be returned as a degenerate case,
600 * callers should always pass a count value > 0.
00a3fdca
MD
601 */
602static int swap_pager_condfree_callback(struct swblock *swap, void *data);
603
604int
aecf2182 605swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
00a3fdca
MD
606{
607 struct swfreeinfo info;
08fb7a9d
MD
608 int n;
609 int t;
00a3fdca 610
b12defdc 611 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
8e7c4729 612
00a3fdca
MD
613 info.object = object;
614 info.basei = *basei; /* skip up to this page index */
615 info.begi = count; /* max swap pages to destroy */
616 info.endi = count * 8; /* max swblocks to scan */
617
618 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
619 swap_pager_condfree_callback, &info);
620 *basei = info.basei;
08fb7a9d
MD
621
622 /*
623 * Take the higher difference swblocks vs pages
624 */
625 n = count - (int)info.begi;
626 t = count * 8 - (int)info.endi;
627 if (n < t)
628 n = t;
629 if (n < 1)
630 n = 1;
631 return(n);
00a3fdca
MD
632}
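
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * Standalone model of the return-value heuristic described in the
 * comment above swap_pager_condfree(): report the larger of the two
 * progress measures (pages actually freed vs. swblocks scanned),
 * clamped to at least 1.  The begi/endi values below are made up.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>

int
main(void)
{
	int count = 16;
	int begi = 10;		/* pretend 6 of the 16 requested pages were freed */
	int endi = 96;		/* pretend 32 of the count*8 swblocks were scanned */
	int n, t;

	n = count - begi;	/* pages freed */
	t = count * 8 - endi;	/* swblocks scanned */
	if (n < t)
		n = t;
	if (n < 1)
		n = 1;
	printf("report %d blocks of progress\n", n);	/* prints 32 */
	return(0);
}
#endif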
633
634/*
635 * The idea is to free whole meta-block to avoid fragmenting
636 * the swap space or disk I/O. We only do this if NO VM pages
637 * are present.
638 *
639 * We do not have to deal with clearing PG_SWAPPED in related VM
640 * pages because there are no related VM pages.
8e7c4729 641 *
b12defdc 642 * The caller must hold the object.
00a3fdca
MD
643 */
644static int
645swap_pager_condfree_callback(struct swblock *swap, void *data)
646{
647 struct swfreeinfo *info = data;
648 vm_object_t object = info->object;
649 int i;
650
651 for (i = 0; i < SWAP_META_PAGES; ++i) {
652 if (vm_page_lookup(object, swap->swb_index + i))
653 break;
654 }
655 info->basei = swap->swb_index + SWAP_META_PAGES;
656 if (i == SWAP_META_PAGES) {
657 info->begi -= swap->swb_count;
658 swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
659 }
660 --info->endi;
661 if ((int)info->begi < 0 || (int)info->endi < 0)
662 return(-1);
d2d8515b 663 lwkt_yield();
00a3fdca
MD
664 return(0);
665}
666
096e95c0
MD
667/*
668 * Called by vm_page_alloc() when a new VM page is inserted
669 * into a VM object. Checks whether swap has been assigned to
670 * the page and sets PG_SWAPPED as necessary.
8e7c4729
MD
671 *
672 * No requirements.
096e95c0
MD
673 */
674void
675swap_pager_page_inserted(vm_page_t m)
676{
677 if (m->object->swblock_count) {
b12defdc 678 vm_object_hold(m->object);
096e95c0
MD
679 if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
680 vm_page_flag_set(m, PG_SWAPPED);
b12defdc 681 vm_object_drop(m->object);
096e95c0
MD
682 }
683}
684
984263bc
MD
685/*
686 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
687 *
688 * Assigns swap blocks to the specified range within the object. The
689 * swap blocks are not zeroed. Any previous swap assignment is destroyed.
690 *
691 * Returns 0 on success, -1 on failure.
8e7c4729
MD
692 *
693 * The caller is responsible for avoiding races in the specified range.
694 * No other requirements.
984263bc 695 */
984263bc
MD
696int
697swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
698{
984263bc 699 int n = 0;
651d8e75 700 swblk_t blk = SWAPBLK_NONE;
984263bc
MD
701 vm_pindex_t beg = start; /* save start index */
702
b12defdc
MD
703 vm_object_hold(object);
704
984263bc
MD
705 while (size) {
706 if (n == 0) {
707 n = BLIST_MAX_ALLOC;
096e95c0
MD
708 while ((blk = swp_pager_getswapspace(object, n)) ==
709 SWAPBLK_NONE)
710 {
984263bc
MD
711 n >>= 1;
712 if (n == 0) {
8d292090
MD
713 swp_pager_meta_free(object, beg,
714 start - beg);
b12defdc 715 vm_object_drop(object);
984263bc
MD
716 return(-1);
717 }
718 }
719 }
720 swp_pager_meta_build(object, start, blk);
721 --size;
722 ++start;
723 ++blk;
724 --n;
725 }
726 swp_pager_meta_free(object, start, n);
b12defdc 727 vm_object_drop(object);
984263bc
MD
728 return(0);
729}
730
731/*
732 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
733 * and destroy the source.
734 *
735 * Copy any valid swapblks from the source to the destination. In
736 * cases where both the source and destination have a valid swapblk,
737 * we keep the destination's.
738 *
739 * This routine is allowed to block. It may block allocating metadata
740 * indirectly through swp_pager_meta_build() or if paging is still in
741 * progress on the source.
742 *
984263bc
MD
743 * XXX vm_page_collapse() kinda expects us not to block because we
744 * supposedly do not need to allocate memory, but for the moment we
745 * *may* have to get a little memory from the zone allocator, but
746 * it is taken from the interrupt memory. We should be ok.
747 *
748 * The source object contains no vm_page_t's (which is just as well)
984263bc
MD
749 * The source object is of type OBJT_SWAP.
750 *
b12defdc 751 * The source and destination objects must be held by the caller.
984263bc 752 */
984263bc 753void
57e43348 754swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
8d292090 755 vm_pindex_t base_index, int destroysource)
984263bc
MD
756{
757 vm_pindex_t i;
984263bc 758
b12defdc
MD
759 ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
760 ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));
984263bc 761
984263bc
MD
762 /*
763 * transfer source to destination.
764 */
984263bc 765 for (i = 0; i < dstobject->size; ++i) {
651d8e75 766 swblk_t dstaddr;
984263bc
MD
767
768 /*
769 * Locate (without changing) the swapblk on the destination,
770 * unless it is invalid in which case free it silently, or
771 * if the destination is a resident page, in which case the
772 * source is thrown away.
773 */
984263bc
MD
774 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
775
776 if (dstaddr == SWAPBLK_NONE) {
777 /*
778 * Destination has no swapblk and is not resident,
779 * copy source.
780 */
651d8e75 781 swblk_t srcaddr;
984263bc 782
8d292090
MD
783 srcaddr = swp_pager_meta_ctl(srcobject,
784 base_index + i, SWM_POP);
984263bc
MD
785
786 if (srcaddr != SWAPBLK_NONE)
787 swp_pager_meta_build(dstobject, i, srcaddr);
788 } else {
789 /*
790 * Destination has valid swapblk or it is represented
791 * by a resident page. We destroy the sourceblock.
792 */
8d292090 793 swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
984263bc
MD
794 }
795 }
796
797 /*
798 * Free left over swap blocks in source.
799 *
800 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
801 * double-remove the object from the swap queues.
802 */
984263bc 803 if (destroysource) {
984263bc
MD
804 /*
805 * Reverting the type is not necessary, the caller is going
806 * to destroy srcobject directly, but I'm doing it here
807 * for consistency since we've removed the object from its
808 * queues.
809 */
96adc753 810 swp_pager_meta_free_all(srcobject);
8d292090
MD
811 if (srcobject->type == OBJT_SWAP)
812 srcobject->type = OBJT_DEFAULT;
984263bc 813 }
984263bc
MD
814}
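
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * The copy loop in swap_pager_copy() above keeps the destination's
 * swap block when one is already assigned (or the page is resident)
 * and otherwise moves the source's block over.  The disabled sketch
 * models only that keep-vs-move decision, with two arrays standing in
 * for the source and destination objects' metadata; values are made up.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>

#define NONE	(-1)

int
main(void)
{
	long src[4] = { 100, 101, NONE, 103 };
	long dst[4] = { NONE, 200, NONE, 203 };
	int i;

	for (i = 0; i < 4; ++i) {
		if (dst[i] == NONE)
			dst[i] = src[i];	/* move the source block over */
		else
			src[i] = NONE;		/* destination wins; drop source */
		printf("page %d: dst block %ld\n", i, dst[i]);
	}
	return(0);
}
#endif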
815
816/*
817 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
818 * the requested page.
819 *
820 * We determine whether good backing store exists for the requested
821 * page and return TRUE if it does, FALSE if it doesn't.
822 *
823 * If TRUE, we also try to determine how much valid, contiguous backing
824 * store exists before and after the requested page within a reasonable
825 * distance. We do not try to restrict it to the swap device stripe
826 * (that is handled in getpages/putpages). It probably isn't worth
827 * doing here.
8e7c4729
MD
828 *
829 * No requirements.
984263bc 830 */
984263bc 831boolean_t
1b9d3514 832swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
984263bc 833{
651d8e75 834 swblk_t blk0;
984263bc
MD
835
836 /*
837 * do we have good backing store at the requested index ?
838 */
b12defdc 839 vm_object_hold(object);
984263bc
MD
840 blk0 = swp_pager_meta_ctl(object, pindex, 0);
841
842 if (blk0 == SWAPBLK_NONE) {
b12defdc 843 vm_object_drop(object);
984263bc
MD
844 return (FALSE);
845 }
b12defdc 846 vm_object_drop(object);
984263bc
MD
847 return (TRUE);
848}
849
850/*
851 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
852 *
107e9bcc
MD
853 * This removes any associated swap backing store, whether valid or
854 * not, from the page. This operates on any VM object, not just OBJT_SWAP
855 * objects.
984263bc 856 *
107e9bcc
MD
857 * This routine is typically called when a page is made dirty, at
858 * which point any associated swap can be freed. MADV_FREE also
859 * calls us in a special-case situation
984263bc 860 *
107e9bcc
MD
861 * NOTE!!! If the page is clean and the swap was valid, the caller
862 * should make the page dirty before calling this routine. This routine
863 * does NOT change the m->dirty status of the page. Also: MADV_FREE
864 * depends on it.
984263bc 865 *
135b4b20 866 * The page must be busied or soft-busied.
b12defdc 867 * The caller can hold the object to avoid blocking, else we might block.
8e7c4729 868 * No other requirements.
984263bc 869 */
107e9bcc 870void
57e43348 871swap_pager_unswapped(vm_page_t m)
984263bc 872{
67803f3e 873 if (m->flags & PG_SWAPPED) {
b12defdc 874 vm_object_hold(m->object);
135b4b20 875 KKASSERT(m->flags & PG_SWAPPED);
67803f3e
MD
876 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
877 vm_page_flag_clear(m, PG_SWAPPED);
b12defdc 878 vm_object_drop(m->object);
67803f3e 879 }
984263bc
MD
880}
881
882/*
883 * SWAP_PAGER_STRATEGY() - read, write, free blocks
884 *
107e9bcc
MD
885 * This implements a VM OBJECT strategy function using swap backing store.
886 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
887 * types.
888 *
889 * This is intended to be a cacheless interface (i.e. caching occurs at
890 * higher levels), and is also used as a swap-based SSD cache for vnode
891 * and device objects.
892 *
893 * All I/O goes directly to and from the swap device.
984263bc 894 *
107e9bcc
MD
895 * We currently attempt to run I/O synchronously or asynchronously as
896 * the caller requests. This isn't perfect because we lose error
897 * sequencing when we run multiple ops in parallel to satisfy a request.
898 * But this is swap, so we let it all hang out.
8e7c4729
MD
899 *
900 * No requirements.
984263bc 901 */
107e9bcc 902void
81b5c339 903swap_pager_strategy(vm_object_t object, struct bio *bio)
984263bc 904{
81b5c339
MD
905 struct buf *bp = bio->bio_buf;
906 struct bio *nbio;
984263bc 907 vm_pindex_t start;
54078292 908 vm_pindex_t biox_blkno = 0;
984263bc 909 int count;
984263bc 910 char *data;
ae8e83e6
MD
911 struct bio *biox;
912 struct buf *bufx;
7fa8d3ba 913#if 0
81b5c339 914 struct bio_track *track;
7fa8d3ba 915#endif
81b5c339 916
7fa8d3ba 917#if 0
81b5c339
MD
918 /*
919 * tracking for swapdev vnode I/Os
920 */
10f3fee5 921 if (bp->b_cmd == BUF_CMD_READ)
81b5c339
MD
922 track = &swapdev_vp->v_track_read;
923 else
924 track = &swapdev_vp->v_track_write;
7fa8d3ba 925#endif
984263bc
MD
926
927 if (bp->b_bcount & PAGE_MASK) {
928 bp->b_error = EINVAL;
929 bp->b_flags |= B_ERROR | B_INVAL;
81b5c339 930 biodone(bio);
973c11b9
MD
931 kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
932 "not page bounded\n",
933 bp, (long long)bio->bio_offset, (int)bp->b_bcount);
984263bc
MD
934 return;
935 }
936
937 /*
938 * Clear error indication, initialize page index, count, data pointer.
939 */
984263bc
MD
940 bp->b_error = 0;
941 bp->b_flags &= ~B_ERROR;
942 bp->b_resid = bp->b_bcount;
943
54078292 944 start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
984263bc
MD
945 count = howmany(bp->b_bcount, PAGE_SIZE);
946 data = bp->b_data;
947
984263bc 948 /*
10f3fee5 949 * Deal with BUF_CMD_FREEBLKS
984263bc 950 */
10f3fee5 951 if (bp->b_cmd == BUF_CMD_FREEBLKS) {
984263bc
MD
952 /*
953 * FREE PAGE(s) - destroy underlying swap that is no longer
954 * needed.
955 */
b12defdc 956 vm_object_hold(object);
984263bc 957 swp_pager_meta_free(object, start, count);
b12defdc 958 vm_object_drop(object);
984263bc 959 bp->b_resid = 0;
81b5c339 960 biodone(bio);
984263bc
MD
961 return;
962 }
963
81b5c339
MD
964 /*
965 * We need to be able to create a new cluster of I/O's. We cannot
966 * use the caller fields of the passed bio so push a new one.
967 *
968 * Because nbio is just a placeholder for the cluster links,
969 * we can biodone() the original bio instead of nbio to make
970 * things a bit more efficient.
971 */
972 nbio = push_bio(bio);
54078292 973 nbio->bio_offset = bio->bio_offset;
81b5c339
MD
974 nbio->bio_caller_info1.cluster_head = NULL;
975 nbio->bio_caller_info2.cluster_tail = NULL;
976
ae8e83e6
MD
977 biox = NULL;
978 bufx = NULL;
979
984263bc
MD
980 /*
981 * Execute read or write
982 */
b12defdc
MD
983 vm_object_hold(object);
984
984263bc 985 while (count > 0) {
651d8e75 986 swblk_t blk;
984263bc
MD
987
988 /*
989 * Obtain block. If block not found and writing, allocate a
990 * new block and build it into the object.
991 */
984263bc 992 blk = swp_pager_meta_ctl(object, start, 0);
10f3fee5 993 if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
096e95c0 994 blk = swp_pager_getswapspace(object, 1);
984263bc
MD
995 if (blk == SWAPBLK_NONE) {
996 bp->b_error = ENOMEM;
997 bp->b_flags |= B_ERROR;
998 break;
999 }
1000 swp_pager_meta_build(object, start, blk);
1001 }
1002
1003 /*
1004 * Do we have to flush our current collection? Yes if:
1005 *
1006 * - no swap block at this index
1007 * - swap block is not contiguous
1008 * - we cross a physical disk boundary in the
1009 * stripe.
1010 */
984263bc 1011 if (
54078292
MD
1012 biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
1013 ((biox_blkno ^ blk) & dmmax_mask)
984263bc
MD
1014 )
1015 ) {
10f3fee5 1016 if (bp->b_cmd == BUF_CMD_READ) {
12e4aaff 1017 ++mycpu->gd_cnt.v_swapin;
81b5c339 1018 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
984263bc 1019 } else {
12e4aaff 1020 ++mycpu->gd_cnt.v_swapout;
81b5c339
MD
1021 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
1022 bufx->b_dirtyend = bufx->b_bcount;
1023 }
1024
1025 /*
ae8e83e6 1026 * Finished with this buf.
81b5c339 1027 */
ae8e83e6
MD
1028 KKASSERT(bufx->b_bcount != 0);
1029 if (bufx->b_cmd != BUF_CMD_READ)
1030 bufx->b_dirtyend = bufx->b_bcount;
81b5c339
MD
1031 biox = NULL;
1032 bufx = NULL;
984263bc
MD
1033 }
1034
1035 /*
81b5c339 1036 * Add new swapblk to biox, instantiating biox if necessary.
984263bc
MD
1037 * Zero-fill reads are able to take a shortcut.
1038 */
984263bc
MD
1039 if (blk == SWAPBLK_NONE) {
1040 /*
1041 * We can only get here if we are reading. Since
1042 * we are at splvm() we can safely modify b_resid,
1043 * even if chain ops are in progress.
1044 */
1045 bzero(data, PAGE_SIZE);
1046 bp->b_resid -= PAGE_SIZE;
1047 } else {
81b5c339
MD
1048 if (biox == NULL) {
1049 /* XXX chain count > 4, wait to <= 4 */
1050
1051 bufx = getpbuf(NULL);
1052 biox = &bufx->b_bio1;
1053 cluster_append(nbio, bufx);
257c30fe 1054 bufx->b_flags |= (bp->b_flags & B_ORDERED);
10f3fee5 1055 bufx->b_cmd = bp->b_cmd;
81b5c339 1056 biox->bio_done = swap_chain_iodone;
54078292 1057 biox->bio_offset = (off_t)blk << PAGE_SHIFT;
81b5c339 1058 biox->bio_caller_info1.cluster_parent = nbio;
54078292 1059 biox_blkno = blk;
81b5c339
MD
1060 bufx->b_bcount = 0;
1061 bufx->b_data = data;
984263bc 1062 }
81b5c339 1063 bufx->b_bcount += PAGE_SIZE;
984263bc
MD
1064 }
1065 --count;
1066 ++start;
1067 data += PAGE_SIZE;
1068 }
b12defdc
MD
1069
1070 vm_object_drop(object);
984263bc
MD
1071
1072 /*
1073 * Flush out last buffer
1074 */
81b5c339 1075 if (biox) {
10f3fee5 1076 if (bufx->b_cmd == BUF_CMD_READ) {
12e4aaff 1077 ++mycpu->gd_cnt.v_swapin;
81b5c339 1078 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
984263bc 1079 } else {
12e4aaff 1080 ++mycpu->gd_cnt.v_swapout;
81b5c339
MD
1081 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
1082 bufx->b_dirtyend = bufx->b_bcount;
1083 }
ae8e83e6
MD
1084 KKASSERT(bufx->b_bcount);
1085 if (bufx->b_cmd != BUF_CMD_READ)
1086 bufx->b_dirtyend = bufx->b_bcount;
81b5c339 1087 /* biox, bufx = NULL */
984263bc
MD
1088 }
1089
1090 /*
ae8e83e6
MD
1091 * Now initiate all the I/O. Be careful looping on our chain as
1092 * I/O's may complete while we are still initiating them.
0a8aee15
MD
1093 *
1094 * If the request is a 100% sparse read no bios will be present
1095 * and we just biodone() the buffer.
984263bc 1096 */
ae8e83e6
MD
1097 nbio->bio_caller_info2.cluster_tail = NULL;
1098 bufx = nbio->bio_caller_info1.cluster_head;
1099
0a8aee15
MD
1100 if (bufx) {
1101 while (bufx) {
1102 biox = &bufx->b_bio1;
1103 BUF_KERNPROC(bufx);
1104 bufx = bufx->b_cluster_next;
1105 vn_strategy(swapdev_vp, biox);
1106 }
1107 } else {
1108 biodone(bio);
984263bc 1109 }
ae8e83e6
MD
1110
1111 /*
1112 * Completion of the cluster will also call biodone_chain(nbio).
1113 * We never call biodone(nbio) so we don't have to worry about
1114 * setting up a bio_done callback. It's handled in the sub-IO.
1115 */
1116 /**/
984263bc
MD
1117}
1118
8e7c4729
MD
1119/*
1120 * biodone callback
1121 *
1122 * No requirements.
1123 */
81b5c339
MD
1124static void
1125swap_chain_iodone(struct bio *biox)
1126{
1127 struct buf **nextp;
1128 struct buf *bufx; /* chained sub-buffer */
1129 struct bio *nbio; /* parent nbio with chain glue */
1130 struct buf *bp; /* original bp associated with nbio */
ae8e83e6 1131 int chain_empty;
81b5c339
MD
1132
1133 bufx = biox->bio_buf;
1134 nbio = biox->bio_caller_info1.cluster_parent;
1135 bp = nbio->bio_buf;
1136
1137 /*
1138 * Update the original buffer
1139 */
1140 KKASSERT(bp != NULL);
1141 if (bufx->b_flags & B_ERROR) {
ae8e83e6 1142 atomic_set_int(&bufx->b_flags, B_ERROR);
77912481 1143 bp->b_error = bufx->b_error; /* race ok */
81b5c339 1144 } else if (bufx->b_resid != 0) {
ae8e83e6 1145 atomic_set_int(&bufx->b_flags, B_ERROR);
77912481 1146 bp->b_error = EINVAL; /* race ok */
81b5c339 1147 } else {
ae8e83e6 1148 atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
81b5c339
MD
1149 }
1150
1151 /*
ae8e83e6 1152 * Remove us from the chain.
81b5c339 1153 */
287a8577 1154 spin_lock(&bp->b_lock.lk_spinlock);
81b5c339
MD
1155 nextp = &nbio->bio_caller_info1.cluster_head;
1156 while (*nextp != bufx) {
1157 KKASSERT(*nextp != NULL);
1158 nextp = &(*nextp)->b_cluster_next;
1159 }
1160 *nextp = bufx->b_cluster_next;
ae8e83e6 1161 chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
287a8577 1162 spin_unlock(&bp->b_lock.lk_spinlock);
81b5c339
MD
1163
1164 /*
ae8e83e6
MD
1165 * Clean up bufx. If the chain is now empty we finish out
1166 * the parent. Note that we may be racing other completions
1167 * so we must use the chain_empty status from above.
81b5c339 1168 */
ae8e83e6 1169 if (chain_empty) {
81b5c339 1170 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
ae8e83e6 1171 atomic_set_int(&bp->b_flags, B_ERROR);
81b5c339
MD
1172 bp->b_error = EINVAL;
1173 }
ae8e83e6 1174 biodone_chain(nbio);
81b5c339 1175 }
81b5c339
MD
1176 relpbuf(bufx, NULL);
1177}
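
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * swap_chain_iodone() above unlinks bufx from the cluster chain with a
 * pointer-to-pointer walk, which needs no special case for removing the
 * head element.  The disabled, standalone sketch shows the same unlink
 * idiom on a plain singly linked list; the node type is made up.
 */
#if 0	/* illustrative only; never compiled */
#include <stddef.h>
#include <stdio.h>

struct node {
	int		id;
	struct node	*next;
};

int
main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *head = &a;
	struct node *victim = &b;
	struct node **nextp;

	/* walk the link pointers themselves until one points at victim */
	nextp = &head;
	while (*nextp != victim)
		nextp = &(*nextp)->next;
	*nextp = victim->next;

	for (victim = head; victim != NULL; victim = victim->next)
		printf("node %d\n", victim->id);	/* prints 1 then 3 */
	return(0);
}
#endif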
1178
984263bc 1179/*
5d5c5831 1180 * SWAP_PAGER_GETPAGES() - bring page in from swap
984263bc 1181 *
5d5c5831
MD
1182 * The requested page may have to be brought in from swap. Calculate the
1183 * swap block and bring in additional pages if possible. All pages must
1184 * have contiguous swap block assignments and reside in the same object.
984263bc 1185 *
5d5c5831
MD
1186 * The caller has a single vm_object_pip_add() reference prior to
1187 * calling us and we should return with the same.
984263bc 1188 *
5d5c5831
MD
1189 * The caller has BUSY'd the page. We should return with (*mpp) left busy,
1190 * and any additional pages unbusied.
984263bc 1191 *
5d5c5831
MD
1192 * If the caller encounters a PG_RAM page it will pass it to us even though
1193 * it may be valid and dirty. We cannot overwrite the page in this case!
1194 * The case is used to allow us to issue pure read-aheads.
1195 *
1196 * NOTE! XXX This code does not entirely pipeline yet due to the fact that
1197 * the PG_RAM page is validated at the same time as mreq. What we
1198 * really need to do is issue a separate read-ahead pbuf.
8e7c4729
MD
1199 *
1200 * No requirements.
984263bc 1201 */
984263bc 1202static int
1b9d3514 1203swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
984263bc
MD
1204{
1205 struct buf *bp;
81b5c339 1206 struct bio *bio;
984263bc 1207 vm_page_t mreq;
5d5c5831
MD
1208 vm_page_t m;
1209 vm_offset_t kva;
651d8e75 1210 swblk_t blk;
984263bc
MD
1211 int i;
1212 int j;
5d5c5831 1213 int raonly;
b12defdc
MD
1214 int error;
1215 u_int32_t flags;
5d5c5831 1216 vm_page_t marray[XIO_INTERNAL_PAGES];
984263bc 1217
1b9d3514 1218 mreq = *mpp;
984263bc 1219
b12defdc 1220 vm_object_hold(object);
984263bc
MD
1221 if (mreq->object != object) {
1222 panic("swap_pager_getpages: object mismatch %p/%p",
1223 object,
1224 mreq->object
1225 );
1226 }
17cde63e 1227
984263bc 1228 /*
5d5c5831
MD
1229 * We don't want to overwrite a fully valid page as it might be
1230 * dirty. This case can occur when e.g. vm_fault hits a perfectly
1231 * valid page with PG_RAM set.
984263bc 1232 *
5d5c5831
MD
1233 * In this case we see if the next page is a suitable page-in
1234 * candidate and if it is we issue read-ahead. PG_RAM will be
1235 * set on the last page of the read-ahead to continue the pipeline.
1236 */
1237 if (mreq->valid == VM_PAGE_BITS_ALL) {
b12defdc
MD
1238 if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
1239 vm_object_drop(object);
5d5c5831 1240 return(VM_PAGER_OK);
b12defdc 1241 }
3bb7eedb
MD
1242 blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
1243 if (blk == SWAPBLK_NONE) {
b12defdc 1244 vm_object_drop(object);
5d5c5831
MD
1245 return(VM_PAGER_OK);
1246 }
b12defdc
MD
1247 m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
1248 TRUE, &error);
1249 if (error) {
1250 vm_object_drop(object);
1251 return(VM_PAGER_OK);
1252 } else if (m == NULL) {
d2d8515b
MD
1253 /*
1254 * Use VM_ALLOC_QUICK to avoid blocking on cache
1255 * page reuse.
1256 */
5d5c5831
MD
1257 m = vm_page_alloc(object, mreq->pindex + 1,
1258 VM_ALLOC_QUICK);
1259 if (m == NULL) {
b12defdc 1260 vm_object_drop(object);
5d5c5831
MD
1261 return(VM_PAGER_OK);
1262 }
1263 } else {
b12defdc
MD
1264 if (m->valid) {
1265 vm_page_wakeup(m);
1266 vm_object_drop(object);
5d5c5831
MD
1267 return(VM_PAGER_OK);
1268 }
3bb7eedb 1269 vm_page_unqueue_nowakeup(m);
5d5c5831 1270 }
b12defdc 1271 /* page is busy */
5d5c5831
MD
1272 mreq = m;
1273 raonly = 1;
5d5c5831
MD
1274 } else {
1275 raonly = 0;
1276 }
1277
1278 /*
1279 * Try to block-read contiguous pages from swap if sequential,
1280 * otherwise just read one page. Contiguous pages from swap must
1281 * reside within a single device stripe because the I/O cannot be
1282 * broken up across multiple stripes.
1283 *
1284 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
1285 * set up such that the case(s) are handled implicitly.
984263bc 1286 */
984263bc 1287 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
5d5c5831 1288 marray[0] = mreq;
984263bc 1289
5d5c5831
MD
1290 for (i = 1; swap_burst_read &&
1291 i < XIO_INTERNAL_PAGES &&
1292 mreq->pindex + i < object->size; ++i) {
651d8e75 1293 swblk_t iblk;
984263bc 1294
5d5c5831
MD
1295 iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
1296 if (iblk != blk + i)
984263bc
MD
1297 break;
1298 if ((blk ^ iblk) & dmmax_mask)
1299 break;
b12defdc
MD
1300 m = vm_page_lookup_busy_try(object, mreq->pindex + i,
1301 TRUE, &error);
1302 if (error) {
1303 break;
1304 } else if (m == NULL) {
d2d8515b
MD
1305 /*
1306 * Use VM_ALLOC_QUICK to avoid blocking on cache
1307 * page reuse.
1308 */
5d5c5831
MD
1309 m = vm_page_alloc(object, mreq->pindex + i,
1310 VM_ALLOC_QUICK);
1311 if (m == NULL)
1312 break;
1313 } else {
b12defdc
MD
1314 if (m->valid) {
1315 vm_page_wakeup(m);
5d5c5831 1316 break;
b12defdc 1317 }
3bb7eedb 1318 vm_page_unqueue_nowakeup(m);
5d5c5831 1319 }
b12defdc 1320 /* page is busy */
5d5c5831 1321 marray[i] = m;
984263bc 1322 }
5d5c5831
MD
1323 if (i > 1)
1324 vm_page_flag_set(marray[i - 1], PG_RAM);
984263bc 1325
984263bc 1326 /*
5d5c5831
MD
1327 * If mreq is the requested page and we have nothing to do return
1328 * VM_PAGER_FAIL. If raonly is set mreq is just another read-ahead
1329 * page and must be cleaned up.
984263bc 1330 */
5d5c5831
MD
1331 if (blk == SWAPBLK_NONE) {
1332 KKASSERT(i == 1);
1333 if (raonly) {
1334 vnode_pager_freepage(mreq);
b12defdc 1335 vm_object_drop(object);
5d5c5831
MD
1336 return(VM_PAGER_OK);
1337 } else {
b12defdc 1338 vm_object_drop(object);
5d5c5831
MD
1339 return(VM_PAGER_FAIL);
1340 }
1341 }
984263bc
MD
1342
1343 /*
5d5c5831 1344 * map our page(s) into kva for input
984263bc 1345 */
9a82e536 1346 bp = getpbuf_kva(&nsw_rcount);
81b5c339 1347 bio = &bp->b_bio1;
5d5c5831
MD
1348 kva = (vm_offset_t) bp->b_kvabase;
1349 bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
1350 pmap_qenter(kva, bp->b_xio.xio_pages, i);
984263bc 1351
5d5c5831
MD
1352 bp->b_data = (caddr_t)kva;
1353 bp->b_bcount = PAGE_SIZE * i;
1354 bp->b_xio.xio_npages = i;
81b5c339 1355 bio->bio_done = swp_pager_async_iodone;
5d5c5831 1356 bio->bio_offset = (off_t)blk << PAGE_SHIFT;
8aa92e4b 1357 bio->bio_caller_info1.index = SWBIO_READ;
984263bc 1358
5d5c5831
MD
1359 /*
1360 * Set index. If raonly set the index beyond the array so all
1361 * the pages are treated the same, otherwise the original mreq is
1362 * at index 0.
1363 */
1364 if (raonly)
1365 bio->bio_driver_info = (void *)(intptr_t)i;
1366 else
1367 bio->bio_driver_info = (void *)(intptr_t)0;
984263bc 1368
5d5c5831
MD
1369 for (j = 0; j < i; ++j)
1370 vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG);
984263bc 1371
12e4aaff 1372 mycpu->gd_cnt.v_swapin++;
54f51aeb 1373 mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
984263bc
MD
1374
1375 /*
1376 * We still hold the lock on mreq, and our automatic completion routine
1377 * does not remove it.
1378 */
3bb7eedb 1379 vm_object_pip_add(object, bp->b_xio.xio_npages);
984263bc
MD
1380
1381 /*
1382 * perform the I/O. NOTE!!! bp cannot be considered valid after
1383 * this point because we automatically release it on completion.
1384 * Instead, we look at the one page we are interested in which we
1385 * still hold a lock on even through the I/O completion.
1386 *
1387 * The other pages in our m[] array are also released on completion,
1388 * so we cannot assume they are valid anymore either.
984263bc 1389 */
10f3fee5 1390 bp->b_cmd = BUF_CMD_READ;
984263bc 1391 BUF_KERNPROC(bp);
81b5c339 1392 vn_strategy(swapdev_vp, bio);
984263bc
MD
1393
1394 /*
3bb7eedb 1395 * Wait for the page we want to complete. PG_SWAPINPROG is always
984263bc
MD
1396 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1397 * is set in the meta-data.
5d5c5831
MD
1398 *
1399 * If this is a read-ahead only we return immediately without
1400 * waiting for I/O.
984263bc 1401 */
b12defdc
MD
1402 if (raonly) {
1403 vm_object_drop(object);
5d5c5831 1404 return(VM_PAGER_OK);
b12defdc 1405 }
984263bc 1406
3bb7eedb
MD
1407 /*
1408 * Read-ahead includes originally requested page case.
1409 */
b12defdc
MD
1410 for (;;) {
1411 flags = mreq->flags;
1412 cpu_ccfence();
1413 if ((flags & PG_SWAPINPROG) == 0)
1414 break;
1415 tsleep_interlock(mreq, 0);
1416 if (!atomic_cmpset_int(&mreq->flags, flags,
1417 flags | PG_WANTED | PG_REFERENCED)) {
1418 continue;
1419 }
12e4aaff 1420 mycpu->gd_cnt.v_intrans++;
b12defdc 1421 if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) {
086c1d7e 1422 kprintf(
81b5c339 1423 "swap_pager: indefinite wait buffer: "
973c11b9
MD
1424 " offset: %lld, size: %ld\n",
1425 (long long)bio->bio_offset,
1426 (long)bp->b_bcount
984263bc
MD
1427 );
1428 }
1429 }
984263bc
MD
1430
1431 /*
1432 * mreq is left busied after completion, but all the other pages
1433 * are freed. If we had an unrecoverable read error the page will
1434 * not be valid.
1435 */
b12defdc 1436 vm_object_drop(object);
5d5c5831 1437 if (mreq->valid != VM_PAGE_BITS_ALL)
984263bc 1438 return(VM_PAGER_ERROR);
5d5c5831 1439 else
984263bc 1440 return(VM_PAGER_OK);
984263bc
MD
1441
1442 /*
1443 * A final note: in a low swap situation, we cannot deallocate swap
1444 * and mark a page dirty here because the caller is likely to mark
1445 * the page clean when we return, causing the page to possibly revert
1446 * to all-zero's later.
1447 */
1448}
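
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * The burst-read loop in swap_pager_getpage() above stops extending the
 * cluster when the next swap block is not contiguous (iblk != blk + i)
 * or when it would cross a dmmax stripe ((blk ^ iblk) & dmmax_mask).
 * The disabled sketch exercises only the stripe test with made-up block
 * numbers; contiguity is assumed for simplicity.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>

int
main(void)
{
	long dmmax_mask = ~(32L - 1);	/* dmmax = 32 as initialized above */
	long blk = 28;			/* swap block of the requested page */
	long iblk;
	int i;

	for (i = 1; i < 8; ++i) {
		iblk = blk + i;		/* pretend the blocks are contiguous */
		if ((blk ^ iblk) & dmmax_mask) {
			printf("stop at page %d: crosses a stripe\n", i);
			break;
		}
	}
	return(0);
}
#endif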
1449
1450/*
1451 * swap_pager_putpages:
1452 *
1453 * Assign swap (if necessary) and initiate I/O on the specified pages.
1454 *
1455 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1456 * are automatically converted to SWAP objects.
1457 *
81b5c339 1458 * In a low memory situation we may block in vn_strategy(), but the new
984263bc
MD
1459 * vm_page reservation system coupled with properly written VFS devices
1460 * should ensure that no low-memory deadlock occurs. This is an area
1461 * which needs work.
1462 *
1463 * The parent has N vm_object_pip_add() references prior to
1464 * calling us and will remove references for rtvals[] that are
1465 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1466 * completion.
1467 *
1468 * The parent has soft-busy'd the pages it passes us and will unbusy
1469 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1470 * We need to unbusy the rest on I/O completion.
8e7c4729
MD
1471 *
1472 * No requirements.
984263bc 1473 */
984263bc 1474void
17cde63e
MD
1475swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1476 boolean_t sync, int *rtvals)
984263bc
MD
1477{
1478 int i;
1479 int n = 0;
1480
b12defdc
MD
1481 vm_object_hold(object);
1482
984263bc
MD
1483 if (count && m[0]->object != object) {
1484 panic("swap_pager_getpages: object mismatch %p/%p",
1485 object,
1486 m[0]->object
1487 );
1488 }
17cde63e 1489
984263bc
MD
1490 /*
1491 * Step 1
1492 *
1493 * Turn object into OBJT_SWAP
1494 * check for bogus sysops
1495 * force sync if not pageout process
1496 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);
984263bc 1501
bc6dffab 1502 if (curthread != pagethread)
984263bc
MD
1503 sync = TRUE;
1504
1505 /*
1506 * Step 2
1507 *
1508 * Update nsw parameters from swap_async_max sysctl values.
1509 * Do not let the sysop crash the machine with bogus numbers.
1510 */
984263bc
MD
1511 if (swap_async_max != nsw_wcount_async_max) {
1512 int n;
984263bc
MD
1513
1514 /*
1515 * limit range
1516 */
1517 if ((n = swap_async_max) > nswbuf / 2)
1518 n = nswbuf / 2;
1519 if (n < 1)
1520 n = 1;
1521 swap_async_max = n;
1522
1523 /*
1524 * Adjust difference ( if possible ). If the current async
1525 * count is too low, we may not be able to make the adjustment
1526 * at this time.
b12defdc
MD
1527 *
1528 * vm_token needed for nsw_wcount sleep interlock
984263bc 1529 */
8e7c4729 1530 lwkt_gettoken(&vm_token);
984263bc
MD
1531 n -= nsw_wcount_async_max;
1532 if (nsw_wcount_async + n >= 0) {
984263bc 1533 nsw_wcount_async_max += n;
7f86d367 1534 pbuf_adjcount(&nsw_wcount_async, n);
984263bc 1535 }
8e7c4729 1536 lwkt_reltoken(&vm_token);
984263bc
MD
1537 }
1538
1539 /*
1540 * Step 3
1541 *
1542 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1543 * The page is left dirty until the pageout operation completes
1544 * successfully.
1545 */
1546
1547 for (i = 0; i < count; i += n) {
984263bc 1548 struct buf *bp;
81b5c339 1549 struct bio *bio;
651d8e75 1550 swblk_t blk;
81b5c339 1551 int j;
984263bc
MD
1552
1553 /*
1554 * Maximum I/O size is limited by a number of factors.
1555 */
1556
1557 n = min(BLIST_MAX_ALLOC, count - i);
1558 n = min(n, nsw_cluster_max);
1559
8e7c4729 1560 lwkt_gettoken(&vm_token);
984263bc
MD
1561
1562 /*
1563 * Get biggest block of swap we can. If we fail, fall
1564 * back and try to allocate a smaller block. Don't go
1565 * overboard trying to allocate space if it would overly
1566 * fragment swap.
1567 */
1568 while (
096e95c0 1569 (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE &&
984263bc
MD
1570 n > 4
1571 ) {
1572 n >>= 1;
1573 }
1574 if (blk == SWAPBLK_NONE) {
1575 for (j = 0; j < n; ++j)
1576 rtvals[i+j] = VM_PAGER_FAIL;
8e7c4729 1577 lwkt_reltoken(&vm_token);
984263bc
MD
1578 continue;
1579 }
1580
1581 /*
1582 * The I/O we are constructing cannot cross a physical
1583 * disk boundry in the swap stripe. Note: we are still
1584 * at splvm().
1585 */
1586 if ((blk ^ (blk + n)) & dmmax_mask) {
1587 j = ((blk + dmmax) & dmmax_mask) - blk;
096e95c0 1588 swp_pager_freeswapspace(object, blk + j, n - j);
984263bc
MD
1589 n = j;
1590 }
1591
1592 /*
1593 * All I/O parameters have been satisfied, build the I/O
1594 * request and assign the swap space.
984263bc 1595 */
10f3fee5 1596 if (sync == TRUE)
9a82e536 1597 bp = getpbuf_kva(&nsw_wcount_sync);
10f3fee5 1598 else
9a82e536 1599 bp = getpbuf_kva(&nsw_wcount_async);
81b5c339 1600 bio = &bp->b_bio1;
984263bc 1601
b12defdc
MD
1602 lwkt_reltoken(&vm_token);
1603
984263bc
MD
1604 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1605
984263bc 1606 bp->b_bcount = PAGE_SIZE * n;
54078292 1607 bio->bio_offset = (off_t)blk << PAGE_SHIFT;
984263bc 1608
984263bc
MD
1609 for (j = 0; j < n; ++j) {
1610 vm_page_t mreq = m[i+j];
1611
096e95c0
MD
1612 swp_pager_meta_build(mreq->object, mreq->pindex,
1613 blk + j);
1614 if (object->type == OBJT_SWAP)
1615 vm_page_dirty(mreq);
984263bc
MD
1616 rtvals[i+j] = VM_PAGER_OK;
1617
1618 vm_page_flag_set(mreq, PG_SWAPINPROG);
54f51aeb 1619 bp->b_xio.xio_pages[j] = mreq;
984263bc 1620 }
54f51aeb 1621 bp->b_xio.xio_npages = n;
984263bc 1622
12e4aaff 1623 mycpu->gd_cnt.v_swapout++;
54f51aeb 1624 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
984263bc 1625
10f3fee5
MD
1626 bp->b_dirtyoff = 0; /* req'd for NFS */
1627 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */
1628 bp->b_cmd = BUF_CMD_WRITE;
8aa92e4b 1629 bio->bio_caller_info1.index = SWBIO_WRITE;
10f3fee5 1630
984263bc
MD
1631 /*
1632 * asynchronous
984263bc 1633 */
984263bc 1634 if (sync == FALSE) {
81b5c339 1635 bio->bio_done = swp_pager_async_iodone;
984263bc 1636 BUF_KERNPROC(bp);
81b5c339 1637 vn_strategy(swapdev_vp, bio);
984263bc
MD
1638
1639 for (j = 0; j < n; ++j)
1640 rtvals[i+j] = VM_PAGER_PEND;
1641 continue;
1642 }
1643
1644 /*
ae8e83e6
MD
1645 * Issue synchronously.
1646 *
984263bc
MD
1647 * Wait for the sync I/O to complete, then update rtvals.
1648 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1649 * our async completion routine at the end, thus avoiding a
1650 * double-free.
1651 */
8aa92e4b 1652 bio->bio_caller_info1.index |= SWBIO_SYNC;
ae8e83e6
MD
1653 bio->bio_done = biodone_sync;
1654 bio->bio_flags |= BIO_SYNC;
1655 vn_strategy(swapdev_vp, bio);
1656 biowait(bio, "swwrt");
984263bc
MD
1657
1658 for (j = 0; j < n; ++j)
1659 rtvals[i+j] = VM_PAGER_PEND;
1660
1661 /*
1662 * Now that we are through with the bp, we can call the
1663 * normal async completion, which frees everything up.
1664 */
81b5c339 1665 swp_pager_async_iodone(bio);
984263bc 1666 }
b12defdc 1667 vm_object_drop(object);
984263bc
MD
1668}
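
/*
 * [Editor's illustrative sketch -- not part of the original source.]
 *
 * Standalone model of the stripe-boundary clipping in the putpages loop
 * above: when an allocated run of n blocks starting at blk would cross
 * a dmmax stripe, only the part up to the next stripe boundary is kept
 * (j = ((blk + dmmax) & dmmax_mask) - blk) and the remainder is given
 * back.  The block number and cluster size below are made up.
 */
#if 0	/* illustrative only; never compiled */
#include <stdio.h>

int
main(void)
{
	int dmmax = 32;
	int dmmax_mask = ~(dmmax - 1);
	int blk = 56;		/* start of the allocated run */
	int n = 16;		/* requested cluster size */
	int j;

	if ((blk ^ (blk + n)) & dmmax_mask) {
		/* keep only the part up to the next stripe boundary */
		j = ((blk + dmmax) & dmmax_mask) - blk;
		printf("clip cluster from %d to %d pages\n", n, j);
		n = j;
	}
	return(0);
}
#endif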
1669
8e7c4729
MD
1670/*
1671 * No requirements.
1672 */
c84c24da
MD
1673void
1674swap_pager_newswap(void)
1675{
1676 swp_sizecheck();
1677}
1678
984263bc
MD
1679/*
1680 * swp_pager_async_iodone:
1681 *
1682 * Completion routine for asynchronous reads and writes from/to swap.
1683 * Also called manually by synchronous code to finish up a bp.
1684 *
 1685 * For READ operations the pages are held PG_BUSY; we unbusy all of
 1686 * them except the 'main' request page. For WRITE operations the
 1687 * pages are held via vm_page_t->busy; we unbusy all of them (we can
 1688 * do this because we marked them all VM_PAGER_PEND on return from
 1689 * putpages).
1690 *
1691 * This routine may not block.
8e7c4729
MD
1692 *
1693 * No requirements.
984263bc 1694 */
984263bc 1695static void
81b5c339 1696swp_pager_async_iodone(struct bio *bio)
984263bc 1697{
81b5c339 1698 struct buf *bp = bio->bio_buf;
984263bc 1699 vm_object_t object = NULL;
81b5c339 1700 int i;
10f3fee5 1701 int *nswptr;
984263bc
MD
1702
1703 /*
1704 * report error
1705 */
984263bc 1706 if (bp->b_flags & B_ERROR) {
086c1d7e 1707 kprintf(
54078292 1708 "swap_pager: I/O error - %s failed; offset %lld,"
984263bc 1709 "size %ld, error %d\n",
8aa92e4b
MD
1710 ((bio->bio_caller_info1.index & SWBIO_READ) ?
1711 "pagein" : "pageout"),
973c11b9 1712 (long long)bio->bio_offset,
984263bc
MD
1713 (long)bp->b_bcount,
1714 bp->b_error
1715 );
1716 }
1717
1718 /*
 1719 * Set the object from the first page of the request.
1720 */
54f51aeb
HP
1721 if (bp->b_xio.xio_npages)
1722 object = bp->b_xio.xio_pages[0]->object;
984263bc
MD
1723
1724 /*
1725 * remove the mapping for kernel virtual
1726 */
54f51aeb 1727 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
984263bc
MD
1728
1729 /*
1730 * cleanup pages. If an error occurs writing to swap, we are in
1731 * very serious trouble. If it happens to be a disk error, though,
1732 * we may be able to recover by reassigning the swap later on. So
1733 * in this case we remove the m->swapblk assignment for the page
 1734 * but do not free it in the rlist. The erroneous block(s) are thus
1735 * never reallocated as swap. Redirty the page and continue.
1736 */
54f51aeb
HP
1737 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1738 vm_page_t m = bp->b_xio.xio_pages[i];
984263bc 1739
984263bc
MD
1740 if (bp->b_flags & B_ERROR) {
1741 /*
1742 * If an error occurs I'd love to throw the swapblk
1743 * away without freeing it back to swapspace, so it
1744 * can never be used again. But I can't from an
1745 * interrupt.
1746 */
1747
8aa92e4b 1748 if (bio->bio_caller_info1.index & SWBIO_READ) {
984263bc
MD
1749 /*
1750 * When reading, reqpage needs to stay
1751 * locked for the parent, but all other
1752 * pages can be freed. We still want to
1753 * wakeup the parent waiting on the page,
1754 * though. ( also: pg_reqpage can be -1 and
1755 * not match anything ).
1756 *
1757 * We have to wake specifically requested pages
1758 * up too because we cleared PG_SWAPINPROG and
1759 * someone may be waiting for that.
1760 *
1761 * NOTE: for reads, m->dirty will probably
1762 * be overridden by the original caller of
1763 * getpages so don't play cute tricks here.
1764 *
93afe6be
MD
1765 * NOTE: We can't actually free the page from
1766 * here, because this is an interrupt. It
1767 * is not legal to mess with object->memq
1768 * from an interrupt. Deactivate the page
1769 * instead.
984263bc
MD
1770 */
1771
1772 m->valid = 0;
1773 vm_page_flag_clear(m, PG_ZERO);
5d5c5831 1774 vm_page_flag_clear(m, PG_SWAPINPROG);
984263bc 1775
81b5c339
MD
1776 /*
1777 * bio_driver_info holds the requested page
1778 * index.
1779 */
973c11b9 1780 if (i != (int)(intptr_t)bio->bio_driver_info) {
93afe6be
MD
1781 vm_page_deactivate(m);
1782 vm_page_wakeup(m);
1783 } else {
984263bc 1784 vm_page_flash(m);
93afe6be 1785 }
984263bc
MD
1786 /*
 1787 * If i == (int)(intptr_t)bio->bio_driver_info, do not
 1788 * wake the page up. The caller needs to.
1789 */
1790 } else {
1791 /*
3ffc7051
MD
1792 * If a write error occurs remove the swap
1793 * assignment (note that PG_SWAPPED may or
1794 * may not be set depending on prior activity).
096e95c0 1795 *
3ffc7051
MD
1796 * Re-dirty OBJT_SWAP pages as there is no
1797 * other backing store, we can't throw the
1798 * page away.
1799 *
1800 * Non-OBJT_SWAP pages (aka swapcache) must
1801 * not be dirtied since they may not have
1802 * been dirty in the first place, and they
1803 * do have backing store (the vnode).
984263bc 1804 */
b12defdc 1805 vm_page_busy_wait(m, FALSE, "swadpg");
3ffc7051
MD
1806 swp_pager_meta_ctl(m->object, m->pindex,
1807 SWM_FREE);
1808 vm_page_flag_clear(m, PG_SWAPPED);
096e95c0
MD
1809 if (m->object->type == OBJT_SWAP) {
1810 vm_page_dirty(m);
1811 vm_page_activate(m);
1812 }
3ffc7051 1813 vm_page_flag_clear(m, PG_SWAPINPROG);
984263bc 1814 vm_page_io_finish(m);
b12defdc 1815 vm_page_wakeup(m);
984263bc 1816 }
8aa92e4b 1817 } else if (bio->bio_caller_info1.index & SWBIO_READ) {
984263bc 1818 /*
984263bc
MD
1819 * NOTE: for reads, m->dirty will probably be
1820 * overridden by the original caller of getpages so
1821 * we cannot set them in order to free the underlying
1822 * swap in a low-swap situation. I don't think we'd
1823 * want to do that anyway, but it was an optimization
1824 * that existed in the old swapper for a time before
1825 * it got ripped out due to precisely this problem.
1826 *
1827 * clear PG_ZERO in page.
1828 *
1829 * If not the requested page then deactivate it.
1830 *
1831 * Note that the requested page, reqpage, is left
1832 * busied, but we still have to wake it up. The
1833 * other pages are released (unbusied) by
1834 * vm_page_wakeup(). We do not set reqpage's
1835 * valid bits here, it is up to the caller.
1836 */
1837
4530a3aa
MD
1838 /*
1839 * NOTE: can't call pmap_clear_modify(m) from an
1840 * interrupt thread, the pmap code may have to map
1841 * non-kernel pmaps and currently asserts the case.
1842 */
1843 /*pmap_clear_modify(m);*/
984263bc
MD
1844 m->valid = VM_PAGE_BITS_ALL;
1845 vm_page_undirty(m);
5d5c5831 1846 vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG);
67803f3e 1847 vm_page_flag_set(m, PG_SWAPPED);
984263bc
MD
1848
1849 /*
1850 * We have to wake specifically requested pages
1851 * up too because we cleared PG_SWAPINPROG and
1852 * could be waiting for it in getpages. However,
1853 * be sure to not unbusy getpages specifically
1854 * requested page - getpages expects it to be
1855 * left busy.
81b5c339
MD
1856 *
 1857 * bio_driver_info holds the requested page index.
984263bc 1858 */
973c11b9 1859 if (i != (int)(intptr_t)bio->bio_driver_info) {
984263bc
MD
1860 vm_page_deactivate(m);
1861 vm_page_wakeup(m);
1862 } else {
1863 vm_page_flash(m);
1864 }
1865 } else {
1866 /*
93afe6be
MD
1867 * Mark the page clean but do not mess with the
1868 * pmap-layer's modified state. That state should
1869 * also be clear since the caller protected the
1870 * page VM_PROT_READ, but allow the case.
1871 *
1872 * We are in an interrupt, avoid pmap operations.
1873 *
1874 * If we have a severe page deficit, deactivate the
1875 * page. Do not try to cache it (which would also
1876 * involve a pmap op), because the page might still
1877 * be read-heavy.
096e95c0
MD
1878 *
1879 * When using the swap to cache clean vnode pages
1880 * we do not mess with the page dirty bits.
984263bc 1881 */
b12defdc 1882 vm_page_busy_wait(m, FALSE, "swadpg");
096e95c0
MD
1883 if (m->object->type == OBJT_SWAP)
1884 vm_page_undirty(m);
5d5c5831 1885 vm_page_flag_clear(m, PG_SWAPINPROG);
67803f3e 1886 vm_page_flag_set(m, PG_SWAPPED);
93afe6be
MD
1887 if (vm_page_count_severe())
1888 vm_page_deactivate(m);
1889#if 0
984263bc
MD
1890 if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1891 vm_page_protect(m, VM_PROT_READ);
93afe6be 1892#endif
a491077e 1893 vm_page_io_finish(m);
b12defdc 1894 vm_page_wakeup(m);
984263bc
MD
1895 }
1896 }
1897
1898 /*
1899 * adjust pip. NOTE: the original parent may still have its own
1900 * pip refs on the object.
1901 */
1902
1903 if (object)
00db03f1 1904 vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);
984263bc
MD
1905
1906 /*
8aa92e4b
MD
1907 * Release the physical I/O buffer.
1908 *
1909 * NOTE: Due to synchronous operations in the write case b_cmd may
1910 * already be set to BUF_CMD_DONE and BIO_SYNC may have already
1911 * been cleared.
b12defdc
MD
1912 *
1913 * Use vm_token to interlock nsw_rcount/wcount wakeup?
984263bc 1914 */
b12defdc 1915 lwkt_gettoken(&vm_token);
8aa92e4b 1916 if (bio->bio_caller_info1.index & SWBIO_READ)
10f3fee5 1917 nswptr = &nsw_rcount;
8aa92e4b 1918 else if (bio->bio_caller_info1.index & SWBIO_SYNC)
10f3fee5 1919 nswptr = &nsw_wcount_sync;
ae8e83e6
MD
1920 else
1921 nswptr = &nsw_wcount_async;
10f3fee5
MD
1922 bp->b_cmd = BUF_CMD_DONE;
1923 relpbuf(bp, nswptr);
8e7c4729 1924 lwkt_reltoken(&vm_token);
984263bc
MD
1925}
1926
9f3543c6
MD
1927/*
1928 * Fault-in a potentially swapped page and remove the swap reference.
427e1a99 1929 * (used by swapoff code)
b12defdc
MD
1930 *
1931 * object must be held.
9f3543c6
MD
1932 */
1933static __inline void
1934swp_pager_fault_page(vm_object_t object, vm_pindex_t pindex)
1935{
1936 struct vnode *vp;
1937 vm_page_t m;
1938 int error;
1939
b12defdc
MD
1940 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1941
9f3543c6
MD
1942 if (object->type == OBJT_VNODE) {
1943 /*
1944 * Any swap related to a vnode is due to swapcache. We must
1945 * vget() the vnode in case it is not active (otherwise
1946 * vref() will panic). Calling vm_object_page_remove() will
1947 * ensure that any swap ref is removed interlocked with the
1948 * page. clean_only is set to TRUE so we don't throw away
1949 * dirty pages.
1950 */
1951 vp = object->handle;
1952 error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
1953 if (error == 0) {
1954 vm_object_page_remove(object, pindex, pindex + 1, TRUE);
1955 vput(vp);
1956 }
1957 } else {
1958 /*
1959 * Otherwise it is a normal OBJT_SWAP object and we can
1960 * fault the page in and remove the swap.
1961 */
1962 m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
1963 VM_PROT_NONE,
1964 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
ce94514e 1965 0, &error);
9f3543c6
MD
1966 if (m)
1967 vm_page_unhold(m);
1968 }
1969}
1970
427e1a99
MD
1971/*
1972 * This removes all swap blocks related to a particular device. We have
1973 * to be careful of ripups during the scan.
1974 */
1975static int swp_pager_swapoff_callback(struct swblock *swap, void *data);
1976
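/*
 * The scan below protects itself against ripouts by threading a marker
 * object (marker.type == OBJT_MARKER) onto vm_object_list and only ever
 * trusting the marker's position across blocking operations.  The general
 * shape of the pattern used:
 *
 *	TAILQ_INSERT_HEAD(&vm_object_list, &marker, object_list);
 *	while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
 *		...process object, which may block...
 *		if (object == TAILQ_NEXT(&marker, object_list)) {
 *			TAILQ_REMOVE(&vm_object_list, &marker, object_list);
 *			TAILQ_INSERT_AFTER(&vm_object_list, object,
 *					   &marker, object_list);
 *		}
 *	}
 *	TAILQ_REMOVE(&vm_object_list, &marker, object_list);
 *
 * Objects removed from the list while we sleep cannot corrupt the scan
 * because nobody else ever moves the marker.
 */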
9f3543c6
MD
1977int
1978swap_pager_swapoff(int devidx)
1979{
427e1a99 1980 struct vm_object marker;
9f3543c6 1981 vm_object_t object;
427e1a99
MD
1982 struct swswapoffinfo info;
1983
1984 bzero(&marker, sizeof(marker));
1985 marker.type = OBJT_MARKER;
9f3543c6 1986
9f3543c6 1987 lwkt_gettoken(&vmobj_token);
427e1a99
MD
1988 TAILQ_INSERT_HEAD(&vm_object_list, &marker, object_list);
1989
1990 while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
1991 if (object->type == OBJT_MARKER)
1992 goto skip;
b12defdc 1993 if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
427e1a99 1994 goto skip;
b12defdc 1995 vm_object_hold(object);
427e1a99
MD
1996 if (object->type != OBJT_SWAP && object->type != OBJT_VNODE) {
1997 vm_object_drop(object);
1998 goto skip;
9f3543c6 1999 }
427e1a99
MD
2000 info.object = object;
2001 info.devidx = devidx;
2002 swblock_rb_tree_RB_SCAN(&object->swblock_root,
2003 NULL,
2004 swp_pager_swapoff_callback,
2005 &info);
b12defdc 2006 vm_object_drop(object);
427e1a99
MD
2007skip:
2008 if (object == TAILQ_NEXT(&marker, object_list)) {
2009 TAILQ_REMOVE(&vm_object_list, &marker, object_list);
2010 TAILQ_INSERT_AFTER(&vm_object_list, object,
2011 &marker, object_list);
2012 }
9f3543c6 2013 }
427e1a99 2014 TAILQ_REMOVE(&vm_object_list, &marker, object_list);
9f3543c6 2015 lwkt_reltoken(&vmobj_token);
9f3543c6
MD
2016
2017 /*
2018 * If we fail to locate all swblocks we just fail gracefully and
 2019 * do not bother to restore paging on the swap device. The user
 2020 * can simply retry the swapoff.
2021 */
2022 if (swdevt[devidx].sw_nused)
2023 return (1);
2024 else
2025 return (0);
2026}
2027
427e1a99
MD
2028static
2029int
2030swp_pager_swapoff_callback(struct swblock *swap, void *data)
2031{
2032 struct swswapoffinfo *info = data;
2033 vm_object_t object = info->object;
2034 vm_pindex_t index;
2035 swblk_t v;
2036 int i;
2037
2038 index = swap->swb_index;
2039 for (i = 0; i < SWAP_META_PAGES; ++i) {
2040 /*
2041 * Make sure we don't race a dying object. This will
2042 * kill the scan of the object's swap blocks entirely.
2043 */
2044 if (object->flags & OBJ_DEAD)
2045 return(-1);
2046
2047 /*
2048 * Fault the page, which can obviously block. If the swap
2049 * structure disappears break out.
2050 */
2051 v = swap->swb_pages[i];
2052 if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
2053 swp_pager_fault_page(object, swap->swb_index + i);
2054 /* swap ptr might go away */
2055 if (RB_LOOKUP(swblock_rb_tree,
2056 &object->swblock_root, index) != swap) {
2057 break;
2058 }
2059 }
2060 }
2061 return(0);
2062}
2063
984263bc
MD
2064/************************************************************************
2065 * SWAP META DATA *
2066 ************************************************************************
2067 *
2068 * These routines manipulate the swap metadata stored in the
 2069 * OBJT_SWAP object. All swp_*() routines require the caller to
 2070 * hold the object, because swap can be freed up by the low level
 2071 * vm_page code while the metadata is being manipulated.
 2072 *
 2073 * Swap metadata is implemented as a per-object red-black tree of
 2074 * swblock structures (object->swblock_root) rather than a global
 2075 * hash; the object additionally maintains a count of its swblocks.
2076 */
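/*
 * Sketch of the swblock layout as used by the routines below (field names
 * taken from this file; the authoritative definition lives in the swap
 * pager header, and the RB linkage field name here is only illustrative):
 *
 *	struct swblock {
 *		RB_ENTRY(swblock) swb_entry;	- object->swblock_root link
 *		vm_pindex_t	  swb_index;	- base pindex, aligned to
 *						  SWAP_META_PAGES
 *		int		  swb_count;	- valid entries in swb_pages[]
 *		swblk_t		  swb_pages[SWAP_META_PAGES];
 *	};
 *
 * A page index is split into (index & ~SWAP_META_MASK), which keys the
 * per-object RB tree lookup, and (index & SWAP_META_MASK), which selects
 * the slot within swb_pages[].
 */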
2077
2078/*
96adc753 2079 * Lookup the swblock containing the specified swap block index.
8e7c4729 2080 *
b12defdc 2081 * The caller must hold the object.
984263bc 2082 */
96adc753
MD
2083static __inline
2084struct swblock *
2085swp_pager_lookup(vm_object_t object, vm_pindex_t index)
984263bc 2086{
b12defdc 2087 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
0301c407 2088 index &= ~(vm_pindex_t)SWAP_META_MASK;
96adc753 2089 return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
984263bc
MD
2090}
2091
2092/*
96adc753 2093 * Remove a swblock from the RB tree.
8e7c4729 2094 *
b12defdc 2095 * The caller must hold the object.
984263bc 2096 */
96adc753
MD
2097static __inline
2098void
2099swp_pager_remove(vm_object_t object, struct swblock *swap)
2100{
b12defdc 2101 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
96adc753
MD
2102 RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
2103}
984263bc 2104
96adc753
MD
2105/*
2106 * Convert default object to swap object if necessary
8e7c4729 2107 *
b12defdc 2108 * The caller must hold the object.
96adc753 2109 */
984263bc 2110static void
96adc753
MD
2111swp_pager_meta_convert(vm_object_t object)
2112{
2113 if (object->type == OBJT_DEFAULT) {
984263bc 2114 object->type = OBJT_SWAP;
96adc753 2115 KKASSERT(object->swblock_count == 0);
984263bc 2116 }
96adc753
MD
2117}
2118
2119/*
2120 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
2121 *
2122 * We first convert the object to a swap object if it is a default
2123 * object. Vnode objects do not need to be converted.
2124 *
2125 * The specified swapblk is added to the object's swap metadata. If
2126 * the swapblk is not valid, it is freed instead. Any previously
2127 * assigned swapblk is freed.
8e7c4729 2128 *
b12defdc 2129 * The caller must hold the object.
96adc753
MD
2130 */
2131static void
651d8e75 2132swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
96adc753
MD
2133{
2134 struct swblock *swap;
2135 struct swblock *oswap;
0301c407 2136 vm_pindex_t v;
96adc753
MD
2137
2138 KKASSERT(swapblk != SWAPBLK_NONE);
b12defdc 2139 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
96adc753
MD
2140
2141 /*
2142 * Convert object if necessary
2143 */
2144 if (object->type == OBJT_DEFAULT)
2145 swp_pager_meta_convert(object);
984263bc
MD
2146
2147 /*
96adc753 2148 * Locate the swblock, creating a new one if it does not exist.
984263bc
MD
 2149 * If the zone allocation fails we wait for memory and, since the
 2150 * tree may have changed while we slept, retry.
2151 */
984263bc 2152retry:
96adc753 2153 swap = swp_pager_lookup(object, index);
984263bc 2154
96adc753 2155 if (swap == NULL) {
984263bc
MD
2156 int i;
2157
96adc753 2158 swap = zalloc(swap_zone);
984263bc 2159 if (swap == NULL) {
4ecf7cc9 2160 vm_wait(0);
984263bc
MD
2161 goto retry;
2162 }
0301c407 2163 swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
984263bc
MD
2164 swap->swb_count = 0;
2165
96adc753 2166 ++object->swblock_count;
984263bc
MD
2167
2168 for (i = 0; i < SWAP_META_PAGES; ++i)
2169 swap->swb_pages[i] = SWAPBLK_NONE;
96adc753
MD
2170 oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
2171 KKASSERT(oswap == NULL);
984263bc
MD
2172 }
2173
2174 /*
0301c407
MD
2175 * Delete prior contents of metadata.
2176 *
2177 * NOTE: Decrement swb_count after the freeing operation (which
2178 * might block) to prevent racing destruction of the swblock.
984263bc 2179 */
984263bc
MD
2180 index &= SWAP_META_MASK;
2181
0301c407
MD
2182 while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
2183 swap->swb_pages[index] = SWAPBLK_NONE;
2184 /* can block */
2185 swp_pager_freeswapspace(object, v, 1);
984263bc
MD
2186 --swap->swb_count;
2187 }
2188
2189 /*
2190 * Enter block into metadata
2191 */
984263bc
MD
2192 swap->swb_pages[index] = swapblk;
2193 if (swapblk != SWAPBLK_NONE)
2194 ++swap->swb_count;
2195}
2196
2197/*
2198 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2199 *
2200 * The requested range of blocks is freed, with any associated swap
2201 * returned to the swap bitmap.
2202 *
2203 * This routine will free swap metadata structures as they are cleaned
2204 * out. This routine does *NOT* operate on swap metadata associated
2205 * with resident pages.
2206 *
b12defdc 2207 * The caller must hold the object.
984263bc 2208 */
8d292090
MD
2209static int swp_pager_meta_free_callback(struct swblock *swb, void *data);
2210
984263bc 2211static void
8d292090 2212swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
984263bc 2213{
8d292090 2214 struct swfreeinfo info;
96adc753 2215
b12defdc
MD
2216 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2217
8d292090
MD
2218 /*
2219 * Nothing to do
2220 */
2221 if (object->swblock_count == 0) {
2222 KKASSERT(RB_EMPTY(&object->swblock_root));
2223 return;
2224 }
2225 if (count == 0)
984263bc
MD
2226 return;
2227
8d292090
MD
2228 /*
2229 * Setup for RB tree scan. Note that the pindex range can be huge
2230 * due to the 64 bit page index space so we cannot safely iterate.
2231 */
2232 info.object = object;
0301c407 2233 info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
8d292090
MD
2234 info.begi = index;
2235 info.endi = index + count - 1;
2236 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
2237 swp_pager_meta_free_callback, &info);
2238}
984263bc 2239
8e7c4729 2240/*
b12defdc 2241 * The caller must hold the object.
8e7c4729 2242 */
8d292090
MD
2243static
2244int
2245swp_pager_meta_free_callback(struct swblock *swap, void *data)
2246{
2247 struct swfreeinfo *info = data;
2248 vm_object_t object = info->object;
2249 int index;
2250 int eindex;
2251
2252 /*
2253 * Figure out the range within the swblock. The wider scan may
2254 * return edge-case swap blocks when the start and/or end points
2255 * are in the middle of a block.
2256 */
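	/*
	 * Illustrative example (assuming SWAP_META_PAGES == 16, so
	 * SWAP_META_MASK == 15): freeing pindex 5 through 40 visits three
	 * swblocks.  The one based at 0 is trimmed to slots 5-15, the one
	 * based at 16 covers slots 0-15 in full, and the one based at 32
	 * is trimmed to slots 0-8.
	 */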
2257 if (swap->swb_index < info->begi)
2258 index = (int)info->begi & SWAP_META_MASK;
2259 else
2260 index = 0;
2261
2262 if (swap->swb_index + SWAP_META_PAGES > info->endi)
2263 eindex = (int)info->endi & SWAP_META_MASK;
2264 else
2265 eindex = SWAP_META_MASK;
2266
2267 /*
 2268 * Scan and free the blocks. The loop terminates early
 2269 * if the swblock runs out of blocks, in which case it is freed.
0301c407
MD
2270 *
2271 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
2272 * to deal with a zfree race.
8d292090
MD
2273 */
2274 while (index <= eindex) {
651d8e75 2275 swblk_t v = swap->swb_pages[index];
8d292090
MD
2276
2277 if (v != SWAPBLK_NONE) {
8d292090 2278 swap->swb_pages[index] = SWAPBLK_NONE;
0301c407
MD
2279 /* can block */
2280 swp_pager_freeswapspace(object, v, 1);
8d292090
MD
2281 if (--swap->swb_count == 0) {
2282 swp_pager_remove(object, swap);
2283 zfree(swap_zone, swap);
2284 --object->swblock_count;
2285 break;
984263bc 2286 }
984263bc 2287 }
8d292090 2288 ++index;
984263bc 2289 }
6235163e 2290
8d292090 2291 /* swap may be invalid here due to zfree above */
6235163e
MD
2292 lwkt_yield();
2293
8d292090 2294 return(0);
984263bc
MD
2295}
2296
2297/*
2298 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2299 *
2300 * This routine locates and destroys all swap metadata associated with
2301 * an object.
2302 *
0301c407
MD
2303 * NOTE: Decrement swb_count after the freeing operation (which
2304 * might block) to prevent racing destruction of the swblock.
2305 *
b12defdc 2306 * The caller must hold the object.
984263bc 2307 */
984263bc
MD
2308static void
2309swp_pager_meta_free_all(vm_object_t object)
2310{
96adc753
MD
2311 struct swblock *swap;
2312 int i;
984263bc 2313
b12defdc
MD
2314 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2315
96adc753
MD
2316 while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
2317 swp_pager_remove(object, swap);
2318 for (i = 0; i < SWAP_META_PAGES; ++i) {
651d8e75 2319 swblk_t v = swap->swb_pages[i];
96adc753 2320 if (v != SWAPBLK_NONE) {
0301c407 2321 /* can block */
096e95c0 2322 swp_pager_freeswapspace(object, v, 1);
0301c407 2323 --swap->swb_count;
984263bc 2324 }
984263bc 2325 }
96adc753
MD
2326 if (swap->swb_count != 0)
2327 panic("swap_pager_meta_free_all: swb_count != 0");
2328 zfree(swap_zone, swap);
2329 --object->swblock_count;
6235163e 2330 lwkt_yield();
984263bc 2331 }
96adc753 2332 KKASSERT(object->swblock_count == 0);
984263bc
MD
2333}
2334
2335/*
2336 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
2337 *
2338 * This routine is capable of looking up, popping, or freeing
2339 * swapblk assignments in the swap meta data or in the vm_page_t.
2340 * The routine typically returns the swapblk being looked-up, or popped,
2341 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
2342 * was invalid. This routine will automatically free any invalid
2343 * meta-data swapblks.
2344 *
2345 * It is not possible to store invalid swapblks in the swap meta data
 2346 * (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
2347 *
2348 * When acting on a busy resident page and paging is in progress, we
2349 * have to wait until paging is complete but otherwise can act on the
2350 * busy page.
2351 *
984263bc
MD
2352 * SWM_FREE remove and free swap block from metadata
2353 * SWM_POP remove from meta data but do not free.. pop it out
8e7c4729 2354 *
b12defdc 2355 * The caller must hold the object.
984263bc 2356 */
651d8e75 2357static swblk_t
96adc753
MD
2358swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
2359{
984263bc 2360 struct swblock *swap;
651d8e75 2361 swblk_t r1;
984263bc 2362
8d292090 2363 if (object->swblock_count == 0)
984263bc
MD
2364 return(SWAPBLK_NONE);
2365
2366 r1 = SWAPBLK_NONE;
96adc753 2367 swap = swp_pager_lookup(object, index);
984263bc 2368
96adc753 2369 if (swap != NULL) {
984263bc
MD
2370 index &= SWAP_META_MASK;
2371 r1 = swap->swb_pages[index];
2372
2373 if (r1 != SWAPBLK_NONE) {
984263bc
MD
2374 if (flags & (SWM_FREE|SWM_POP)) {
2375 swap->swb_pages[index] = SWAPBLK_NONE;
2376 if (--swap->swb_count == 0) {
96adc753 2377 swp_pager_remove(object, swap);
984263bc 2378 zfree(swap_zone, swap);
96adc753 2379 --object->swblock_count;
984263bc
MD
2380 }
2381 }
0301c407 2382 /* swap ptr may be invalid */
b12defdc
MD
2383 if (flags & SWM_FREE) {
2384 swp_pager_freeswapspace(object, r1, 1);
2385 r1 = SWAPBLK_NONE;
2386 }
984263bc 2387 }
0301c407 2388 /* swap ptr may be invalid */
984263bc
MD
2389 }
2390 return(r1);
2391}
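
/*
 * Usage sketch for swp_pager_meta_ctl() (illustrative; it mirrors the
 * behavior implemented above):
 *
 *	- Drop a (possibly bad) swap assignment, freeing the block:
 *		swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
 *	- Look up the block without disturbing the metadata:
 *		blk = swp_pager_meta_ctl(object, pindex, 0);
 *	- Take ownership of the block, removing it from the metadata
 *	  without freeing it:
 *		blk = swp_pager_meta_ctl(object, pindex, SWM_POP);
 */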