4 * Copyright (c) 1998-2010 The DragonFly Project. All rights reserved.
6 * This code is derived from software contributed to The DragonFly Project
7 * by Matthew Dillon <dillon@backplane.com>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * Copyright (c) 1994 John S. Dyson
37 * Copyright (c) 1990 University of Utah.
38 * Copyright (c) 1991, 1993
39 * The Regents of the University of California. All rights reserved.
41 * This code is derived from software contributed to Berkeley by
42 * the Systems Programming Group of the University of Utah Computer
45 * Redistribution and use in source and binary forms, with or without
46 * modification, are permitted provided that the following conditions
48 * 1. Redistributions of source code must retain the above copyright
49 * notice, this list of conditions and the following disclaimer.
50 * 2. Redistributions in binary form must reproduce the above copyright
51 * notice, this list of conditions and the following disclaimer in the
52 * documentation and/or other materials provided with the distribution.
53 * 3. Neither the name of the University nor the names of its contributors
54 * may be used to endorse or promote products derived from this software
55 * without specific prior written permission.
57 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
58 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
61 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
62 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
63 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
64 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
65 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
66 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72 * Radix Bitmap 'blists'.
74 * - The new swapper uses the new radix bitmap code. This should scale
75 * to arbitrarily small or arbitrarily large swap spaces and an almost
76 * arbitrary degree of fragmentation.
80 * - on the fly reallocation of swap during putpages. The new system
81 * does not try to keep previously allocated swap blocks for dirty
84 * - on the fly deallocation of swap
86 * - No more garbage collection required. Unnecessarily allocated swap
87 * blocks only exist for dirty vm_page_t's now and these are already
88 * cycled (in a high-load system) by the pager. We also do on-the-fly
89 * removal of invalidated swap blocks when a page is destroyed
92 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
93 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
94 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
98 #include <sys/param.h>
99 #include <sys/systm.h>
100 #include <sys/conf.h>
101 #include <sys/kernel.h>
102 #include <sys/proc.h>
104 #include <sys/vnode.h>
105 #include <sys/malloc.h>
106 #include <sys/vmmeter.h>
107 #include <sys/sysctl.h>
108 #include <sys/blist.h>
109 #include <sys/lock.h>
110 #include <sys/kcollect.h>
113 #include <vm/vm_object.h>
114 #include <vm/vm_page.h>
115 #include <vm/vm_pager.h>
116 #include <vm/vm_pageout.h>
117 #include <vm/swap_pager.h>
118 #include <vm/vm_extern.h>
119 #include <vm/vm_zone.h>
120 #include <vm/vnode_pager.h>
122 #include <sys/buf2.h>
123 #include <vm/vm_page2.h>
125 #ifndef MAX_PAGEOUT_CLUSTER
126 #define MAX_PAGEOUT_CLUSTER SWB_NPAGES
129 #define SWM_FREE 0x02 /* free, period */
130 #define SWM_POP 0x04 /* pop out */
132 #define SWBIO_READ 0x01
133 #define SWBIO_WRITE 0x02
134 #define SWBIO_SYNC 0x04
135 #define SWBIO_TTC 0x08 /* for VM_PAGER_TRY_TO_CACHE */
141 vm_pindex_t endi; /* inclusive */
144 struct swswapoffinfo {
151 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks
155 int swap_pager_full; /* swap space exhaustion (task killing) */
156 int swap_fail_ticks; /* when we became exhausted */
157 int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
158 swblk_t vm_swap_cache_use;
159 swblk_t vm_swap_anon_use;
160 static int vm_report_swap_allocs;
162 static struct krate kswaprate = { 1 };
163 static int nsw_rcount; /* free read buffers */
164 static int nsw_wcount_sync; /* limit write buffers / synchronous */
165 static int nsw_wcount_async; /* limit write buffers / asynchronous */
166 static int nsw_wcount_async_max;/* assigned maximum */
167 static int nsw_cluster_max; /* maximum VOP I/O allowed */
169 struct blist *swapblist;
170 static int swap_async_max = 4; /* maximum in-progress async I/O's */
171 static int swap_burst_read = 0; /* allow burst reading */
172 static swblk_t swapiterator; /* linearize allocations */
173 int swap_user_async = 0; /* user swap pager operation can be async */
175 static struct spinlock swapbp_spin = SPINLOCK_INITIALIZER(&swapbp_spin, "swapbp_spin");
178 extern struct vnode *swapdev_vp;
179 extern struct swdevt *swdevt;
182 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / SWB_DMMAX % nswdev : 0)
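/*
 * Illustrative sketch (not compiled) of how BLK2DEVIDX() spreads swap
 * blocks across interleaved devices: each stripe of SWB_DMMAX blocks is
 * assigned to the next device round-robin.  The stripe size and device
 * count below are hypothetical values chosen only to show the pattern.
 */
#if 0
static void
blk2devidx_example(void)
{
	swblk_t stripe = 32;	/* hypothetical SWB_DMMAX */
	int ndev = 2;		/* hypothetical nswdev */
	swblk_t blk;

	/* blocks 0..31 -> dev 0, 32..63 -> dev 1, 64..95 -> dev 0, ... */
	for (blk = 0; blk < 96; blk += stripe) {
		kprintf("blk %ld -> dev %ld\n",
			(long)blk, (long)(blk / stripe % ndev));
	}
}
#endif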
184 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
185 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
186 SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
187 CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
188 SYSCTL_INT(_vm, OID_AUTO, swap_user_async,
189 CTLFLAG_RW, &swap_user_async, 0, "Allow async user swap write I/O");
192 SYSCTL_LONG(_vm, OID_AUTO, swap_cache_use,
193 CTLFLAG_RD, &vm_swap_cache_use, 0, "");
194 SYSCTL_LONG(_vm, OID_AUTO, swap_anon_use,
195 CTLFLAG_RD, &vm_swap_anon_use, 0, "");
196 SYSCTL_LONG(_vm, OID_AUTO, swap_size,
197 CTLFLAG_RD, &vm_swap_size, 0, "");
199 SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
200 CTLFLAG_RD, &vm_swap_cache_use, 0, "");
201 SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
202 CTLFLAG_RD, &vm_swap_anon_use, 0, "");
203 SYSCTL_INT(_vm, OID_AUTO, swap_size,
204 CTLFLAG_RD, &vm_swap_size, 0, "");
206 SYSCTL_INT(_vm, OID_AUTO, report_swap_allocs,
207 CTLFLAG_RW, &vm_report_swap_allocs, 0, "");
212 * Red-Black tree for swblock entries
214 * The caller must hold vm_token
216 RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
217 vm_pindex_t, swb_index);
220 rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
222 if (swb1->swb_index < swb2->swb_index)
224 if (swb1->swb_index > swb2->swb_index)
231 rb_swblock_scancmp(struct swblock *swb, void *data)
233 struct swfreeinfo *info = data;
235 if (swb->swb_index < info->basei)
237 if (swb->swb_index > info->endi)
244 rb_swblock_condcmp(struct swblock *swb, void *data)
246 struct swfreeinfo *info = data;
248 if (swb->swb_index < info->basei)
254 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
255 * calls hooked from other parts of the VM system and do not appear here.
256 * (see vm/swap_pager.h).
259 static void swap_pager_dealloc (vm_object_t object);
260 static int swap_pager_getpage (vm_object_t, vm_page_t *, int);
261 static void swap_chain_iodone(struct bio *biox);
263 struct pagerops swappagerops = {
264 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
265 swap_pager_getpage, /* pagein */
266 swap_pager_putpages, /* pageout */
267 swap_pager_haspage /* get backing store status for page */
271 * SWB_DMMAX is in page-sized chunks with the new swap system. It was
272 * dev-bsized chunks in the old. SWB_DMMAX is always a power of 2.
274 * swap_*() routines are externally accessible. swp_*() routines are
278 int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */
279 int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */
281 static __inline void swp_sizecheck (void);
282 static void swp_pager_async_iodone (struct bio *bio);
285 * Swap bitmap functions
288 static __inline void swp_pager_freeswapspace(vm_object_t object,
289 swblk_t blk, int npages);
290 static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages);
296 static void swp_pager_meta_convert(vm_object_t);
297 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
298 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
299 static void swp_pager_meta_free_all(vm_object_t);
300 static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
303 * SWP_SIZECHECK() - update swap_pager_full indication
305 * update the swap_pager_almost_full indication and warn when we are
306 * about to run out of swap space, using lowat/hiwat hysteresis.
308 * Clear swap_pager_full ( task killing ) indication when lowat is met.
310 * No restrictions on call
311 * This routine may not block.
317 if (vm_swap_size < nswap_lowat) {
318 if (swap_pager_almost_full == 0) {
319 kprintf("swap_pager: out of swap space\n");
320 swap_pager_almost_full = 1;
321 swap_fail_ticks = ticks;
325 if (vm_swap_size > nswap_hiwat)
326 swap_pager_almost_full = 0;
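/*
 * Example of the hysteresis above using the static defaults (nswap_lowat
 * 128, nswap_hiwat 512 pages): the almost-full indication is raised once
 * free swap drops below 128 pages and is only cleared again after free
 * swap climbs back above 512 pages, so the warning does not flap while
 * free space hovers around the low-water mark.
 */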
331 * Long-term data collection on 10-second interval. Return the value
332 * for KCOLLECT_SWAPPCT and set the values for SWAPANO and SWAPCCAC.
334 * Return total swap in the scale field. This can change if swap is
335 * regularly added or removed and may cause some historical confusion
336 * in that case, but SWAPPCT will always be historically accurate.
339 #define PTOB(value) ((uint64_t)(value) << PAGE_SHIFT)
342 collect_swap_callback(int n)
344 uint64_t total = vm_swap_max;
345 uint64_t anon = vm_swap_anon_use;
346 uint64_t cache = vm_swap_cache_use;
348 if (total == 0) /* avoid divide by zero */
350 kcollect_setvalue(KCOLLECT_SWAPANO, PTOB(anon));
351 kcollect_setvalue(KCOLLECT_SWAPCAC, PTOB(cache));
352 kcollect_setscale(KCOLLECT_SWAPANO,
353 KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, PTOB(total)));
354 kcollect_setscale(KCOLLECT_SWAPCAC,
355 KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, PTOB(total)));
356 return (((anon + cache) * 10000 + (total >> 1)) / total);
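/*
 * Worked example of the return value above: with total = 1000000 swap
 * pages, anon = 123456 and cache = 76544, the routine returns
 * (200000 * 10000 + 500000) / 1000000 = 2000, i.e. 20.00% of swap in
 * use expressed in hundredths of a percent; the (total >> 1) term
 * rounds to the nearest unit rather than truncating.
 */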
360 * SWAP_PAGER_INIT() - initialize the swap pager!
362 * Expected to be started from system init. NOTE: This code is run
363 * before much else so be careful what you depend on. Most of the VM
364 * system has yet to be initialized at this point.
366 * Called from the low level boot code only.
369 swap_pager_init(void *arg __unused)
371 kcollect_register(KCOLLECT_SWAPPCT, "swapuse", collect_swap_callback,
372 KCOLLECT_SCALE(KCOLLECT_SWAPPCT_FORMAT, 0));
373 kcollect_register(KCOLLECT_SWAPANO, "swapano", NULL,
374 KCOLLECT_SCALE(KCOLLECT_SWAPANO_FORMAT, 0));
375 kcollect_register(KCOLLECT_SWAPCAC, "swapcac", NULL,
376 KCOLLECT_SCALE(KCOLLECT_SWAPCAC_FORMAT, 0));
378 SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL);
381 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
383 * Expected to be started from pageout process once, prior to entering
386 * Called from the low level boot code only.
389 swap_pager_swap_init(void)
394 * Number of in-transit swap bp operations. Don't
395 * exhaust the pbufs completely. Make sure we
396 * initialize workable values (0 will work for hysteresis
397 * but it isn't very efficient).
399 * The nsw_cluster_max is constrained by the number of pages an XIO
400 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
401 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
402 * constrained by the swap device interleave stripe size.
404 * Currently we hardwire nsw_wcount_async to 4. This limit is
405 * designed to prevent other I/O from having high latencies due to
406 * our pageout I/O. The value 4 works well for one or two active swap
407 * devices but is probably a little low if you have more. Even so,
408 * a higher value would probably generate only a limited improvement
409 * with three or four active swap devices since the system does not
410 * typically have to pageout at extreme bandwidths. We will want
411 * at least 2 per swap device, and 4 is a pretty good value if you
412 * have one NFS swap device due to the command/ack latency over NFS.
413 * So it all works out pretty well.
416 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
418 nsw_rcount = (nswbuf_kva + 1) / 2;
419 nsw_wcount_sync = (nswbuf_kva + 3) / 4;
420 nsw_wcount_async = 4;
421 nsw_wcount_async_max = nsw_wcount_async;
424 * The zone is dynamically allocated so generally size it to
425 * maxswzone (32MB to 256GB of KVM). Set a minimum size of around
426 * 8x physical memory (each swblock can hold 16 pages).
428 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
429 * has increased dramatically.
431 n = vmstats.v_page_count / 2;
432 if (maxswzone && n < maxswzone / sizeof(struct swblock))
433 n = maxswzone / sizeof(struct swblock);
439 sizeof(struct swblock),
442 if (swap_zone != NULL)
445 * if the allocation failed, try a zone two thirds the
446 * size of the previous attempt.
451 if (swap_zone == NULL)
452 panic("swap_pager_swap_init: swap_zone == NULL");
454 kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
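/*
 * Sizing arithmetic behind the default above: n = v_page_count / 2
 * swblock structures, each mapping SWAP_META_PAGES (16) swap pages,
 * covers (v_page_count / 2) * 16 = 8 * v_page_count pages of swap,
 * which is the "around 8x physical memory" minimum described in the
 * zone sizing comment.
 */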
458 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
459 * its metadata structures.
461 * This routine is called from the mmap and fork code to create a new
462 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object
463 * and then converting it with swp_pager_meta_convert().
465 * We only support unnamed objects.
470 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
474 KKASSERT(handle == NULL);
475 object = vm_object_allocate_hold(OBJT_DEFAULT,
476 OFF_TO_IDX(offset + PAGE_MASK + size));
477 swp_pager_meta_convert(object);
478 vm_object_drop(object);
484 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
486 * The swap backing for the object is destroyed. The code is
487 * designed such that we can reinstantiate it later, but this
488 * routine is typically called only when the entire object is
489 * about to be destroyed.
491 * The object must be locked or unreferenceable.
492 * No other requirements.
495 swap_pager_dealloc(vm_object_t object)
497 vm_object_hold(object);
498 vm_object_pip_wait(object, "swpdea");
501 * Free all remaining metadata. We only bother to free it from
502 * the swap meta data. We do not attempt to free swapblk's still
503 * associated with vm_page_t's for this object. We do not care
504 * if paging is still in progress on some objects.
506 swp_pager_meta_free_all(object);
507 vm_object_drop(object);
510 /************************************************************************
511 * SWAP PAGER BITMAP ROUTINES *
512 ************************************************************************/
515 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
517 * Allocate swap for the requested number of pages. The starting
518 * swap block number (a page index) is returned or SWAPBLK_NONE
519 * if the allocation failed.
521 * Also has the side effect of advising that somebody made a mistake
522 * when they configured swap and didn't configure enough.
524 * The caller must hold the object.
525 * This routine may not block.
527 static __inline swblk_t
528 swp_pager_getswapspace(vm_object_t object, int npages)
532 lwkt_gettoken(&vm_token);
533 blk = blist_allocat(swapblist, npages, swapiterator);
534 if (blk == SWAPBLK_NONE)
535 blk = blist_allocat(swapblist, npages, 0);
536 if (blk == SWAPBLK_NONE) {
537 if (swap_pager_full != 2) {
538 if (vm_swap_max == 0) {
539 krateprintf(&kswaprate,
540 "Warning: The system would like to "
541 "page to swap but no swap space "
544 krateprintf(&kswaprate,
545 "swap_pager_getswapspace: "
546 "swap full allocating %d pages\n",
550 if (swap_pager_almost_full == 0)
551 swap_fail_ticks = ticks;
552 swap_pager_almost_full = 1;
555 /* swapiterator = blk; disable for now, doesn't work well */
556 swapacctspace(blk, -npages);
557 if (object->type == OBJT_SWAP)
558 vm_swap_anon_use += npages;
560 vm_swap_cache_use += npages;
563 lwkt_reltoken(&vm_token);
568 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
570 * This routine returns the specified swap blocks back to the bitmap.
572 * Note: This routine may not block (it could in the old swap code),
573 * and through the use of the new blist routines it does not block.
575 * This routine may not block.
579 swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
581 struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];
583 lwkt_gettoken(&vm_token);
584 sp->sw_nused -= npages;
585 if (object->type == OBJT_SWAP)
586 vm_swap_anon_use -= npages;
588 vm_swap_cache_use -= npages;
590 if (sp->sw_flags & SW_CLOSING) {
591 lwkt_reltoken(&vm_token);
595 blist_free(swapblist, blk, npages);
596 vm_swap_size += npages;
598 lwkt_reltoken(&vm_token);
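/*
 * Illustrative sketch (not compiled) of the allocate/free pattern the
 * two routines above wrap: try to allocate near the iterator hint,
 * retry from the start of the radix bitmap on failure, and return the
 * blocks with blist_free() when done.  The vm_token handling, device
 * accounting and hysteresis updates are omitted here.
 */
#if 0
static void
swp_bitmap_example(void)
{
	swblk_t blk;
	int npages = 4;

	blk = blist_allocat(swapblist, npages, swapiterator);
	if (blk == SWAPBLK_NONE)
		blk = blist_allocat(swapblist, npages, 0);
	if (blk == SWAPBLK_NONE)
		return;		/* swap exhausted */
	/* ... issue I/O against blocks [blk, blk + npages) ... */
	blist_free(swapblist, blk, npages);
}
#endif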
602 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
603 * range within an object.
605 * This is a globally accessible routine.
607 * This routine removes swapblk assignments from swap metadata.
609 * The external callers of this routine typically have already destroyed
610 * or renamed vm_page_t's associated with this range in the object so
616 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
618 vm_object_hold(object);
619 swp_pager_meta_free(object, start, size);
620 vm_object_drop(object);
627 swap_pager_freespace_all(vm_object_t object)
629 vm_object_hold(object);
630 swp_pager_meta_free_all(object);
631 vm_object_drop(object);
635 * This function conditionally frees swap cache swap starting at
636 * (*basei) in the object. (count) swap blocks will be nominally freed.
637 * The actual number of blocks freed can be more or less than the
640 * This function nominally returns the number of blocks freed. However,
641 * the actual number of blocks freed may be less than the returned value.
642 * If the function is unable to exhaust the object or if it is able to
643 * free (approximately) the requested number of blocks it returns
646 * If we exhaust the object we will return a value n <= count.
648 * The caller must hold the object.
650 * WARNING! If count == 0 then -1 can be returned as a degenerate case,
651 * callers should always pass a count value > 0.
653 static int swap_pager_condfree_callback(struct swblock *swap, void *data);
656 swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
658 struct swfreeinfo info;
662 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
664 info.object = object;
665 info.basei = *basei; /* skip up to this page index */
666 info.begi = count; /* max swap pages to destroy */
667 info.endi = count * 8; /* max swblocks to scan */
669 swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
670 swap_pager_condfree_callback, &info);
674 * Take the higher difference swblocks vs pages
676 n = count - (int)info.begi;
677 t = count * 8 - (int)info.endi;
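/*
 * Hypothetical caller sketch (not compiled): a cleaning loop keeps a
 * persistent per-object cursor and calls swap_pager_condfree() in small
 * batches while holding the object token, restarting the cursor when
 * the return value suggests the scan wrapped.  The restart policy shown
 * is only one possible choice.
 */
#if 0
static void
condfree_example(vm_object_t object)
{
	static vm_pindex_t cursor;
	int n;

	n = swap_pager_condfree(object, &cursor, 16);
	if (n < 16)
		cursor = 0;	/* object likely exhausted, start over */
}
#endif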
686 * The idea is to free whole meta-blocks to avoid fragmenting
687 * the swap space or disk I/O. We only do this if NO VM pages
690 * We do not have to deal with clearing PG_SWAPPED in related VM
691 * pages because there are no related VM pages.
693 * The caller must hold the object.
696 swap_pager_condfree_callback(struct swblock *swap, void *data)
698 struct swfreeinfo *info = data;
699 vm_object_t object = info->object;
702 for (i = 0; i < SWAP_META_PAGES; ++i) {
703 if (vm_page_lookup(object, swap->swb_index + i))
706 info->basei = swap->swb_index + SWAP_META_PAGES;
707 if (i == SWAP_META_PAGES) {
708 info->begi -= swap->swb_count;
709 swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
712 if ((int)info->begi < 0 || (int)info->endi < 0)
719 * Called by vm_page_alloc() when a new VM page is inserted
720 * into a VM object. Checks whether swap has been assigned to
721 * the page and sets PG_SWAPPED as necessary.
723 * (m) must be busied by caller and remains busied on return.
726 swap_pager_page_inserted(vm_page_t m)
728 if (m->object->swblock_count) {
729 vm_object_hold(m->object);
730 if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
731 vm_page_flag_set(m, PG_SWAPPED);
732 vm_object_drop(m->object);
737 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
739 * Assigns swap blocks to the specified range within the object. The
740 * swap blocks are not zeroed. Any previous swap assignment is destroyed.
742 * Returns 0 on success, -1 on failure.
744 * The caller is responsible for avoiding races in the specified range.
745 * No other requirements.
748 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
751 swblk_t blk = SWAPBLK_NONE;
752 vm_pindex_t beg = start; /* save start index */
754 vm_object_hold(object);
759 while ((blk = swp_pager_getswapspace(object, n)) ==
764 swp_pager_meta_free(object, beg,
766 vm_object_drop(object);
771 swp_pager_meta_build(object, start, blk);
777 swp_pager_meta_free(object, start, n);
778 vm_object_drop(object);
783 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
784 * and destroy the source.
786 * Copy any valid swapblks from the source to the destination. In
787 * cases where both the source and destination have a valid swapblk,
788 * we keep the destination's.
790 * This routine is allowed to block. It may block allocating metadata
791 * indirectly through swp_pager_meta_build() or if paging is still in
792 * progress on the source.
794 * XXX vm_page_collapse() kinda expects us not to block because we
795 * supposedly do not need to allocate memory, but for the moment we
796 * *may* have to get a little memory from the zone allocator, but
797 * it is taken from the interrupt memory. We should be ok.
799 * The source object contains no vm_page_t's (which is just as well)
800 * The source object is of type OBJT_SWAP.
802 * The source and destination objects must be held by the caller.
805 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
806 vm_pindex_t base_index, int destroysource)
810 ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
811 ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));
814 * transfer source to destination.
816 for (i = 0; i < dstobject->size; ++i) {
820 * Locate (without changing) the swapblk on the destination,
821 * unless it is invalid in which case free it silently, or
822 * if the destination is a resident page, in which case the
823 * source is thrown away.
825 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
827 if (dstaddr == SWAPBLK_NONE) {
829 * Destination has no swapblk and is not resident,
834 srcaddr = swp_pager_meta_ctl(srcobject,
835 base_index + i, SWM_POP);
837 if (srcaddr != SWAPBLK_NONE)
838 swp_pager_meta_build(dstobject, i, srcaddr);
841 * Destination has valid swapblk or it is represented
842 * by a resident page. We destroy the sourceblock.
844 swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
849 * Free left over swap blocks in source.
851 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
852 * double-remove the object from the swap queues.
856 * Reverting the type is not necessary, the caller is going
857 * to destroy srcobject directly, but I'm doing it here
858 * for consistency since we've removed the object from its
861 swp_pager_meta_free_all(srcobject);
862 if (srcobject->type == OBJT_SWAP)
863 srcobject->type = OBJT_DEFAULT;
868 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
869 * the requested page.
871 * We determine whether good backing store exists for the requested
872 * page and return TRUE if it does, FALSE if it doesn't.
874 * If TRUE, we also try to determine how much valid, contiguous backing
875 * store exists before and after the requested page within a reasonable
876 * distance. We do not try to restrict it to the swap device stripe
877 * (that is handled in getpages/putpages). It probably isn't worth
883 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
888 * do we have good backing store at the requested index ?
890 vm_object_hold(object);
891 blk0 = swp_pager_meta_ctl(object, pindex, 0);
893 if (blk0 == SWAPBLK_NONE) {
894 vm_object_drop(object);
897 vm_object_drop(object);
902 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
904 * This removes any associated swap backing store, whether valid or
905 * not, from the page. This operates on any VM object, not just OBJT_SWAP
908 * This routine is typically called when a page is made dirty, at
909 * which point any associated swap can be freed. MADV_FREE also
910 * calls us in a special-case situation
912 * NOTE!!! If the page is clean and the swap was valid, the caller
913 * should make the page dirty before calling this routine.
914 * This routine does NOT change the m->dirty status of the page.
915 * Also: MADV_FREE depends on it.
917 * The page must be busied.
918 * The caller can hold the object to avoid blocking, else we might block.
919 * No other requirements.
922 swap_pager_unswapped(vm_page_t m)
924 if (m->flags & PG_SWAPPED) {
925 vm_object_hold(m->object);
926 KKASSERT(m->flags & PG_SWAPPED);
927 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
928 vm_page_flag_clear(m, PG_SWAPPED);
929 vm_object_drop(m->object);
934 * SWAP_PAGER_STRATEGY() - read, write, free blocks
936 * This implements a VM OBJECT strategy function using swap backing store.
937 * This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
938 * types. Only BUF_CMD_{READ,WRITE,FREEBLKS} is supported, any other
939 * requests will return EINVAL.
941 * This is intended to be a cacheless interface (i.e. caching occurs at
942 * higher levels), and is also used as a swap-based SSD cache for vnode
943 * and device objects.
945 * All I/O goes directly to and from the swap device.
947 * We currently attempt to run I/O synchronously or asynchronously as
948 * the caller requests. This isn't perfect because we lose error
949 * sequencing when we run multiple ops in parallel to satisfy a request.
950 * But this is swap, so we let it all hang out.
952 * NOTE: This function supports the KVABIO API wherein bp->b_data might
953 * not be synchronized to the current cpu.
958 swap_pager_strategy(vm_object_t object, struct bio *bio)
960 struct buf *bp = bio->bio_buf;
963 vm_pindex_t biox_blkno = 0;
969 struct bio_track *track;
974 * tracking for swapdev vnode I/Os
976 if (bp->b_cmd == BUF_CMD_READ)
977 track = &swapdev_vp->v_track_read;
979 track = &swapdev_vp->v_track_write;
983 * Only supported commands
985 if (bp->b_cmd != BUF_CMD_FREEBLKS &&
986 bp->b_cmd != BUF_CMD_READ &&
987 bp->b_cmd != BUF_CMD_WRITE) {
988 bp->b_error = EINVAL;
989 bp->b_flags |= B_ERROR | B_INVAL;
995 * bcount must be an integral number of pages.
997 if (bp->b_bcount & PAGE_MASK) {
998 bp->b_error = EINVAL;
999 bp->b_flags |= B_ERROR | B_INVAL;
1001 kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
1002 "not page bounded\n",
1003 bp, (long long)bio->bio_offset, (int)bp->b_bcount);
1008 * Clear error indication, initialize page index, count, data pointer.
1011 bp->b_flags &= ~B_ERROR;
1012 bp->b_resid = bp->b_bcount;
1014 start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
1015 count = howmany(bp->b_bcount, PAGE_SIZE);
1018 * WARNING! Do not dereference *data without issuing a bkvasync()
1023 * Deal with BUF_CMD_FREEBLKS
1025 if (bp->b_cmd == BUF_CMD_FREEBLKS) {
1027 * FREE PAGE(s) - destroy underlying swap that is no longer
1030 vm_object_hold(object);
1031 swp_pager_meta_free(object, start, count);
1032 vm_object_drop(object);
1039 * We need to be able to create a new cluster of I/O's. We cannot
1040 * use the caller fields of the passed bio so push a new one.
1042 * Because nbio is just a placeholder for the cluster links,
1043 * we can biodone() the original bio instead of nbio to make
1044 * things a bit more efficient.
1046 nbio = push_bio(bio);
1047 nbio->bio_offset = bio->bio_offset;
1048 nbio->bio_caller_info1.cluster_head = NULL;
1049 nbio->bio_caller_info2.cluster_tail = NULL;
1055 * Execute read or write
1057 vm_object_hold(object);
1063 * Obtain block. If block not found and writing, allocate a
1064 * new block and build it into the object.
1066 blk = swp_pager_meta_ctl(object, start, 0);
1067 if ((blk == SWAPBLK_NONE) && bp->b_cmd == BUF_CMD_WRITE) {
1068 blk = swp_pager_getswapspace(object, 1);
1069 if (blk == SWAPBLK_NONE) {
1070 bp->b_error = ENOMEM;
1071 bp->b_flags |= B_ERROR;
1074 swp_pager_meta_build(object, start, blk);
1078 * Do we have to flush our current collection? Yes if:
1080 * - no swap block at this index
1081 * - swap block is not contiguous
1082 * - we cross a physical disk boundary in the
1086 (biox_blkno + btoc(bufx->b_bcount) != blk ||
1087 ((biox_blkno ^ blk) & ~SWB_DMMASK))) {
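/*
 * Example of the boundary test above, assuming a hypothetical 32-page
 * stripe (SWB_DMMASK = 31): biox_blkno = 30 and blk = 33 give
 * 30 ^ 33 = 0x3f, and 0x3f & ~31 = 0x20 != 0, so the two blocks lie in
 * different device stripes and the cluster collected so far must be
 * flushed first.
 */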
1090 ++mycpu->gd_cnt.v_swapin;
1091 mycpu->gd_cnt.v_swappgsin +=
1092 btoc(bufx->b_bcount);
1095 ++mycpu->gd_cnt.v_swapout;
1096 mycpu->gd_cnt.v_swappgsout +=
1097 btoc(bufx->b_bcount);
1098 bufx->b_dirtyend = bufx->b_bcount;
1106 * Finished with this buf.
1108 KKASSERT(bufx->b_bcount != 0);
1109 if (bufx->b_cmd != BUF_CMD_READ)
1110 bufx->b_dirtyend = bufx->b_bcount;
1116 * Add new swapblk to biox, instantiating biox if necessary.
1117 * Zero-fill reads are able to take a shortcut.
1119 if (blk == SWAPBLK_NONE) {
1121 * We can only get here if we are reading.
1124 bzero(data, PAGE_SIZE);
1125 bp->b_resid -= PAGE_SIZE;
1128 /* XXX chain count > 4, wait to <= 4 */
1130 bufx = getpbuf(NULL);
1131 bufx->b_flags |= B_KVABIO;
1132 biox = &bufx->b_bio1;
1133 cluster_append(nbio, bufx);
1134 bufx->b_cmd = bp->b_cmd;
1135 biox->bio_done = swap_chain_iodone;
1136 biox->bio_offset = (off_t)blk << PAGE_SHIFT;
1137 biox->bio_caller_info1.cluster_parent = nbio;
1140 bufx->b_data = data;
1142 bufx->b_bcount += PAGE_SIZE;
1149 vm_object_drop(object);
1152 * Flush out last buffer
1155 if (bufx->b_cmd == BUF_CMD_READ) {
1156 ++mycpu->gd_cnt.v_swapin;
1157 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
1159 ++mycpu->gd_cnt.v_swapout;
1160 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
1161 bufx->b_dirtyend = bufx->b_bcount;
1163 KKASSERT(bufx->b_bcount);
1164 if (bufx->b_cmd != BUF_CMD_READ)
1165 bufx->b_dirtyend = bufx->b_bcount;
1166 /* biox, bufx = NULL */
1170 * Now initiate all the I/O. Be careful looping on our chain as
1171 * I/O's may complete while we are still initiating them.
1173 * If the request is a 100% sparse read no bios will be present
1174 * and we just biodone() the buffer.
1176 nbio->bio_caller_info2.cluster_tail = NULL;
1177 bufx = nbio->bio_caller_info1.cluster_head;
1181 biox = &bufx->b_bio1;
1183 bufx = bufx->b_cluster_next;
1184 vn_strategy(swapdev_vp, biox);
1191 * Completion of the cluster will also call biodone_chain(nbio).
1192 * We never call biodone(nbio) so we don't have to worry about
1193 * setting up a bio_done callback. It's handled in the sub-IO.
1204 swap_chain_iodone(struct bio *biox)
1207 struct buf *bufx; /* chained sub-buffer */
1208 struct bio *nbio; /* parent nbio with chain glue */
1209 struct buf *bp; /* original bp associated with nbio */
1212 bufx = biox->bio_buf;
1213 nbio = biox->bio_caller_info1.cluster_parent;
1217 * Update the original buffer
1219 KKASSERT(bp != NULL);
1220 if (bufx->b_flags & B_ERROR) {
1221 atomic_set_int(&bufx->b_flags, B_ERROR);
1222 bp->b_error = bufx->b_error; /* race ok */
1223 } else if (bufx->b_resid != 0) {
1224 atomic_set_int(&bufx->b_flags, B_ERROR);
1225 bp->b_error = EINVAL; /* race ok */
1227 atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
1231 * Remove us from the chain.
1233 spin_lock(&swapbp_spin);
1234 nextp = &nbio->bio_caller_info1.cluster_head;
1235 while (*nextp != bufx) {
1236 KKASSERT(*nextp != NULL);
1237 nextp = &(*nextp)->b_cluster_next;
1239 *nextp = bufx->b_cluster_next;
1240 chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
1241 spin_unlock(&swapbp_spin);
1244 * Clean up bufx. If the chain is now empty we finish out
1245 * the parent. Note that we may be racing other completions
1246 * so we must use the chain_empty status from above.
1249 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
1250 atomic_set_int(&bp->b_flags, B_ERROR);
1251 bp->b_error = EINVAL;
1253 biodone_chain(nbio);
1255 relpbuf(bufx, NULL);
1259 * SWAP_PAGER_GETPAGES() - bring page in from swap
1261 * The requested page may have to be brought in from swap. Calculate the
1262 * swap block and bring in additional pages if possible. All pages must
1263 * have contiguous swap block assignments and reside in the same object.
1265 * The caller has a single vm_object_pip_add() reference prior to
1266 * calling us and we should return with the same.
1268 * The caller has BUSY'd the page. We should return with (*mpp) left busy,
1269 * and any additional pages unbusied.
1271 * If the caller encounters a PG_RAM page it will pass it to us even though
1272 * it may be valid and dirty. We cannot overwrite the page in this case!
1273 * The case is used to allow us to issue pure read-aheads.
1275 * NOTE! XXX This code does not entirely pipeline yet due to the fact that
1276 * the PG_RAM page is validated at the same time as mreq. What we
1277 * really need to do is issue a separate read-ahead pbuf.
1282 swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
1294 u_int32_t busy_count;
1295 vm_page_t marray[XIO_INTERNAL_PAGES];
1299 vm_object_hold(object);
1300 if (mreq->object != object) {
1301 panic("swap_pager_getpages: object mismatch %p/%p",
1308 * We don't want to overwrite a fully valid page as it might be
1309 * dirty. This case can occur when e.g. vm_fault hits a perfectly
1310 * valid page with PG_RAM set.
1312 * In this case we see if the next page is a suitable page-in
1313 * candidate and if it is we issue read-ahead. PG_RAM will be
1314 * set on the last page of the read-ahead to continue the pipeline.
1316 if (mreq->valid == VM_PAGE_BITS_ALL) {
1317 if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
1318 vm_object_drop(object);
1319 return(VM_PAGER_OK);
1321 blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
1322 if (blk == SWAPBLK_NONE) {
1323 vm_object_drop(object);
1324 return(VM_PAGER_OK);
1326 m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
1329 vm_object_drop(object);
1330 return(VM_PAGER_OK);
1331 } else if (m == NULL) {
1333 * Use VM_ALLOC_QUICK to avoid blocking on cache
1336 m = vm_page_alloc(object, mreq->pindex + 1,
1339 vm_object_drop(object);
1340 return(VM_PAGER_OK);
1345 vm_object_drop(object);
1346 return(VM_PAGER_OK);
1348 vm_page_unqueue_nowakeup(m);
1358 * Try to block-read contiguous pages from swap if sequential,
1359 * otherwise just read one page. Contiguous pages from swap must
1360 * reside within a single device stripe because the I/O cannot be
1361 * broken up across multiple stripes.
1363 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
1364 * set up such that the case(s) are handled implicitly.
1366 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1369 for (i = 1; i <= swap_burst_read &&
1370 i < XIO_INTERNAL_PAGES &&
1371 mreq->pindex + i < object->size; ++i) {
1374 iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
1375 if (iblk != blk + i)
1377 if ((blk ^ iblk) & ~SWB_DMMASK)
1379 m = vm_page_lookup_busy_try(object, mreq->pindex + i,
1383 } else if (m == NULL) {
1385 * Use VM_ALLOC_QUICK to avoid blocking on cache
1388 m = vm_page_alloc(object, mreq->pindex + i,
1397 vm_page_unqueue_nowakeup(m);
1403 vm_page_flag_set(marray[i - 1], PG_RAM);
1406 * If mreq is the requested page and we have nothing to do return
1407 * VM_PAGER_FAIL. If raonly is set mreq is just another read-ahead
1408 * page and must be cleaned up.
1410 if (blk == SWAPBLK_NONE) {
1413 vnode_pager_freepage(mreq);
1414 vm_object_drop(object);
1415 return(VM_PAGER_OK);
1417 vm_object_drop(object);
1418 return(VM_PAGER_FAIL);
1423 * Map our page(s) into kva for input
1425 * Use the KVABIO API to avoid synchronizing the pmap.
1427 bp = getpbuf_kva(&nsw_rcount);
1429 kva = (vm_offset_t) bp->b_kvabase;
1430 bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
1431 pmap_qenter_noinval(kva, bp->b_xio.xio_pages, i);
1433 bp->b_data = (caddr_t)kva;
1434 bp->b_bcount = PAGE_SIZE * i;
1435 bp->b_xio.xio_npages = i;
1436 bp->b_flags |= B_KVABIO;
1437 bio->bio_done = swp_pager_async_iodone;
1438 bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1439 bio->bio_caller_info1.index = SWBIO_READ;
1442 * Set index. If raonly set the index beyond the array so all
1443 * the pages are treated the same, otherwise the original mreq is
1447 bio->bio_driver_info = (void *)(intptr_t)i;
1449 bio->bio_driver_info = (void *)(intptr_t)0;
1451 for (j = 0; j < i; ++j) {
1452 atomic_set_int(&bp->b_xio.xio_pages[j]->busy_count,
1456 mycpu->gd_cnt.v_swapin++;
1457 mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
1460 * We still hold the lock on mreq, and our automatic completion routine
1461 * does not remove it.
1463 vm_object_pip_add(object, bp->b_xio.xio_npages);
1466 * perform the I/O. NOTE!!! bp cannot be considered valid after
1467 * this point because we automatically release it on completion.
1468 * Instead, we look at the one page we are interested in which we
1469 * still hold a lock on even through the I/O completion.
1471 * The other pages in our m[] array are also released on completion,
1472 * so we cannot assume they are valid anymore either.
1474 bp->b_cmd = BUF_CMD_READ;
1476 vn_strategy(swapdev_vp, bio);
1479 * Wait for the page we want to complete. PBUSY_SWAPINPROG is always
1480 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1481 * is set in the meta-data.
1483 * If this is a read-ahead only we return immediately without
1487 vm_object_drop(object);
1488 return(VM_PAGER_OK);
1492 * Read-ahead includes originally requested page case.
1495 busy_count = mreq->busy_count;
1497 if ((busy_count & PBUSY_SWAPINPROG) == 0)
1499 tsleep_interlock(mreq, 0);
1500 if (!atomic_cmpset_int(&mreq->busy_count, busy_count,
1502 PBUSY_SWAPINPROG | PBUSY_WANTED)) {
1505 atomic_set_int(&mreq->flags, PG_REFERENCED);
1506 mycpu->gd_cnt.v_intrans++;
1507 if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) {
1509 "swap_pager: indefinite wait buffer: "
1510 " bp %p offset: %lld, size: %ld\n",
1512 (long long)bio->bio_offset,
1519 * Disallow speculative reads prior to the SWAPINPROG test.
1524 * mreq is left busied after completion, but all the other pages
1525 * are freed. If we had an unrecoverable read error the page will
1528 vm_object_drop(object);
1529 if (mreq->valid != VM_PAGE_BITS_ALL)
1530 return(VM_PAGER_ERROR);
1532 return(VM_PAGER_OK);
1535 * A final note: in a low swap situation, we cannot deallocate swap
1536 * and mark a page dirty here because the caller is likely to mark
1537 * the page clean when we return, causing the page to possibly revert
1538 * to all-zeros later.
1543 * swap_pager_putpages:
1545 * Assign swap (if necessary) and initiate I/O on the specified pages.
1547 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1548 * are automatically converted to SWAP objects.
1550 * In a low memory situation we may block in vn_strategy(), but the new
1551 * vm_page reservation system coupled with properly written VFS devices
1552 * should ensure that no low-memory deadlock occurs. This is an area
1555 * The parent has N vm_object_pip_add() references prior to
1556 * calling us and will remove references for rtvals[] that are
1557 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1560 * The parent has soft-busy'd the pages it passes us and will unbusy
1561 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1562 * We need to unbusy the rest on I/O completion.
1567 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1568 int flags, int *rtvals)
1573 vm_object_hold(object);
1575 if (count && m[0]->object != object) {
1576 panic("swap_pager_putpages: object mismatch %p/%p",
1585 * Turn object into OBJT_SWAP
1586 * Check for bogus sysops
1588 * Force sync if not pageout process, we don't want any single
1589 * non-pageout process to be able to hog the I/O subsystem! This
1590 * can be overridden by setting vm.swap_user_async.
1592 if (object->type == OBJT_DEFAULT) {
1593 if (object->type == OBJT_DEFAULT)
1594 swp_pager_meta_convert(object);
1598 * Normally we force synchronous swap I/O if this is not the
1599 * pageout daemon to prevent any single user process limited
1600 * via RLIMIT_RSS from hogging swap write bandwidth.
1602 if (curthread != pagethread &&
1603 curthread != emergpager &&
1604 swap_user_async == 0) {
1605 flags |= VM_PAGER_PUT_SYNC;
1611 * Update nsw parameters from swap_async_max sysctl values.
1612 * Do not let the sysop crash the machine with bogus numbers.
1614 if (swap_async_max != nsw_wcount_async_max) {
1620 if ((n = swap_async_max) > nswbuf_kva / 2)
1627 * Adjust difference ( if possible ). If the current async
1628 * count is too low, we may not be able to make the adjustment
1631 * vm_token needed for nsw_wcount sleep interlock
1633 lwkt_gettoken(&vm_token);
1634 n -= nsw_wcount_async_max;
1635 if (nsw_wcount_async + n >= 0) {
1636 nsw_wcount_async_max += n;
1637 pbuf_adjcount(&nsw_wcount_async, n);
1639 lwkt_reltoken(&vm_token);
1645 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1646 * The page is left dirty until the pageout operation completes
1650 for (i = 0; i < count; i += n) {
1657 * Maximum I/O size is limited by a number of factors.
1660 n = min(BLIST_MAX_ALLOC, count - i);
1661 n = min(n, nsw_cluster_max);
1663 lwkt_gettoken(&vm_token);
1666 * Get biggest block of swap we can. If we fail, fall
1667 * back and try to allocate a smaller block. Don't go
1668 * overboard trying to allocate space if it would overly
1672 (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE &&
1677 if (blk == SWAPBLK_NONE) {
1678 for (j = 0; j < n; ++j)
1679 rtvals[i+j] = VM_PAGER_FAIL;
1680 lwkt_reltoken(&vm_token);
1683 if (vm_report_swap_allocs > 0) {
1684 kprintf("swap_alloc %08jx,%d\n", (intmax_t)blk, n);
1685 --vm_report_swap_allocs;
1689 * The I/O we are constructing cannot cross a physical
1690 * disk boundary in the swap stripe.
1692 if ((blk ^ (blk + n)) & ~SWB_DMMASK) {
1693 j = ((blk + SWB_DMMAX) & ~SWB_DMMASK) - blk;
1694 swp_pager_freeswapspace(object, blk + j, n - j);
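/*
 * Worked example of the truncation above, again assuming a hypothetical
 * 32-page stripe: with blk = 60 and n = 10 the run would cross the
 * boundary at 64, so j = ((60 + SWB_DMMAX) & ~SWB_DMMASK) - 60 =
 * 64 - 60 = 4; the trailing n - j = 6 blocks are returned to the bitmap
 * and only the first j blocks are used for this I/O pass.
 */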
1699 * All I/O parameters have been satisfied, build the I/O
1700 * request and assign the swap space.
1702 * Use the KVABIO API to avoid synchronizing the pmap.
1704 if ((flags & VM_PAGER_PUT_SYNC))
1705 bp = getpbuf_kva(&nsw_wcount_sync);
1707 bp = getpbuf_kva(&nsw_wcount_async);
1710 lwkt_reltoken(&vm_token);
1712 pmap_qenter_noinval((vm_offset_t)bp->b_data, &m[i], n);
1714 bp->b_flags |= B_KVABIO;
1715 bp->b_bcount = PAGE_SIZE * n;
1716 bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1718 for (j = 0; j < n; ++j) {
1719 vm_page_t mreq = m[i+j];
1721 swp_pager_meta_build(mreq->object, mreq->pindex,
1723 if (object->type == OBJT_SWAP)
1724 vm_page_dirty(mreq);
1725 rtvals[i+j] = VM_PAGER_OK;
1727 atomic_set_int(&mreq->busy_count, PBUSY_SWAPINPROG);
1728 bp->b_xio.xio_pages[j] = mreq;
1730 bp->b_xio.xio_npages = n;
1732 mycpu->gd_cnt.v_swapout++;
1733 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
1735 bp->b_dirtyoff = 0; /* req'd for NFS */
1736 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */
1737 bp->b_cmd = BUF_CMD_WRITE;
1738 bio->bio_caller_info1.index = SWBIO_WRITE;
1743 if ((flags & VM_PAGER_PUT_SYNC) == 0) {
1744 bio->bio_done = swp_pager_async_iodone;
1746 vn_strategy(swapdev_vp, bio);
1748 for (j = 0; j < n; ++j)
1749 rtvals[i+j] = VM_PAGER_PEND;
1754 * Issue synchronously.
1756 * Wait for the sync I/O to complete, then update rtvals.
1757 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1758 * our async completion routine at the end, thus avoiding a
1761 bio->bio_caller_info1.index |= SWBIO_SYNC;
1762 if (flags & VM_PAGER_TRY_TO_CACHE)
1763 bio->bio_caller_info1.index |= SWBIO_TTC;
1764 bio->bio_done = biodone_sync;
1765 bio->bio_flags |= BIO_SYNC;
1766 vn_strategy(swapdev_vp, bio);
1767 biowait(bio, "swwrt");
1769 for (j = 0; j < n; ++j)
1770 rtvals[i+j] = VM_PAGER_PEND;
1773 * Now that we are through with the bp, we can call the
1774 * normal async completion, which frees everything up.
1776 swp_pager_async_iodone(bio);
1778 vm_object_drop(object);
1784 * Recalculate the low and high-water marks.
1787 swap_pager_newswap(void)
1790 * NOTE: vm_swap_max cannot exceed 1 billion blocks, which is the
1791 * limitation imposed by the blist code. Remember that this
1792 * will be divided by NSWAP_MAX (4), so each swap device is
1793 * limited to around a terabyte.
1796 nswap_lowat = (int64_t)vm_swap_max * 4 / 100; /* 4% left */
1797 nswap_hiwat = (int64_t)vm_swap_max * 6 / 100; /* 6% left */
1798 kprintf("swap low/high-water marks set to %d/%d\n",
1799 nswap_lowat, nswap_hiwat);
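/*
 * Example: with vm_swap_max = 1000000 pages the marks come out to
 * nswap_lowat = 40000 and nswap_hiwat = 60000, i.e. the almost-full
 * warning trips when less than 4% of configured swap remains free and
 * clears only once more than 6% is free again.
 */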
1808 * swp_pager_async_iodone:
1810 * Completion routine for asynchronous reads and writes from/to swap.
1811 * Also called manually by synchronous code to finish up a bp.
1813 * For READ operations, the pages are BUSY'd. For WRITE operations,
1814 * the pages are soft-busied. For READ operations we un-BUSY
1815 * all pages except the 'main' request page. For WRITE
1816 * operations we un-soft-busy all pages ( we can do this
1817 * because we marked them all VM_PAGER_PEND on return from putpages ).
1819 * This routine may not block.
1824 swp_pager_async_iodone(struct bio *bio)
1826 struct buf *bp = bio->bio_buf;
1827 vm_object_t object = NULL;
1834 if (bp->b_flags & B_ERROR) {
1836 "swap_pager: I/O error - %s failed; offset %lld,"
1837 " size %ld, error %d\n",
1838 ((bio->bio_caller_info1.index & SWBIO_READ) ?
1839 "pagein" : "pageout"),
1840 (long long)bio->bio_offset,
1849 if (bp->b_xio.xio_npages)
1850 object = bp->b_xio.xio_pages[0]->object;
1853 /* PMAP TESTING CODE (useful, keep it in but #if 0'd) */
1854 if (bio->bio_caller_info1.index & SWBIO_WRITE) {
1855 if (bio->bio_crc != iscsi_crc32(bp->b_data, bp->b_bcount)) {
1856 kprintf("SWAPOUT: BADCRC %08x %08x\n",
1858 iscsi_crc32(bp->b_data, bp->b_bcount));
1859 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1860 vm_page_t m = bp->b_xio.xio_pages[i];
1861 if (m->flags & PG_WRITEABLE)
1863 "%d/%d %p writable\n",
1864 i, bp->b_xio.xio_npages, m);
1871 * remove the mapping for kernel virtual
1873 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
1876 * cleanup pages. If an error occurs writing to swap, we are in
1877 * very serious trouble. If it happens to be a disk error, though,
1878 * we may be able to recover by reassigning the swap later on. So
1879 * in this case we remove the m->swapblk assignment for the page
1880 * but do not free it in the rlist. The erroneous block(s) are thus
1881 * never reallocated as swap. Redirty the page and continue.
1883 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1884 vm_page_t m = bp->b_xio.xio_pages[i];
1886 if (bp->b_flags & B_ERROR) {
1888 * If an error occurs I'd love to throw the swapblk
1889 * away without freeing it back to swapspace, so it
1890 * can never be used again. But I can't from an
1894 if (bio->bio_caller_info1.index & SWBIO_READ) {
1896 * When reading, reqpage needs to stay
1897 * locked for the parent, but all other
1898 * pages can be freed. We still want to
1899 * wakeup the parent waiting on the page,
1900 * though. ( also: pg_reqpage can be -1 and
1901 * not match anything ).
1903 * We have to wake specifically requested pages
1904 * up too because we cleared SWAPINPROG and
1905 * someone may be waiting for that.
1907 * NOTE: For reads, m->dirty will probably
1908 * be overridden by the original caller
1909 * of getpages so don't play cute tricks
1912 * NOTE: We can't actually free the page from
1913 * here, because this is an interrupt.
1914 * It is not legal to mess with
1915 * object->memq from an interrupt.
1916 * Deactivate the page instead.
1918 * WARNING! The instant SWAPINPROG is
1919 * cleared another cpu may start
1920 * using the mreq page (it will
1921 * check m->valid immediately).
1925 atomic_clear_int(&m->busy_count,
1929 * bio_driver_info holds the requested page
1932 if (i != (int)(intptr_t)bio->bio_driver_info) {
1933 vm_page_deactivate(m);
1939 * If i == bp->b_pager.pg_reqpage, do not wake
1940 * the page up. The caller needs to.
1944 * If a write error occurs remove the swap
1945 * assignment (note that PG_SWAPPED may or
1946 * may not be set depending on prior activity).
1948 * Re-dirty OBJT_SWAP pages as there is no
1949 * other backing store, we can't throw the
1952 * Non-OBJT_SWAP pages (aka swapcache) must
1953 * not be dirtied since they may not have
1954 * been dirty in the first place, and they
1955 * do have backing store (the vnode).
1957 vm_page_busy_wait(m, FALSE, "swadpg");
1958 vm_object_hold(m->object);
1959 swp_pager_meta_ctl(m->object, m->pindex,
1961 vm_page_flag_clear(m, PG_SWAPPED);
1962 vm_object_drop(m->object);
1963 if (m->object->type == OBJT_SWAP) {
1965 vm_page_activate(m);
1967 vm_page_io_finish(m);
1968 atomic_clear_int(&m->busy_count,
1972 } else if (bio->bio_caller_info1.index & SWBIO_READ) {
1974 * NOTE: for reads, m->dirty will probably be
1975 * overridden by the original caller of getpages so
1976 * we cannot set them in order to free the underlying
1977 * swap in a low-swap situation. I don't think we'd
1978 * want to do that anyway, but it was an optimization
1979 * that existed in the old swapper for a time before
1980 * it got ripped out due to precisely this problem.
1982 * If not the requested page then deactivate it.
1984 * Note that the requested page, reqpage, is left
1985 * busied, but we still have to wake it up. The
1986 * other pages are released (unbusied) by
1987 * vm_page_wakeup(). We do not set reqpage's
1988 * valid bits here, it is up to the caller.
1992 * NOTE: Can't call pmap_clear_modify(m) from an
1993 * interrupt thread, the pmap code may have to
1994 * map non-kernel pmaps and currently asserts
1997 * WARNING! The instant SWAPINPROG is
1998 * cleared another cpu may start
1999 * using the mreq page (it will
2000 * check m->valid immediately).
2002 /*pmap_clear_modify(m);*/
2003 m->valid = VM_PAGE_BITS_ALL;
2005 vm_page_flag_set(m, PG_SWAPPED);
2006 atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG);
2009 * We have to wake specifically requested pages
2010 * up too because we cleared SWAPINPROG and
2011 * could be waiting for it in getpages. However,
2012 * be sure to not unbusy getpages specifically
2013 * requested page - getpages expects it to be
2016 * bio_driver_info holds the requested page
2018 if (i != (int)(intptr_t)bio->bio_driver_info) {
2019 vm_page_deactivate(m);
2026 * Mark the page clean but do not mess with the
2027 * pmap-layer's modified state. That state should
2028 * also be clear since the caller protected the
2029 * page VM_PROT_READ, but allow the case.
2031 * We are in an interrupt, avoid pmap operations.
2033 * If we have a severe page deficit, deactivate the
2034 * page. Do not try to cache it (which would also
2035 * involve a pmap op), because the page might still
2038 * When using the swap to cache clean vnode pages
2039 * we do not mess with the page dirty bits.
2041 * NOTE! Nobody is waiting for the key mreq page
2042 * on write completion.
2044 vm_page_busy_wait(m, FALSE, "swadpg");
2045 if (m->object->type == OBJT_SWAP)
2047 vm_page_flag_set(m, PG_SWAPPED);
2048 atomic_clear_int(&m->busy_count, PBUSY_SWAPINPROG);
2049 if (vm_page_count_severe())
2050 vm_page_deactivate(m);
2051 vm_page_io_finish(m);
2052 if (bio->bio_caller_info1.index & SWBIO_TTC)
2053 vm_page_try_to_cache(m);
2060 * adjust pip. NOTE: the original parent may still have its own
2061 * pip refs on the object.
2065 vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);
2068 * Release the physical I/O buffer.
2070 * NOTE: Due to synchronous operations in the write case b_cmd may
2071 * already be set to BUF_CMD_DONE and BIO_SYNC may have already
2074 * Use vm_token to interlock nsw_rcount/wcount wakeup?
2076 lwkt_gettoken(&vm_token);
2077 if (bio->bio_caller_info1.index & SWBIO_READ)
2078 nswptr = &nsw_rcount;
2079 else if (bio->bio_caller_info1.index & SWBIO_SYNC)
2080 nswptr = &nsw_wcount_sync;
2082 nswptr = &nsw_wcount_async;
2083 bp->b_cmd = BUF_CMD_DONE;
2084 relpbuf(bp, nswptr);
2085 lwkt_reltoken(&vm_token);
2089 * Fault-in a potentially swapped page and remove the swap reference.
2090 * (used by swapoff code)
2092 * object must be held.
2094 static __inline void
2095 swp_pager_fault_page(vm_object_t object, int *sharedp, vm_pindex_t pindex)
2101 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
2103 if (object->type == OBJT_VNODE) {
2105 * Any swap related to a vnode is due to swapcache. We must
2106 * vget() the vnode in case it is not active (otherwise
2107 * vref() will panic). Calling vm_object_page_remove() will
2108 * ensure that any swap ref is removed interlocked with the
2109 * page. clean_only is set to TRUE so we don't throw away
2112 vp = object->handle;
2113 error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
2115 vm_object_page_remove(object, pindex, pindex + 1, TRUE);
2120 * Otherwise it is a normal OBJT_SWAP object and we can
2121 * fault the page in and remove the swap.
2123 m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
2125 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
2133 * This removes all swap blocks related to a particular device. We have
2134 * to be careful of ripups during the scan.
2136 static int swp_pager_swapoff_callback(struct swblock *swap, void *data);
2139 swap_pager_swapoff(int devidx)
2141 struct vm_object_hash *hash;
2142 struct swswapoffinfo info;
2143 struct vm_object marker;
2147 bzero(&marker, sizeof(marker));
2148 marker.type = OBJT_MARKER;
2150 for (n = 0; n < VMOBJ_HSIZE; ++n) {
2151 hash = &vm_object_hash[n];
2153 lwkt_gettoken(&hash->token);
2154 TAILQ_INSERT_HEAD(&hash->list, &marker, object_list);
2156 while ((object = TAILQ_NEXT(&marker, object_list)) != NULL) {
2157 if (object->type == OBJT_MARKER)
2159 if (object->type != OBJT_SWAP &&
2160 object->type != OBJT_VNODE)
2162 vm_object_hold(object);
2163 if (object->type != OBJT_SWAP &&
2164 object->type != OBJT_VNODE) {
2165 vm_object_drop(object);
2170 * Object is special in that we can't just pagein
2171 * into vm_page's in it (tmpfs, vn).
2173 if ((object->flags & OBJ_NOPAGEIN) &&
2174 RB_ROOT(&object->swblock_root)) {
2175 vm_object_drop(object);
2179 info.object = object;
2181 info.devidx = devidx;
2182 swblock_rb_tree_RB_SCAN(&object->swblock_root,
2183 NULL, swp_pager_swapoff_callback,
2185 vm_object_drop(object);
2187 if (object == TAILQ_NEXT(&marker, object_list)) {
2188 TAILQ_REMOVE(&hash->list, &marker, object_list);
2189 TAILQ_INSERT_AFTER(&hash->list, object,
2190 &marker, object_list);
2193 TAILQ_REMOVE(&hash->list, &marker, object_list);
2194 lwkt_reltoken(&hash->token);
2198 * If we fail to locate all swblocks we just fail gracefully and
2199 * do not bother to restore paging on the swap device. If the
2200 * user wants to retry, the swapoff can simply be rerun.
2202 if (swdevt[devidx].sw_nused)
static int
swp_pager_swapoff_callback(struct swblock *swap, void *data)
{
	struct swswapoffinfo *info = data;
	vm_object_t object = info->object;
	vm_pindex_t index;
	swblk_t v;
	int i;

	index = swap->swb_index;
	for (i = 0; i < SWAP_META_PAGES; ++i) {
		/*
		 * Make sure we don't race a dying object.  This will
		 * kill the scan of the object's swap blocks entirely.
		 */
		if (object->flags & OBJ_DEAD)
			break;

		/*
		 * Fault the page, which can obviously block.  If the swap
		 * structure disappears break out.
		 */
		v = swap->swb_pages[i];
		if (v != SWAPBLK_NONE && BLK2DEVIDX(v) == info->devidx) {
			swp_pager_fault_page(object, &info->shared,
					     swap->swb_index + i);
			/* swap ptr might go away */
			if (RB_LOOKUP(swblock_rb_tree,
				      &object->swblock_root, index) != swap) {
				break;
			}
		}
	}
	return(0);
}

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	object.
 *
 *	Swap metadata is kept per-object in a red-black tree of swblock
 *	structures, each covering SWAP_META_PAGES consecutive page
 *	indices; the object itself only carries the matching tracking
 *	counter (swblock_count).
 */
/*
 * Lookup the swblock containing the specified swap block index.
 *
 * The caller must hold the object.
 */
static struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	index &= ~(vm_pindex_t)SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}

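/*
 * Illustrative sketch (not compiled): how a page index splits into the
 * swblock base index and the slot within swb_pages[].  The helper name
 * and locals below are hypothetical; only swp_pager_lookup(), struct
 * swblock and SWAP_META_MASK come from this file.
 */
#if 0
static swblk_t
example_swblk_for_pindex(vm_object_t object, vm_pindex_t pindex)
{
	struct swblock *swap;

	/* swp_pager_lookup() masks pindex down to the swblock base index */
	swap = swp_pager_lookup(object, pindex);
	if (swap == NULL)
		return (SWAPBLK_NONE);

	/* the low bits select the slot inside the swblock */
	return (swap->swb_pages[pindex & SWAP_META_MASK]);
}
#endif
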
/*
 * Remove a swblock from the RB tree.
 *
 * The caller must hold the object.
 */
static void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}

/*
 * Convert default object to swap object if necessary
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}

/*
 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	The caller must hold the object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;
	swblk_t v;

	KKASSERT(swapblk != SWAPBLK_NONE);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate swblock.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */
retry:
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata.
	 *
	 * NOTE: Decrement swb_count after the freeing operation (which
	 *	 might block) to prevent racing destruction of the swblock.
	 */
	index &= SWAP_META_MASK;

	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
		swap->swb_pages[index] = SWAPBLK_NONE;
		/* can block */
		swp_pager_freeswapspace(object, v, 1);
		--swap->swb_count;
		--mycpu->gd_vmtotal.t_vm;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE) {
		++swap->swb_count;
		++mycpu->gd_vmtotal.t_vm;
	}
}

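/*
 * Illustrative sketch (not compiled): recording a swap assignment for a
 * page index.  The surrounding function, its name and the assumption that
 * 'blk' was already reserved from the swap bitmap are hypothetical; only
 * swp_pager_meta_build() and swp_pager_meta_free() come from this file.
 */
#if 0
static void
example_assign_swap(vm_object_t object, vm_pindex_t pindex, swblk_t blk)
{
	/*
	 * Record the assignment.  Any block previously assigned to this
	 * page index is released automatically and the object is converted
	 * from OBJT_DEFAULT to OBJT_SWAP if necessary.
	 */
	swp_pager_meta_build(object, pindex, blk);

	/* dropping the backing store later returns the block to the bitmap */
	swp_pager_meta_free(object, pindex, 1);
}
#endif
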
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	The caller must hold the object.
 */
static int swp_pager_meta_free_callback(struct swblock *swb, void *data);

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
{
	struct swfreeinfo info;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Nothing to do
	 */
	if (object->swblock_count == 0) {
		KKASSERT(RB_EMPTY(&object->swblock_root));
		return;
	}
	if (count == 0)
		return;

	/*
	 * Setup for RB tree scan.  Note that the pindex range can be huge
	 * due to the 64 bit page index space so we cannot safely iterate.
	 */
	info.object = object;
	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
	info.begi = index;
	info.endi = index + count - 1;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				swp_pager_meta_free_callback, &info);
}

/*
 * The caller must hold the object.
 */
static int
swp_pager_meta_free_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int index;
	int eindex;

	/*
	 * Figure out the range within the swblock.  The wider scan may
	 * return edge-case swap blocks when the start and/or end points
	 * are in the middle of a block.
	 */
	if (swap->swb_index < info->begi)
		index = (int)info->begi & SWAP_META_MASK;
	else
		index = 0;

	if (swap->swb_index + SWAP_META_PAGES > info->endi)
		eindex = (int)info->endi & SWAP_META_MASK;
	else
		eindex = SWAP_META_MASK;

	/*
	 * Scan and free the blocks.  The loop terminates early
	 * if (swap) runs out of blocks and could be freed.
	 *
	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
	 *	 to deal with a zfree race.
	 */
	while (index <= eindex) {
		swblk_t v = swap->swb_pages[index];

		if (v != SWAPBLK_NONE) {
			swap->swb_pages[index] = SWAPBLK_NONE;
			/* can block */
			swp_pager_freeswapspace(object, v, 1);
			--mycpu->gd_vmtotal.t_vm;
			if (--swap->swb_count == 0) {
				swp_pager_remove(object, swap);
				zfree(swap_zone, swap);
				--object->swblock_count;
				break;
			}
		}
		++index;
	}

	/* swap may be invalid here due to zfree above */
	return(0);
}

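/*
 * Illustrative worked example (not from this file), assuming
 * SWAP_META_PAGES is 16 and SWAP_META_MASK is 15: freeing begi = 10,
 * endi = 29 spans two swblocks.  The callback above clips the slot range
 * accordingly: for the swblock with swb_index 0 it scans slots 10..15,
 * and for the swblock with swb_index 16 it scans slots 0..13.
 */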
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	NOTE: Decrement swb_count after the freeing operation (which
 *	      might block) to prevent racing destruction of the swblock.
 *
 *	The caller must hold the object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			swblk_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				/* can block */
				swp_pager_freeswapspace(object, v, 1);
				--swap->swb_count;
				--mycpu->gd_vmtotal.t_vm;
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
	}
	KKASSERT(object->swblock_count == 0);
}

/*
 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 *
 *	The caller must hold the object.
 */
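/*
 * Illustrative sketch (not compiled) of the modes described above.  The
 * surrounding function and names are hypothetical; only
 * swp_pager_meta_ctl() and the SWM_* flags come from this file.
 */
#if 0
static void
example_meta_ctl_modes(vm_object_t object, vm_pindex_t pindex)
{
	swblk_t blk;

	/* plain lookup: the metadata is left untouched */
	blk = swp_pager_meta_ctl(object, pindex, 0);

	/* pop: the assignment is removed but the swap block is kept */
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);

	/*
	 * free: the assignment is removed, the block is returned to the
	 * swap bitmap, and SWAPBLK_NONE is returned.
	 */
	blk = swp_pager_meta_ctl(object, pindex, SWM_FREE);
}
#endif
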
static swblk_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	swblk_t r1;

	if (object->swblock_count == 0)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				--mycpu->gd_vmtotal.t_vm;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			/* swap ptr may be invalid */
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(object, r1, 1);
				r1 = SWAPBLK_NONE;
			}
		}
		/* swap ptr may be invalid */