/*
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on-the-fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on-the-fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.32 2008/07/01 02:02:56 dillon Exp $
 */
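/*
 * Quick sketch of the blist interface as consumed below (illustrative
 * only): blist_alloc() returns the starting block number of a contiguous
 * run of page-sized swap blocks, or SWAPBLK_NONE on failure, and
 * blist_free() returns the run to the bitmap.
 *
 *	daddr_t blk;
 *
 *	blk = blist_alloc(swapblist, npages);
 *	if (blk != SWAPBLK_NONE)
 *		blist_free(swapblist, blk, npages);
 *
 * swp_pager_getswapspace() and swp_pager_freeswapspace() below wrap these
 * two calls and keep vm_swap_size up to date.
 */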
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>
#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04
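/*
 * Usage note (illustrative): the SWBIO_ flags are OR'd together into
 * bio_caller_info1.index to tag each swap bio, e.g.
 *
 *	bio->bio_caller_info1.index = SWBIO_WRITE;	(async pageout)
 *	bio->bio_caller_info1.index |= SWBIO_SYNC;	(sync pageout)
 *
 * so that swp_pager_async_iodone() can tell which pbuf counter to
 * release.  See swap_pager_putpages() and swp_pager_async_iodone() below.
 */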
/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap_pager_alloc() interlock		*/

struct blist *swapblist;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static int swap_burst_read = 0;	/* allow burst reading */

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");

static vm_zone_t swap_zone;
/*
 * Red-Black tree for swblock entries
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	     vm_pindex_t, swb_index);

static int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
	if (swb1->swb_index < swb2->swb_index)
		return(-1);
	if (swb1->swb_index > swb2->swb_index)
		return(1);
	return(0);
}
/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
	swap_pager_alloc (void *handle, off_t size,
			  vm_prot_t prot, off_t offset);
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpage (vm_object_t, vm_page_t *, int);
static void	swap_pager_init (void);
static void	swap_pager_unswapped (vm_page_t);
static void	swap_pager_strategy (vm_object_t, struct bio *);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpage,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};
/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

static int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace (int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_convert (vm_object_t);
static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all (vm_object_t);
static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);
/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	Update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
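/*
 * Worked example with the defaults (illustrative): nswap_lowat = 128 and
 * nswap_hiwat = 512, so the "almost full" indication latches on when fewer
 * than 128 free swap pages remain and does not clear until more than 512
 * pages are free again.  The gap between the watermarks keeps the console
 * warning from flapping while pages are freed and reallocated near the
 * threshold.
 */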
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
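/*
 * Worked example (illustrative): with MAX_PAGEOUT_CLUSTER = 16 we get
 * SWB_NPAGES = 16 and dmmax = 32 pages (128KB with 4K pages), so
 * dmmax_mask = ~31.  Two block numbers a and b lie in the same device
 * stripe exactly when ((a ^ b) & dmmax_mask) == 0, which is the test the
 * strategy and getpage/putpages code below uses to keep a single I/O from
 * crossing a stripe boundary.
 */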
/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.   We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
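	/*
	 * Worked example (illustrative): with nswbuf = 256 pbufs the split
	 * above reserves 128 buffers for reads and 64 for synchronous
	 * writes, while asynchronous writes stay capped at 4 regardless of
	 * nswbuf, per the latency reasoning in the comment above.
	 */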
	/*
	 * The zone is dynamically allocated so generally size it to
	 * maxswzone (32MB to 512MB of KVM).  Set a minimum size based
	 * on physical memory of around 8x (each swblock can hold 16 pages).
	 *
	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
	 * has increased dramatically.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n < maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT,
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
}
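/*
 * Worked example of the sizing above (illustrative): with 1GB of RAM
 * (about 262144 4K pages) n starts at 131072 swblocks.  Each swblock
 * describes up to 16 pages of swap, so the zone can track roughly
 * 2M swap pages, i.e. about 8x physical memory worth of swap; the
 * maxswzone tunable can raise this further.
 */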
/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_convert().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.   We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */
static vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	KKASSERT(handle == NULL);
	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;
			swp_pager_meta_convert(object);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);
		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));
		swp_pager_meta_convert(object);
	}
	return (object);
}
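/*
 * Sizing note (illustrative): OFF_TO_IDX(offset + PAGE_MASK + size)
 * rounds the end of the requested byte range up to a whole page count.
 * For example, with 4K pages, offset 0 and size 10000 yields an object of
 * OFF_TO_IDX(10000 + 4095) = 3 pages.
 */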
/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	crit_enter();
	swp_pager_meta_free_all(object);
	crit_exit();
}
/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			kprintf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}
/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	crit_enter();
	swp_pager_meta_free(object, start, size);
	crit_exit();
}
/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	crit_enter();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					crit_exit();
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	crit_exit();
	return(0);
}
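/*
 * Usage sketch (illustrative, hypothetical caller): a facility that wants
 * guaranteed backing store for pages [0, 4) of an object could do
 *
 *	if (swap_pager_reserve(object, 0, 4) < 0)
 *		return (ENOMEM);
 *
 * On success all four page indices have swap assigned; on failure any
 * partial assignment has already been freed by the error path above.
 */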
/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	crit_enter();

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
				srcobject,
				i + offset,
				SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */
			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		swp_pager_meta_free_all(srcobject);
		srcobject->type = OBJT_DEFAULT;
	}
	crit_exit();
}
/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
		   int *after)
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */
	crit_enter();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		crit_exit();
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	crit_exit();
	return (TRUE);
}
/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */
static void
swap_pager_unswapped(vm_page_t m)
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */
static void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox;
	struct buf *bufx;
	struct bio_track *track;

	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
			"not page bounded\n",
			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	biox = NULL;
	bufx = NULL;
	/*
	 * Execute read or write
	 */
	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */
		if (
		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
		     ((biox_blkno ^ blk) & dmmax_mask)
		    )
		) {
			if (bp->b_cmd == BUF_CMD_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
			}

			/*
			 * Finished with this buf.
			 */
			KKASSERT(bufx->b_bcount != 0);
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			biox = NULL;
			bufx = NULL;
		}
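		/*
		 * Worked example of the flush test above (illustrative):
		 * with dmmax = 32, a cluster that began at block 30 and has
		 * accumulated 2 pages ends at block 32.  A next blk of 33
		 * is discontiguous (30 + 2 != 33), and even a contiguous
		 * next blk of 32 crosses the stripe because
		 * ((30 ^ 32) & ~31) != 0; either way the current cluster
		 * is closed out and a new one is started below.
		 */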
		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_flags |= (bp->b_flags & B_ORDERED);
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}

	/*
	 *  Flush out last buffer
	 */
	if (biox) {
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		KKASSERT(bufx->b_bcount);
		if (bufx->b_cmd != BUF_CMD_READ)
			bufx->b_dirtyend = bufx->b_bcount;
		/* biox, bufx = NULL */
	}

	/*
	 * Now initiate all the I/O.  Be careful looping on our chain as
	 * I/O's may complete while we are still initiating them.
	 */
	nbio->bio_caller_info2.cluster_tail = NULL;
	bufx = nbio->bio_caller_info1.cluster_head;

	while (bufx) {
		biox = &bufx->b_bio1;
		BUF_KERNPROC(bufx);
		bufx = bufx->b_cluster_next;
		vn_strategy(swapdev_vp, biox);
	}

	/*
	 * Completion of the cluster will also call biodone_chain(nbio).
	 * We never call biodone(nbio) so we don't have to worry about
	 * setting up a bio_done callback.  It's handled in the sub-IO.
	 */
}
static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */
	int chain_empty;

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = bufx->b_error;
	} else if (bufx->b_resid != 0) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = EINVAL;
	} else {
		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
	}

	/*
	 * Remove us from the chain.
	 */
	spin_lock_wr(&bp->b_lock.lk_spinlock);
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
	spin_unlock_wr(&bp->b_lock.lk_spinlock);

	/*
	 * Clean up bufx.  If the chain is now empty we finish out
	 * the parent.  Note that we may be racing other completions
	 * so we must use the chain_empty status from above.
	 */
	if (chain_empty) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			atomic_set_int(&bp->b_flags, B_ERROR);
			bp->b_error = EINVAL;
		}
		biodone_chain(nbio);
	}
	relpbuf(bufx, NULL);
}
/*
 * SWAP_PAGER_GETPAGES() - bring page in from swap
 *
 *	The requested page may have to be brought in from swap.  Calculate the
 *	swap block and bring in additional pages if possible.  All pages must
 *	have contiguous swap block assignments and reside in the same object.
 *
 *	The caller has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The caller has BUSY'd the page.  We should return with (*mpp) left busy,
 *	and any additional pages unbusied.
 *
 *	If the caller encounters a PG_RAM page it will pass it to us even though
 *	it may be valid and dirty.  We cannot overwrite the page in this case!
 *	The case is used to allow us to issue pure read-aheads.
 *
 *	NOTE! XXX This code does not entirely pipeline yet due to the fact that
 *	      the PG_RAM page is validated at the same time as mreq.  What we
 *	      really need to do is issue a separate read-ahead pbuf.
 */
static int
swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	vm_page_t m;
	vm_offset_t kva;
	daddr_t blk;
	int i;
	int j;
	int raonly;
	vm_page_t marray[XIO_INTERNAL_PAGES];

	mreq = *mpp;

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}

	/*
	 * We don't want to overwrite a fully valid page as it might be
	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
	 * valid page with PG_RAM set.
	 *
	 * In this case we see if the next page is a suitable page-in
	 * candidate and if it is we issue read-ahead.  PG_RAM will be
	 * set on the last page of the read-ahead to continue the pipeline.
	 */
	if (mreq->valid == VM_PAGE_BITS_ALL) {
		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size)
			return(VM_PAGER_OK);
		crit_enter();
		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
		if (blk == SWAPBLK_NONE) {
			crit_exit();
			return(VM_PAGER_OK);
		}
		m = vm_page_lookup(object, mreq->pindex + 1);
		if (m == NULL) {
			m = vm_page_alloc(object, mreq->pindex + 1,
					  VM_ALLOC_QUICK);
			if (m == NULL) {
				crit_exit();
				return(VM_PAGER_OK);
			}
		} else {
			if ((m->flags & PG_BUSY) || m->busy || m->valid) {
				crit_exit();
				return(VM_PAGER_OK);
			}
			vm_page_unqueue_nowakeup(m);
			vm_page_busy(m);
		}
		mreq = m;
		raonly = 1;
		crit_exit();
	} else {
		raonly = 0;
	}

	/*
	 * Try to block-read contiguous pages from swap if sequential,
	 * otherwise just read one page.  Contiguous pages from swap must
	 * reside within a single device stripe because the I/O cannot be
	 * broken up across multiple stripes.
	 *
	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
	 * set up such that the case(s) are handled implicitly.
	 */
	crit_enter();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
	marray[0] = mreq;

	for (i = 1; swap_burst_read &&
		    i < XIO_INTERNAL_PAGES &&
		    mreq->pindex + i < object->size; ++i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
		if (iblk != blk + i)
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
		m = vm_page_lookup(object, mreq->pindex + i);
		if (m == NULL) {
			m = vm_page_alloc(object, mreq->pindex + i,
					  VM_ALLOC_QUICK);
			if (m == NULL)
				break;
		} else {
			if ((m->flags & PG_BUSY) || m->busy || m->valid)
				break;
			vm_page_unqueue_nowakeup(m);
			vm_page_busy(m);
		}
		marray[i] = m;
	}
	if (i > 1)
		vm_page_flag_set(marray[i - 1], PG_RAM);
	crit_exit();

	/*
	 * If mreq is the requested page and we have nothing to do return
	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
	 * page and must be cleaned up.
	 */
	if (blk == SWAPBLK_NONE) {
		if (raonly) {
			vnode_pager_freepage(mreq);
			return(VM_PAGER_OK);
		} else {
			return(VM_PAGER_FAIL);
		}
	}

	/*
	 * map our page(s) into kva for input
	 */
	bp = getpbuf(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_kvabase;
	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
	pmap_qenter(kva, bp->b_xio.xio_pages, i);

	bp->b_data = (caddr_t)kva;
	bp->b_bcount = PAGE_SIZE * i;
	bp->b_xio.xio_npages = i;
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
	bio->bio_caller_info1.index = SWBIO_READ;

	/*
	 * Set index.  If raonly set the index beyond the array so all
	 * the pages are treated the same, otherwise the original mreq is
	 * at index 0.
	 */
	if (raonly)
		bio->bio_driver_info = (void *)(intptr_t)i;
	else
		bio->bio_driver_info = (void *)(intptr_t)0;

	for (j = 0; j < i; ++j)
		vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_xio.xio_npages);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */
	bp->b_cmd = BUF_CMD_READ;
	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * Wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 *
	 * If this is a read-ahead only we return immediately without
	 * waiting for I/O.
	 */
	if (raonly)
		return(VM_PAGER_OK);

	/*
	 * Read-ahead includes originally requested page case.
	 */
	crit_enter();
	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, 0, "swread", hz*20)) {
			kprintf(
			    "swap_pager: indefinite wait buffer: "
				" offset: %lld, size: %ld\n",
			    (long long)bio->bio_offset,
			    (long)bp->b_bcount
			);
		}
	}
	crit_exit();

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	if (mreq->valid != VM_PAGE_BITS_ALL)
		return(VM_PAGER_ERROR);
	else
		return(VM_PAGER_OK);

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}
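/*
 * Calling-convention sketch (illustrative, hypothetical caller): per the
 * comment above, the caller busies the page and holds a pip reference
 * before calling us, then interprets the return code:
 *
 *	vm_page_busy(m);
 *	vm_object_pip_add(object, 1);
 *	if (swap_pager_getpage(object, &m, 0) != VM_PAGER_OK)
 *		(handle VM_PAGER_FAIL / VM_PAGER_ERROR)
 *
 * On VM_PAGER_OK the requested page comes back busied and fully valid;
 * any read-ahead pages have already been unbusied by the completion
 * routine.
 */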
/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		    boolean_t sync, int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	if (curthread != pagethread)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		crit_enter();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		} else {
			nsw_wcount_async_max -= nsw_wcount_async;
			nsw_wcount_async = 0;
		}
		crit_exit();
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		daddr_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		crit_enter();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			crit_exit();
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}

		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
		if (sync == TRUE)
			bp = getpbuf(&nsw_wcount_sync);
		else
			bp = getpbuf(&nsw_wcount_async);
		bio = &bp->b_bio1;

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bio->bio_offset = (off_t)blk << PAGE_SHIFT;

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;

		crit_exit();

		bp->b_dirtyoff = 0;		/* req'd for NFS */
		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
		bp->b_cmd = BUF_CMD_WRITE;
		bio->bio_caller_info1.index = SWBIO_WRITE;

		/*
		 * asynchronous
		 */
		if (sync == FALSE) {
			bio->bio_done = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}

		/*
		 * Issue synchronously.
		 *
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bio->bio_caller_info1.index |= SWBIO_SYNC;
		bio->bio_done = biodone_sync;
		bio->bio_flags |= BIO_SYNC;
		vn_strategy(swapdev_vp, bio);
		biowait(bio, "swwrt");

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bio);
	}
}
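/*
 * rtvals sketch (illustrative): for an asynchronous call every page that
 * was handed to the device comes back VM_PAGER_PEND and is finished by
 * swp_pager_async_iodone(); pages that could not get swap come back
 * VM_PAGER_FAIL and remain soft-busied for the caller to unbusy and
 * retry later, per the contract described in the header comment above.
 */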
/*
 * Re-test the swap watermarks after swap configuration changes.
 */
void
swap_pager_newswap(void)
{
	swp_sizecheck();
}
/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld,"
			"size %ld, error %d\n",
		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
			"pagein" : "pageout"),
		    (long long)bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * set object, raise to splvm().
	 */
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
	crit_enter();

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];

		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */
			if (bio->bio_caller_info1.index & SWBIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * NOTE: We can't actually free the page from
				 * here, because this is an interrupt.  It
				 * is not legal to mess with object->memq
				 * from an interrupt.  Deactivate the page
				 * instead.
				 */
				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);
				vm_page_flag_clear(m, PG_SWAPINPROG);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)(intptr_t)bio->bio_driver_info) {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				} else {
					vm_page_flash(m);
				}
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_flag_clear(m, PG_SWAPINPROG);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			/*
			 * NOTE: can't call pmap_clear_modify(m) from an
			 * interrupt thread, the pmap code may have to map
			 * non-kernel pmaps and currently asserts the case.
			 */
			/*pmap_clear_modify(m);*/
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)(intptr_t)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * also be clear since the caller protected the
			 * page VM_PROT_READ, but allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 */
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_SWAPINPROG);
			vm_page_io_finish(m);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
#endif
		}
	}

	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */

	if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);

	/*
	 * Release the physical I/O buffer.
	 *
	 * NOTE: Due to synchronous operations in the write case b_cmd may
	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
	 *	 been cleared.
	 */
	if (bio->bio_caller_info1.index & SWBIO_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	crit_exit();
}
/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a per-object red-black tree of
 *	swblock structures rooted in the object (swblock_root), rather than
 *	the global hash of the old swapper.  The object also maintains a
 *	count of the swblocks it owns (swblock_count).
 */

/*
 * Lookup the swblock containing the specified swap block index.
 */
static struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	index &= ~SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}
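/*
 * Worked example (illustrative): with SWAP_META_PAGES = 16 (per the zone
 * sizing comment above) SWAP_META_MASK is 15, so a lookup of page index
 * 37 masks down to the swblock with base index 32; callers then address
 * slot (37 & SWAP_META_MASK) = 5 within that swblock's swb_pages[] array.
 */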
/*
 * Remove a swblock from the RB tree.
 */
static void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}

/*
 * Convert default object to swap object if necessary
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}

/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, daddr_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;

	KKASSERT(swapblk != SWAPBLK_NONE);

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate swblock.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the tree may have changed, retry.
	 */
retry:
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata
	 */

	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */

	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	struct swblock *swap;

	if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
		return;

	while (count > 0) {
		swap = swp_pager_lookup(object, index);
		if (swap) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
					SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			if (n > count)
				n = count;
			count -= n;
			index += n;
		}
	}
}

/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
		return;

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			daddr_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				--swap->swb_count;
				swp_pager_freeswapspace(v, 1);
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
	}
	KKASSERT(object->swblock_count == 0);
}

/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */
static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */
	if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;