/*
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.32 2008/07/01 02:02:56 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>
#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
#include "opt_swap.h"

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>
#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04
/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old swap system.
 */

extern int vm_swap_size;	/* number of free swap blocks, in pages */
int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis) */
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
static int sw_alloc_interlock;	/* swap pager allocation interlock	*/

struct blist *swapblist;
static struct swblock **swhash;
static int swhash_mask;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static int swap_burst_read = 0;	/* allow burst reading			*/
extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
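/*
 * Example: with NOBJLISTS == 8, a handle value of 0x1230 selects list
 * (0x1230 >> 4) & 7 == 3.  The >> 4 discards low bits that are almost
 * always zero because handles are pointers to aligned structures.
 */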
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc (void *handle, off_t size,
				  vm_prot_t prot, off_t offset);
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpage (vm_object_t, vm_page_t *, int);
static void	swap_pager_init (void);
static void	swap_pager_unswapped (vm_page_t);
static void	swap_pager_strategy (vm_object_t, struct bio *);
static void	swap_chain_iodone(struct bio *biox);
struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager	*/
	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpage,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage,	/* get backing store status for page	*/
	swap_pager_unswapped,	/* remove swap related to page		*/
	swap_pager_strategy	/* pager strategy call			*/
};
/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */

int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace (int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all (vm_object_t);
static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);
/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	Update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call.
 *	This routine may not block.
 *	This routine must be called at splvm().
 */
static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
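/*
 * Example: with the default nswap_lowat/hiwat of 128/512 pages, the
 * "out of swap space" indication latches on when free swap drops below
 * 128 pages and clears only after free swap climbs back above 512, so
 * the warning cannot flap while free swap hovers near one threshold.
 */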
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	int i;

	/*
	 * Initialize object lists
	 */
	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
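/*
 * Example: with MAX_PAGEOUT_CLUSTER at its default of 16, dmmax is 32
 * pages and dmmax_mask is ~31.  Two swap block numbers b1 and b2 then
 * lie in the same device stripe exactly when ((b1 ^ b2) & dmmax_mask)
 * is zero, which is the test the strategy and putpages code below uses.
 */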
/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
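	/*
	 * Example (assuming a typical nswbuf of 256 pbufs): the split
	 * above reserves 128 buffers for reads and 64 for synchronous
	 * writes, while asynchronous writes are throttled separately by
	 * the hardwired nsw_wcount_async limit of 4.
	 */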
	/*
	 * The zone is dynamically allocated so generally size it to
	 * maxswzone (32MB to 512MB of KVM).  Set a minimum size based
	 * on physical memory of around 8x (each swblock can hold 16 pages).
	 *
	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
	 * has increased dramatically.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n < maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT,
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;
	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 *	n:		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */
	for (n = 1; n < n2 / 8; n *= 2)
		;
	swhash = kmalloc(sizeof(struct swblock *) * n, M_VMPGDATA,
			 M_WAITOK | M_ZERO);
	swhash_mask = n - 1;
}
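/*
 * Example: if the zone ended up with n2 = 100000 swblock entries, the
 * sizing loop above doubles n until it reaches 16384, the first power
 * of 2 not below 100000/8 = 12500, giving 16384 hash buckets and a
 * swhash_mask of 0x3fff.
 */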
/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */
static vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
		}
		sw_alloc_interlock = 1;
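		/*
		 * Note on the interlock protocol above: a value of 1 means
		 * an allocation is in progress, and a waiter overwrites it
		 * with -1 before sleeping so the owner knows a wakeup() is
		 * required when it releases the lock below.
		 */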
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;

			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);
		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));

		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
	}

	return (object);
}
/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	crit_enter();
	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
	}
	crit_exit();

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	crit_enter();
	swp_pager_meta_free_all(object);
	crit_exit();
}
/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/
/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block.
 *	This routine must be called at splvm().
 */
static __inline daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			kprintf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}
/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block.
 *	This routine must be called at splvm().
 */
static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}
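#if 0
/*
 * Illustrative sketch only (not part of the original file, never
 * compiled): how the two bitmap helpers above pair up.  Assumes the
 * caller already holds splvm() as both routines require.
 */
static void
swp_pager_example_cycle(void)
{
	daddr_t blk;

	if ((blk = swp_pager_getswapspace(4)) != SWAPBLK_NONE) {
		/* ... hand blocks blk..blk+3 to an I/O, then release ... */
		swp_pager_freeswapspace(blk, 4);
	}
}
#endif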
/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	crit_enter();
	swp_pager_meta_free(object, start, size);
	crit_exit();
}
/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	crit_enter();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					crit_exit();
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	crit_exit();
	return(0);
}
/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl.
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well).
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	crit_enter();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */
			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
	crit_exit();
}
/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex,
		   int *before, int *after)
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */
	crit_enter();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		crit_exit();
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	crit_exit();
	return (TRUE);
}
/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block.
 *	This routine must be called at splvm().
 */
static void
swap_pager_unswapped(vm_page_t m)
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */
static void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox;
	struct buf *bufx;
	struct bio_track *track;

	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
			"not page bounded\n",
			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;
	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}
	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	biox = NULL;
	bufx = NULL;
	/*
	 * Execute read or write
	 */
	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}
		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */
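		/*
		 * Clarifying note on the stripe test below: XORing the
		 * first block of the pending cluster with the new block
		 * exposes any difference in the bits covered by
		 * dmmax_mask; a nonzero result means the two blocks live
		 * in different dmmax-sized stripes and the cluster must
		 * be flushed.
		 */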
		if (
		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
		     ((biox_blkno ^ blk) & dmmax_mask)
		    )
		) {
			if (bp->b_cmd == BUF_CMD_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
			}

			/*
			 * Finished with this buf.
			 */
			KKASSERT(bufx->b_bcount != 0);
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			biox = NULL;
			bufx = NULL;
		}
		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_flags |= (bp->b_flags & B_ORDERED);
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}
	/*
	 * Flush out last buffer
	 */
	if (biox) {
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		KKASSERT(bufx->b_bcount);
		if (bufx->b_cmd != BUF_CMD_READ)
			bufx->b_dirtyend = bufx->b_bcount;
		/* biox, bufx = NULL */
	}
	/*
	 * Now initiate all the I/O.  Be careful looping on our chain as
	 * I/O's may complete while we are still initiating them.
	 */
	nbio->bio_caller_info2.cluster_tail = NULL;
	bufx = nbio->bio_caller_info1.cluster_head;

	while (bufx) {
		biox = &bufx->b_bio1;
		bufx = bufx->b_cluster_next;
		vn_strategy(swapdev_vp, biox);
	}

	/*
	 * Completion of the cluster will also call biodone_chain(nbio).
	 * We never call biodone(nbio) so we don't have to worry about
	 * setting up a bio_done callback.  It's handled in the sub-IO.
	 */
}
static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */
	int chain_empty;

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;
	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = bufx->b_error;
	} else if (bufx->b_resid != 0) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = EINVAL;
	} else {
		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
	}
	/*
	 * Remove us from the chain.
	 */
	spin_lock_wr(&bp->b_lock.lk_spinlock);
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
	spin_unlock_wr(&bp->b_lock.lk_spinlock);
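	/*
	 * Clarifying note on the walk above: using a pointer-to-pointer
	 * makes unlinking bufx a single store through *nextp, with no
	 * special case needed for removing the list head.
	 */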
	/*
	 * Clean up bufx.  If the chain is now empty we finish out
	 * the parent.  Note that we may be racing other completions
	 * so we must use the chain_empty status from above.
	 */
	if (chain_empty) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			atomic_set_int(&bp->b_flags, B_ERROR);
			bp->b_error = EINVAL;
		}
		biodone_chain(nbio);
	}
	relpbuf(bufx, NULL);
}
/*
 * SWAP_PAGER_GETPAGES() - bring page in from swap
 *
 *	The requested page may have to be brought in from swap.  Calculate the
 *	swap block and bring in additional pages if possible.  All pages must
 *	have contiguous swap block assignments and reside in the same object.
 *
 *	The caller has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The caller has BUSY'd the page.  We should return with (*mpp) left busy,
 *	and any additional pages unbusied.
 *
 *	If the caller encounters a PG_RAM page it will pass it to us even though
 *	it may be valid and dirty.  We cannot overwrite the page in this case!
 *	The case is used to allow us to issue pure read-aheads.
 *
 *	NOTE! XXX This code does not entirely pipeline yet due to the fact that
 *	      the PG_RAM page is validated at the same time as mreq.  What we
 *	      really need to do is issue a separate read-ahead pbuf.
 */
static int
swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	vm_page_t m;
	vm_offset_t kva;
	daddr_t blk;
	int i;
	int j;
	int raonly;
	vm_page_t marray[XIO_INTERNAL_PAGES];

	mreq = *mpp;

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    mreq->object
		);
	}
	/*
	 * We don't want to overwrite a fully valid page as it might be
	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
	 * valid page with PG_RAM set.
	 *
	 * In this case we see if the next page is a suitable page-in
	 * candidate and if it is we issue read-ahead.  PG_RAM will be
	 * set on the last page of the read-ahead to continue the pipeline.
	 */
	if (mreq->valid == VM_PAGE_BITS_ALL) {
		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size)
			return(VM_PAGER_OK);
		crit_enter();
		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
		if (blk == SWAPBLK_NONE) {
			crit_exit();
			return(VM_PAGER_OK);
		}
		m = vm_page_lookup(object, mreq->pindex + 1);
		if (m == NULL) {
			m = vm_page_alloc(object, mreq->pindex + 1,
					  VM_ALLOC_NORMAL); /* alloc class assumed */
			if (m == NULL) {
				crit_exit();
				return(VM_PAGER_OK);
			}
		} else {
			if ((m->flags & PG_BUSY) || m->busy || m->valid) {
				crit_exit();
				return(VM_PAGER_OK);
			}
			vm_page_unqueue_nowakeup(m);
			vm_page_busy(m);
		}
		mreq = m;
		raonly = 1;
		crit_exit();
	} else {
		raonly = 0;
	}
	/*
	 * Try to block-read contiguous pages from swap if sequential,
	 * otherwise just read one page.  Contiguous pages from swap must
	 * reside within a single device stripe because the I/O cannot be
	 * broken up across multiple stripes.
	 *
	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
	 * set up such that the case(s) are handled implicitly.
	 */
	crit_enter();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
	marray[0] = mreq;

	for (i = 1; swap_burst_read &&
		    i < XIO_INTERNAL_PAGES &&
		    mreq->pindex + i < object->size; ++i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
		if (iblk != blk + i)
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
		m = vm_page_lookup(object, mreq->pindex + i);
		if (m == NULL) {
			m = vm_page_alloc(object, mreq->pindex + i,
					  VM_ALLOC_NORMAL); /* alloc class assumed */
			if (m == NULL)
				break;
		} else {
			if ((m->flags & PG_BUSY) || m->busy || m->valid)
				break;
			vm_page_unqueue_nowakeup(m);
			vm_page_busy(m);
		}
		marray[i] = m;
	}
	if (i > 1)
		vm_page_flag_set(marray[i - 1], PG_RAM);
	crit_exit();
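	/*
	 * Clarifying note: setting PG_RAM on the last page of the burst
	 * is what keeps the pipeline going; when the fault path later
	 * reaches that page, the PG_RAM handling above triggers the next
	 * read-ahead batch.
	 */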
	/*
	 * If mreq is the requested page and we have nothing to do return
	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
	 * page and must be cleaned up.
	 */
	if (blk == SWAPBLK_NONE) {
		KKASSERT(i == 1);
		if (raonly) {
			vnode_pager_freepage(mreq);
			return(VM_PAGER_OK);
		} else {
			return(VM_PAGER_FAIL);
		}
	}
	/*
	 * map our page(s) into kva for input
	 */
	bp = getpbuf(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_kvabase;
	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
	pmap_qenter(kva, bp->b_xio.xio_pages, i);

	bp->b_data = (caddr_t)kva;
	bp->b_bcount = PAGE_SIZE * i;
	bp->b_xio.xio_npages = i;
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
	bio->bio_caller_info1.index = SWBIO_READ;
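	/*
	 * Clarifying note: blk is a page-sized swap block index, so
	 * shifting it left by PAGE_SHIFT converts it to the byte offset
	 * on the swap device that the swapdev_vp strategy routine
	 * expects in bio_offset.
	 */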
	/*
	 * Set index.  If raonly set the index beyond the array so all
	 * the pages are treated the same, otherwise the original mreq is
	 * still busied.
	 */
	if (raonly)
		bio->bio_driver_info = (void *)(intptr_t)i;
	else
		bio->bio_driver_info = (void *)(intptr_t)0;

	for (j = 0; j < i; ++j)
		vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_xio.xio_npages);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */
	bp->b_cmd = BUF_CMD_READ;
	vn_strategy(swapdev_vp, bio);
	/*
	 * Wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 *
	 * If this is a read-ahead only we return immediately without
	 * waiting for I/O.
	 */
	if (raonly)
		return(VM_PAGER_OK);
	/*
	 * Read-ahead includes originally requested page case.
	 */
	crit_enter();
	while ((mreq->flags & PG_SWAPINPROG) != 0) {
		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, 0, "swread", hz*20)) {
			kprintf(
			    "swap_pager: indefinite wait buffer: "
			    " offset: %lld, size: %ld\n",
			    (long long)bio->bio_offset,
			    (long)bp->b_bcount
			);
		}
	}
	crit_exit();

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	if (mreq->valid != VM_PAGE_BITS_ALL)
		return(VM_PAGER_ERROR);
	else
		return(VM_PAGER_OK);
	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}
/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		    boolean_t sync, int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		    object,
		    m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type != OBJT_SWAP)
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);

	if (curthread != pagethread)
		sync = TRUE;
	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
	}
	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		daddr_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */
		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		crit_enter();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * delay the pageout.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			crit_exit();
			continue;
		}
		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}
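		/*
		 * Worked example: with dmmax == 32, blk == 28 and n == 8
		 * the run 28..35 crosses the stripe boundary at 32.
		 * j = ((28 + 32) & ~31) - 28 = 4, so blocks 32..35 are
		 * freed and the I/O is clipped to the 4 pages 28..31.
		 */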
		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
		if (sync == TRUE)
			bp = getpbuf(&nsw_wcount_sync);
		else
			bp = getpbuf(&nsw_wcount_async);
		bio = &bp->b_bio1;

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bio->bio_offset = (off_t)blk << PAGE_SHIFT;

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;

		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
		bp->b_dirtyoff = 0;		/* req'd for NFS */
		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
		bp->b_cmd = BUF_CMD_WRITE;
		bio->bio_caller_info1.index = SWBIO_WRITE;
		/*
		 * asynchronous
		 */
		if (sync == FALSE) {
			bio->bio_done = swp_pager_async_iodone;
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}
		/*
		 * Issue synchronously.
		 *
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bio->bio_caller_info1.index |= SWBIO_SYNC;
		bio->bio_done = biodone_sync;
		bio->bio_flags |= BIO_SYNC;
		vn_strategy(swapdev_vp, bio);
		biowait(bio, "swwrt");

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bio);
	}
}
void
swap_pager_newswap(void)
{
	swp_sizecheck();
}
/*
 * swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;

	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld,"
		    "size %ld, error %d\n",
		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
			"pagein" : "pageout"),
		    (long long)bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}
	/*
	 * set object, raise to splvm().
	 */
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
	crit_enter();

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];
		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */
			if (bio->bio_caller_info1.index & SWBIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * NOTE: We can't actually free the page from
				 * here, because this is an interrupt.  It
				 * is not legal to mess with object->memq
				 * from an interrupt.  Deactivate the page
				 * instead.
				 */
				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);
				vm_page_flag_clear(m, PG_SWAPINPROG);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)(intptr_t)bio->bio_driver_info) {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				} else {
					vm_page_flash(m);
				}
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_flag_clear(m, PG_SWAPINPROG);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 */

			/*
			 * NOTE: can't call pmap_clear_modify(m) from an
			 * interrupt thread, the pmap code may have to map
			 * non-kernel pmaps and currently asserts the case.
			 */
			/*pmap_clear_modify(m);*/
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * busy.
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)(intptr_t)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * also be clear since the caller protected the
			 * page VM_PROT_READ, but allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 */
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_SWAPINPROG);
			vm_page_io_finish(m);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
#endif
		}
	}
	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);

	/*
	 * Release the physical I/O buffer.
	 *
	 * NOTE: Due to synchronous operations in the write case b_cmd may
	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
	 *	 been cleared.
	 */
	if (bio->bio_caller_info1.index & SWBIO_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	crit_exit();
}
/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 * These routines manipulate the swap metadata stored in the
 * OBJT_SWAP object.  All swp_*() routines must be called at
 * splvm() because swap can be freed up by the low level vm_page
 * code which might be called from interrupts beyond what splbio() covers.
 *
 * Swap metadata is implemented with a global hash and not directly
 * linked into the object.  Instead the object simply contains
 * appropriate tracking counters.
 */
/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is an inline helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the object, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */
static __inline struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
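	/*
	 * Example: with SWAP_META_PAGES covering 16 pages per swblock
	 * (see the sizing comment in swap_pager_swap_init()), the low
	 * index bits are masked off above so all pages of one swblock
	 * hash to the same bucket; the object pointer is XORed in to
	 * spread different objects across the table.
	 */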
	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return(pswap);
}
/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 *
 *	This routine must be called at splvm(), except when used to convert
 *	an OBJT_DEFAULT object into an OBJT_SWAP object.
 */
static void
swp_pager_meta_build(
	vm_object_t object,
	vm_pindex_t index,
	daddr_t swapblk
) {
	struct swblock *swap;
	struct swblock **pswap;

	/*
	 * Convert default object to swap object if necessary
	 */
	if (object->type != OBJT_SWAP) {
		object->type = OBJT_SWAP;
		object->un_pager.swp.swp_bcount = 0;

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}

	/*
	 * Locate hash entry.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */
retry:
	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) == NULL) {
		int i;

		if (swapblk == SWAPBLK_NONE)
			return;

		swap = *pswap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);	/* wait call assumed */
			goto retry;
		}
		swap->swb_hnext = NULL;
		swap->swb_object = object;
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->un_pager.swp.swp_bcount;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
	}
	/*
	 * Delete prior contents of metadata
	 */
	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm().
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	if (object->type != OBJT_SWAP)
		return;

	while (count > 0) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);

		if ((swap = *pswap) != NULL) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
					SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);

			count -= n;
			index += n;
		}
	}
}
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm().
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	daddr_t index = 0;

	if (object->type != OBJT_SWAP)
		return;

	while (object->un_pager.swp.swp_bcount) {
		struct swblock **pswap;
		struct swblock *swap;

		pswap = swp_pager_hash(object, index);
		if ((swap = *pswap) != NULL) {
			int i;

			for (i = 0; i < SWAP_META_PAGES; ++i) {
				daddr_t v = swap->swb_pages[i];
				if (v != SWAPBLK_NONE) {
					--swap->swb_count;
					swp_pager_freeswapspace(v, 1);
				}
			}
			if (swap->swb_count != 0)
				panic("swap_pager_meta_free_all: swb_count != 0");
			*pswap = swap->swb_hnext;
			zfree(swap_zone, swap);
			--object->un_pager.swp.swp_bcount;
		}
		index += SWAP_META_PAGES;
		if (index > 0x20000000)
			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
	}
}
/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */
static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock **pswap;
	struct swblock *swap;
	daddr_t r1 = SWAPBLK_NONE;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */
	if (object->type != OBJT_SWAP)
		return(SWAPBLK_NONE);

	pswap = swp_pager_hash(object, index);

	if ((swap = *pswap) != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					*pswap = swap->swb_hnext;
					zfree(swap_zone, swap);
					--object->un_pager.swp.swp_bcount;
				}
			}
		}
	}
	return(r1);
}